`_.
-
- Args:
- encoder (TransformerEncoder): the encoder
- decoder (TransformerDecoder): the decoder
-
- The Transformer model provides the following named architectures and
- command-line arguments:
-
- .. argparse::
- :ref: fairseq.models.transformer_parser
- :prog:
- """
-
- def __init__(self, args, encoder, decoder):
- super().__init__(encoder, decoder)
- self.args = args
- self.supports_align_args = True
-
- @staticmethod
- def add_args(parser):
- """Add model-specific arguments to the parser."""
- # fmt: off
- parser.add_argument('--activation-fn',
- choices=utils.get_available_activation_fns(),
- help='activation function to use')
- parser.add_argument('--dropout', type=float, metavar='D',
- help='dropout probability')
- parser.add_argument('--attention-dropout', type=float, metavar='D',
- help='dropout probability for attention weights')
- parser.add_argument('--activation-dropout', '--relu-dropout', type=float, metavar='D',
- help='dropout probability after activation in FFN.')
- parser.add_argument('--encoder-embed-path', type=str, metavar='STR',
- help='path to pre-trained encoder embedding')
- parser.add_argument('--encoder-embed-dim', type=int, metavar='N',
- help='encoder embedding dimension')
- parser.add_argument('--encoder-ffn-embed-dim', type=int, metavar='N',
- help='encoder embedding dimension for FFN')
- parser.add_argument('--encoder-layers', type=int, metavar='N',
- help='num encoder layers')
- parser.add_argument('--encoder-attention-heads', type=int, metavar='N',
- help='num encoder attention heads')
- parser.add_argument('--encoder-normalize-before', action='store_true',
- help='apply layernorm before each encoder block')
- parser.add_argument('--encoder-learned-pos', action='store_true',
- help='use learned positional embeddings in the encoder')
- parser.add_argument('--decoder-embed-path', type=str, metavar='STR',
- help='path to pre-trained decoder embedding')
- parser.add_argument('--decoder-embed-dim', type=int, metavar='N',
- help='decoder embedding dimension')
- parser.add_argument('--decoder-ffn-embed-dim', type=int, metavar='N',
- help='decoder embedding dimension for FFN')
- parser.add_argument('--decoder-layers', type=int, metavar='N',
- help='num decoder layers')
- parser.add_argument('--decoder-attention-heads', type=int, metavar='N',
- help='num decoder attention heads')
- parser.add_argument('--decoder-learned-pos', action='store_true',
- help='use learned positional embeddings in the decoder')
- parser.add_argument('--decoder-normalize-before', action='store_true',
- help='apply layernorm before each decoder block')
- parser.add_argument('--decoder-output-dim', type=int, metavar='N',
- help='decoder output dimension (extra linear layer '
-                                 'if different from decoder embed dim)')
- parser.add_argument('--share-decoder-input-output-embed', action='store_true',
- help='share decoder input and output embeddings')
- parser.add_argument('--share-all-embeddings', action='store_true',
- help='share encoder, decoder and output embeddings'
- ' (requires shared dictionary and embed dim)')
- parser.add_argument('--no-token-positional-embeddings', default=False, action='store_true',
- help='if set, disables positional embeddings (outside self attention)')
- parser.add_argument('--adaptive-softmax-cutoff', metavar='EXPR',
- help='comma separated list of adaptive softmax cutoff points. '
- 'Must be used with adaptive_loss criterion'),
- parser.add_argument('--adaptive-softmax-dropout', type=float, metavar='D',
- help='sets adaptive softmax dropout for the tail projections')
- parser.add_argument('--layernorm-embedding', action='store_true',
- help='add layernorm to embedding')
- parser.add_argument('--no-scale-embedding', action='store_true',
-                        help='if set, do not scale embeddings')
- parser.add_argument('--checkpoint-activations', action='store_true',
- help='checkpoint activations at each layer, which saves GPU '
- 'memory usage at the cost of some additional compute')
- parser.add_argument('--offload-activations', action='store_true',
-                        help='checkpoint activations at each layer, then offload them to CPU. Sets --checkpoint-activations.')
- # args for "Cross+Self-Attention for Transformer Models" (Peitz et al., 2019)
- parser.add_argument('--no-cross-attention', default=False, action='store_true',
- help='do not perform cross-attention')
- parser.add_argument('--cross-self-attention', default=False, action='store_true',
- help='perform cross+self-attention')
- # args for "Reducing Transformer Depth on Demand with Structured Dropout" (Fan et al., 2019)
- parser.add_argument('--encoder-layerdrop', type=float, metavar='D', default=0,
- help='LayerDrop probability for encoder')
- parser.add_argument('--decoder-layerdrop', type=float, metavar='D', default=0,
- help='LayerDrop probability for decoder')
- parser.add_argument('--encoder-layers-to-keep', default=None,
- help='which layers to *keep* when pruning as a comma-separated list')
- parser.add_argument('--decoder-layers-to-keep', default=None,
- help='which layers to *keep* when pruning as a comma-separated list')
- # args for Training with Quantization Noise for Extreme Model Compression ({Fan*, Stock*} et al., 2020)
- parser.add_argument('--quant-noise-pq', type=float, metavar='D', default=0,
- help='iterative PQ quantization noise at training time')
- parser.add_argument('--quant-noise-pq-block-size', type=int, metavar='D', default=8,
- help='block size of quantization noise at training time')
- parser.add_argument('--quant-noise-scalar', type=float, metavar='D', default=0,
- help='scalar quantization noise and scalar quantization at training time')
- # args for Fully Sharded Data Parallel (FSDP) training
- parser.add_argument(
- '--min-params-to-wrap', type=int, metavar='D', default=DEFAULT_MIN_PARAMS_TO_WRAP,
- help=(
- 'minimum number of params for a layer to be wrapped with FSDP() when '
- 'training with --ddp-backend=fully_sharded. Smaller values will '
- 'improve memory efficiency, but may make torch.distributed '
- 'communication less efficient due to smaller input sizes. This option '
- 'is set to 0 (i.e., always wrap) when --checkpoint-activations or '
- '--offload-activations are passed.'
- )
- )
-
- parser.add_argument('--resnet-drop-path-rate', type=float,
- help='resnet drop path rate')
- parser.add_argument('--encoder-drop-path-rate', type=float,
- help='encoder drop path rate')
- parser.add_argument('--decoder-drop-path-rate', type=float,
-                        help='decoder drop path rate')
-
- parser.add_argument('--token-bucket-size', type=int,
- help='token bucket size')
- parser.add_argument('--image-bucket-size', type=int,
- help='image bucket size')
-
- parser.add_argument('--attn-scale-factor', type=float,
- help='attention scale factor')
- parser.add_argument('--freeze-resnet', action='store_true',
- help='freeze resnet')
- parser.add_argument('--freeze-encoder-embedding', action='store_true',
- help='freeze encoder token embedding')
- parser.add_argument('--freeze-decoder-embedding', action='store_true',
- help='freeze decoder token embedding')
- parser.add_argument('--add-type-embedding', action='store_true',
- help='add source/region/patch type embedding')
-
- parser.add_argument('--resnet-type', choices=['resnet50', 'resnet101', 'resnet152'],
- help='resnet type')
- parser.add_argument('--resnet-model-path', type=str, metavar='STR',
- help='path to load resnet')
- parser.add_argument('--code-image-size', type=int,
- help='code image size')
- parser.add_argument('--patch-layernorm-embedding', action='store_true',
- help='add layernorm to patch embedding')
- parser.add_argument('--code-layernorm-embedding', action='store_true',
- help='add layernorm to code embedding')
-    parser.add_argument('--entangle-position-embedding', action='store_true',
-                        help='add position embeddings directly to the token embeddings')
-    parser.add_argument('--disable-entangle', action='store_true',
-                        help='disable position embedding entanglement')
- parser.add_argument('--sync-bn', action='store_true',
- help='sync batchnorm')
-
- parser.add_argument('--scale-attn', action='store_true',
- help='scale attn')
- parser.add_argument('--scale-fc', action='store_true',
- help='scale fc')
- parser.add_argument('--scale-heads', action='store_true',
- help='scale heads')
- parser.add_argument('--scale-resids', action='store_true',
- help='scale resids')
- # fmt: on
-
- @classmethod
- def build_model(cls, args, task):
- """Build a new model instance."""
-
- # make sure all arguments are present in older models
- base_architecture(args)
-
- if args.encoder_layers_to_keep:
- args.encoder_layers = len(args.encoder_layers_to_keep.split(","))
- if args.decoder_layers_to_keep:
- args.decoder_layers = len(args.decoder_layers_to_keep.split(","))
-
- if getattr(args, "max_source_positions", None) is None:
- args.max_source_positions = DEFAULT_MAX_SOURCE_POSITIONS
- if getattr(args, "max_target_positions", None) is None:
- args.max_target_positions = DEFAULT_MAX_TARGET_POSITIONS
-
- src_dict, tgt_dict = task.source_dictionary, task.target_dictionary
-
- if args.share_all_embeddings:
- if src_dict != tgt_dict:
- raise ValueError("--share-all-embeddings requires a joined dictionary")
- if args.encoder_embed_dim != args.decoder_embed_dim:
- raise ValueError(
- "--share-all-embeddings requires --encoder-embed-dim to match --decoder-embed-dim"
- )
- if args.decoder_embed_path and (
- args.decoder_embed_path != args.encoder_embed_path
- ):
- raise ValueError(
- "--share-all-embeddings not compatible with --decoder-embed-path"
- )
- encoder_embed_tokens = cls.build_embedding(
- args, src_dict, args.encoder_embed_dim, args.encoder_embed_path
- )
- decoder_embed_tokens = encoder_embed_tokens
- args.share_decoder_input_output_embed = True
- else:
- encoder_embed_tokens = cls.build_embedding(
- args, src_dict, args.encoder_embed_dim, args.encoder_embed_path
- )
- decoder_embed_tokens = cls.build_embedding(
- args, tgt_dict, args.decoder_embed_dim, args.decoder_embed_path
- )
- if getattr(args, "freeze_encoder_embedding", False):
- encoder_embed_tokens.weight.requires_grad = False
- if getattr(args, "freeze_decoder_embedding", False):
- decoder_embed_tokens.weight.requires_grad = False
- if getattr(args, "offload_activations", False):
- args.checkpoint_activations = True # offloading implies checkpointing
- encoder = cls.build_encoder(args, src_dict, encoder_embed_tokens)
- decoder = cls.build_decoder(args, tgt_dict, decoder_embed_tokens)
- if not args.share_all_embeddings:
- min_params_to_wrap = getattr(
- args, "min_params_to_wrap", DEFAULT_MIN_PARAMS_TO_WRAP
- )
- # fsdp_wrap is a no-op when --ddp-backend != fully_sharded
- encoder = fsdp_wrap(encoder, min_num_params=min_params_to_wrap)
- decoder = fsdp_wrap(decoder, min_num_params=min_params_to_wrap)
- return cls(args, encoder, decoder)
-
- @classmethod
- def build_embedding(cls, args, dictionary, embed_dim, path=None):
- num_embeddings = len(dictionary)
- padding_idx = dictionary.pad()
-
- emb = Embedding(num_embeddings, embed_dim, padding_idx)
- # if provided, load from preloaded dictionaries
- if path:
- embed_dict = utils.parse_embedding(path)
- utils.load_embedding(embed_dict, dictionary, emb)
- return emb
-
- @classmethod
- def build_encoder(cls, args, src_dict, embed_tokens):
- return TransformerEncoder(args, src_dict, embed_tokens)
-
- @classmethod
- def build_decoder(cls, args, tgt_dict, embed_tokens):
- return TransformerDecoder(
- args,
- tgt_dict,
- embed_tokens,
- no_encoder_attn=getattr(args, "no_cross_attention", False),
- )
-
-    # TorchScript doesn't support optional arguments with variable length (**kwargs),
-    # so the current workaround is to declare the union of all arguments used by child classes.
- def forward(
- self,
- src_tokens,
- src_lengths,
- prev_output_tokens,
- return_all_hiddens: bool = True,
- features_only: bool = False,
- alignment_layer: Optional[int] = None,
- alignment_heads: Optional[int] = None,
- ):
- """
- Run the forward pass for an encoder-decoder model.
-
- Copied from the base class, but without ``**kwargs``,
- which are not supported by TorchScript.
- """
- encoder_out = self.encoder(
- src_tokens, src_lengths=src_lengths, return_all_hiddens=return_all_hiddens
- )
- decoder_out = self.decoder(
- prev_output_tokens,
- encoder_out=encoder_out,
- features_only=features_only,
- alignment_layer=alignment_layer,
- alignment_heads=alignment_heads,
- src_lengths=src_lengths,
- return_all_hiddens=return_all_hiddens,
- )
- return decoder_out
-
-    # get_normalized_probs is defined on the base FairseqModel, which is not scriptable,
-    # so we override it here and delegate to the scriptable helper from the base class.
- @torch.jit.export
- def get_normalized_probs(
- self,
- net_output: Tuple[Tensor, Optional[Dict[str, List[Optional[Tensor]]]]],
- log_probs: bool,
- sample: Optional[Dict[str, Tensor]] = None,
- ):
- """Get normalized probabilities (or log probs) from a net's output."""
- return self.get_normalized_probs_scriptable(net_output, log_probs, sample)
-
-
-class TransformerEncoder(FairseqEncoder):
- """
- Transformer encoder consisting of *args.encoder_layers* layers. Each layer
- is a :class:`TransformerEncoderLayer`.
-
- Args:
- args (argparse.Namespace): parsed command-line arguments
- dictionary (~fairseq.data.Dictionary): encoding dictionary
- embed_tokens (torch.nn.Embedding): input embedding
- """
-
- def __init__(self, args, dictionary, embed_tokens):
- self.args = args
- super().__init__(dictionary)
- self.register_buffer("version", torch.Tensor([3]))
-
- self.dropout_module = FairseqDropout(
- args.dropout, module_name=self.__class__.__name__
- )
- self.encoder_layerdrop = args.encoder_layerdrop
-
- embed_dim = embed_tokens.embedding_dim
- self.padding_idx = embed_tokens.padding_idx
- self.max_source_positions = args.max_source_positions
- self.num_attention_heads = args.encoder_attention_heads
-
- self.embed_tokens = embed_tokens
-
- self.embed_scale = 1.0 if args.no_scale_embedding else math.sqrt(embed_dim)
-
- if getattr(args, "layernorm_embedding", False):
- self.layernorm_embedding = LayerNorm(embed_dim)
- else:
- self.layernorm_embedding = None
-
- if getattr(args, "add_type_embedding", False):
- self.type_embedding = Embedding(2, embed_dim, padding_idx=None)
- else:
- self.type_embedding = None
-
- if getattr(args, "sync_bn", False):
- norm_layer = BatchNorm2d
- else:
- norm_layer = None
-
-        # block counts of the first three ResNet stages: [3, 4, 6] (ResNet-50),
-        # [3, 4, 23] (ResNet-101), [3, 8, 36] (ResNet-152)
-        if args.resnet_type == 'resnet50':
-            self.embed_images = ResNet([3, 4, 6], norm_layer=norm_layer, drop_path_rate=args.resnet_drop_path_rate)
-        elif args.resnet_type == 'resnet101':
-            self.embed_images = ResNet([3, 4, 23], norm_layer=norm_layer, drop_path_rate=args.resnet_drop_path_rate)
-        elif args.resnet_type == 'resnet152':
-            self.embed_images = ResNet([3, 8, 36], norm_layer=norm_layer, drop_path_rate=args.resnet_drop_path_rate)
-        else:
-            raise NotImplementedError
- self.image_proj = Linear(1024, embed_dim)
- if getattr(args, "resnet_model_path", None):
- print("load resnet {}".format(args.resnet_model_path))
- resnet_state_dict = torch.load(self.args.resnet_model_path)
- self.embed_images.load_state_dict(resnet_state_dict)
- if getattr(args, "patch_layernorm_embedding", False):
- self.patch_layernorm_embedding = LayerNorm(embed_dim)
- else:
- self.patch_layernorm_embedding = None
-
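-        # Absolute positions are mostly handled outside the token embeddings: the learned
-        # position embeddings below are layer-normed, projected to per-head queries/keys
-        # (pos_q_linear / pos_k_linear) and turned into an additive attention bias, rather
-        # than being added to the inputs (unless --entangle-position-embedding is set).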
- self.embed_positions = Embedding(args.max_source_positions + 2, embed_dim)
- self.embed_image_positions = Embedding(args.image_bucket_size ** 2 + 1, embed_dim)
- self.pos_ln = LayerNorm(embed_dim)
- self.image_pos_ln = LayerNorm(embed_dim)
- self.pos_scaling = float(embed_dim / args.encoder_attention_heads * args.attn_scale_factor) ** -0.5
- self.pos_q_linear = nn.Linear(embed_dim, embed_dim)
- self.pos_k_linear = nn.Linear(embed_dim, embed_dim)
-
- if not args.adaptive_input and args.quant_noise_pq > 0:
- self.quant_noise = apply_quant_noise_(
- nn.Linear(embed_dim, embed_dim, bias=False),
- args.quant_noise_pq,
- args.quant_noise_pq_block_size,
- )
- else:
- self.quant_noise = None
-
- if self.encoder_layerdrop > 0.0:
- self.layers = LayerDropModuleList(p=self.encoder_layerdrop)
- else:
- self.layers = nn.ModuleList([])
-
- dpr = [x.item() for x in torch.linspace(0, args.encoder_drop_path_rate, args.encoder_layers)]
- self.layers.extend(
- [self.build_encoder_layer(args, drop_path_rate=dpr[i]) for i in range(args.encoder_layers)]
- )
- self.num_layers = len(self.layers)
-
- if args.encoder_normalize_before:
- self.layer_norm = LayerNorm(embed_dim)
- else:
- self.layer_norm = None
-
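-        # Relative position bias: one learned table per encoder layer maps bucketed
-        # relative distances to a per-head bias, separately for text tokens and image patches.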
- token_bucket_size = args.token_bucket_size
- token_num_rel_dis = 2 * token_bucket_size - 1
- token_rp_bucket = make_token_bucket_position(token_bucket_size)
- self.token_rel_pos_table_list = nn.ModuleList(
- [Embedding(token_num_rel_dis, self.num_attention_heads, zero_init=True) for _ in range(args.encoder_layers)]
- )
-
- image_bucket_size = args.image_bucket_size
- image_num_rel_dis = (2 * image_bucket_size - 1) * (2 * image_bucket_size - 1) + 3
- image_rp_bucket = make_image_bucket_position(image_bucket_size, image_num_rel_dis)
- self.image_rel_pos_table_list = nn.ModuleList(
- [Embedding(image_num_rel_dis, self.num_attention_heads, zero_init=True) for _ in range(args.encoder_layers)]
- )
-
- self.register_buffer("token_rp_bucket", token_rp_bucket)
- self.register_buffer("image_rp_bucket", image_rp_bucket)
- self.entangle_position_embedding = args.entangle_position_embedding
-
- def train(self, mode=True):
- super(TransformerEncoder, self).train(mode)
- if getattr(self.args, "freeze_resnet", False):
- for m in self.embed_images.modules():
- if isinstance(m, nn.BatchNorm2d):
- m.eval()
- m.weight.requires_grad = False
- m.bias.requires_grad = False
-
- def build_encoder_layer(self, args, drop_path_rate=0.0):
- layer = TransformerEncoderLayer(args, drop_path_rate=drop_path_rate)
- checkpoint = getattr(args, "checkpoint_activations", False)
- if checkpoint:
- offload_to_cpu = getattr(args, "offload_activations", False)
- layer = checkpoint_wrapper(layer, offload_to_cpu=offload_to_cpu)
- # if we are checkpointing, enforce that FSDP always wraps the
- # checkpointed layer, regardless of layer size
- min_params_to_wrap = (
- getattr(args, "min_params_to_wrap", DEFAULT_MIN_PARAMS_TO_WRAP)
- if not checkpoint else 0
- )
- layer = fsdp_wrap(layer, min_num_params=min_params_to_wrap)
- return layer
-
- def get_rel_pos_bias(self, x, idx):
- seq_len = x.size(1)
- rp_bucket = self.token_rp_bucket[:seq_len, :seq_len]
- values = F.embedding(rp_bucket, self.token_rel_pos_table_list[idx].weight)
- values = values.unsqueeze(0).expand(x.size(0), -1, -1, -1)
- values = values.permute([0, 3, 1, 2])
- return values.contiguous()
-
- def get_image_rel_pos_bias(self, image_position_ids, idx):
- bsz, seq_len = image_position_ids.shape
- rp_bucket_size = self.image_rp_bucket.size(1)
-
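-        # For every sample, select the rows and columns of the precomputed bucket matrix
-        # that correspond to its image position ids, giving a (bsz, seq_len, seq_len)
-        # bucket matrix for the embedding lookup below.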
- rp_bucket = self.image_rp_bucket.unsqueeze(0).expand(
- bsz, rp_bucket_size, rp_bucket_size
- ).gather(1, image_position_ids[:, :, None].expand(bsz, seq_len, rp_bucket_size)
- ).gather(2, image_position_ids[:, None, :].expand(bsz, seq_len, seq_len))
- values = F.embedding(rp_bucket, self.image_rel_pos_table_list[idx].weight)
- values = values.permute(0, 3, 1, 2)
- return values
-
- def get_patch_images_info(self, patch_images, sample_patch_num, device):
- image_embed = self.embed_images(patch_images)
- h, w = image_embed.shape[-2:]
- image_num_patches = h * w
- image_padding_mask = patch_images.new_zeros((patch_images.size(0), image_num_patches)).bool()
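-        # map each patch's (row, col) grid coordinate to a 1-based position bucket id
-        # (id = row * image_bucket_size + col + 1)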
- image_position_idx = torch.arange(w).unsqueeze(0).expand(h, w) + \
- torch.arange(h).unsqueeze(1) * self.args.image_bucket_size + 1
- image_position_idx = image_position_idx.view(-1).to(device)
- image_position_ids = image_position_idx[None, :].expand(patch_images.size(0), image_num_patches)
-
- image_embed = image_embed.flatten(2).transpose(1, 2)
- if sample_patch_num is not None:
- patch_orders = [
- random.sample(range(image_num_patches), k=sample_patch_num)
- for _ in range(patch_images.size(0))
- ]
- patch_orders = torch.LongTensor(patch_orders).to(device)
- image_embed = image_embed.gather(
- 1, patch_orders.unsqueeze(2).expand(-1, -1, image_embed.size(2))
- )
- image_num_patches = sample_patch_num
- image_padding_mask = image_padding_mask.gather(1, patch_orders)
- image_position_ids = image_position_ids.gather(1, patch_orders)
- image_pos_embed = self.embed_image_positions(image_position_ids)
-
- return image_embed, image_num_patches, image_padding_mask, image_position_ids, image_pos_embed
-
- def forward_embedding(
- self,
- src_tokens,
- image_embed: Optional[torch.Tensor] = None,
- image_embed_2: Optional[torch.Tensor] = None,
- token_embedding: Optional[torch.Tensor] = None,
- pos_embed: Optional[torch.Tensor] = None,
- image_pos_embed: Optional[torch.Tensor] = None,
- image_pos_embed_2: Optional[torch.Tensor] = None
- ):
- # embed tokens and positions
- if token_embedding is None:
- token_embedding = self.embed_tokens(src_tokens)
- x = embed = self.embed_scale * token_embedding
- if self.entangle_position_embedding and pos_embed is not None:
- x += pos_embed
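-        # type ids mark the modality of each position: 0 = text tokens, 1 = image patches
-        # (a second image, below, uses type id 2)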
- if self.type_embedding is not None:
- x += self.type_embedding(src_tokens.new_zeros(x.size()[:2]))
- if self.layernorm_embedding is not None:
- x = self.layernorm_embedding(x)
- x = self.dropout_module(x)
- if self.quant_noise is not None:
- x = self.quant_noise(x)
-
- # embed raw images
- if image_embed is not None:
- image_embed = self.image_proj(image_embed)
- image_x = image_embed = self.embed_scale * image_embed
- if self.entangle_position_embedding and image_pos_embed is not None:
- image_x += image_pos_embed
- if self.type_embedding is not None:
- image_x += self.type_embedding(src_tokens.new_ones(image_x.size()[:2]))
- if self.patch_layernorm_embedding is not None:
- image_x = self.patch_layernorm_embedding(image_x)
- image_x = self.dropout_module(image_x)
- if self.quant_noise is not None:
- image_x = self.quant_noise(image_x)
- x = torch.cat([image_x, x], dim=1)
- embed = torch.cat([image_embed, embed], dim=1)
-
- if image_embed_2 is not None:
- assert self.type_embedding is not None
- image_embed_2 = self.image_proj(image_embed_2)
- image_x_2 = image_embed_2 = self.embed_scale * image_embed_2
- if self.entangle_position_embedding and image_pos_embed_2 is not None:
- image_x_2 += image_pos_embed_2
- if self.type_embedding is not None:
- image_x_2 += self.type_embedding(src_tokens.new_full(image_x_2.size()[:2], fill_value=2))
- if self.patch_layernorm_embedding is not None:
- image_x_2 = self.patch_layernorm_embedding(image_x_2)
- image_x_2 = self.dropout_module(image_x_2)
- if self.quant_noise is not None:
- image_x_2 = self.quant_noise(image_x_2)
- x = torch.cat([image_x_2, x], dim=1)
- embed = torch.cat([image_embed_2, embed], dim=1)
-
- return x, embed
-
- def forward(
- self,
- src_tokens,
- src_lengths,
- patch_images: Optional[torch.Tensor] = None,
- patch_images_2: Optional[torch.Tensor] = None,
- patch_masks: Optional[torch.Tensor] = None,
- code_masks: Optional[torch.Tensor] = None,
- return_all_hiddens: bool = False,
- token_embeddings: Optional[torch.Tensor] = None,
- sample_patch_num: Optional[int] = None
- ):
- """
- Args:
- src_tokens (LongTensor): tokens in the source language of shape
- `(batch, src_len)`
- src_lengths (torch.LongTensor): lengths of each source sentence of
- shape `(batch)`
- return_all_hiddens (bool, optional): also return all of the
- intermediate hidden states (default: False).
-            token_embeddings (torch.Tensor, optional): precomputed embeddings;
-                the default `None` recomputes the embeddings
-
- Returns:
- dict:
- - **encoder_out** (Tensor): the last encoder layer's output of
- shape `(src_len, batch, embed_dim)`
- - **encoder_padding_mask** (ByteTensor): the positions of
- padding elements of shape `(batch, src_len)`
- - **encoder_embedding** (Tensor): the (scaled) embedding lookup
- of shape `(batch, src_len, embed_dim)`
- - **encoder_states** (List[Tensor]): all intermediate
- hidden states of shape `(src_len, batch, embed_dim)`.
- Only populated if *return_all_hiddens* is True.
- """
- return self.forward_scriptable(src_tokens,
- src_lengths,
- patch_images,
- patch_images_2,
- patch_masks,
- return_all_hiddens,
- token_embeddings,
- sample_patch_num)
-
-    # TorchScript doesn't support calls through super(), so a scriptable subclass cannot
-    # access the base class implementation. The current workaround is to add a helper
-    # function with a different name and call that helper from the scriptable subclass.
- def forward_scriptable(
- self,
- src_tokens,
- src_lengths,
- patch_images: Optional[torch.Tensor] = None,
- patch_images_2: Optional[torch.Tensor] = None,
- patch_masks: Optional[torch.Tensor] = None,
- return_all_hiddens: bool = False,
- token_embeddings: Optional[torch.Tensor] = None,
- sample_patch_num: Optional[int] = None
- ):
- """
- Args:
- src_tokens (LongTensor): tokens in the source language of shape
- `(batch, src_len)`
- src_lengths (torch.LongTensor): lengths of each source sentence of
- shape `(batch)`
- return_all_hiddens (bool, optional): also return all of the
- intermediate hidden states (default: False).
-            token_embeddings (torch.Tensor, optional): precomputed embeddings;
-                the default `None` recomputes the embeddings
-
- Returns:
- dict:
- - **encoder_out** (Tensor): the last encoder layer's output of
- shape `(src_len, batch, embed_dim)`
- - **encoder_padding_mask** (ByteTensor): the positions of
- padding elements of shape `(batch, src_len)`
- - **encoder_embedding** (Tensor): the (scaled) embedding lookup
- of shape `(batch, src_len, embed_dim)`
- - **encoder_states** (List[Tensor]): all intermediate
- hidden states of shape `(src_len, batch, embed_dim)`.
- Only populated if *return_all_hiddens* is True.
- """
- image_embed = None
- image_embed_2 = None
- image_pos_embed = None
- image_pos_embed_2 = None
- if patch_images is not None:
- image_embed, image_num_patches, image_padding_mask, image_position_ids, image_pos_embed = \
- self.get_patch_images_info(patch_images, sample_patch_num, src_tokens.device)
- image_padding_mask[~patch_masks] = True
- if patch_images_2 is not None:
- image_embed_2, image_num_patches_2, image_padding_mask_2, image_position_ids_2, image_pos_embed_2 = \
- self.get_patch_images_info(patch_images_2, sample_patch_num, src_tokens.device)
- image_padding_mask_2[~patch_masks] = True
-
- encoder_padding_mask = src_tokens.eq(self.padding_idx)
- if patch_images is not None:
- encoder_padding_mask = torch.cat([image_padding_mask, encoder_padding_mask], dim=1)
- if patch_images_2 is not None:
- encoder_padding_mask = torch.cat([image_padding_mask_2, encoder_padding_mask], dim=1)
- has_pads = (src_tokens.device.type == "xla" or encoder_padding_mask.any())
-
- pos_embed = self.embed_positions(utils.new_arange(src_tokens))
- x, encoder_embedding = self.forward_embedding(
- src_tokens, image_embed, image_embed_2, token_embeddings,
- pos_embed, image_pos_embed, image_pos_embed_2
- )
-
- # account for padding while computing the representation
- if has_pads:
- x = x * (1 - encoder_padding_mask.unsqueeze(-1).type_as(x))
-
- # B x T x C -> T x B x C
- x = x.transpose(0, 1)
-
- pos_embed = self.pos_ln(pos_embed)
- if patch_images is not None:
- image_pos_embed = self.image_pos_ln(image_pos_embed)
- pos_embed = torch.cat([image_pos_embed, pos_embed], dim=1)
- if patch_images_2 is not None:
- image_pos_embed_2 = self.image_pos_ln(image_pos_embed_2)
- pos_embed = torch.cat([image_pos_embed_2, pos_embed], dim=1)
-
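-        # Project the layer-normed absolute position embeddings to per-head queries and
-        # keys; their dot product forms an absolute-position attention bias that is shared
-        # across all encoder layers.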
- pos_q = self.pos_q_linear(pos_embed).view(
- x.size(1), x.size(0), self.num_attention_heads, -1
- ).transpose(1, 2) * self.pos_scaling
- pos_k = self.pos_k_linear(pos_embed).view(
- x.size(1), x.size(0), self.num_attention_heads, -1
- ).transpose(1, 2)
- abs_pos_bias = torch.matmul(pos_q, pos_k.transpose(2, 3))
-
- encoder_states = []
-
- if return_all_hiddens:
- encoder_states.append(x)
-
- # encoder layers
- for idx, layer in enumerate(self.layers):
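-            # Per-layer bias: start from the shared absolute-position bias, then add the
-            # token relative bias on the text block and the image relative bias on the
-            # image block(s) of the attention matrix.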
- self_attn_bias = abs_pos_bias.clone()
- self_attn_bias[:, :, -src_tokens.size(1):, -src_tokens.size(1):] += self.get_rel_pos_bias(src_tokens, idx)
- if patch_images_2 is not None:
- self_attn_bias[:, :, :image_num_patches_2, :image_num_patches_2] += \
- self.get_image_rel_pos_bias(image_position_ids_2, idx)
- self_attn_bias[:, :, image_num_patches_2:image_num_patches_2+image_num_patches, image_num_patches_2:image_num_patches_2+image_num_patches] += \
- self.get_image_rel_pos_bias(image_position_ids, idx)
- elif patch_images is not None:
- self_attn_bias[:, :, :x.size(0) - src_tokens.size(1), :x.size(0) - src_tokens.size(1)] += \
- self.get_image_rel_pos_bias(image_position_ids, idx)
- self_attn_bias = self_attn_bias.reshape(-1, x.size(0), x.size(0))
-
- x = layer(
- x, encoder_padding_mask=encoder_padding_mask if has_pads else None, self_attn_bias=self_attn_bias
- )
- if return_all_hiddens:
- assert encoder_states is not None
- encoder_states.append(x)
-
- if self.layer_norm is not None:
- x = self.layer_norm(x)
-
-        # The PyTorch Mobile lite interpreter does not support returning NamedTuple from
-        # `forward`, so we use a dictionary instead.
-        # TorchScript does not support mixed-type values, so every value is a list.
-        # An empty list is equivalent to None.
- return {
- "encoder_out": [x], # T x B x C
- "encoder_padding_mask": [encoder_padding_mask], # B x T
- "encoder_embedding": [], # B x T x C
- "encoder_states": encoder_states, # List[T x B x C]
- "src_tokens": [],
- "src_lengths": [],
- "position_embeddings": [pos_embed], # B x T x C
- }
-
- @torch.jit.export
- def reorder_encoder_out(self, encoder_out: Dict[str, List[Tensor]], new_order):
- """
- Reorder encoder output according to *new_order*.
-
- Args:
- encoder_out: output from the ``forward()`` method
- new_order (LongTensor): desired order
-
- Returns:
- *encoder_out* rearranged according to *new_order*
- """
- if len(encoder_out["encoder_out"]) == 0:
- new_encoder_out = []
- else:
- new_encoder_out = [encoder_out["encoder_out"][0].index_select(1, new_order)]
- if len(encoder_out["encoder_padding_mask"]) == 0:
- new_encoder_padding_mask = []
- else:
- new_encoder_padding_mask = [
- encoder_out["encoder_padding_mask"][0].index_select(0, new_order)
- ]
- if len(encoder_out["encoder_embedding"]) == 0:
- new_encoder_embedding = []
- else:
- new_encoder_embedding = [
- encoder_out["encoder_embedding"][0].index_select(0, new_order)
- ]
-
- if len(encoder_out["src_tokens"]) == 0:
- new_src_tokens = []
- else:
- new_src_tokens = [(encoder_out["src_tokens"][0]).index_select(0, new_order)]
-
- if len(encoder_out["src_lengths"]) == 0:
- new_src_lengths = []
- else:
- new_src_lengths = [(encoder_out["src_lengths"][0]).index_select(0, new_order)]
-
- if len(encoder_out["position_embeddings"]) == 0:
- new_position_embeddings = []
- else:
- new_position_embeddings = [(encoder_out["position_embeddings"][0]).index_select(0, new_order)]
-
- encoder_states = encoder_out["encoder_states"]
- if len(encoder_states) > 0:
- for idx, state in enumerate(encoder_states):
- encoder_states[idx] = state.index_select(1, new_order)
-
- return {
- "encoder_out": new_encoder_out, # T x B x C
- "encoder_padding_mask": new_encoder_padding_mask, # B x T
- "encoder_embedding": new_encoder_embedding, # B x T x C
- "encoder_states": encoder_states, # List[T x B x C]
- "src_tokens": new_src_tokens, # B x T
- "src_lengths": new_src_lengths, # B x 1
- "position_embeddings": new_position_embeddings, # B x T x C
- }
-
-    def max_positions(self):
-        """Maximum input length supported by the encoder."""
-        return self.max_source_positions
-
- def upgrade_state_dict_named(self, state_dict, name):
- """Upgrade a (possibly old) state dict for new versions of fairseq."""
- if isinstance(self.embed_positions, SinusoidalPositionalEmbedding):
- weights_key = "{}.embed_positions.weights".format(name)
- if weights_key in state_dict:
- print("deleting {0}".format(weights_key))
- del state_dict[weights_key]
- state_dict[
- "{}.embed_positions._float_tensor".format(name)
- ] = torch.FloatTensor(1)
- for i in range(self.num_layers):
- # update layer norms
- self.layers[i].upgrade_state_dict_named(
- state_dict, "{}.layers.{}".format(name, i)
- )
-
- # version_key = "{}.version".format(name)
- # if utils.item(state_dict.get(version_key, torch.Tensor([1]))[0]) < 2:
- # # earlier checkpoints did not normalize after the stack of layers
- # self.layer_norm = None
- # self.normalize = False
- # state_dict[version_key] = torch.Tensor([1])
-
- prefix = name + "." if name != "" else ""
- for param_name, param_tensor in self.state_dict().items():
- if (prefix + param_name) not in state_dict and param_name in self.state_dict():
- state_dict[prefix + param_name] = self.state_dict()[param_name]
-
- if len(state_dict["encoder.embed_image_positions.weight"]) < len(self.state_dict()["embed_image_positions.weight"]):
- num_posids_to_add = len(self.state_dict()["embed_image_positions.weight"]) - len(state_dict["encoder.embed_image_positions.weight"])
- embed_dim = state_dict["encoder.embed_image_positions.weight"].size(1)
- new_pos_embed_to_add = torch.zeros(num_posids_to_add, embed_dim)
- nn.init.normal_(new_pos_embed_to_add, mean=0, std=embed_dim ** -0.5)
- new_pos_embed_to_add = new_pos_embed_to_add.to(
- dtype=state_dict["encoder.embed_image_positions.weight"].dtype,
- )
- state_dict["encoder.embed_image_positions.weight"] = torch.cat(
- [state_dict["encoder.embed_image_positions.weight"], new_pos_embed_to_add]
- )
- return state_dict
-
-
-class TransformerDecoder(FairseqIncrementalDecoder):
- """
- Transformer decoder consisting of *args.decoder_layers* layers. Each layer
- is a :class:`TransformerDecoderLayer`.
-
- Args:
- args (argparse.Namespace): parsed command-line arguments
- dictionary (~fairseq.data.Dictionary): decoding dictionary
- embed_tokens (torch.nn.Embedding): output embedding
- no_encoder_attn (bool, optional): whether to attend to encoder outputs
- (default: False).
- """
-
- def __init__(
- self,
- args,
- dictionary,
- embed_tokens,
- no_encoder_attn=False,
- output_projection=None,
- ):
- self.args = args
- super().__init__(dictionary)
- self.register_buffer("version", torch.Tensor([3]))
- self._future_mask = torch.empty(0)
-
- self.dropout_module = FairseqDropout(
- args.dropout, module_name=self.__class__.__name__
- )
- self.decoder_layerdrop = args.decoder_layerdrop
- self.share_input_output_embed = args.share_decoder_input_output_embed
- self.num_attention_heads = args.decoder_attention_heads
-
- input_embed_dim = embed_tokens.embedding_dim
- embed_dim = args.decoder_embed_dim
- self.embed_dim = embed_dim
- self.output_embed_dim = args.decoder_output_dim
-
- self.padding_idx = embed_tokens.padding_idx
- self.max_target_positions = args.max_target_positions
-
- self.embed_tokens = embed_tokens
-
- self.embed_scale = 1.0 if args.no_scale_embedding else math.sqrt(embed_dim)
-
- if not args.adaptive_input and args.quant_noise_pq > 0:
- self.quant_noise = apply_quant_noise_(
- nn.Linear(embed_dim, embed_dim, bias=False),
- args.quant_noise_pq,
- args.quant_noise_pq_block_size,
- )
- else:
- self.quant_noise = None
-
- self.project_in_dim = (
- Linear(input_embed_dim, embed_dim, bias=False)
- if embed_dim != input_embed_dim
- else None
- )
-
- if getattr(args, "layernorm_embedding", False):
- self.layernorm_embedding = LayerNorm(embed_dim)
- else:
- self.layernorm_embedding = None
-
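-        # side length of the code-token grid; assumes the code image is quantized with a
-        # downsampling stride of 8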
- self.window_size = args.code_image_size // 8
-
- self.embed_positions = Embedding(args.max_target_positions + 2, embed_dim)
- self.embed_image_positions = Embedding(args.image_bucket_size ** 2 + 1, embed_dim)
- self.pos_ln = LayerNorm(embed_dim)
- self.image_pos_ln = LayerNorm(embed_dim)
- self.pos_scaling = float(embed_dim / self.num_attention_heads * args.attn_scale_factor) ** -0.5
- self.self_pos_q_linear = nn.Linear(embed_dim, embed_dim)
- self.self_pos_k_linear = nn.Linear(embed_dim, embed_dim)
- self.cross_pos_q_linear = nn.Linear(embed_dim, embed_dim)
- self.cross_pos_k_linear = nn.Linear(embed_dim, embed_dim)
-
- if getattr(args, "code_layernorm_embedding", False):
- self.code_layernorm_embedding = LayerNorm(embed_dim)
- else:
- self.code_layernorm_embedding = None
-
- self.cross_self_attention = getattr(args, "cross_self_attention", False)
-
- if self.decoder_layerdrop > 0.0:
- self.layers = LayerDropModuleList(p=self.decoder_layerdrop)
- else:
- self.layers = nn.ModuleList([])
-
- dpr = [x.item() for x in torch.linspace(0, args.decoder_drop_path_rate, args.decoder_layers)]
- self.layers.extend(
- [
- self.build_decoder_layer(args, no_encoder_attn, drop_path_rate=dpr[i])
- for i in range(args.decoder_layers)
- ]
- )
- self.num_layers = len(self.layers)
-
- if args.decoder_normalize_before:
- self.layer_norm = LayerNorm(embed_dim)
- else:
- self.layer_norm = None
-
- self.project_out_dim = (
- Linear(embed_dim, self.output_embed_dim, bias=False)
- if embed_dim != self.output_embed_dim and not args.tie_adaptive_weights
- else None
- )
-
- self.adaptive_softmax = None
- self.output_projection = output_projection
- if self.output_projection is None:
- self.build_output_projection(args, dictionary, embed_tokens)
-
- token_bucket_size = args.token_bucket_size
- token_num_rel_dis = 2 * token_bucket_size - 1
- token_rp_bucket = make_token_bucket_position(token_bucket_size)
- self.token_rel_pos_table_list = nn.ModuleList(
- [Embedding(token_num_rel_dis, self.num_attention_heads, zero_init=True) for _ in range(args.decoder_layers)]
- )
-
- image_bucket_size = args.image_bucket_size
- image_num_rel_dis = (2 * image_bucket_size - 1) * (2 * image_bucket_size - 1) + 3
- image_rp_bucket = make_image_bucket_position(image_bucket_size, image_num_rel_dis)
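-        # Precompute position bucket ids for decoding image codes: bucket 0 for the first
-        # target position, then the window_size x window_size code grid, padded with a
-        # fixed bucket id (1024) so positions beyond the code window remain valid.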
- image_position_idx = torch.arange(self.window_size).unsqueeze(0).expand(self.window_size, self.window_size) + \
- torch.arange(self.window_size).unsqueeze(1) * image_bucket_size + 1
- image_position_idx = torch.cat([torch.tensor([0]), image_position_idx.view(-1)])
- image_position_idx = torch.cat([image_position_idx, torch.tensor([1024] * 768)])
- self.image_rel_pos_table_list = nn.ModuleList(
- [Embedding(image_num_rel_dis, self.num_attention_heads, zero_init=True) for _ in range(args.decoder_layers)]
- )
-
- self.register_buffer("token_rp_bucket", token_rp_bucket)
- self.register_buffer("image_rp_bucket", image_rp_bucket)
- self.register_buffer("image_position_idx", image_position_idx)
- self.entangle_position_embedding = args.entangle_position_embedding
-
- def build_output_projection(self, args, dictionary, embed_tokens):
- if args.adaptive_softmax_cutoff is not None:
- self.adaptive_softmax = AdaptiveSoftmax(
- len(dictionary),
- self.output_embed_dim,
- utils.eval_str_list(args.adaptive_softmax_cutoff, type=int),
- dropout=args.adaptive_softmax_dropout,
- adaptive_inputs=embed_tokens if args.tie_adaptive_weights else None,
- factor=args.adaptive_softmax_factor,
- tie_proj=args.tie_adaptive_proj,
- )
- elif self.share_input_output_embed:
- self.output_projection = nn.Linear(
- self.embed_tokens.weight.shape[1],
- self.embed_tokens.weight.shape[0],
- bias=False,
- )
- self.output_projection.weight = self.embed_tokens.weight
- else:
- self.output_projection = nn.Linear(
- self.output_embed_dim, len(dictionary), bias=False
- )
- nn.init.normal_(
- self.output_projection.weight, mean=0, std=self.output_embed_dim ** -0.5
- )
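-        # evenly interleave any BaseLayer modules among the regular decoder layers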
- num_base_layers = getattr(args, "base_layers", 0)
- for i in range(num_base_layers):
- self.layers.insert(((i+1) * args.decoder_layers) // (num_base_layers + 1), BaseLayer(args))
-
- def build_decoder_layer(self, args, no_encoder_attn=False, drop_path_rate=0.0):
- layer = TransformerDecoderLayer(args, no_encoder_attn, drop_path_rate=drop_path_rate)
- checkpoint = getattr(args, "checkpoint_activations", False)
- if checkpoint:
- offload_to_cpu = getattr(args, "offload_activations", False)
- layer = checkpoint_wrapper(layer, offload_to_cpu=offload_to_cpu)
- # if we are checkpointing, enforce that FSDP always wraps the
- # checkpointed layer, regardless of layer size
- min_params_to_wrap = (
- getattr(args, "min_params_to_wrap", DEFAULT_MIN_PARAMS_TO_WRAP)
- if not checkpoint else 0
- )
- layer = fsdp_wrap(layer, min_num_params=min_params_to_wrap)
- return layer
-
- def get_rel_pos_bias(self, x, idx):
- seq_len = x.size(1)
- rp_bucket = self.token_rp_bucket[:seq_len, :seq_len]
- values = F.embedding(rp_bucket, self.token_rel_pos_table_list[idx].weight)
- values = values.permute([2, 0, 1])
- return values.contiguous()
-
- def get_image_rel_pos_bias(self, x, idx):
- seq_len = x.size(1)
- image_position_idx = self.image_position_idx[:seq_len]
- rp_bucket = self.image_rp_bucket[image_position_idx][:, image_position_idx]
- values = F.embedding(rp_bucket, self.image_rel_pos_table_list[idx].weight)
- values = values.permute(2, 0, 1)
- return values
-
- def get_pos_info(self, tokens, tgt_pos_embed, src_pos_embed=None, use_image=False):
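-        # Build an absolute-position attention bias from position embeddings: use the
-        # cross-attention projections when src_pos_embed is given (encoder-decoder
-        # attention), otherwise the self-attention projections.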
- batch_size = tokens.size(0)
- tgt_len = tokens.size(1)
- tgt_pos_embed = self.image_pos_ln(tgt_pos_embed) if use_image else self.pos_ln(tgt_pos_embed)
- if src_pos_embed is not None:
- src_len = src_pos_embed.size(1)
- pos_q = self.cross_pos_q_linear(tgt_pos_embed).view(
- batch_size, tgt_len, self.num_attention_heads, -1
- ).transpose(1, 2) * self.pos_scaling
- pos_k = self.cross_pos_k_linear(src_pos_embed).view(
- batch_size, src_len, self.num_attention_heads, -1
- ).transpose(1, 2)
- else:
- src_len = tgt_pos_embed.size(1)
- pos_q = self.self_pos_q_linear(tgt_pos_embed).view(
- batch_size, tgt_len, self.num_attention_heads, -1
- ).transpose(1, 2) * self.pos_scaling
- pos_k = self.self_pos_k_linear(tgt_pos_embed).view(
- batch_size, src_len, self.num_attention_heads, -1
- ).transpose(1, 2)
- abs_pos_bias = torch.matmul(pos_q, pos_k.transpose(2, 3))
- return abs_pos_bias
-
- def forward(
- self,
- prev_output_tokens,
- code_masks: Optional[torch.Tensor] = None,
- encoder_out: Optional[Dict[str, List[Tensor]]] = None,
- incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] = None,
- features_only: bool = False,
- full_context_alignment: bool = False,
- alignment_layer: Optional[int] = None,
- alignment_heads: Optional[int] = None,
- src_lengths: Optional[Any] = None,
- return_all_hiddens: bool = False,
- ):
- """
- Args:
- prev_output_tokens (LongTensor): previous decoder outputs of shape
- `(batch, tgt_len)`, for teacher forcing
- encoder_out (optional): output from the encoder, used for
- encoder-side attention, should be of size T x B x C
- incremental_state (dict): dictionary used for storing state during
- :ref:`Incremental decoding`
- features_only (bool, optional): only return features without
- applying output layer (default: False).
- full_context_alignment (bool, optional): don't apply
- auto-regressive mask to self-attention (default: False).
-
- Returns:
- tuple:
- - the decoder's output of shape `(batch, tgt_len, vocab)`
- - a dictionary with any model-specific outputs
- """
-
- x, extra = self.extract_features(
- prev_output_tokens,
- code_masks=code_masks,
- encoder_out=encoder_out,
- incremental_state=incremental_state,
- full_context_alignment=full_context_alignment,
- alignment_layer=alignment_layer,
- alignment_heads=alignment_heads,
- )
-
- if not features_only:
- x = self.output_layer(x)
- return x, extra
-
- def extract_features(
- self,
- prev_output_tokens,
- code_masks: Optional[torch.Tensor],
- encoder_out: Optional[Dict[str, List[Tensor]]],
- incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] = None,
- full_context_alignment: bool = False,
- alignment_layer: Optional[int] = None,
- alignment_heads: Optional[int] = None,
- ):
- return self.extract_features_scriptable(
- prev_output_tokens,
- code_masks,
- encoder_out,
- incremental_state,
- full_context_alignment,
- alignment_layer,
- alignment_heads,
- )
-
-    """
-    A scriptable subclass of this class has an extract_features method that calls
-    super().extract_features, but super() is not supported in TorchScript. A copy of
-    this function is kept here to be used by the subclass instead.
-    """
-
- def extract_features_scriptable(
- self,
- prev_output_tokens,
- code_masks: Optional[torch.Tensor],
- encoder_out: Optional[Dict[str, List[Tensor]]],
- incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] = None,
- full_context_alignment: bool = False,
- alignment_layer: Optional[int] = None,
- alignment_heads: Optional[int] = None,
- ):
- """
- Similar to *forward* but only return features.
-
- Includes several features from "Jointly Learning to Align and
- Translate with Transformer Models" (Garg et al., EMNLP 2019).
-
- Args:
- full_context_alignment (bool, optional): don't apply
- auto-regressive mask to self-attention (default: False).
- alignment_layer (int, optional): return mean alignment over
- heads at this layer (default: last layer).
- alignment_heads (int, optional): only average alignment over
- this many heads (default: all heads).
-
- Returns:
- tuple:
- - the decoder's features of shape `(batch, tgt_len, embed_dim)`
- - a dictionary with any model-specific outputs
- """
- bs, slen = prev_output_tokens.size()
- if alignment_layer is None:
- alignment_layer = self.num_layers - 1
-
- enc: Optional[Tensor] = None
- padding_mask: Optional[Tensor] = None
- if encoder_out is not None and len(encoder_out["encoder_out"]) > 0:
- enc = encoder_out["encoder_out"][0]
- assert (
- enc.size()[1] == bs
- ), f"Expected enc.shape == (t, {bs}, c) got {enc.shape}"
- if encoder_out is not None and len(encoder_out["encoder_padding_mask"]) > 0:
- padding_mask = encoder_out["encoder_padding_mask"][0]
-
- bsz, tgt_len = prev_output_tokens.shape
- token_position_idx = utils.new_arange(prev_output_tokens)
- tgt_pos_embed = self.embed_positions(token_position_idx)
- if code_masks is not None and torch.any(code_masks):
- image_position_idx = self.image_position_idx[:prev_output_tokens.size(1)].unsqueeze(0).expand(bsz, tgt_len)
- tgt_pos_embed[code_masks] = self.embed_image_positions(image_position_idx)[code_masks]
-
- # self attn position bias
- self_abs_pos_bias = self.get_pos_info(prev_output_tokens, tgt_pos_embed, use_image=False)
- if code_masks is not None and torch.any(code_masks):
- self_image_abs_pos_bias = self.get_pos_info(prev_output_tokens, tgt_pos_embed, use_image=True)
- self_abs_pos_bias[code_masks] = self_image_abs_pos_bias[code_masks]
- # cross attn position bias
- src_pos_embed = encoder_out['position_embeddings'][0]
- cross_abs_pos_bias = self.get_pos_info(prev_output_tokens, tgt_pos_embed, src_pos_embed=src_pos_embed)
- if code_masks is not None and torch.any(code_masks):
- cross_image_abs_pos_bias = self.get_pos_info(prev_output_tokens, tgt_pos_embed, src_pos_embed=src_pos_embed, use_image=True)
- cross_abs_pos_bias[code_masks] = cross_image_abs_pos_bias[code_masks]
- cross_abs_pos_bias = cross_abs_pos_bias.reshape(-1, *cross_abs_pos_bias.size()[-2:])
-
- all_prev_output_tokens = prev_output_tokens.clone()
- if incremental_state is not None:
- prev_output_tokens = prev_output_tokens[:, -1:]
- cross_abs_pos_bias = cross_abs_pos_bias[:, -1:, :]
- tgt_pos_embed = tgt_pos_embed[:, -1:, :]
-
- # embed tokens and positions
- x = self.embed_scale * self.embed_tokens(prev_output_tokens)
-
- if self.quant_noise is not None:
- x = self.quant_noise(x)
-
- if self.project_in_dim is not None:
- x = self.project_in_dim(x)
-
- if self.entangle_position_embedding is not None and not self.args.disable_entangle:
- x += tgt_pos_embed
-
- if self.layernorm_embedding is not None:
- if code_masks is None or not code_masks.any() or not getattr(self, "code_layernorm_embedding", False):
- x = self.layernorm_embedding(x)
- elif code_masks is not None and code_masks.all():
- x = self.code_layernorm_embedding(x)
- else:
- x[~code_masks] = self.layernorm_embedding(x[~code_masks])
- x[code_masks] = self.code_layernorm_embedding(x[code_masks])
-
- x = self.dropout_module(x)
-
- # B x T x C -> T x B x C
- x = x.transpose(0, 1)
-
- self_attn_padding_mask: Optional[Tensor] = None
- if self.cross_self_attention or prev_output_tokens.eq(self.padding_idx).any():
- self_attn_padding_mask = prev_output_tokens.eq(self.padding_idx)
-
- # decoder layers
- attn: Optional[Tensor] = None
- inner_states: List[Optional[Tensor]] = [x]
- for idx, layer in enumerate(self.layers):
- if incremental_state is None and not full_context_alignment:
- self_attn_mask = self.buffered_future_mask(x)
- else:
- self_attn_mask = None
-
- self_attn_bias = self_abs_pos_bias.clone()
- if code_masks is None or not code_masks.any():
- self_attn_bias += self.get_rel_pos_bias(all_prev_output_tokens, idx).unsqueeze(0)
- elif code_masks is not None and code_masks.all():
- self_attn_bias += self.get_image_rel_pos_bias(all_prev_output_tokens, idx).unsqueeze(0)
- else:
- self_attn_bias[~code_masks] += self.get_rel_pos_bias(all_prev_output_tokens, idx).unsqueeze(0)
- self_attn_bias[code_masks] += self.get_image_rel_pos_bias(all_prev_output_tokens, idx).unsqueeze(0)
- self_attn_bias = self_attn_bias.reshape(-1, *self_attn_bias.size()[-2:])
- if incremental_state is not None:
- self_attn_bias = self_attn_bias[:, -1:, :]
-
- x, layer_attn, _ = layer(
- x,
- enc,
- padding_mask,
- incremental_state,
- self_attn_mask=self_attn_mask,
- self_attn_padding_mask=self_attn_padding_mask,
- need_attn=bool((idx == alignment_layer)),
- need_head_weights=bool((idx == alignment_layer)),
- self_attn_bias=self_attn_bias,
- cross_attn_bias=cross_abs_pos_bias
- )
- inner_states.append(x)
- if layer_attn is not None and idx == alignment_layer:
- attn = layer_attn.float().to(x)
-
- if attn is not None:
- if alignment_heads is not None:
- attn = attn[:alignment_heads]
-
- # average probabilities over heads
- attn = attn.mean(dim=0)
-
- if self.layer_norm is not None:
- x = self.layer_norm(x)
-
- # T x B x C -> B x T x C
- x = x.transpose(0, 1)
-
- if self.project_out_dim is not None:
- x = self.project_out_dim(x)
-
- return x, {"attn": [attn], "inner_states": inner_states}
-
- def output_layer(self, features):
- """Project features to the vocabulary size."""
- if self.adaptive_softmax is None:
- # project back to size of vocabulary
- return self.output_projection(features)
- else:
- return features
-
-    def max_positions(self):
-        """Maximum output length supported by the decoder."""
-        return self.max_target_positions
-
- def buffered_future_mask(self, tensor):
- dim = tensor.size(0)
- # self._future_mask.device != tensor.device is not working in TorchScript. This is a workaround.
- if (
- self._future_mask.size(0) == 0
- or (not self._future_mask.device == tensor.device)
- or self._future_mask.size(0) < dim
- ):
- self._future_mask = torch.triu(
- utils.fill_with_neg_inf(torch.zeros([dim, dim])), 1
- )
- self._future_mask = self._future_mask.to(tensor)
- return self._future_mask[:dim, :dim]
-
- def upgrade_state_dict_named(self, state_dict, name):
- """Upgrade a (possibly old) state dict for new versions of fairseq."""
- if isinstance(self.embed_positions, SinusoidalPositionalEmbedding):
- weights_key = "{}.embed_positions.weights".format(name)
- if weights_key in state_dict:
- del state_dict[weights_key]
- state_dict[
- "{}.embed_positions._float_tensor".format(name)
- ] = torch.FloatTensor(1)
-
- if f"{name}.output_projection.weight" not in state_dict:
- if self.share_input_output_embed:
- embed_out_key = f"{name}.embed_tokens.weight"
- else:
- embed_out_key = f"{name}.embed_out"
- if embed_out_key in state_dict:
- state_dict[f"{name}.output_projection.weight"] = state_dict[
- embed_out_key
- ]
- if not self.share_input_output_embed:
- del state_dict[embed_out_key]
-
- for i in range(self.num_layers):
- # update layer norms
- self.layers[i].upgrade_state_dict_named(
- state_dict, "{}.layers.{}".format(name, i)
- )
-
- # version_key = "{}.version".format(name)
- # if utils.item(state_dict.get(version_key, torch.Tensor([1]))[0]) <= 2:
- # # earlier checkpoints did not normalize after the stack of layers
- # self.layer_norm = None
- # self.normalize = False
- # state_dict[version_key] = torch.Tensor([1])
-
- prefix = name + "." if name != "" else ""
- image_params = ["image_position_idx"]
- for image_param in image_params:
- state_dict[prefix + image_param] = self.state_dict()[image_param]
- for param_name, param_tensor in self.state_dict().items():
- if (prefix + param_name) not in state_dict and param_name in self.state_dict():
- state_dict[prefix + param_name] = self.state_dict()[param_name]
-
- if len(state_dict["decoder.embed_image_positions.weight"]) < len(self.state_dict()["embed_image_positions.weight"]):
- num_posids_to_add = len(self.state_dict()["embed_image_positions.weight"]) - len(state_dict["decoder.embed_image_positions.weight"])
- embed_dim = state_dict["decoder.embed_image_positions.weight"].size(1)
- new_pos_embed_to_add = torch.zeros(num_posids_to_add, embed_dim)
- nn.init.normal_(new_pos_embed_to_add, mean=0, std=embed_dim ** -0.5)
- new_pos_embed_to_add = new_pos_embed_to_add.to(
- dtype=state_dict["decoder.embed_image_positions.weight"].dtype,
- )
- state_dict["decoder.embed_image_positions.weight"] = torch.cat(
- [state_dict["decoder.embed_image_positions.weight"], new_pos_embed_to_add]
- )
- return state_dict
-
-
-def Embedding(num_embeddings, embedding_dim, padding_idx=None, zero_init=False):
- m = nn.Embedding(num_embeddings, embedding_dim, padding_idx=padding_idx)
- nn.init.normal_(m.weight, mean=0, std=embedding_dim ** -0.5)
- if padding_idx is not None:
- nn.init.constant_(m.weight[padding_idx], 0)
- if zero_init:
- nn.init.constant_(m.weight, 0)
- return m
-
-
-def Linear(in_features, out_features, bias=True):
- m = nn.Linear(in_features, out_features, bias)
- nn.init.xavier_uniform_(m.weight)
- if bias:
- nn.init.constant_(m.bias, 0.0)
- return m
-
-
-@register_model_architecture("unify_transformer", "unify_transformer")
-def base_architecture(args):
- args.encoder_embed_path = getattr(args, "encoder_embed_path", None)
- args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 512)
- args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 2048)
- args.encoder_layers = getattr(args, "encoder_layers", 6)
- args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 8)
- args.encoder_normalize_before = getattr(args, "encoder_normalize_before", False)
- args.encoder_learned_pos = getattr(args, "encoder_learned_pos", False)
- args.decoder_embed_path = getattr(args, "decoder_embed_path", None)
- args.decoder_embed_dim = getattr(args, "decoder_embed_dim", args.encoder_embed_dim)
- args.decoder_ffn_embed_dim = getattr(
- args, "decoder_ffn_embed_dim", args.encoder_ffn_embed_dim
- )
- args.decoder_layers = getattr(args, "decoder_layers", 6)
- args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 8)
- args.decoder_normalize_before = getattr(args, "decoder_normalize_before", False)
- args.decoder_learned_pos = getattr(args, "decoder_learned_pos", False)
- args.attention_dropout = getattr(args, "attention_dropout", 0.0)
- args.activation_dropout = getattr(args, "activation_dropout", 0.0)
- args.activation_fn = getattr(args, "activation_fn", "relu")
- args.dropout = getattr(args, "dropout", 0.1)
- args.adaptive_softmax_cutoff = getattr(args, "adaptive_softmax_cutoff", None)
- args.adaptive_softmax_dropout = getattr(args, "adaptive_softmax_dropout", 0)
- args.share_decoder_input_output_embed = getattr(
- args, "share_decoder_input_output_embed", False
- )
- args.share_all_embeddings = getattr(args, "share_all_embeddings", False)
- args.no_token_positional_embeddings = getattr(
- args, "no_token_positional_embeddings", False
- )
- args.adaptive_input = getattr(args, "adaptive_input", False)
- args.no_cross_attention = getattr(args, "no_cross_attention", False)
- args.cross_self_attention = getattr(args, "cross_self_attention", False)
-
- args.decoder_output_dim = getattr(
- args, "decoder_output_dim", args.decoder_embed_dim
- )
- args.decoder_input_dim = getattr(args, "decoder_input_dim", args.decoder_embed_dim)
-
- args.no_scale_embedding = getattr(args, "no_scale_embedding", False)
- args.layernorm_embedding = getattr(args, "layernorm_embedding", False)
- args.tie_adaptive_weights = getattr(args, "tie_adaptive_weights", False)
- args.checkpoint_activations = getattr(args, "checkpoint_activations", False)
- args.offload_activations = getattr(args, "offload_activations", False)
- if args.offload_activations:
- args.checkpoint_activations = True
- args.encoder_layers_to_keep = getattr(args, "encoder_layers_to_keep", None)
- args.decoder_layers_to_keep = getattr(args, "decoder_layers_to_keep", None)
- args.encoder_layerdrop = getattr(args, "encoder_layerdrop", 0)
- args.decoder_layerdrop = getattr(args, "decoder_layerdrop", 0)
- args.quant_noise_pq = getattr(args, "quant_noise_pq", 0)
- args.quant_noise_pq_block_size = getattr(args, "quant_noise_pq_block_size", 8)
- args.quant_noise_scalar = getattr(args, "quant_noise_scalar", 0)
\ No newline at end of file
diff --git a/spaces/starlit7/KorPoliticsTTS/text/__init__.py b/spaces/starlit7/KorPoliticsTTS/text/__init__.py
deleted file mode 100644
index 4e69c354dd24e3243980236eca962cd5945a92fc..0000000000000000000000000000000000000000
--- a/spaces/starlit7/KorPoliticsTTS/text/__init__.py
+++ /dev/null
@@ -1,32 +0,0 @@
-""" from https://github.com/keithito/tacotron """
-from text import cleaners
-
-
-def text_to_sequence(text, symbols, cleaner_names):
- '''Converts a string of text to a sequence of IDs corresponding to the symbols in the text.
- Args:
-      text: string to convert to a sequence
-      symbols: list of symbols; a symbol's position in the list is its integer ID
-      cleaner_names: names of the cleaner functions to run the text through
- Returns:
- List of integers corresponding to the symbols in the text
- '''
- _symbol_to_id = {s: i for i, s in enumerate(symbols)}
-
- sequence = []
-
- clean_text = _clean_text(text, cleaner_names)
- for symbol in clean_text:
-    if symbol not in _symbol_to_id:
- continue
- symbol_id = _symbol_to_id[symbol]
- sequence += [symbol_id]
- return sequence
-
-
-def _clean_text(text, cleaner_names):
- for name in cleaner_names:
- cleaner = getattr(cleaners, name)
- if not cleaner:
- raise Exception('Unknown cleaner: %s' % name)
- text = cleaner(text)
- return text
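The mapping above is small enough to exercise in isolation. Below is a standalone sketch of the same idea with a toy symbol set and a stand-in cleaner; the real module resolves cleaner names from text.cleaners, and the symbol list here is illustrative only:

# Toy reproduction of text_to_sequence: characters missing from the table are
# silently dropped, everything else maps to its index in `symbols`.
symbols = ['_', ' ', 'a', 'b', 'c']                # hypothetical symbol set
_symbol_to_id = {s: i for i, s in enumerate(symbols)}

def lowercase_cleaner(text):                       # stand-in for a text.cleaners function
    return text.lower()

def toy_text_to_sequence(text, cleaner_fns):
    for clean in cleaner_fns:
        text = clean(text)
    return [_symbol_to_id[ch] for ch in text if ch in _symbol_to_id]

print(toy_text_to_sequence("Abc a", [lowercase_cleaner]))  # -> [2, 3, 4, 1, 2]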
diff --git a/spaces/stomexserde/gpt4-ui/Examples/FairUse Wizard 2.9 Full Edition(Tutorial Included)(Pre-Cracked)[ Serial Key.md b/spaces/stomexserde/gpt4-ui/Examples/FairUse Wizard 2.9 Full Edition(Tutorial Included)(Pre-Cracked)[ Serial Key.md
deleted file mode 100644
index 1ba2de9f02dd8368b497385e6f5e037bfc616b56..0000000000000000000000000000000000000000
--- a/spaces/stomexserde/gpt4-ui/Examples/FairUse Wizard 2.9 Full Edition(Tutorial Included)(Pre-Cracked)[ Serial Key.md
+++ /dev/null
@@ -1,239 +0,0 @@
-
-How to Convert a DVD Movie to AVI with FairUse Wizard 2.9 Full Edition
- Do you have a collection of DVDs that you want to back up or watch on your computer or mobile device? Do you want a simple and easy way to convert your DVDs to AVI files without losing quality or spending hours on encoding? If so, you should try FairUse Wizard 2.9 Full Edition, a powerful and user-friendly DVD ripper that can help you achieve your goals.
- Introduction
- FairUse Wizard is a software program that uses the DivX, Xvid, or h.264 codec to convert DVD movies to AVI files. It comes in both a free version and a commercial version. The free, or "Light" version, can create files up to 700MB while the commercial version can output a 1400MB file. This will allow you to back up your movies to CD, or even multiple movies on a single DVD.
-FairUse Wizard 2.9 Full Edition(Tutorial Included)(Pre-Cracked)[ Serial Key
Download Zip ☆☆☆ https://urlgoal.com/2uIbls
- Converting DVD movies to AVI files has many benefits. You can save space on your hard drive or external storage device, as AVI files are much smaller than DVD files. You can also play your movies on any device that supports AVI format, such as computers, smartphones, tablets, media players, game consoles, etc. You can also edit your movies with video editing software, such as adding subtitles, cropping, trimming, merging, etc.
- FairUse Wizard 2.9 Full Edition is the latest version of FairUse Wizard that offers many features and improvements over the previous versions. Some of the features include:
-
-- Support for Windows XP, Vista, 7, 8, and 10
-- Support for DVD sources from discs, ISO images, or folders
-- Support for full auto mode or manual mode
-- Support for previewing and editing video segments
-- Support for two-pass encoding for higher quality
-- Support for batch mode for converting multiple DVDs
-- Support for customizing video resolution, bitrate, framerate, aspect ratio, audio track, etc.
-- Support for adding subtitles from SRT files
-- Support for creating chapters from IFO files
-- Support for creating menus from VOB files
-
- In this article, we will show you how to use FairUse Wizard 2.9 Full Edition (Pre-Cracked) with serial key to convert a DVD movie to AVI in a few simple steps. We will also provide some tips and tricks on how to optimize your conversion process and deal with some common issues.
- Requirements
-
Before you start converting your DVD movie to AVI with FairUse Wizard 2.9 Full Edition, you need to make sure you have the following requirements:
-
-- A DVD drive that can read your DVD movie
-- A computer that meets the minimum system requirements of FairUse Wizard 2.9 Full Edition, which are:
-
- - Windows XP, Vista, 7, 8, or 10
- - 1 GHz processor or higher
- - 512 MB of RAM or higher
- - At least 5 GB of free hard disk space
- - A monitor that supports at least 800x600 resolution
-
-- A copy of FairUse Wizard 2.9 Full Edition (Pre-Cracked) with serial key, which you can download from this link:
-- A valid serial key that you can enter during the installation process, which is: XXXX-XXXX-XXXX-XXXX (replace the Xs with the actual key)
-
- Once you have all the requirements, you can proceed to install FairUse Wizard 2.9 Full Edition on your computer.
- How to install FairUse Wizard 2.9 Full Edition
- Installing FairUse Wizard 2.9 Full Edition is very easy and straightforward. Just follow these steps:
-
-
-- Download the FairUse Wizard 2.9 Full Edition (Pre-Cracked) setup file from the link provided above and save it to your preferred location.
-- Double-click on the setup file to launch the installation wizard.
-- Follow the instructions on the screen and accept the terms and conditions.
-- When prompted, enter the serial key that you received with the download link and click Next.
-- Choose the destination folder where you want to install FairUse Wizard 2.9 Full Edition and click Next.
-- Wait for the installation process to complete and click Finish.
-- You can now launch FairUse Wizard 2.9 Full Edition from your desktop or start menu.
-
- Congratulations! You have successfully installed FairUse Wizard 2.9 Full Edition on your computer. You are now ready to convert your DVD movie to AVI.
- Step-by-step tutorial
- In this section, we will show you how to use FairUse Wizard 2.9 Full Edition to convert a DVD movie to AVI in a few simple steps. We will use an example DVD movie called "The Matrix" for demonstration purposes, but you can use any DVD movie that you own or have access to.
- How to create a new project and set the options
- The first thing you need to do is create a new project and set the options for your conversion. Here is how:
-
-- Launch FairUse Wizard 2.9 Full Edition from your desktop or start menu.
-- You will see the main interface of FairUse Wizard 2.9 Full Edition, which consists of four tabs: Project Options, Video Segments, Conversion Progress, and Batch Mode.
-- Click on the Project Options tab to access the settings for your conversion.
-- You will see a window with several sections: General Options, Video Options, Audio Options, Subtitle Options, Chapter Options, and Menu Options.
-- In the General Options section, you can choose between full auto mode and manual mode. Full auto mode will automatically detect and select the best settings for your conversion based on your DVD source and output file size. Manual mode will allow you to customize every aspect of your conversion according to your preferences.
-- We recommend using full auto mode for beginners or users who want a quick and easy conversion. However, if you want more control over your conversion, you can switch to manual mode by unchecking the box next to "Full Auto Mode".
-- In this tutorial, we will use full auto mode for simplicity.
-- In the Video Options section, you can choose the output file size for your AVI file. You can either enter a custom value in megabytes (MB) or choose from one of the predefined values: CD-R (700 MB), CD-RW (650 MB), DVD-R (4400 MB), DVD-RW (4200 MB), etc.
-- We recommend choosing a file size that matches your intended storage device or media player. For example, if you want to burn your AVI file to a CD-R disc, you should choose CD-R (700 MB) as your output file size.
-- In this tutorial, we will choose CD-R (700 MB) as our output file size.
-- In the Audio Options section, you can choose the audio track and the audio codec for your AVI file. You can either let FairUse Wizard automatically select the best audio track and codec for you, or you can manually choose them from the drop-down menus.
-- We recommend letting FairUse Wizard automatically select the audio track and codec for you, unless you have a specific preference or need. For example, if you want to keep the original language and sound quality of your DVD movie, you should choose the same audio track and codec as your DVD source.
-- In this tutorial, we will let FairUse Wizard automatically select the audio track and codec for us.
-- In the Subtitle Options section, you can choose whether to add subtitles to your AVI file or not. You can either let FairUse Wizard automatically select the best subtitle track for you, or you can manually choose one from the drop-down menu. You can also add external subtitles from SRT files by clicking on the "Add Subtitle" button and browsing for the SRT file on your computer.
-- We recommend adding subtitles to your AVI file if you want to watch your movie in a different language or with captions for the hearing impaired. However, if you don't need subtitles, you can skip this step.
-- In this tutorial, we will let FairUse Wizard automatically select the subtitle track for us.
-- In the Chapter Options section, you can choose whether to create chapters for your AVI file or not. Chapters are useful for navigating through your movie and skipping to specific scenes. You can either let FairUse Wizard automatically create chapters for you based on the IFO file of your DVD source, or you can manually create chapters by clicking on the "Add Chapter" button and entering the start time and end time of each chapter.
-- We recommend creating chapters for your AVI file if you want to have more control over your movie playback. However, if you don't need chapters, you can skip this step.
-- In this tutorial, we will let FairUse Wizard automatically create chapters for us based on the IFO file of our DVD source.
-- In the Menu Options section, you can choose whether to create a menu for your AVI file or not. A menu is a graphical interface that allows you to select different options for your movie, such as play, pause, stop, chapters, subtitles, etc. You can either let FairUse Wizard automatically create a menu for you based on the VOB file of your DVD source, or you can manually create a menu by clicking on the "Add Menu" button and choosing a template and a background image.
-- We recommend creating a menu for your AVI file if you want to have a more professional and interactive movie experience. However, if you don't need a menu, you can skip this step.
-- In this tutorial, we will let FairUse Wizard automatically create a menu for us based on the VOB file of our DVD source.
-
- After you have set all the options for your conversion, click on the "Next" button at the bottom right corner of the window to proceed to the next step.
- How to choose between full auto mode and manual mode
- The next step is to choose between full auto mode and manual mode for your conversion. Full auto mode will automatically detect and select the best video segments for your conversion based on your DVD source and output file size. Manual mode will allow you to manually select and edit the video segments according to your preferences.
- We recommend using full auto mode for beginners or users who want a quick and easy conversion. However, if you want more control over your conversion, you can switch to manual mode by checking the box next to "Manual Mode".
- In this tutorial, we will use full auto mode for simplicity.
-
-- Click on the "Next" button at the bottom right corner of the window to start full auto mode.
-- FairUse Wizard will scan your DVD source and analyze its structure. This may take a few minutes depending on the size and complexity of your DVD source.
-- FairUse Wizard will then display a list of video segments that it has detected and selected for your conversion. Each video segment represents a part of your DVD movie that has a consistent video quality and aspect ratio. You can see the details of each video segment, such as its title, duration, resolution, bitrate, etc., by clicking on it.
-- If you are satisfied with the video segments that FairUse Wizard has selected for you, click on the "Next" button at the bottom right corner of the window to proceed to the next step.
-- If you are not satisfied with the video segments that FairUse Wizard has selected for you, you can switch to manual mode by checking the box next to "Manual Mode". This will allow you to manually select and edit the video segments according to your preferences. You can do the following actions in manual mode:
-
- - Add a video segment by clicking on the "Add Segment" button and choosing a title from the drop-down menu.
- - Delete a video segment by clicking on the "Delete Segment" button and confirming your choice.
- - Move a video segment up or down in the list by clicking on the "Move Up" or "Move Down" button.
- - Edit a video segment by clicking on the "Edit Segment" button and adjusting the start time and end time of the segment. You can also preview the segment by clicking on the "Preview Segment" button.
-
-- After you have selected and edited the video segments that you want to convert, click on the "Next" button at the bottom right corner of the window to proceed to the next step.
-
- How to select the DVD source and the output folder
- The next step is to select the DVD source and the output folder for your conversion. The DVD source is where FairUse Wizard will read your DVD movie from, and the output folder is where FairUse Wizard will save your AVI file to. Here is how:
-
-- Click on the "Next" button at the bottom right corner of the window to access the DVD source and output folder selection window.
-- You will see two sections: DVD Source and Output Folder.
-- In the DVD Source section, you can choose between three options: DVD Disc, DVD Folder, or ISO Image. DVD Disc means that your DVD movie is inserted in your DVD drive. DVD Folder means that your DVD movie is copied to a folder on your hard drive. ISO Image means that your DVD movie is compressed into a single file on your hard drive.
-- We recommend choosing DVD Disc if you have a physical copy of your DVD movie. However, if you have a digital copy of your DVD movie, you can choose either DVD Folder or ISO Image depending on how you stored it.
-- In this tutorial, we will choose DVD Disc as our DVD source.
-- In the Output Folder section, you can choose where you want to save your AVI file. You can either enter a custom path in the text box or browse for a folder on your computer by clicking on the "Browse" button.
-- We recommend choosing a folder that has enough space to store your AVI file and that is easy to access and locate. For example, you can create a new folder on your desktop or in your documents folder and name it after your movie title.
-- In this tutorial, we will choose C:\Users\YourName\Desktop\The Matrix as our output folder.
-
- After you have selected the DVD source and the output folder for your conversion, click on the "Next" button at the bottom right corner of the window to proceed to the next step.
- How to preview and edit the video segments
- The next step is to preview and edit the video segments for your conversion. This step is optional, but it can help you improve the quality and accuracy of your conversion. You can preview each video segment and make adjustments to its resolution, aspect ratio, cropping, deinterlacing, etc. Here is how:
-
-- Click on the "Next" button at the bottom right corner of the window to access the video segment preview and editing window.
-- You will see a list of video segments that you have selected for your conversion on the left side of the window. You can click on each video segment to preview it on the right side of the window.
-- Below the preview window, you will see a toolbar with several buttons and sliders that allow you to edit the video segment. You can do the following actions:
-
- - Change the resolution of the video segment by clicking on the "Resolution" button and choosing a preset or a custom value.
- - Change the aspect ratio of the video segment by clicking on the "Aspect Ratio" button and choosing a preset or a custom value.
- - Crop the video segment by dragging the sliders on the top, bottom, left, and right edges of the preview window.
- - Deinterlace the video segment by clicking on the "Deinterlace" button and choosing a method.
- - Adjust the brightness, contrast, saturation, and gamma of the video segment by dragging the sliders below the preview window.
-
-- You can also compare the original and edited video segments by clicking on the "Compare" button at the bottom left corner of the window.
-- If you are satisfied with your edits, click on the "Apply" button at the bottom right corner of the window to save your changes. If you want to undo your edits, click on the "Reset" button at the bottom right corner of the window to restore the original settings.
-- You can repeat this process for each video segment that you want to edit. You can also skip this step if you don't want to edit any video segments.
-
- After you have previewed and edited the video segments for your conversion, click on the "Next" button at the bottom right corner of the window to proceed to the next step.
- How to start the conversion process and monitor the progress
- The final step is to start the conversion process and monitor its progress. This is where FairUse Wizard will use its powerful engine to convert your DVD movie to AVI file. Here is how:
-
-- Click on the "Next" button at the bottom right corner of the window to start the conversion process.
-- You will see a new window with a progress bar and some information about the conversion process, such as the current video segment, the elapsed time, the remaining time, the current bitrate, etc.
-- You can also see a preview of the output AVI file by clicking on the "Preview" button at the bottom left corner of the window.
-- You can pause or cancel the conversion process by clicking on the "Pause" or "Cancel" button at the bottom right corner of the window.
-- The conversion process may take a long time depending on the size and quality of your DVD movie and your computer specifications. We recommend that you do not use your computer for other tasks while the conversion process is running, as this may slow down or interfere with the conversion process.
-- When the conversion process is complete, you will see a message saying "Conversion Complete" and a summary of the conversion statistics, such as the output file size, the average bitrate, the total time, etc.
-- Click on the "OK" button to close the window and return to the main interface of FairUse Wizard 2.9 Full Edition.
-
- Congratulations! You have successfully converted your DVD movie to AVI file with FairUse Wizard 2.9 Full Edition. You can now find your AVI file in the output folder that you selected earlier. You can also play your AVI file with any media player that supports AVI format, such as VLC Media Player, Windows Media Player, etc.
- Tips and tricks
- In this section, we will provide some tips and tricks on how to optimize your conversion process and deal with some common issues that you may encounter while using FairUse Wizard 2.9 Full Edition.
- How to optimize the video quality and file size
- One of the main challenges of converting DVD movies to AVI files is finding the right balance between video quality and file size. You want your AVI file to have a high video quality that preserves the original details and colors of your DVD movie, but you also want your AVI file to have a small file size that saves space and bandwidth. Here are some tips on how to optimize the video quality and file size of your AVI file:
-
-- Use full auto mode if you are not sure about what settings to use for your conversion. Full auto mode will automatically detect and select the best settings for your conversion based on your DVD source and output file size.
-- Use two-pass encoding if you want to achieve a higher video quality with a lower bitrate. Two-pass encoding will analyze your DVD movie twice and adjust the bitrate accordingly to avoid wasting bits on simple scenes and allocate more bits on complex scenes. To enable two-pass encoding, check the box next to "Two Pass Encoding" in the Video Options section of the Project Options tab.
-- Use a lower resolution or a lower aspect ratio if you want to reduce the file size of your AVI file. A lower resolution or a lower aspect ratio will result in a smaller video frame that requires less bits to encode. However, this may also result in a loss of video quality or a distorted video image. To change the resolution or the aspect ratio of your video segment, click on the "Edit Segment" button in the Video Segments tab and choose a preset or a custom value from the drop-down menus.
-- Use a higher bitrate or a higher framerate if you want to increase the video quality of your AVI file. A higher bitrate or a higher framerate will result in a smoother and sharper video image that preserves more details and colors. However, this may also result in a larger file size that consumes more space and bandwidth. To change the bitrate or the framerate of your video segment, click on the "Edit Segment" button in the Video Segments tab and drag the sliders to adjust the values.
-- Use cropping and deinterlacing if you want to improve the video appearance of your AVI file. Cropping will remove the black bars or unwanted edges from your video image, making it more focused and clear. Deinterlacing will remove the horizontal lines or artifacts from your video image, making it more smooth and natural. To crop or deinterlace your video segment, click on the "Edit Segment" button in the Video Segments tab and drag the sliders or choose a method from the drop-down menu.
-
- By following these tips, you can optimize the video quality and file size of your AVI file according to your needs and preferences.
- How to deal with copy-protected DVDs
- Another challenge of converting DVD movies to AVI files is dealing with copy-protected DVDs. Copy-protected DVDs are DVDs that have encryption or other mechanisms that prevent unauthorized copying or playback. Some examples of copy-protection methods are CSS, ARccOS, RipGuard, etc. If you try to convert a copy-protected DVD with FairUse Wizard 2.9 Full Edition, you may encounter errors or failures.
- To deal with copy-protected DVDs, you need to use a third-party software program that can decrypt or bypass the copy-protection methods. Some examples of such software programs are DVD Decrypter, AnyDVD, DVDFab, etc. You can use these software programs to either rip your copy-protected DVD to your hard drive as an ISO image or a DVD folder, or run them in the background while using FairUse Wizard 2.9 Full Edition to convert your DVD movie to AVI file.
- We recommend using DVD Decrypter as it is free, easy to use, and compatible with FairUse Wizard 2.9 Full Edition. Here are some steps on how to use DVD Decrypter to rip your copy-protected DVD to your hard drive as an ISO image:
-
-- Download DVD Decrypter from this link:
-- Install DVD Decrypter on your computer and launch it.
-- Insert your copy-protected DVD into your DVD drive and select it as the source in DVD Decrypter.
-- Select "Mode" from the menu bar and choose "ISO" then "Read".
-- Select a destination folder where you want to save your ISO image on your hard drive.
-- Click on the "Decrypt" button at the bottom right corner of the window to start ripping your DVD.
-- Wait for the ripping process to complete and close DVD Decrypter.
-
- You can now use FairUse Wizard 2.9 Full Edition to convert your ISO image to AVI file by choosing ISO Image as your DVD source in the Project Options tab.
- How to convert multiple DVDs in batch mode
- If you have multiple DVDs that you want to convert to AVI files, you can use FairUse Wizard 2.9 Full Edition's batch mode feature. Batch mode allows you to create multiple projects and convert them one after another without any user intervention. This can save you time and effort as you don't have to manually start each conversion process.
- To use batch mode, you need to create each project separately and save it as a FUP file (FairUse Project File). You can then load all the FUP files into FairUse Wizard 2.9 Full Edition's batch mode window and start the batch conversion process. Here are some steps on how to use batch mode:
-
-- Create each project separately by following the steps in the previous sections of this article.
-- Repeat this process for each DVD that you want to convert to AVI file.
-- After you have created and saved all the projects that you want to convert, click on the "Batch Mode" tab in the main interface of FairUse Wizard 2.9 Full Edition.
-- You will see a window with a list of projects that you can load into batch mode. You can either drag and drop your FUP files into the window, or click on the "Add Project" button and browse for your FUP files on your computer.
-- You can also delete a project from the list by clicking on the "Delete Project" button, or move a project up or down in the list by clicking on the "Move Up" or "Move Down" button.
-- After you have loaded all the projects that you want to convert, click on the "Start Batch" button at the bottom right corner of the window to start the batch conversion process.
-- FairUse Wizard 2.9 Full Edition will convert each project one after another without any user intervention. You can see the progress and details of each conversion process in the Conversion Progress tab.
-- When the batch conversion process is complete, you will see a message saying "Batch Complete" and a summary of the batch conversion statistics, such as the number of projects converted, the total output file size, the average bitrate, the total time, etc.
-- Click on the "OK" button to close the window and return to the main interface of FairUse Wizard 2.9 Full Edition.
-
- Congratulations! You have successfully converted multiple DVDs to AVI files with FairUse Wizard 2.9 Full Edition's batch mode feature. You can now find your AVI files in the output folders that you selected earlier. You can also play your AVI files with any media player that supports AVI format, such as VLC Media Player, Windows Media Player, etc.
- Conclusion
- In this article, we have shown you how to use FairUse Wizard 2.9 Full Edition (Pre-Cracked) with serial key to convert a DVD movie to AVI file in a few simple steps. We have also provided some tips and tricks on how to optimize your conversion process and deal with some common issues. We hope that you have found this article helpful and informative, and that you have enjoyed using FairUse Wizard 2.9 Full Edition to convert your DVD movies to AVI files.
- FairUse Wizard 2.9 Full Edition is a powerful and user-friendly DVD ripper that can help you back up or watch your DVD movies on any device that supports AVI format. It offers many features and improvements over the previous versions, such as support for Windows 10, two-pass encoding, batch mode, subtitle options, chapter options, menu options, etc. It also comes with a pre-cracked version that does not require any activation or registration.
- If you want to download FairUse Wizard 2.9 Full Edition (Pre-Cracked) with serial key, you can use this link:
- If you want to learn more about FairUse Wizard 2.9 Full Edition or contact its support team, you can visit its official website:
- If you have any feedback or questions about this article or FairUse Wizard 2.9 Full Edition, please feel free to leave a comment below or send us an email at
- Thank you for reading this article and happy converting!
- FAQs
- Here are some frequently asked questions about FairUse Wizard 2.9 Full Edition and their answers:
- What are the differences between the free version and the full version of FairUse Wizard?
- The free version of FairUse Wizard is also known as the "Light" version. It has some limitations compared to the full version of FairUse Wizard, such as:
-
-- The maximum output file size is 700 MB
-- The maximum resolution is 720x576
-- The maximum framerate is 25 fps
-- The maximum bitrate is 1000 kbps
-- The subtitle options are disabled
-- The chapter options are disabled
-- The menu options are disabled
-
- The full version of FairUse Wizard does not have these limitations and offers more features and options for your conversion process.
- What are the supported video codecs and output formats of FairUse Wizard?
- FairUse Wizard supports three video codecs: DivX, Xvid , and h.264. These are the most popular and widely supported video codecs that can produce high-quality video files with low file sizes. You can choose the video codec that you prefer in the Project Options tab. FairUse Wizard only supports one output format: AVI. AVI is a container format that can store video and audio data encoded with different codecs. AVI is a common and compatible format that can be played on most devices and platforms. However, if you want to convert your AVI file to another format, such as MP4, MKV, MOV, etc., you can use a third-party software program, such as HandBrake, VLC Media Player, Format Factory, etc.
- How to play the converted AVI files on different devices and platforms?
- The converted AVI files can be played on any device or platform that supports AVI format and the video codec that you used for your conversion. However, some devices or platforms may have specific requirements or limitations for playing AVI files, such as resolution, bitrate, framerate, aspect ratio, etc. To ensure that your AVI files can be played smoothly and correctly on your desired device or platform, you may need to adjust some settings or use a compatible media player.
- Here are some examples of how to play the converted AVI files on different devices and platforms:
-
-- To play the converted AVI files on Windows PC, you can use any media player that supports AVI format and the video codec that you used for your conversion, such as Windows Media Player, VLC Media Player, Media Player Classic, etc. You may need to install the appropriate codec pack or plugin if your media player does not support the video codec that you used for your conversion.
-- To play the converted AVI files on Mac OS X, you can use VLC Media Player, which is a free and versatile media player that can play almost any video format and codec. You can download VLC Media Player from this link:
-- To play the converted AVI files on iOS devices, such as iPhone, iPad, iPod Touch, etc., you can use VLC for iOS, which is a free and powerful media player that can play most video formats and codecs. You can download VLC for iOS from this link:
-- To play the converted AVI files on Android devices, such as smartphones, tablets, etc., you can use MX Player, which is a free and popular media player that can play most video formats and codecs. You can download MX Player from this link:
-- To play the converted AVI files on game consoles, such as PlayStation 4, Xbox One, Nintendo Switch, etc., you may need to convert your AVI file to a compatible format, such as MP4 or MKV. You can use a third-party software program, such as HandBrake, to convert your AVI file to a compatible format. You can download HandBrake from this link:
-
- By following these examples, you can play your converted AVI files on different devices and platforms with ease and convenience.
- How to update FairUse Wizard to the latest version?
- FairUse Wizard is constantly updated with new features and improvements to enhance its performance and compatibility. To update FairUse Wizard to the latest version, you can do the following:
-
-- Launch FairUse Wizard 2.9 Full Edition from your desktop or start menu.
-- Select "Help" from the menu bar and choose "Check for Updates".
-- If there is a new version available, you will see a message saying "A new version of FairUse Wizard is available".
-- Click on the "Download" button to download the new version of FairUse Wizard.
-- Follow the instructions on the screen to install the new version of FairUse Wizard.
-- You may need to enter your serial key again during the installation process.
-
- You have now updated FairUse Wizard to the latest version. You can enjoy the new features and improvements of FairUse Wizard.
- How to contact FairUse Wizard support team?
- If you have any questions or problems regarding FairUse Wizard 2.9 Full Edition or its usage, you can contact FairUse Wizard support team by doing the following:
-
-- Launch FairUse Wizard 2.9 Full Edition from your desktop or start menu.
-- Select "Help" from the menu bar and choose "Contact Support".
-- You will see a window with a form where you can enter your name, email address, subject, and message.
-- Fill in the form with your details and describe your question or problem clearly and concisely.
-- Click on the "Send" button to send your message to FairUse Wizard support team.
-
- You will receive a confirmation email that your message has been sent. You will also receive a reply from FairUse Wizard support team within 24 hours.
- FairUse Wizard support team is friendly and helpful, and they will try their best to answer your questions or solve your problems.
-
-
\ No newline at end of file
diff --git a/spaces/stomexserde/gpt4-ui/Examples/JetBrains AppCode 2019.2 MacOS Free Download.md b/spaces/stomexserde/gpt4-ui/Examples/JetBrains AppCode 2019.2 MacOS Free Download.md
deleted file mode 100644
index 58729f1afaae9b3d217b7bd6b98062739fbc5e63..0000000000000000000000000000000000000000
--- a/spaces/stomexserde/gpt4-ui/Examples/JetBrains AppCode 2019.2 MacOS Free Download.md
+++ /dev/null
@@ -1,143 +0,0 @@
-
-JetBrains AppCode 2019.2 macOS Free Download
-If you are looking for a smart and powerful integrated development environment (IDE) for macOS development, you should consider JetBrains AppCode. In this article, we will tell you what JetBrains AppCode is, how to download it for free, how to use it, and what are its pros and cons.
-JetBrains AppCode 2019.2 macOS Free Download
Download File > https://urlgoal.com/2uI9XE
- Introduction
-JetBrains AppCode is an IDE for Swift, Objective-C, C, C++, and JavaScript development on macOS. It is based on the IntelliJ IDEA platform and supports iOS, macOS, watchOS, tvOS, Linux, and Windows applications.
-JetBrains AppCode has many features that make it a great choice for macOS development, such as:
-
-- Smart code completion and navigation
-- Advanced code analysis and refactoring
-- Powerful debugging and testing tools
-- Seamless integration with Xcode, CocoaPods, Carthage, Git, SVN, and other tools
-- Customizable user interface and editor
-- Cross-platform compatibility and support for multiple languages
-
-With JetBrains AppCode, you can write high-quality code faster, easier, and more efficiently. You can also enjoy a smooth and productive development experience with a reliable and user-friendly IDE.
-
- How to download JetBrains AppCode 2019.2 for macOS for free
-If you want to download JetBrains AppCode 2019.2 for macOS for free, you can do so from the official website of JetBrains. Here is the link to the download page:
-https://www.jetbrains.com/objc/download/#section=mac
-On this page, you can choose between two options: a free trial or a free community edition. The free trial gives you access to all the features of JetBrains AppCode for 30 days, while the free community edition gives you access to a limited set of features for non-commercial use.
-To download JetBrains AppCode 2019.2 for macOS for free, you need to meet the following system requirements:
-
-- macOS 10.13 or higher
-- 4 GB RAM minimum, 8 GB RAM recommended
-- 2.5 GB hard disk space minimum, SSD recommended
-- 1024x768 minimum screen resolution
-
-To install JetBrains AppCode 2019.2 for macOS, you need to follow these steps:
-
-- Download the .dmg file from the link above
-- Open the .dmg file and drag the AppCode.app icon to the Applications folder
-- Launch the AppCode.app from the Applications folder or the Launchpad
-- Follow the instructions on the screen to complete the installation process
-- Enjoy using JetBrains AppCode for macOS development
-
- How to use JetBrains AppCode 2019.2 for macOS
-Once you have installed JetBrains AppCode 2019.2 for macOS, you can start using it to create and develop your macOS applications. Here are some of the main features and tools that you can use with JetBrains AppCode 2019.2 for macOS:
- The user interface and the main features
-The user interface of JetBrains AppCode 2019.2 for macOS is similar to other JetBrains IDEs, such as IntelliJ IDEA or PyCharm. It consists of several components, such as:
-
-- The menu bar, which contains the main menus and commands
-- The toolbar, which contains the frequently used buttons and actions
-- The tool window bar, which contains the icons for the various tool windows
-- The editor area, which displays the code editor and the tabs for the open files
-- The status bar, which shows the information about the current file, project, and IDE state
-
-You can customize the user interface of JetBrains AppCode 2019.2 for macOS according to your preferences and needs. You can change the theme, the font size, the layout, the shortcuts, and the plugins. You can also switch between different modes, such as full screen mode, distraction free mode, presentation mode, or zen mode.
-Some of the main features that you can access from the user interface of JetBrains AppCode 2019.2 for macOS are:
-
-- The project view, which shows the structure and the files of your project
-- The navigation bar, which allows you to navigate through your project and files
-- The search and replace functionality, which lets you find and modify any text or code in your project
-- The run and debug functionality, which lets you execute and debug your application
-- The code generation functionality, which lets you create new classes, methods, variables, and other code elements
-- The refactoring functionality, which lets you change the structure and the name of your code elements
-- The code formatting functionality, which lets you apply a consistent style and indentation to your code
-- The code documentation functionality, which lets you add comments and annotations to your code
-
- The code editor and the code analysis tools
-The code editor of JetBrains AppCode 2019.2 for macOS is where you write and edit your code. It supports syntax highlighting, auto-completion, auto-indentation, code folding, code snippets, and multiple cursors. It also supports multiple languages, such as Swift, Objective-C, C, C++, JavaScript, HTML, CSS, XML, JSON, YAML, and more.
-JetBrains AppCode 2019.2 for macOS also provides several code analysis tools that help you write better code. These tools include:
-
-- The code inspection tool, which detects and highlights errors, warnings, typos, and other issues in your code
-- The quick-fix tool, which suggests and applies solutions to fix the detected issues in your code
-- The intention action tool, which suggests and performs context-sensitive actions to improve your code
-- The code completion tool, which suggests and inserts relevant code elements based on your context and preferences
-- The parameter info tool, which shows the information about the parameters of a method or a function
-- The documentation info tool, which shows the documentation of a code element or a symbol
-- The type info tool, which shows the type of a variable or an expression
-- The rename tool, which changes the name of a code element or a symbol across your project
-- The extract tool, which extracts a code fragment into a variable, a method, or a function
-- The inline tool, which replaces a variable, a method, or a function with its code fragment
-- The move tool, which moves a code element or a symbol to another file, class, or module
-- The change signature tool, which changes the parameters, the return type, or the name of a method or a function
-- The optimize imports tool, which organizes and removes unused imports in your code
-
- The debugging and testing tools
-JetBrains AppCode 2019.2 for macOS also provides several debugging and testing tools that help you find and fix errors and bugs in your code. These tools include:
-
-- The debugger tool, which lets you run your application in debug mode and inspect its state and behavior
-- The breakpoint tool, which lets you set and manage breakpoints in your code to pause the execution and examine the values
-- The watch tool, which lets you monitor the values of variables and expressions during the debugging session
-- The evaluate expression tool, which lets you execute any code or expression in the context of the current frame
-- The console tool, which lets you interact with your application using standard input and output
-- The testing tool, which lets you create and run unit tests and UI tests for your application using various frameworks, such as XCTest, Quick, Kiwi, Specta, and more
-- The test runner tool, which lets you manage and execute your tests and view the results and the coverage
-- The test recorder tool, which lets you record UI actions and generate UI tests automatically
-
- The project management and version control tools
-JetBrains AppCode 2019.2 for macOS also provides several project management and version control tools that help you organize and maintain your code. These tools include:
-
-- The project tool, which lets you create and configure your project settings, such as the SDK, the target platform, the build system, the dependencies, and more
-- The module tool, which lets you create and manage sub-projects within your project
-- The file template tool, which lets you create new files using predefined or custom templates
-- The live template tool, which lets you insert common code snippets using abbreviations
-- The file comparison tool, which lets you compare and merge two or more files or directories
-- The version control tool, which lets you integrate your project with various version control systems, such as Git, SVN, Mercurial, Perforce, and more
-- The commit tool, which lets you review and commit your changes to the version control system
-- The push tool, which lets you upload your commits to the remote repository
-- The pull tool, which lets you download and merge the changes from the remote repository
-- The branch tool, which lets you create and switch between different branches of your project
-- The merge tool, which lets you resolve conflicts and merge branches
-- The history tool, which lets you view and manage the history of your project and files
-- The annotate tool, which lets you view the author, date, and revision of each line of code
-
- Pros and cons of JetBrains AppCode 2019.2 for macOS
-JetBrains AppCode 2019.2 for macOS is a powerful and smart IDE for macOS development, but it also has some pros and cons that you should be aware of. Here are some of them:
- Pros
-
-- It supports multiple languages and platforms, such as Swift, Objective-C, C, C++, JavaScript, iOS, macOS, watchOS, tvOS, Linux, and Windows
-- It has advanced code analysis and refactoring tools that help you write high-quality code faster and easier
-- It has powerful debugging and testing tools that help you find and fix errors and bugs in your code
-- It has seamless integration with Xcode, CocoaPods, Carthage, Git, SVN, and other tools that you need for macOS development
-- It has a customizable user interface and editor that you can adjust to your preferences and needs
-- It has a free trial and a free community edition that you can download and use for free
-
- Cons
-
-- It requires more RAM and disk space than other IDEs, which may affect your system performance
-- It may have some compatibility issues with the latest versions of Xcode or macOS
-- It may have some bugs or glitches that may affect your development experience
-- It has a steep learning curve for beginners or users who are used to other IDEs
-- It has a limited set of features in the free community edition compared to the paid professional edition
-
- Conclusion
-In conclusion, JetBrains AppCode 2019.2 for macOS is a smart and powerful IDE for macOS development that offers many features and tools that can help you create and develop your macOS applications. It supports multiple languages and platforms, such as Swift, Objective-C, C, C++, JavaScript, iOS, macOS, watchOS, tvOS, Linux, and Windows. It also integrates with Xcode, CocoaPods, Carthage, Git, SVN, and other tools that you need for macOS development.
-If you want to try JetBrains AppCode 2019.2 for macOS for free, you can download it from the official website of JetBrains. You can choose between a free trial or a free community edition. The free trial gives you access to all the features of JetBrains AppCode for 30 days, while the free community edition gives you access to a limited set of features for non-commercial use.
-We hope that this article has given you some useful information about JetBrains AppCode 2019.2 for macOS and how to download it for free. If you have any questions or feedback, please feel free to leave a comment below. Thank you for reading!
- FAQs
-Here are some frequently asked questions and answers related to JetBrains AppCode 2019.2 for macOS:
- Q: How do I update JetBrains AppCode 2019.2 for macOS?
-A: You can update JetBrains AppCode 2019.2 for macOS by using the built-in update functionality. To do so, go to AppCode > Check for Updates in the menu bar. If there is a new version available, you can download and install it from there.
- Q: How do I uninstall JetBrains AppCode 2019.2 for macOS?
-A: You can uninstall JetBrains AppCode 2019.2 for macOS by using the standard uninstall procedure. To do so, go to the Applications folder and drag the AppCode.app icon to the Trash. Alternatively, you can use a third-party app cleaner tool to remove all the associated files and folders of JetBrains AppCode 2019.2 for macOS.
- Q: How do I activate JetBrains AppCode 2019.2 for macOS?
-A: You can activate JetBrains AppCode 2019.2 for macOS by using a license key or a JetBrains account. To do so, go to AppCode > Register in the menu bar. You can enter your license key or sign in with your JetBrains account to activate your product.
- Q: How do I contact JetBrains support for JetBrains AppCode 2019.2 for macOS?
-A: You can contact JetBrains support for JetBrains AppCode 2019.2 for macOS by using the online form, the email, or the phone. To do so, go to Help > Contact Support in the menu bar. You can fill out the online form, send an email to appcode-support@jetbrains.com, or call +1 617 398 7999.
- Q: How do I get more information about JetBrains AppCode 2019.2 for macOS?
-A: You can get more information about JetBrains AppCode 2019.2 for macOS by visiting the official website, the documentation, the blog, or the forum of JetBrains. To do so, go to Help > AppCode Help in the menu bar. You can access the official website, the documentation, the blog, or the forum of JetBrains from there.
-
-
\ No newline at end of file
diff --git a/spaces/sub314xxl/MusicGen/tests/__init__.py b/spaces/sub314xxl/MusicGen/tests/__init__.py
deleted file mode 100644
index 0952fcc3f57e34b3747962e9ebd6fc57aeea63fa..0000000000000000000000000000000000000000
--- a/spaces/sub314xxl/MusicGen/tests/__init__.py
+++ /dev/null
@@ -1,5 +0,0 @@
-# Copyright (c) Meta Platforms, Inc. and affiliates.
-# All rights reserved.
-#
-# This source code is licensed under the license found in the
-# LICENSE file in the root directory of this source tree.
diff --git a/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/Alan.Wake.v1.05.16.5341.Update-EyePatch Fixed Patch.md b/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/Alan.Wake.v1.05.16.5341.Update-EyePatch Fixed Patch.md
deleted file mode 100644
index e5c17f9757ee9aefcb0d1d235b891165bb516c4f..0000000000000000000000000000000000000000
--- a/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/Alan.Wake.v1.05.16.5341.Update-EyePatch Fixed Patch.md
+++ /dev/null
@@ -1,9 +0,0 @@
-Alan.Wake.v1.05.16.5341.Update-EyePatch Fixed Patch
Download ✸ https://cinurl.com/2uEZa2
-
-June 26, 2021 — Xln Audio Addictive Trigger v1.0.1 Mac Repack screenshot. Update-EyePatch. d299cc6e31 harry potter box set 720p tr dublaj filim.
-
-
-
diff --git a/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/Download NEW! Ntsd 2.6 Full Version 108 Tokyo Maquetas Rocket Regaetton}.md b/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/Download NEW! Ntsd 2.6 Full Version 108 Tokyo Maquetas Rocket Regaetton}.md
deleted file mode 100644
index d25a6134ac54eba1c7555e4ab2afd1030ca76365..0000000000000000000000000000000000000000
--- a/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/Download NEW! Ntsd 2.6 Full Version 108 Tokyo Maquetas Rocket Regaetton}.md
+++ /dev/null
@@ -1,6 +0,0 @@
-Download Ntsd 2.6 Full Version} 108 tokyo maquetas rocket regaetton
DOWNLOAD >>> https://cinurl.com/2uEXR0
-
-NTSD (Naruto The Setting Dawn) is a 2.5D fighting game using the mechanics of Little Fighter 2 with characters created by ... NTSD 2.6 download link: ... NTSD: Naruto The Setting Dawn 2.6 is now finally out for a full release!
-
-
-
diff --git a/spaces/surmensipa/VITS-Umamusume-voice-synthesizer/logs/Arnaldo Antunes - DiscografiaArnaldo Antunes - Discografia.md b/spaces/surmensipa/VITS-Umamusume-voice-synthesizer/logs/Arnaldo Antunes - DiscografiaArnaldo Antunes - Discografia.md
deleted file mode 100644
index 39d074df596433540c87fab0d60dd15e522e3046..0000000000000000000000000000000000000000
--- a/spaces/surmensipa/VITS-Umamusume-voice-synthesizer/logs/Arnaldo Antunes - DiscografiaArnaldo Antunes - Discografia.md
+++ /dev/null
@@ -1,44 +0,0 @@
-
-Arnaldo Antunes - Discografia
-Arnaldo Antunes is a Brazilian writer and composer who has been active since the late 1970s. He is known for his eclectic musical style, blending rock, pop, samba, rap, and experimental sounds. He has also collaborated with many other artists, such as Marisa Monte, Carlinhos Brown, Nando Reis, and Caetano Veloso.
-Arnaldo Antunes - DiscografiaArnaldo Antunes - Discografia
Download ✏ https://urluss.com/2uCH08
-His discography consists of 16 solo albums, 10 albums with the rock band Titãs, 4 albums with the trio Tribalistas, and several other projects and compilations. Here is a list of his main albums and their release dates:
-
-- Aguilar e Banda Performática (1979) - with Aguilar e Banda Performática
-- Titãs (1984) - with Titãs
-- Televisão (1985) - with Titãs
-- Cabeça Dinossauro (1986) - with Titãs
-- Jesus Não Tem Dentes no País dos Banguelas (1987) - with Titãs
-- Õ Blésq Blom (1989) - with Titãs
-- Tudo ao Mesmo Tempo Agora (1991) - with Titãs
-- Nome (1993) - solo debut
-- Titanomaquia (1993) - with Titãs
-- Ninguém (1995) - solo
-- Domingo (1995) - with Titãs
-- O Silêncio (1996) - solo
-- Um Sonho (1998) - solo
-- Volume Dois (1998) - with Titãs
-- O Corpo (2000) - solo
-- As Canções de Eu Tu Eles (2000) - soundtrack for the movie Eu Tu Eles
-- Paradeiro (2001) - solo
-- Tribalistas (2002) - with Tribalistas
-- Saiba (2004) - solo
-- A Curva da Cintura (2005) - with Edgard Scandurra and Toumani Diabaté
-- Qualquer (2006) - solo
-- Ao Vivo no Estúdio (2007) - live album
-- Iê Iê Iê (2009) - solo
-- Ao Vivo Lá em Casa (2010) - live album and DVD
-- Tribalistas ao Vivo (2012) - live album and DVD with Tribalistas
-- Disco (2012) - solo
-- Já É (2015) - solo
-- Nheengatu ao Vivo (2015) - live album and DVD with Titãs
-- RSTUVXZ (2018) - solo
-- Tribalistas 2017/2018 Tour (2019) - live album and DVD with Tribalistas
-
-
-If you want to know more about Arnaldo Antunes and his discography, you can visit his official website[^1^] or his page on Discogs. He is also active on social media platforms such as Instagram, Facebook, Twitter, and YouTube.
Arnaldo Antunes is not only a musician, but also a poet, visual artist, and performer. He has published several books of poetry, such as Psia (1986), Tudos (1990), As Coisas (1992), 2 ou + Corpos no Mesmo Espaço (1997), and Palavra Desordem (2012). He has also exhibited his artworks in galleries and museums, such as the São Paulo Museum of Modern Art, the Pinacoteca do Estado de São Paulo, and the Centro Cultural Banco do Brasil. His works explore the relationship between language, image, sound, and space.
-He has received many awards and honors for his artistic achievements, such as the Prêmio Jabuti for literature in 1993 and 2013, the Prêmio Multishow for music in 2007 and 2013, the Prêmio da Música Brasileira in 2013 and 2016, and the Latin Grammy Award for Best Portuguese Language Contemporary Pop Album in 2019. He has also been nominated for several other awards, such as the MTV Video Music Brasil, the Troféu Imprensa, and the APCA Award.
-Arnaldo Antunes is one of the most influential and versatile artists in Brazil. He has contributed to the development and innovation of Brazilian culture and art. He continues to create new works and collaborate with other artists from different fields and backgrounds. He is a true icon of Brazilian music and poetry.
-
-
\ No newline at end of file
diff --git a/spaces/surmensipa/VITS-Umamusume-voice-synthesizer/logs/Auto Clicker Murgee 1.9 Crack.md b/spaces/surmensipa/VITS-Umamusume-voice-synthesizer/logs/Auto Clicker Murgee 1.9 Crack.md
deleted file mode 100644
index a1e369a5a2b283066f8dc17b017f435203c123ac..0000000000000000000000000000000000000000
--- a/spaces/surmensipa/VITS-Umamusume-voice-synthesizer/logs/Auto Clicker Murgee 1.9 Crack.md
+++ /dev/null
@@ -1,13 +0,0 @@
-Auto Clicker Murgee 1.9 Crack
Download ✒ ✒ ✒ https://urluss.com/2uCFLy
-
-February 12, 2022 - Auto Typer by MurGee 1.9. There is no specific information about version 1.8. Visit the Auto Typer by MurGee home page, where you can find more information about version 1.9.
-MurGee Auto Typer free download is a utility for editing and creating PDF files.
-Auto Typer (Auto PDF) is a free program to view, create and print PDF files.
-Download program.
-Auto PDF program for Windows in Russian - free download.
-Auto PDF is a free program for viewing, creating and printing PDF files.
-The program has a nice and modern interface.
-Auto PDF Program.
-
-
-
diff --git a/spaces/sxunwashere/rvc-voice/README.md b/spaces/sxunwashere/rvc-voice/README.md
deleted file mode 100644
index f077cd85340c26ebfcb0857816d0f1f511408242..0000000000000000000000000000000000000000
--- a/spaces/sxunwashere/rvc-voice/README.md
+++ /dev/null
@@ -1,14 +0,0 @@
----
-title: Rvc Models
-emoji: 🎤
-colorFrom: red
-colorTo: blue
-sdk: gradio
-sdk_version: 3.27.0
-app_file: app.py
-pinned: false
-license: mit
-duplicated_from: ardha27/rvc-models
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/taesiri/ChatGPT-ImageCaptioner/demo.py b/spaces/taesiri/ChatGPT-ImageCaptioner/demo.py
deleted file mode 100644
index 183f6c44c4ca8cdf32d1c4b57bd75a11a07dcbde..0000000000000000000000000000000000000000
--- a/spaces/taesiri/ChatGPT-ImageCaptioner/demo.py
+++ /dev/null
@@ -1,206 +0,0 @@
-# Copyright (c) Facebook, Inc. and its affiliates.
-import argparse
-import glob
-import multiprocessing as mp
-import numpy as np
-import os
-import tempfile
-import time
-import warnings
-import cv2
-import tqdm
-import sys
-
-from detectron2.config import get_cfg
-from detectron2.data.detection_utils import read_image
-from detectron2.utils.logger import setup_logger
-
-sys.path.insert(0, 'third_party/CenterNet2/projects/CenterNet2/')
-sys.path.insert(0, 'third_party/CenterNet2/')
-
-from centernet.config import add_centernet_config
-from detic.config import add_detic_config
-
-from detic.predictor import VisualizationDemo
-
-
-# constants
-WINDOW_NAME = "Detic"
-
-def setup_cfg(args):
- cfg = get_cfg()
- add_centernet_config(cfg)
- add_detic_config(cfg)
- cfg.merge_from_file(args.config_file)
- cfg.merge_from_list(args.opts)
- # Set score_threshold for builtin models
- cfg.MODEL.RETINANET.SCORE_THRESH_TEST = args.confidence_threshold
- cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = args.confidence_threshold
- cfg.MODEL.PANOPTIC_FPN.COMBINE.INSTANCES_CONFIDENCE_THRESH = args.confidence_threshold
- cfg.MODEL.ROI_BOX_HEAD.ZEROSHOT_WEIGHT_PATH = 'rand' # load later
- if not args.pred_all_class:
- cfg.MODEL.ROI_HEADS.ONE_CLASS_PER_PROPOSAL = True
- cfg.freeze()
- return cfg
-
-
-def get_parser():
- parser = argparse.ArgumentParser(description="Detectron2 demo for builtin configs")
- parser.add_argument(
- "--config-file",
- default="configs/quick_schedules/mask_rcnn_R_50_FPN_inference_acc_test.yaml",
- metavar="FILE",
- help="path to config file",
- )
- parser.add_argument("--webcam", action="store_true", help="Take inputs from webcam.")
- parser.add_argument("--video-input", help="Path to video file.")
- parser.add_argument(
- "--input",
- nargs="+",
- help="A list of space separated input images; "
- "or a single glob pattern such as 'directory/*.jpg'",
- )
- parser.add_argument(
- "--output",
- help="A file or directory to save output visualizations. "
- "If not given, will show output in an OpenCV window.",
- )
- parser.add_argument(
- "--vocabulary",
- default="lvis",
- choices=['lvis', 'openimages', 'objects365', 'coco', 'custom'],
- help="",
- )
- parser.add_argument(
- "--custom_vocabulary",
- default="",
- help="",
- )
- parser.add_argument("--pred_all_class", action='store_true')
- parser.add_argument(
- "--confidence-threshold",
- type=float,
- default=0.5,
- help="Minimum score for instance predictions to be shown",
- )
- parser.add_argument(
- "--opts",
- help="Modify config options using the command-line 'KEY VALUE' pairs",
- default=[],
- nargs=argparse.REMAINDER,
- )
- return parser
-
-
-def test_opencv_video_format(codec, file_ext):
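-    # Probe codec support by writing a short dummy clip to a temporary file and checking that it was created.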
- with tempfile.TemporaryDirectory(prefix="video_format_test") as dir:
- filename = os.path.join(dir, "test_file" + file_ext)
- writer = cv2.VideoWriter(
- filename=filename,
- fourcc=cv2.VideoWriter_fourcc(*codec),
- fps=float(30),
- frameSize=(10, 10),
- isColor=True,
- )
- [writer.write(np.zeros((10, 10, 3), np.uint8)) for _ in range(30)]
- writer.release()
- if os.path.isfile(filename):
- return True
- return False
-
-
-if __name__ == "__main__":
- mp.set_start_method("spawn", force=True)
- args = get_parser().parse_args()
- setup_logger(name="fvcore")
- logger = setup_logger()
- logger.info("Arguments: " + str(args))
-
- cfg = setup_cfg(args)
-
- demo = VisualizationDemo(cfg, args)
-
- if args.input:
- if len(args.input) == 1:
- args.input = glob.glob(os.path.expanduser(args.input[0]))
- assert args.input, "The input path(s) was not found"
- for path in tqdm.tqdm(args.input, disable=not args.output):
- img = read_image(path, format="BGR")
- start_time = time.time()
- predictions, visualized_output = demo.run_on_image(img)
- logger.info(
- "{}: {} in {:.2f}s".format(
- path,
- "detected {} instances".format(len(predictions["instances"]))
- if "instances" in predictions
- else "finished",
- time.time() - start_time,
- )
- )
-
- if args.output:
- if os.path.isdir(args.output):
- assert os.path.isdir(args.output), args.output
- out_filename = os.path.join(args.output, os.path.basename(path))
- else:
- assert len(args.input) == 1, "Please specify a directory with args.output"
- out_filename = args.output
- visualized_output.save(out_filename)
- else:
- cv2.namedWindow(WINDOW_NAME, cv2.WINDOW_NORMAL)
- cv2.imshow(WINDOW_NAME, visualized_output.get_image()[:, :, ::-1])
- if cv2.waitKey(0) == 27:
- break # esc to quit
- elif args.webcam:
- assert args.input is None, "Cannot have both --input and --webcam!"
- assert args.output is None, "output not yet supported with --webcam!"
- cam = cv2.VideoCapture(0)
- for vis in tqdm.tqdm(demo.run_on_video(cam)):
- cv2.namedWindow(WINDOW_NAME, cv2.WINDOW_NORMAL)
- cv2.imshow(WINDOW_NAME, vis)
- if cv2.waitKey(1) == 27:
- break # esc to quit
- cam.release()
- cv2.destroyAllWindows()
- elif args.video_input:
- video = cv2.VideoCapture(args.video_input)
- width = int(video.get(cv2.CAP_PROP_FRAME_WIDTH))
- height = int(video.get(cv2.CAP_PROP_FRAME_HEIGHT))
- frames_per_second = video.get(cv2.CAP_PROP_FPS)
- num_frames = int(video.get(cv2.CAP_PROP_FRAME_COUNT))
- basename = os.path.basename(args.video_input)
- codec, file_ext = (
- ("x264", ".mkv") if test_opencv_video_format("x264", ".mkv") else ("mp4v", ".mp4")
- )
- if codec == ".mp4v":
- warnings.warn("x264 codec not available, switching to mp4v")
- if args.output:
- if os.path.isdir(args.output):
- output_fname = os.path.join(args.output, basename)
- output_fname = os.path.splitext(output_fname)[0] + file_ext
- else:
- output_fname = args.output
- assert not os.path.isfile(output_fname), output_fname
- output_file = cv2.VideoWriter(
- filename=output_fname,
- # some installation of opencv may not support x264 (due to its license),
- # you can try other format (e.g. MPEG)
- fourcc=cv2.VideoWriter_fourcc(*codec),
- fps=float(frames_per_second),
- frameSize=(width, height),
- isColor=True,
- )
- assert os.path.isfile(args.video_input)
- for vis_frame in tqdm.tqdm(demo.run_on_video(video), total=num_frames):
- if args.output:
- output_file.write(vis_frame)
- else:
- cv2.namedWindow(basename, cv2.WINDOW_NORMAL)
- cv2.imshow(basename, vis_frame)
- if cv2.waitKey(1) == 27:
- break # esc to quit
- video.release()
- if args.output:
- output_file.release()
- else:
- cv2.destroyAllWindows()
diff --git a/spaces/tdeshane/artists-of-data-science-chainlit/Dockerfile b/spaces/tdeshane/artists-of-data-science-chainlit/Dockerfile
deleted file mode 100644
index 67bc06e9dc2c3b1262e35d4fcba3187c9893b9d0..0000000000000000000000000000000000000000
--- a/spaces/tdeshane/artists-of-data-science-chainlit/Dockerfile
+++ /dev/null
@@ -1,11 +0,0 @@
-FROM python:3.9
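-# Create and run as a non-root user with uid 1000, the convention expected by Hugging Face Docker Spaces.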
-RUN useradd -m -u 1000 user
-USER user
-ENV HOME=/home/user \
- PATH=/home/user/.local/bin:$PATH
-WORKDIR $HOME/notebooks
-COPY --chown=user . $HOME/app
-COPY ./requirements.txt $HOME/app/requirements.txt
-RUN pip install -r $HOME/app/requirements.txt
-COPY . .
-CMD ["chainlit", "run", "notebooks/app.py", "--port", "7860"]
\ No newline at end of file
diff --git a/spaces/terfces0erbo/CollegeProjectV2/Chirita In Provincie.pdf.md b/spaces/terfces0erbo/CollegeProjectV2/Chirita In Provincie.pdf.md
deleted file mode 100644
index 2b7f951afb84d40c7e9fd9cf6adf80d6ffcbc99c..0000000000000000000000000000000000000000
--- a/spaces/terfces0erbo/CollegeProjectV2/Chirita In Provincie.pdf.md
+++ /dev/null
@@ -1,6 +0,0 @@
-Chirita In Provincie.pdf
Download Zip ⚙ https://bytlly.com/2uGjF4
-
-June 13, 2013 - Vasile Alexandri, Chirita in the Province. Topics: theater. Language: Romanian. In the provinces, among the hills and turbulent rivers, under the canopy of the forest, on the mountain slopes and in the valleys, in dense forests and shady river channels, in villages large and small, in cities and towns, interesting events were in full swing and dramas unfolded that excited, captivated and disturbed the people. The years of domination by the latifundists, which led to the impoverishment and ruin of the peasants, are behind us. 8a78ff9644
-
-
-
diff --git a/spaces/terfces0erbo/CollegeProjectV2/Faronics Anti-executable Standard 5.3 Full VERIFIED Serial Number.md b/spaces/terfces0erbo/CollegeProjectV2/Faronics Anti-executable Standard 5.3 Full VERIFIED Serial Number.md
deleted file mode 100644
index b94797ecea2019a628aac8ffcbdb8a20b0c9a3a5..0000000000000000000000000000000000000000
--- a/spaces/terfces0erbo/CollegeProjectV2/Faronics Anti-executable Standard 5.3 Full VERIFIED Serial Number.md
+++ /dev/null
@@ -1,6 +0,0 @@
-faronics anti-executable standard 5.3 full serial number
Download Zip — https://bytlly.com/2uGjGW
-
-Faronics Anti-Executable Standard 5.50.1111.655 Crack With Activator Latest 2020 ... Faronics Anti-Executable Standard Crack With Activation Code Latest 2020 ... As soon as you start taking full advantage of its functions, you may really grow fond of it. ... keygen for Faronics Anti-Executable Standard for ... 4d29de3e1b
-
-
-
diff --git a/spaces/tfwang/PITI-Synthesis/glide_text2im/train_util.py b/spaces/tfwang/PITI-Synthesis/glide_text2im/train_util.py
deleted file mode 100644
index 2b101fd029fd55cbea410c1cd97c67a0a30ce9d6..0000000000000000000000000000000000000000
--- a/spaces/tfwang/PITI-Synthesis/glide_text2im/train_util.py
+++ /dev/null
@@ -1,475 +0,0 @@
-import copy
-import functools
-import os
-
-import blobfile as bf
-import numpy as np
-import torch as th
-import torch.distributed as dist
-from torch.nn.parallel.distributed import DistributedDataParallel as DDP
-from torch.optim import AdamW
-from .glide_util import sample
-from . import logger
-from .fp16_util import (
- make_master_params,
- master_params_to_model_params,
- model_grads_to_master_grads,
- unflatten_master_params,
- zero_grad,
-)
-from .nn import update_ema
-from .vgg import VGG
-from .adv import AdversarialLoss
-from .resample import LossAwareSampler, UniformSampler
-import glob
-import torchvision.utils as tvu
-import PIL.Image as Image
-# For ImageNet experiments, this was a good default value.
-# We found that the lg_loss_scale quickly climbed to
-# 20-21 within the first ~1K steps of training.
-INITIAL_LOG_LOSS_SCALE = 20.0
-
-
-
-class TrainLoop:
- def __init__(
- self,
- model,
- glide_options,
- diffusion,
- data,
- val_data,
- batch_size,
- microbatch,
- lr,
- ema_rate,
- log_interval,
- save_interval,
- resume_checkpoint,
- use_fp16=False,
- fp16_scale_growth=1e-3,
- schedule_sampler=None,
- weight_decay=0.0,
- lr_anneal_steps=0,
- finetune_decoder = False,
- mode = '',
- use_vgg = False,
- use_gan = False,
- uncond_p = 0,
- super_res = 0,
- ):
- self.model = model
- self.glide_options=glide_options
- self.diffusion = diffusion
- self.data = data
- self.val_data=val_data
- self.batch_size = batch_size
- self.microbatch = microbatch if microbatch > 0 else batch_size
- self.lr = lr
- self.ema_rate = (
- [ema_rate]
- if isinstance(ema_rate, float)
- else [float(x) for x in ema_rate.split(",")]
- )
- self.log_interval = log_interval
- self.save_interval = save_interval
- self.resume_checkpoint = find_resume_checkpoint(resume_checkpoint)
- self.use_fp16 = use_fp16
- self.fp16_scale_growth = fp16_scale_growth
- self.schedule_sampler = schedule_sampler or UniformSampler(diffusion)
- self.weight_decay = weight_decay
- self.lr_anneal_steps = lr_anneal_steps
- self.step = 0
- self.resume_step = 0
- self.global_batch = self.batch_size * dist.get_world_size()
-
- if use_vgg:
- self.vgg = VGG(conv_index='22').cuda()
- print('use perc')
- else:
- self.vgg = None
-
- if use_gan:
- self.adv = AdversarialLoss()
- print('use adv')
- else:
- self.adv = None
-
- self.super_res = super_res
-
- self.uncond_p =uncond_p
- self.mode = mode
-
- self.finetune_decoder = finetune_decoder
- if finetune_decoder:
- self.optimize_model = self.model
- else:
- self.optimize_model = self.model.encoder
-
- self.model_params = list(self.optimize_model.parameters())
- self.master_params = self.model_params
- self.lg_loss_scale = INITIAL_LOG_LOSS_SCALE
- self.sync_cuda = th.cuda.is_available()
- self._load_and_sync_parameters()
- if self.use_fp16:
- self._setup_fp16()
-
- self.opt = AdamW(self.master_params, lr=self.lr, weight_decay=self.weight_decay)
- if self.resume_step:
- self._load_optimizer_state()
- # Model was resumed, either due to a restart or a checkpoint
- # being specified at the command line.
- self.ema_params = [
- self._load_ema_parameters(rate) for rate in self.ema_rate
- ]
- else:
- self.ema_params = [
- copy.deepcopy(self.master_params) for _ in range(len(self.ema_rate))
- ]
-
- if th.cuda.is_available():
- self.use_ddp = True
- self.ddp_model = DDP(
- self.model,
-                device_ids=[th.cuda.current_device()],
-                output_device=th.cuda.current_device(),
- broadcast_buffers=False,
- bucket_cap_mb=128,
- find_unused_parameters=False,
- )
- else:
- if dist.get_world_size() > 1:
- logger.warn(
- "Distributed training requires CUDA. "
- "Gradients will not be synchronized properly!"
- )
- self.use_ddp = False
- self.ddp_model = self.model
-
- def _load_and_sync_parameters(self):
- resume_checkpoint = self.resume_checkpoint
-
- if resume_checkpoint:
- self.resume_step = parse_resume_step_from_filename(resume_checkpoint)
- if dist.get_rank() == 0:
- logger.log(f"loading model from checkpoint: {resume_checkpoint}...")
- self.model.load_state_dict(th.load(resume_checkpoint, map_location="cpu"),strict=False)
-
- #dist_util.sync_params(self.model.parameters())
-
- def _load_ema_parameters(self, rate):
- ema_params = copy.deepcopy(self.master_params)
-
- main_checkpoint = self.resume_checkpoint
- ema_checkpoint = find_ema_checkpoint(main_checkpoint, self.resume_step, rate)
- if ema_checkpoint:
- if dist.get_rank() == 0:
- logger.log(f"loading EMA from checkpoint: {ema_checkpoint}...")
-            state_dict = th.load(ema_checkpoint, map_location=th.device('cuda'))
- ema_params = self._state_dict_to_master_params(state_dict)
-
- #dist_util.sync_params(ema_params)
- return ema_params
-
- def _load_optimizer_state(self):
- main_checkpoint = self.resume_checkpoint
- opt_checkpoint = bf.join(
- bf.dirname(main_checkpoint), f"opt{self.resume_step:06}.pt"
- )
- if bf.exists(opt_checkpoint):
- logger.log(f"loading optimizer state from checkpoint: {opt_checkpoint}")
- state_dict = th.load(opt_checkpoint, map_location="cpu")
- try:
- self.opt.load_state_dict(state_dict)
-            except Exception:
-                # The saved optimizer state may not match the current parameter groups; start with a fresh optimizer.
-                pass
-
- def _setup_fp16(self):
- self.master_params = make_master_params(self.model_params)
- self.model.convert_to_fp16()
-
- def run_loop(self):
- while (
- not self.lr_anneal_steps
- or self.step <= self.lr_anneal_steps
- ):
-
- batch, model_kwargs = next(self.data)
-
- # uncond_p = 0
- # if self.super_res:
- # uncond_p = 0
- # elif self.finetune_decoder:
- # uncond_p = self.uncond_p
- # elif self.step > self.lr_anneal_steps - 40000:
- # uncond_p = self.uncond_p
-
- self.run_step(batch, model_kwargs)
- if self.step % self.log_interval == 0:
- logger.dumpkvs()
- if self.step % self.save_interval == 0:
- self.save()
- self.val(self.step)
- self.step += 1
-
- if (self.step - 1) % self.save_interval != 0:
- self.save()
-
-
- def run_step(self, batch, model_kwargs):
- self.forward_backward(batch, model_kwargs)
- if self.use_fp16:
- self.optimize_fp16()
- else:
- self.optimize_normal()
- self.log_step()
-
- def forward_backward(self, batch, model_kwargs):
- zero_grad(self.model_params)
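-        # Accumulate gradients over microbatches; the optimizer step is taken afterwards in run_step().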
- for i in range(0, batch.shape[0], self.microbatch):
- micro = batch[i : i + self.microbatch].cuda()
- micro_cond={n:model_kwargs[n][i:i+self.microbatch].cuda() for n in model_kwargs if n in ['ref', 'low_res']}
- last_batch = (i + self.microbatch) >= batch.shape[0]
-            t, weights = self.schedule_sampler.sample(micro.shape[0], th.device('cuda'))
-
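-            # Warm-up: skip the perceptual (VGG) and adversarial losses for the first 100 steps.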
- if self.step <100:
- vgg_loss = None
- adv_loss = None
- else:
- vgg_loss = self.vgg
- adv_loss = self.adv
- compute_losses = functools.partial(
- self.diffusion.training_losses,
- self.ddp_model,
- micro,
- t,
- vgg_loss,
- adv_loss,
- model_kwargs=micro_cond,
- )
-
- if last_batch or not self.use_ddp:
- losses = compute_losses()
- else:
- with self.ddp_model.no_sync():
- losses = compute_losses()
-
- if isinstance(self.schedule_sampler, LossAwareSampler):
- self.schedule_sampler.update_with_local_losses(
- t, losses["loss"].detach()
- )
-
- loss = (losses["loss"] * weights).mean()
- log_loss_dict(
- self.diffusion, t, {k: v * weights for k, v in losses.items()}
- )
- if self.use_fp16:
- loss_scale = 2 ** self.lg_loss_scale
- (loss * loss_scale).backward()
- else:
- loss.backward()
-
- def val(self, step):
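-        # Sample images with the current model and save output / ground-truth / reference triplets for inspection.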
-        inner_model = self.ddp_model.module if self.use_ddp else self.ddp_model
- inner_model.eval()
- if dist.get_rank() == 0:
- print("sampling...")
-
- s_path = os.path.join(logger.get_dir(), 'results')
- os.makedirs(s_path,exist_ok=True)
- img_id = 0
- guidance_scale=self.glide_options['sample_c']
-
-
-        while True:
- if img_id >= self.glide_options['num_samples']:
- break
-
- batch, model_kwargs = next(self.val_data)
- with th.no_grad():
- samples=sample(
- glide_model=inner_model,
- glide_options=self.glide_options,
- side_x=self.glide_options['image_size'],
- side_y=self.glide_options['image_size'],
- prompt=model_kwargs,
- batch_size=self.glide_options['batch_size']//2,
- guidance_scale=guidance_scale,
-                    device=th.device('cuda'),
- prediction_respacing=self.glide_options['sample_respacing'],
- upsample_enabled=self.glide_options['super_res'],
- upsample_temp=0.997,
- mode = self.mode,
- )
-
- samples = samples.cpu()
-
- ref = model_kwargs['ref_ori']
- # LR = model_kwargs['low_res'].cpu()
-
- for i in range(samples.size(0)):
- out_path = os.path.join(s_path, f"{dist.get_rank()}_{img_id}_step{step}_{guidance_scale}_output.png")
- tvu.save_image(
- (samples[i]+1)*0.5, out_path)
-
- out_path = os.path.join(s_path, f"{dist.get_rank()}_{img_id}_step{step}_{guidance_scale}_gt.png")
- tvu.save_image(
- (batch[i]+1)*0.5, out_path)
-
- out_path = os.path.join(s_path, f"{dist.get_rank()}_{img_id}_step{step}_{guidance_scale}_ref.png")
- tvu.save_image(
- (ref[i]+1)*0.5, out_path)
-
- # out_path = os.path.join(s_path, f"{dist.get_rank()}_{img_id}_step{step}_{guidance_scale}_lr.png")
- # tvu.save_image(
- # (LR[i]+1)*0.5, out_path)
-
- img_id += 1
- inner_model.train()
-
-
- def optimize_fp16(self):
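-        # Dynamic loss scaling: if any gradient is non-finite, skip this step and lower the scale.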
- if any(not th.isfinite(p.grad).all() for p in self.model_params):
- self.lg_loss_scale -= 1
- logger.log(f"Found NaN, decreased lg_loss_scale to {self.lg_loss_scale}")
- return
-
- model_grads_to_master_grads(self.model_params, self.master_params)
- self.master_params[0].grad.mul_(1.0 / (2 ** self.lg_loss_scale))
- self._log_grad_norm()
- self._anneal_lr()
- self.opt.step()
- for rate, params in zip(self.ema_rate, self.ema_params):
- update_ema(params, self.master_params, rate=rate)
- master_params_to_model_params(self.model_params, self.master_params)
- self.lg_loss_scale += self.fp16_scale_growth
-
- def optimize_normal(self):
- self._log_grad_norm()
- self._anneal_lr()
- self.opt.step()
- for rate, params in zip(self.ema_rate, self.ema_params):
- update_ema(params, self.master_params, rate=rate)
-
- def _log_grad_norm(self):
- sqsum = 0.0
- for p in self.master_params:
- sqsum += (p.grad ** 2).sum().item()
- logger.logkv_mean("grad_norm", np.sqrt(sqsum))
-
- def _anneal_lr(self):
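-        # Learning-rate annealing is a no-op in this trainer.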
- return
-
-
- def log_step(self):
- logger.logkv("step", self.step + self.resume_step)
- logger.logkv("samples", (self.step + self.resume_step + 1) * self.global_batch)
- if self.use_fp16:
- logger.logkv("lg_loss_scale", self.lg_loss_scale)
-
- def save(self):
- def save_checkpoint(rate, params):
- state_dict = self._master_params_to_state_dict(params)
- if dist.get_rank() == 0:
- logger.log(f"saving model {rate}...")
- if not rate:
- filename = f"model{(self.step+self.resume_step):06d}.pt"
- else:
- filename = f"ema_{rate}_{(self.step+self.resume_step):06d}.pt"
- with bf.BlobFile(bf.join(get_blob_logdir(), filename), "wb") as f:
- th.save(state_dict, f)
-
- save_checkpoint(0, self.master_params)
- for rate, params in zip(self.ema_rate, self.ema_params):
- save_checkpoint(rate, params)
-
- if dist.get_rank() == 0:
- with bf.BlobFile(
- bf.join(get_blob_logdir(), f"opt{(self.step+self.resume_step):06d}.pt"),
- "wb",
- ) as f:
- th.save(self.opt.state_dict(), f)
-
- dist.barrier()
-
- def _master_params_to_state_dict(self, master_params):
- if self.use_fp16:
- master_params = unflatten_master_params(
- list(self.optimize_model.parameters()), master_params
- )
- state_dict = self.optimize_model.state_dict()
- for i, (name, _value) in enumerate(self.optimize_model.named_parameters()):
- assert name in state_dict
- state_dict[name] = master_params[i]
- return state_dict
-
- def _state_dict_to_master_params(self, state_dict):
- params = [state_dict[name] for name, _ in self.optimize_model.named_parameters()]
- if self.use_fp16:
- return make_master_params(params)
- else:
- return params
-
-
-def parse_resume_step_from_filename(filename):
- """
- Parse filenames of the form path/to/modelNNNNNN.pt, where NNNNNN is the
- checkpoint's number of steps.
- """
- filename=filename.split('/')[-1]
- assert(filename.endswith(".pt"))
- filename=filename[:-3]
- if filename.startswith("model"):
- split = filename[5:]
- elif filename.startswith("ema"):
- split = filename.split("_")[-1]
- else:
- return 0
- try:
- return int(split)
- except ValueError:
- return 0
-
-
-def get_blob_logdir():
- p=os.path.join(logger.get_dir(),"checkpoints")
- os.makedirs(p,exist_ok=True)
- return p
-
-def find_resume_checkpoint(resume_checkpoint):
- # On your infrastructure, you may want to override this to automatically
- # discover the latest checkpoint on your blob storage, etc.
- if not resume_checkpoint:
- return None
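-    # "ROOT" and "LATEST" are placeholders: ROOT expands to a checkpoints directory, LATEST to the newest matching *.pt.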
- if "ROOT" in resume_checkpoint:
- maybe_root=os.environ.get("AMLT_MAP_INPUT_DIR")
- maybe_root="OUTPUT/log" if not maybe_root else maybe_root
- root=os.path.join(maybe_root,"checkpoints")
- resume_checkpoint=resume_checkpoint.replace("ROOT",root)
- if "LATEST" in resume_checkpoint:
- files=glob.glob(resume_checkpoint.replace("LATEST","*.pt"))
- if not files:
- return None
- return max(files,key=parse_resume_step_from_filename)
- return resume_checkpoint
-
-
-
-def find_ema_checkpoint(main_checkpoint, step, rate):
- if main_checkpoint is None:
- return None
- filename = f"ema_{rate}_{(step):06d}.pt"
- path = bf.join(bf.dirname(main_checkpoint), filename)
- if bf.exists(path):
- return path
- return None
-
-
-def log_loss_dict(diffusion, ts, losses):
- for key, values in losses.items():
- logger.logkv_mean(key, values.mean().item())
- # Log the quantiles (four quartiles, in particular).
- for sub_t, sub_loss in zip(ts.cpu().numpy(), values.detach().cpu().numpy()):
- quartile = int(4 * sub_t / diffusion.num_timesteps)
- logger.logkv_mean(f"{key}_q{quartile}", sub_loss)
-
diff --git a/spaces/theaster/imoitari/README.md b/spaces/theaster/imoitari/README.md
deleted file mode 100644
index d23015aac4d15a255932f848e3bacd8152240130..0000000000000000000000000000000000000000
--- a/spaces/theaster/imoitari/README.md
+++ /dev/null
@@ -1,11 +0,0 @@
----
-title: imoitari
-emoji: 🏆
-colorFrom: yellow
-colorTo: green
-sdk: docker
-pinned: false
-duplicated_from: username84/g
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/thiagolira/ChatPequenoPrincipe/app.py b/spaces/thiagolira/ChatPequenoPrincipe/app.py
deleted file mode 100644
index 913b998a65c948c1aeb357b1098264130fe3cd12..0000000000000000000000000000000000000000
--- a/spaces/thiagolira/ChatPequenoPrincipe/app.py
+++ /dev/null
@@ -1,79 +0,0 @@
-import os
-from typing import Optional, Tuple
-
-import gradio as gr
-from query_data import get_chain
-from threading import Lock
-from langchain.vectorstores.faiss import FAISS
-from langchain.embeddings import OpenAIEmbeddings
-
-embeddings = OpenAIEmbeddings()
-vectorstore = FAISS.load_local('.', embeddings)
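-# Load a FAISS index from the Space root; presumably built offline from the book text and shipped with the app.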
-
-# init chain
-chain = get_chain(vectorstore)
-
-class ChatWrapper:
-
- def __init__(self):
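-        # A lock serializes calls to the chain so concurrent Gradio requests do not interleave.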
- self.lock = Lock()
- def __call__(
- self, inp: str, history: Optional[Tuple[str, str]]
- ):
- """Execute the chat functionality."""
- self.lock.acquire()
- try:
- history = history or []
- # Set OpenAI key
- import openai
- openai.api_key = os.environ['OPENAI_API_KEY']
- # Run chain and append input.
- output = chain({"question": inp, "chat_history": history})["answer"]
- history.append((inp, output))
- except Exception as e:
- raise e
- finally:
- self.lock.release()
- return history, history
-
-chat = ChatWrapper()
-
-block = gr.Blocks(css=".gradio-container {background-color: lightgray}")
-
-with block:
-
- markdown = '''
- Chat with The Little Prince!
- With this app you can ask questions directly to the book The Little Prince.
-
- '''
- gr.Markdown(markdown)
-
- chatbot = gr.Chatbot()
-
- with gr.Row():
- message = gr.Textbox(
- label="What's your question?",
- placeholder="Ask questions about The Little Prince",
- lines=1,
- )
- submit = gr.Button(value="Send", variant="secondary").style(full_width=False)
-
- gr.Examples(
- examples=[
- "How can you befriend the little fox?"
- ],
- inputs=message,
- )
-
- gr.HTML(
- "Powered by LangChain 🦜️🔗"
- )
-
- state = gr.State()
-
- submit.click(chat, inputs=[message, state], outputs=[chatbot, state])
- message.submit(chat, inputs=[message, state], outputs=[chatbot, state])
-
-
-block.launch(debug=True)
diff --git a/spaces/threadxl/bingo/README.md b/spaces/threadxl/bingo/README.md
deleted file mode 100644
index 5d6936218874c647b5d22e13ad4be7edb8936f92..0000000000000000000000000000000000000000
--- a/spaces/threadxl/bingo/README.md
+++ /dev/null
@@ -1,28 +0,0 @@
----
-title: bingo
-emoji: 😊
-colorFrom: red
-colorTo: red
-sdk: docker
-license: mit
-duplicated_from: hf4all/bingo
----
-
-
-
-# Bingo
-
-Bingo, a New Bing client that lets you breathe easy.
-
-It closely reproduces the main features of the New Bing web UI, works from mainland China, is compatible with most Microsoft Bing AI features, and can be self-hosted.
-
-
-
-[](https://hub.docker.com/repository/docker/weaigc/bingo/)
-[](https://hub.docker.com/repository/docker/weaigc/bingo/)
-[](https://github.com/weaigc/bingo/blob/main/license)
-
-Please report issues at https://github.com/weaigc/bingo/issues
-
-
-
diff --git a/spaces/tialenAdioni/chat-gpt-api/logs/Assassins creed iv black flag patch 1.07 Whats new and how to download it.md b/spaces/tialenAdioni/chat-gpt-api/logs/Assassins creed iv black flag patch 1.07 Whats new and how to download it.md
deleted file mode 100644
index 0df9bb42be3ae1ff6084f2ccb01a470d246e4ac2..0000000000000000000000000000000000000000
--- a/spaces/tialenAdioni/chat-gpt-api/logs/Assassins creed iv black flag patch 1.07 Whats new and how to download it.md
+++ /dev/null
@@ -1,93 +0,0 @@
-
-Assassins Creed IV Black Flag Patch 1.07: What's New and How to Download
-
-Assassins Creed IV Black Flag is one of the most popular action-adventure games set in the Caribbean during the Golden Age of Piracy. The game features an open world that players can explore as Edward Kenway, a pirate and assassin who gets involved in a conflict between the Assassins and the Templars.
-Assassins creed iv black flag patch 1.07
Download ✫ https://urlcod.com/2uKb1a
-
-If you are a fan of this game, you might be interested in the latest update that Ubisoft has released for it. Assassins Creed IV Black Flag Patch 1.07 is a sizeable single-player update that weighs in at 2.3GB and fixes several elements of the campaign[^1^]. Here are some of the highlights of this patch and how you can download it for your PC or console.
-
-What's New in Assassins Creed IV Black Flag Patch 1.07?
-
-This patch addresses various issues and bugs that affected the gameplay, graphics, sound, user interface, and other aspects of the game. Some of the most notable changes are:
-
-
-- Fixed the situation where the user could not kill or stun while both assassin and target were near a corner
-- Fixed the situation where the character froze for a brief moment before being killed by an assassin hidden in a haystack
-- Fixed the situation where some zones could not be captured from an inclined terrain
-- Fixed inconsistencies on the score gauge in Wolfpack mode
-- Fixed various issues causing crashes and freezes
-- Fixed issues pushing players into limited mode
-- Fixed various issues on the HUD, menus, contract system, death cam, etc.
-- Improved graphics and sound effects
-
-
-You can find the full patch notes on the Ubisoft forum[^1^]. This patch applies to the PC, PS3, PS4, Xbox 360 and Xbox One versions of the game.
-
-How to Download Assassins Creed IV Black Flag Patch 1.07?
-
-If you want to enjoy the improved performance and stability of Assassins Creed IV Black Flag, you need to download and install this patch on your device. The process is simple and depends on your platform:
-
-
-- For PC users, you need to launch Uplay and log in to your account. The patch will be automatically downloaded and installed when you start the game.
-- For PS3 and PS4 users, you need to connect your console to the internet and launch the game. The patch will be automatically downloaded and installed when you start the game.
-- For Xbox 360 and Xbox One users, you need to connect your console to the internet and launch the game. The patch will be automatically downloaded and installed when you start the game.
-
-
-If you have any problems with downloading or installing this patch, you can contact Ubisoft support for assistance.
-
-Conclusion
-
-Assassins Creed IV Black Flag Patch 1.07 is a welcome update that enhances the gameplay experience and fixes various issues that players encountered. If you are a fan of this game, you should download this patch as soon as possible and enjoy the new features and improvements.
-How to download and install Assassins creed iv black flag patch 1.07
-Assassins creed iv black flag patch 1.07 fixes and improvements
-Assassins creed iv black flag patch 1.07 release date and notes
-Assassins creed iv black flag patch 1.07 gameplay and performance
-Assassins creed iv black flag patch 1.07 compatibility and requirements
-Assassins creed iv black flag patch 1.07 review and rating
-Assassins creed iv black flag patch 1.07 download size and speed
-Assassins creed iv black flag patch 1.07 error and bug solutions
-Assassins creed iv black flag patch 1.07 mod and cheat support
-Assassins creed iv black flag patch 1.07 multiplayer and online features
-Assassins creed iv black flag patch 1.07 graphics and sound quality
-Assassins creed iv black flag patch 1.07 new content and missions
-Assassins creed iv black flag patch 1.07 best settings and tips
-Assassins creed iv black flag patch 1.07 comparison and difference
-Assassins creed iv black flag patch 1.07 trailer and screenshots
-Assassins creed iv black flag patch 1.07 free download link and guide
-Assassins creed iv black flag patch 1.07 system update and verification
-Assassins creed iv black flag patch 1.07 backup and restore options
-Assassins creed iv black flag patch 1.07 uninstall and reinstall steps
-Assassins creed iv black flag patch 1.07 achievements and trophies
-Assassins creed iv black flag patch 1.07 story and characters
-Assassins creed iv black flag patch 1.07 weapons and outfits
-Assassins creed iv black flag patch 1.07 ships and naval combat
-Assassins creed iv black flag patch 1.07 exploration and collectibles
-Assassins creed iv black flag patch 1.07 customization and upgrades
-Assassins creed iv black flag patch 1.07 difficulty and challenges
-Assassins creed iv black flag patch 1.07 secrets and easter eggs
-Assassins creed iv black flag patch 1.07 fun facts and trivia
-Assassins creed iv black flag patch 1.07 fan art and memes
-Assassins creed iv black flag patch 1.07 merchandise and deals
-Assassins creed iv black flag patch 1.07 history and lore
-Assassins creed iv black flag patch 1.07 soundtrack and songs
-Assassins creed iv black flag patch 1.07 voice actors and cast
-Assassins creed iv black flag patch 1.07 developer and publisher
-Assassins creed iv black flag patch 1.07 awards and nominations
-Assassins creed iv black flag patch 1.07 sales and revenue
-Assassins creed iv black flag patch 1.07 sequel and prequel rumors
-Assassins creed iv black flag patch 1.07 crossover and spin-off ideas
-Assassins creed iv black flag patch 1.07 remake and remaster possibilities
-Assassins creed iv black flag patch 1.07 VR and AR support
-How to play assassins creed IV Black Flag without Patching it to version 107?
-What are the benefits of updating assassins Creed IV Black Flag to Patch version one point zero seven?
-Where can I find the official changelog for assassins Creed IV Black Flag Patch one point zero seven?
-How to fix assassins Creed IV Black Flag crashing after installing Patch one point zero seven?
-How to enable assassins Creed IV Black Flag DLCs with Patch one point zero seven?
-How to disable assassins Creed IV Black Flag auto-update feature for Patch one point zero seven?
-How to revert assassins Creed IV Black Flag to an earlier version before Patch one point zero seven?
-How to troubleshoot assassins Creed IV Black Flag Patch one point zero seven installation issues?
-How to optimize assassins Creed IV Black Flag for better performance after Patch one point zero seven?
-
-If you want to learn more about Assassins Creed IV Black Flag, you can check out our other articles on this game or visit our website for more information.
e753bf7129
-
-
\ No newline at end of file
diff --git a/spaces/tialenAdioni/chat-gpt-api/logs/Awm 2725 Vw-1 60 C 30v USB Cable Driver Why You Should Buy It Now.md b/spaces/tialenAdioni/chat-gpt-api/logs/Awm 2725 Vw-1 60 C 30v USB Cable Driver Why You Should Buy It Now.md
deleted file mode 100644
index 2afb31d38f6cdc3d6d0d67c32146ec8c3741c700..0000000000000000000000000000000000000000
--- a/spaces/tialenAdioni/chat-gpt-api/logs/Awm 2725 Vw-1 60 C 30v USB Cable Driver Why You Should Buy It Now.md
+++ /dev/null
@@ -1,89 +0,0 @@
-
-Awm 2725 Vw-1 60 C 30v USB Cable Driver: What You Need to Know
-If you have a USB device that requires a specific cable driver, you might have come across the term Awm 2725 Vw-1 60 C 30v USB Cable Driver. This is a type of USB cable that has certain specifications and features that make it compatible with certain devices. In this article, we will explain what Awm 2725 Vw-1 60 C 30v USB Cable Driver is, how to find and install it, and what are the benefits of using it. Let's get started!
-Awm 2725 Vw-1 60 C 30v USB Cable Driver
Download ☆☆☆☆☆ https://urlcod.com/2uK7FO
-What is Awm 2725 Vw-1 60 C 30v USB Cable Driver?
-Awm 2725 Vw-1 60 C 30v USB Cable Driver is a type of USB cable that has the following characteristics:
-
-- It has a standard USB A male connector on one end and a micro USB connector on the other end.
-- It has a high speed 2.0 cable that is compliant with USB 1.1, USB 2.0, and USB 3.0 standards.
-- It has one 28awg twisted pair for data transmission and two power conductors for power supply.
-- It has an overall shield that consists of an aluminized polyester shield with a drain wire and a tinned copper braid shield.
-- It has a rating of Awm 2725 Vw-1 60 C 30v, which means that it can withstand a voltage of 30 volts and a temperature of 60 degrees Celsius.
-
-Awm 2725 Vw-1 60 C 30v USB Cable Driver is designed to work with certain devices that require this type of cable driver. Some examples of these devices are webcams, digital cameras, hard drives, printers, scanners, and more. You can check the compatibility of your device with this cable driver by looking at the label or the manual of your device.
-How to Find and Install Awm 2725 Vw-1 60 C 30v USB Cable Driver?
-If you need to use Awm 2725 Vw-1 60 C 30v USB Cable Driver for your device, you need to find and install it on your computer. Here are some steps on how to do that:
-Awm 2725 Vw-1 60 C 30v USB Cable Driver download
-Awm 2725 Vw-1 60 C 30v USB Cable Driver installation
-Awm 2725 Vw-1 60 C 30v USB Cable Driver compatibility
-Awm 2725 Vw-1 60 C 30v USB Cable Driver update
-Awm 2725 Vw-1 60 C 30v USB Cable Driver error
-Awm 2725 Vw-1 60 C 30v USB Cable Driver manual
-Awm 2725 Vw-1 60 C 30v USB Cable Driver specifications
-Awm 2725 Vw-1 60 C 30v USB Cable Driver price
-Awm 2725 Vw-1 60 C 30v USB Cable Driver review
-Awm 2725 Vw-1 60 C 30v USB Cable Driver replacement
-Awm 2725 Vw-1 60 C 30v USB Cable Driver warranty
-Awm 2725 Vw-1 60 C 30v USB Cable Driver repair
-Awm 2725 Vw-1 60 C 30v USB Cable Driver connector
-Awm 2725 Vw-1 60 C 30v USB Cable Driver adapter
-Awm 2725 Vw-1 60 C 30v USB Cable Driver splitter
-Awm 2725 Vw-1 60 C 30v USB Cable Driver extension
-Awm 2725 Vw-1 60 C 30v USB Cable Driver converter
-Awm 2725 Vw-1 60 C 30v USB Cable Driver hub
-Awm 2725 Vw-1 60 C 30v USB Cable Driver charger
-Awm 2725 Vw-1 60 C 30v USB Cable Driver tester
-Awm 2725 Vw-1 60 C 30v USB Cable Driver meter
-Awm 2725 Vw-1 60 C 30v USB Cable Driver switch
-Awm 2725 Vw-1 60 C 30v USB Cable Driver type
-Awm
-
-- First, you need to find a website that offers Awm 2725 Vw-1 60 C 30v USB Cable Driver for download. You can use Google or other search engines to look for such websites. Some examples of these websites are Tom's Hardware Forum, Ko-fi, Totek International, Amazon, and more.
-- Second, you need to download the cable driver from the website. You can click on the download link or button and wait for the file to be saved on your computer.
-- Third, you need to extract the file using a program like WinRAR or 7-Zip. You can right-click on the file and choose Extract Here or Extract to Folder.
-- Fourth, you need to open the folder and double-click on the executable file to start the installation process. You can follow the instructions on the screen and agree to the terms and conditions.
-- Fifth, you need to restart your computer after the installation is complete. This will ensure that the cable driver is properly installed and ready to use.
-
-What are the Benefits of Using Awm 2725 Vw-1 60 C 30v USB Cable Driver?
-Using Awm 2725 Vw-1 60 C 30v USB Cable Driver has some benefits that can enhance your experience with your device. Here are some of them:
-
-- You can enjoy a fast and stable data transfer between your device and your computer. The high speed 2.0 cable and the twisted pair ensure that your data is transmitted without any interference or loss.
-- You can provide enough power supply for your device without any risk of damage. The two power conductors and the overall shield protect your device from overvoltage and overheating.
-- You can use your device with any operating system that supports USB standards. The cable driver is compatible with Windows XP, Windows Vista, Windows 7, Windows 8, Windows 10, Mac OS X, Linux, and more.
-- You can use your device with any computer that has a standard USB port. The cable driver has a universal USB A male connector that can fit into any USB port.
-
-Conclusion
-Awm 2725 Vw-1 60 C 30v USB Cable Driver is a type of USB cable that has specific specifications and features that make it compatible with certain devices. You can find and install it on your computer by following some simple steps. You can also enjoy some benefits of using it, such as fast data transfer, safe power supply, wide compatibility, and easy connection. If you have a device that requires Awm 2725 Vw-1 60 C 30v USB Cable Driver, you should definitely try it out!
-How to Get More Out of Awm 2725 Vw-1 60 C 30v USB Cable Driver
-If you want to get more out of Awm 2725 Vw-1 60 C 30v USB Cable Driver and enhance your experience with your device, you can try some of these tips:
-
-- You can adjust the settings of Awm 2725 Vw-1 60 C 30v USB Cable Driver to suit your preferences. You can change the resolution, the sound volume, the language, and the difficulty level. You can also enable or disable some features, such as the online mode, the hints, or the fullscreen mode.
-- You can explore the different modes and options of Awm 2725 Vw-1 60 C 30v USB Cable Driver. Besides the normal mode, you can also try the arcade mode, where you have to use Awm 2725 Vw-1 60 C 30v USB Cable Driver as fast as possible. You can also try the bonus mode, where you have to use Awm 2725 Vw-1 60 C 30v USB Cable Driver with different devices and challenges.
-- You can challenge yourself and try to achieve all the achievements and trophies of Awm 2725 Vw-1 60 C 30v USB Cable Driver. You can see your progress and statistics in the main menu. You can also compare your score and rank with other users in the online leaderboard.
-
-Conclusion
-Awm 2725 Vw-1 60 C 30v USB Cable Driver is a type of USB cable that has specific specifications and features that make it compatible with certain devices. You can find and install it on your computer by following some simple steps. You can also enjoy some benefits of using it, such as fast data transfer, safe power supply, wide compatibility, and easy connection. You can also share it with your friends, troubleshoot it if you have any problems, and get more out of it by trying some tips. If you have a device that requires Awm 2725 Vw-1 60 C 30v USB Cable Driver, you should definitely try it out!
-How to Review Awm 2725 Vw-1 60 C 30v USB Cable Driver
-If you have used Awm 2725 Vw-1 60 C 30v USB Cable Driver and want to share your opinion and feedback with other users, you can write a review of Awm 2725 Vw-1 60 C 30v USB Cable Driver. Here are some tips on how to write a good review:
-
-- Be honest and objective. Don't exaggerate or lie about your experience with Awm 2725 Vw-1 60 C 30v USB Cable Driver. Tell the truth about what you liked and disliked about Awm 2725 Vw-1 60 C 30v USB Cable Driver.
-- Be specific and detailed. Don't just say that Awm 2725 Vw-1 60 C 30v USB Cable Driver is good or bad. Explain why and how Awm 2725 Vw-1 60 C 30v USB Cable Driver worked for you or not. Give examples and evidence to support your claims.
-- Be helpful and informative. Don't just write a review for yourself. Write a review for other users who might be interested in Awm 2725 Vw-1 60 C 30v USB Cable Driver. Provide useful information and tips that can help them make a better decision.
-- Be respectful and polite. Don't use offensive or abusive language in your review. Don't insult or attack other users or the developer of Awm 2725 Vw-1 60 C 30v USB Cable Driver. Be courteous and constructive in your criticism.
-
-Here is an example of a possible review of Awm 2725 Vw-1 60 C 30v USB Cable Driver:
-I have been using Awm 2725 Vw-1 60 C 30v USB Cable Driver for a few months now and I am very satisfied with it. It is a very reliable and durable cable that works perfectly with my webcam and printer. It has a high speed and stable data transfer that never fails or slows down. It also provides enough power supply for my devices without any risk of damage or overheating. It is compatible with my Windows 10 operating system and my USB port. It is easy to install and use, and it has some nice features, such as the online mode, the arcade mode, and the bonus mode. It also has some achievements and trophies that make it more fun and challenging. I have also shared it with some of my friends and they also love it. I would highly recommend Awm 2725 Vw-1 60 C 30v USB Cable Driver to anyone who needs a good USB cable for their devices.
-How to Compare Awm 2725 Vw-1 60 C 30v USB Cable Driver with Other USB Cables
-If you want to know how Awm 2725 Vw-1 60 C 30v USB Cable Driver stands out from other USB cables, you can compare Awm 2725 Vw-1 60 C 30v USB Cable Driver with other USB cables based on some criteria. Here are some of them:
-
-- The type of connectors. Awm 2725 Vw-1 60 C 30v USB Cable Driver has a standard USB A male connector on one end and a micro USB connector on the other end. This makes it suitable for devices that have a micro USB port, such as webcams, digital cameras, hard drives, printers, scanners, and more. Other USB cables might have different types of connectors, such as mini USB, USB B, USB C, or lightning.
-- The speed and quality of data transfer. Awm 2725 Vw-1 60 C 30v USB Cable Driver has a high speed 2.0 cable that is compliant with USB 1.1, USB 2.0, and USB 3.0 standards. This means that it can transfer data at up to 480 Mbps without any interference or loss. Other USB cables might have lower or higher speeds depending on their standards and specifications.
-- The power supply and safety. Awm 2725 Vw-1 60 C 30v USB Cable Driver has two power conductors that can provide enough power supply for your device without any risk of damage or overheating. It also has an overall shield that protects your device from overvoltage and electromagnetic interference. Other USB cables might have different power ratings and shielding methods depending on their designs and materials.
-- The compatibility and versatility. Awm 2725 Vw-1 60 C 30v USB Cable Driver is compatible with any operating system that supports USB standards, such as Windows XP, Windows Vista, Windows 7, Windows 8, Windows 10, Mac OS X, Linux, and more. It is also compatible with any computer that has a standard USB port. It is also versatile in terms of its modes and options, such as the online mode, the arcade mode, the bonus mode, the achievements and trophies, and more. Other USB cables might have different compatibility and versatility depending on their features and functions.
-
-By comparing Awm 2725 Vw-1 60 C 30v USB Cable Driver with other USB cables based on these criteria, you can see how Awm 2725 Vw-1 60 C 30v USB Cable Driver stands out and decide whether it is the right cable for your devices.
-
Conclusion
-Awm 2725 Vw-1 60 C 30v USB Cable Driver is a type of USB cable that has specific specifications and features that make it compatible with certain devices. You can find and install it on your computer by following some simple steps. You can also enjoy some benefits of using it, such as fast data transfer, safe power supply, wide compatibility, and easy connection. You can also share it with your friends, troubleshoot it if you have any problems, get more out of it by trying some tips, and compare it with other USB cables based on some criteria. If you have a device that requires Awm 2725 Vw-1 60 C 30v USB Cable Driver, you should definitely try it out!
679dcb208e
-
-
\ No newline at end of file
diff --git a/spaces/tialenAdioni/chat-gpt-api/logs/Crazytalk Animator Combo Pack Power Tools Vol 4 Mediafire 339.md b/spaces/tialenAdioni/chat-gpt-api/logs/Crazytalk Animator Combo Pack Power Tools Vol 4 Mediafire 339.md
deleted file mode 100644
index 8072d5df07610fae4c26eb1c4d8b33493081a826..0000000000000000000000000000000000000000
--- a/spaces/tialenAdioni/chat-gpt-api/logs/Crazytalk Animator Combo Pack Power Tools Vol 4 Mediafire 339.md
+++ /dev/null
@@ -1,23 +0,0 @@
-
-Crazytalk Animator Combo Pack Power Tools Vol 4 Mediafire 339
-Crazytalk Animator is a software that allows you to create 2D animations with ease. You can use pre-made characters, props, motions, and backgrounds, or create your own with the powerful tools and features. One of the most popular content packs for Crazytalk Animator is Power Tools Vol 4, which includes five packs of assets for different themes and genres.
-Power Tools Vol 4 contains the following packs:
-Crazytalk Animator Combo Pack Power Tools Vol 4 Mediafire 339
Download Zip ⇒⇒⇒ https://urlcod.com/2uK9qa
-
-- Real Life Character Bodies - Action Figures: This pack contains six realistic human characters with different body shapes and outfits. You can mix and match their body parts and accessories to create your own custom characters.
-- Historical Series - Titans of Time: This pack contains six historical characters from different eras, such as Julius Caesar, Cleopatra, Napoleon, and Genghis Khan. You can use them to recreate historical scenes or make your own stories.
-- Cosmic Hero: This pack contains five comic-style characters with superpowers and costumes. You can use them to create action-packed scenes or humorous situations.
-- Transportation Series - Modern Vehicles Vol 1: This pack contains six modern vehicles, such as a car, a motorcycle, a helicopter, and a jet. You can use them to add realism and movement to your scenes.
-- Fighting Motion Library Vol 3: This pack contains over 100 fighting motions for your characters, such as punches, kicks, blocks, and combos. You can use them to create dynamic and exciting fight scenes.
-
-Power Tools Vol 4 is a great addition to your Crazytalk Animator library, as it gives you more options and possibilities for your animations. You can download it from Mediafire for free using this link: https://www.mediafire.com/file/339/Crazytalk_Animator_Combo_Pack_Power_Tools_Vol_4.zip/file
-Enjoy creating amazing animations with Crazytalk Animator and Power Tools Vol 4!
If you want to see some examples of animations made with Power Tools Vol 4, you can check out some of these links:
-
-
-These examples will give you an idea of what you can do with Power Tools Vol 4, and how you can use it to create your own animations. You can also find more examples and tutorials on the official website of Reallusion, the developer of CrazyTalk Animator and Cartoon Animator.
7196e7f11a
-
-
\ No newline at end of file
diff --git a/spaces/tialenAdioni/chat-gpt-api/logs/Descargar enciclopedia universal micronet 2012 18 en espaol y con licencia.md b/spaces/tialenAdioni/chat-gpt-api/logs/Descargar enciclopedia universal micronet 2012 18 en espaol y con licencia.md
deleted file mode 100644
index b39cf22ad14560fd7c0bf73483de73565b7b7aa0..0000000000000000000000000000000000000000
--- a/spaces/tialenAdioni/chat-gpt-api/logs/Descargar enciclopedia universal micronet 2012 18 en espaol y con licencia.md
+++ /dev/null
@@ -1,86 +0,0 @@
-
-How to Download and Install Azurewave Aw-nb041h Driver for Windows 10
-If you are looking for a wireless network adapter that can provide fast and stable internet connection, you may want to consider the Azurewave Aw-nb041h. This device is compatible with Windows 10 and supports 802.11n Wi-Fi standards. However, to use it properly, you need to install the correct driver for your system.
-A driver is a software that allows your computer to communicate with your hardware devices. Without a driver, your device may not work properly or at all. Therefore, it is important to keep your drivers updated and avoid any compatibility issues.
-Azurewave Aw-nb041h Driver Download
DOWNLOAD ✅ https://urlcod.com/2uK8iN
-In this article, we will show you how to download and install the Azurewave Aw-nb041h driver for Windows 10 in a few simple steps. Follow the instructions below and enjoy your wireless internet connection.
-Step 1: Download the Azurewave Aw-nb041h Driver from the Official Website
-The best way to get the latest and most compatible driver for your device is to download it from the official website of the manufacturer. In this case, you can visit the Azurewave website and find the driver for your model.
-Here are the steps to download the driver:
-
-- Go to http://www.azurewave.com/product_Wireless%20LAN_1.asp and scroll down to find the Aw-nb041h model.
-- Click on the "Download" button next to the model name and select your operating system (Windows 10).
-- Save the file to your computer and remember its location.
-
-Step 2: Install the Azurewave Aw-nb041h Driver on Your Computer
-After downloading the driver file, you need to install it on your computer. This will update your device driver and enable it to work properly.
-Here are the steps to install the driver:
-
-- Locate the downloaded file and double-click on it to run it.
-- Follow the on-screen instructions and agree to the terms and conditions.
-- Wait for the installation process to complete and restart your computer if prompted.
-
-Step 3: Check if Your Device is Working Properly
-Once you have installed the driver, you can check if your device is working properly by connecting to a wireless network. You can also use the Device Manager to verify if your driver is updated and functioning.
-Azurewave Aw-nb041h Wireless Network Card Driver
-Azurewave Aw-nb041h Bluetooth Driver for Windows 10
-Azurewave Aw-nb041h WLAN Driver for Acer Veriton M6630G
-How to Install Azurewave Aw-nb041h Driver on Windows 7
-Azurewave Aw-nb041h Camera Module Driver Download
-Azurewave Aw-nb041h Driver Update Utility
-Azurewave Aw-nb041h Driver for Linux
-Azurewave Aw-nb041h Driver for Mac OS
-Azurewave Aw-nb041h Driver Problems and Solutions
-Azurewave Aw-nb041h Driver Compatibility with Windows 8.1
-Azurewave Aw-nb041h Driver Softpedia Download Link
-Azurewave Aw-nb041h Driver Official Website
-Azurewave Aw-nb041h Driver Reviews and Ratings
-Azurewave Aw-nb041h Driver Alternatives and Comparisons
-Azurewave Aw-nb041h Driver Features and Specifications
-Azurewave Aw-nb041h Driver Troubleshooting Guide
-Azurewave Aw-nb041h Driver Manual and Documentation
-Azurewave Aw-nb041h Driver Warranty and Support
-Azurewave Aw-nb041h Driver Price and Availability
-Azurewave Aw-nb041h Driver Benefits and Advantages
-Azurewave Aw-nb041h Driver for Acer Extensa M2610
-Azurewave Aw-nb041h Driver for Acer Aspire TC-100
-Azurewave Aw-nb041h Driver for Acer Veriton M4630G
-Azurewave Aw-nb041h Driver for Acer Veriton M2632G
-Azurewave Aw-nb041h Driver for Acer Veriton M2631G
-Azurewave Aw-nb041h Wireless LAN Adapter Driver
-Azurewave Aw-nb041h Bluetooth Device Driver
-Azurewave Aw-nb041h Wireless Combo Device Driver
-How to Uninstall Azurewave Aw-nb041h Driver on Windows 10
-How to Fix Azurewave Aw-nb041h Driver Error Code 10
-How to Enable Azurewave Aw-nb041h Wireless and Bluetooth Functionality
-How to Configure Azurewave Aw-nb041h Wireless Settings and Security Options
-How to Connect Azurewave Aw-nb041h Camera Module to PC or Laptop
-How to Capture Images and Videos with Azurewave Aw-nb041h Camera Module
-How to Adjust Azurewave Aw-nb041h Camera Module Settings and Quality Parameters
-How to Optimize Azurewave Aw-nb041h Wireless Performance and Battery Life
-How to Upgrade Azurewave Aw-nb041h Wireless Firmware and Software Version
-How to Test Azurewave Aw-nb041h Wireless Signal Strength and Speed
-How to Troubleshoot Azurewave Aw-nb041h Wireless Connection Issues and Interference Problems
-How to Contact AzureWave Technologies Customer Service and Technical Support Team
-How to Register AzureWave Products Online and Get Warranty Service
-How to Find Out the Serial Number and Model Number of Your AzureWave Device
-How to Download the Latest Drivers and Manuals for Your AzureWave Device from the Official Website[^1^]
-How to Join the AzureWave Community Forum and Share Your Feedback and Experience with Other Users[^1^]
-How to Follow the Latest News and Updates from AzureWave on Social Media Platforms[^1^]
-What is the Difference Between AzureWave AW-NB047H and AW-NB041H Drivers?
-What are the System Requirements and Hardware Compatibility for Installing the AW-NB041H Drivers?
-What are the Benefits of Using AW-NB041H Drivers over Generic or Third-party Drivers?
-What are the Common Problems and Errors that Users Encounter when Using AW-NB041H Drivers?
-What are the Best Practices and Tips for Using AW-NB041H Drivers Safely and Effectively?
-Here are the steps to check your device status:
-
-- Press Windows + X keys on your keyboard and select Device Manager from the menu.
-- Expand the Network adapters category and look for your Azurewave Aw-nb041h device.
-- Right-click on it and select Properties.
-- Go to the Driver tab and check if the driver version and date are correct.
-- If there are no errors or warnings, your device is working properly.
-
-Conclusion
-The Azurewave Aw-nb041h is a reliable wireless network adapter that can provide fast and stable internet connection for your Windows 10 computer. However, you need to install the correct driver for it to work properly. By following the steps above, you can easily download and install the Azurewave Aw-nb041h driver for Windows 10 and enjoy your wireless internet connection.
e753bf7129
-
-
\ No newline at end of file
diff --git a/spaces/ticomspire/turkey-syria-earthquake-tweets/logs/Black Yellow Icon Pack A Unique and Elegant Icon Set for Your Device.md b/spaces/ticomspire/turkey-syria-earthquake-tweets/logs/Black Yellow Icon Pack A Unique and Elegant Icon Set for Your Device.md
deleted file mode 100644
index f83cf0dd59c89df6b9ed75b70e39d348129ab3c4..0000000000000000000000000000000000000000
--- a/spaces/ticomspire/turkey-syria-earthquake-tweets/logs/Black Yellow Icon Pack A Unique and Elegant Icon Set for Your Device.md
+++ /dev/null
@@ -1,106 +0,0 @@
-
-Black Yellow Icon Pack APK: A Stylish and Vibrant Theme for Your Android Device
-If you are looking for a way to spice up your Android device's appearance, you might want to try out Black Yellow Icon Pack APK. This is a stunning theme that features black and yellow icons, wallpapers, and widgets that will give your device a fresh and modern look. In this article, we will tell you everything you need to know about this icon pack, including what it is, how to download and install it, why you should choose it, and how to use it. Let's get started!
- What is Black Yellow Icon Pack APK?
-A brief introduction to the icon pack and its features
-Black Yellow Icon Pack APK is a theme that changes the appearance of your device's icons, wallpapers, and widgets. It is designed by XDA Developers, a popular online community for Android enthusiasts. The icon pack consists of more than 1180+ icons that are inspired by Apple's iOS 16 design. The icons have a rounded square shape and bold colors that contrast well with the black background. The icon pack also comes with 10 high-quality wallpapers that match the theme, as well as 5 custom widgets that show the time, date, weather, battery, and music.
-black yellow icon pack apk
DOWNLOAD — https://bltlly.com/2uOnIm
-How to download and install the icon pack on your device
-To download and install Black Yellow Icon Pack APK on your device, you need to follow these simple steps:
-
-- Go to this link and download the latest version of the icon pack.
-- Once the download is complete, open the file manager app on your device and locate the downloaded file.
-- Tap on the file and allow the installation from unknown sources if prompted.
-- Wait for the installation to finish and then open the icon pack app from your app drawer.
-- Grant the necessary permissions to the app and enjoy your new theme!
-
- Why Choose Black Yellow Icon Pack APK?
-The benefits of using this icon pack over others
-There are many reasons why you should choose Black Yellow Icon Pack APK over other icon packs available on the market. Here are some of them:
-
-- It is free and easy to use. You don't have to pay anything or register for anything to use this icon pack. You just need to download it, install it, and apply it.
-- It is stylish and vibrant. The black and yellow colors create a striking contrast that makes your device stand out from the crowd. The icons are also sleek and elegant, giving your device a professional look.
-- It is updated regularly. The developer of this icon pack is constantly adding new icons, wallpapers, and widgets to keep up with the latest trends and requests from users. You can always expect new content and improvements from this icon pack.
-
-The compatibility and customization options of the icon pack
-Another advantage of using Black Yellow Icon Pack APK is that it is compatible with most Android devices and launchers. You can use this icon pack with any launcher that supports icon packs, such as Nova Launcher, Apex Launcher, ADW Launcher, and more. You can also customize the icon pack to suit your preferences. You can change the icon size, shape, color, and label. You can also apply different wallpapers and widgets to create your own unique theme.
- How to Use Black Yellow Icon Pack APK?
-How to apply the icon pack to your launcher
-After you have installed the icon pack on your device, you need to apply it to your launcher. Here is how you can do that:
-
-- Open the icon pack app from your app drawer and tap on the Apply button.
-- Select the launcher that you are using from the list of supported launchers.
-- Wait for the launcher to apply the icon pack and then go back to your home screen.
-- You should see the new icons, wallpapers, and widgets on your device.
-
-How to change the icons, wallpapers, and widgets of the icon pack
-If you want to change the icons, wallpapers, and widgets of the icon pack, you can do that easily from the icon pack app. Here is how you can do that:
-
-- Open the icon pack app from your app drawer and tap on the Icons, Wallpapers, or Widgets button.
-- Browse through the available icons, wallpapers, or widgets and select the ones that you like.
-- Tap on the Apply button to apply the changes to your device.
-- You can also use the Search button to find specific icons, wallpapers, or widgets by name or category.
-
- Conclusion
-Black Yellow Icon Pack APK is a great way to give your Android device a stylish and vibrant makeover. It is a free and easy-to-use theme that features black and yellow icons, wallpapers, and widgets that are inspired by Apple's iOS 16 design. It is compatible with most Android devices and launchers and offers a lot of customization options. If you are looking for a new theme for your device, you should definitely give Black Yellow Icon Pack APK a try. You won't regret it!
- FAQs
-Q1: Is Black Yellow Icon Pack APK free?
-A1: Yes, Black Yellow Icon Pack APK is completely free to download and use. You don't have to pay anything or register for anything to use this theme.
- Q2: Does Black Yellow Icon Pack APK require root access?
-A2: No, Black Yellow Icon Pack APK does not require root access or any special permissions to work on your device. You just need to enable the installation from unknown sources if prompted.
- Q3: What launchers are supported by Black Yellow Icon Pack APK?
-A3: Black Yellow Icon Pack APK supports most launchers that support icon packs, such as Nova Launcher, Apex Launcher, ADW Launcher, and more. You can check the list of supported launchers in the icon pack app.
- Q4: How can I request new icons for Black Yellow Icon Pack APK?
-A4: If you want to request new icons for Black Yellow Icon Pack APK, you can do that by contacting the developer of this theme. You can find their email address in the icon pack app or on their XDA Developers profile. You can also join their Telegram group to get updates and support.
- Q5: How can I contact the developer of Black Yellow Icon Pack APK?
-A5: If you have any questions, suggestions, or feedback about Black Yellow Icon Pack APK, you can contact the developer of this theme by email or Telegram. You can find their contact details in the icon pack app or on their XDA Developers profile.
-
-
\ No newline at end of file
diff --git a/spaces/tioseFevbu/cartoon-converter/scripts/Daz 3D - Toon Generations Mega-1.md b/spaces/tioseFevbu/cartoon-converter/scripts/Daz 3D - Toon Generations Mega-1.md
deleted file mode 100644
index 147b5178f4c7fbcdc116a037a11fa1e1b0618afe..0000000000000000000000000000000000000000
--- a/spaces/tioseFevbu/cartoon-converter/scripts/Daz 3D - Toon Generations Mega-1.md
+++ /dev/null
@@ -1,37 +0,0 @@
-
-How to Create Fun and Unique Cartoon Characters with Daz 3D - Toon Generations Mega-1
-
-If you love cartoons and want to create your own characters, you might be interested in Daz 3D - Toon Generations Mega-1. This is a product set that allows you to create cartoon characters of all different ages, sizes and genders using the Daz Studio software. You can customize their appearance, clothing, hair and personality with a few clicks and sliders. Whether you want to make a cute baby, a grumpy grandpa, a sassy teenager or anything in between, you can do it with Toon Generations Mega-1.
-Daz 3D - Toon Generations Mega-1
Download Zip ✔✔✔ https://urlcod.com/2uHxDX
-
-What is Daz 3D - Toon Generations Mega-1?
-
-Daz 3D - Toon Generations Mega-1 is a bundle of products that includes the following items[^1^]:
-
-
-- Toon Generations Base - This is the core product that contains the base figures for male and female toons. You can use them as they are or modify them with morphs and presets.
-- Toon Generations Clothing - This is a collection of clothing items for both male and female toons. You can mix and match them to create different outfits and styles.
-- Toon Generations Hair - This is a set of hair styles for both male and female toons. You can choose from different colors, lengths and shapes.
-- Toon Generations Personalities - This is a set of expressions and poses for both male and female toons. You can use them to give your characters different moods and emotions.
-
-
-With these products, you can create unlimited combinations of cartoon characters for your projects. You can also use them with other Daz 3D products and assets to enhance your scenes and animations.
-
-How to Use Daz 3D - Toon Generations Mega-1?
-
-To use Daz 3D - Toon Generations Mega-1, you need to have Daz Studio installed on your computer. Daz Studio is a free 3D software that lets you create and render 3D scenes and animations. You can download it from here.
-
-Once you have Daz Studio, you need to install the Toon Generations Mega-1 products. You can do this by using the Daz Install Manager (DIM) or by manually downloading and extracting the files. For more information on how to install Daz 3D products, please refer to this article.
-
-After installing the products, you can find them in your Daz Studio content library under People > Toon Generations. You can load the base figures by double-clicking on them or dragging them into your scene. You can then apply clothing, hair and personality presets by selecting the figure and choosing the desired options from the content library.
-
-
-You can also adjust the age, size and gender of your characters by using the sliders in the parameters tab. The age slider ranges from 0 (baby) to 100 (elderly), while the size slider ranges from -100 (tiny) to 100 (huge). The gender slider ranges from -100 (female) to 100 (male), but you can also use other morphs to fine-tune the features of your characters.
-
-You can pose your characters by using the pose presets or by manually manipulating their body parts. You can also change their expressions by using the expression presets or by adjusting their facial features. You can use the camera tools to change the perspective and angle of your scene. You can also add lights, backgrounds, props and other elements to make your scene more interesting.
-
-When you are happy with your scene, you can render it by clicking on the render button or by pressing Ctrl+R. You can choose from different render settings and formats depending on your needs. You can also save your scene as a .duf file for future use or editing.
-
-Why Choose Daz 3D - Toon Generations Mega-1?
-
-
\ No newline at end of file
diff --git a/spaces/tioseFevbu/cartoon-converter/scripts/Final Destination 2 Full Movie In Hindi Free 20 HOT!.md b/spaces/tioseFevbu/cartoon-converter/scripts/Final Destination 2 Full Movie In Hindi Free 20 HOT!.md
deleted file mode 100644
index a29e32f467e969cb22732106db05601aaf0b6c67..0000000000000000000000000000000000000000
--- a/spaces/tioseFevbu/cartoon-converter/scripts/Final Destination 2 Full Movie In Hindi Free 20 HOT!.md
+++ /dev/null
@@ -1,28 +0,0 @@
-
-```html
-How to Watch Final Destination 2 Full Movie in Hindi for Free
-
If you are a fan of horror movies, you might be interested in watching Final Destination 2, the 2003 sequel to the popular 2000 film Final Destination. The sequel follows a group of people who cheat death by escaping a deadly highway pileup. But what if you want to watch it in Hindi and for free? Is it possible to find a legal and safe way to do so?
-Final Destination 2 Full Movie In Hindi Free 20
Download File • https://urlcod.com/2uHykS
-In this article, we will show you how to watch Final Destination 2 full movie in Hindi for free using some of the best online streaming platforms available. We will also explain why you should avoid illegal and risky sites that offer pirated copies of the movie.
-Why You Should Avoid Pirated Sites
-Before we get into the details of how to watch Final Destination 2 full movie in Hindi for free, we want to warn you about the dangers of using pirated sites. These are websites that offer free downloads or streaming of movies and TV shows without the permission of the creators or distributors.
-While it might be tempting to use these sites to save money and time, there are several reasons why you should avoid them:
-
-- They are illegal. Downloading or streaming copyrighted content without authorization is a violation of intellectual property laws and can result in fines or legal action.
-- They are unsafe. Pirated sites often contain malware, viruses, or spyware that can harm your device or steal your personal information. They may also redirect you to phishing or scam sites that can trick you into giving up your credit card details or other sensitive data.
-- They are low-quality. Pirated sites usually offer poor-quality copies of movies and TV shows that have been compressed, cropped, or edited. They may also have annoying ads, pop-ups, or buffering issues that ruin your viewing experience.
-
-Therefore, we recommend that you stay away from pirated sites and use only legal and reputable platforms to watch Final Destination 2 full movie in Hindi for free.
-How to Watch Final Destination 2 Full Movie in Hindi for Free
-Now that we have established why you should avoid pirated sites, let's look at some of the best options to watch Final Destination 2 full movie in Hindi for free. These are platforms that offer legitimate and high-quality streaming of movies and TV shows in various languages and genres.
-
-Here are some of the platforms that we suggest:
-
-- Netflix. Netflix is one of the most popular and widely used streaming services in the world. It offers a huge library of movies and TV shows, including Final Destination 2, that you can watch on any device with an internet connection. You can also choose from different audio and subtitle options, including Hindi. Netflix offers a 30-day free trial for new users, so you can watch Final Destination 2 full movie in Hindi for free without paying anything.
-- Amazon Prime Video. Amazon Prime Video is another great streaming service that offers a variety of movies and TV shows, including Final Destination 2, that you can watch on any device with an internet connection. You can also choose from different audio and subtitle options, including Hindi. Amazon Prime Video offers a 30-day free trial for new users, so you can watch Final Destination 2 full movie in Hindi for free without paying anything.
-- Internet Archive. Internet Archive is a non-profit organization that preserves and provides access to digital media, including movies and TV shows. It has a collection of public domain and creative commons licensed content that you can watch for free online or download for offline viewing. You can find Final Destination 2 full movie in Hindi on Internet Archive and watch it for free without any ads or registration.
-
-These are some of the best ways to watch Final Destination 2 full movie in Hindi for free legally and safely. We hope you enjoy watching this thrilling horror movie and have a great time.
-```
-
-
\ No newline at end of file
diff --git a/spaces/tioseFevbu/cartoon-converter/scripts/Get 100 Facebook Like WORK.md b/spaces/tioseFevbu/cartoon-converter/scripts/Get 100 Facebook Like WORK.md
deleted file mode 100644
index e7f09c4534688be7cc2b2256d09876292d97ca60..0000000000000000000000000000000000000000
--- a/spaces/tioseFevbu/cartoon-converter/scripts/Get 100 Facebook Like WORK.md
+++ /dev/null
@@ -1,24 +0,0 @@
-
-```
-How to Get 100 Facebook Like in a Day: A Simple Guide
-Facebook is one of the most popular social media platforms in the world, with over 2.8 billion monthly active users. Having a strong presence on Facebook can help you grow your brand awareness, reach new customers, and increase your sales. But how do you get more people to like your page and posts?
-get 100 facebook like
Download File: https://urlcod.com/2uHvYL
-In this article, we will show you how to get 100 Facebook likes in a day, using some simple and effective strategies. Whether you are a business owner, a blogger, or an influencer, you can use these tips to boost your Facebook engagement and visibility.
-Why are Facebook likes important?
-Facebook likes are more than just numbers. They are a way of showing your audience that you care about their opinions and feedback. They are also a way of measuring your content's performance and popularity. The more likes you have, the more likely your posts are to be seen by more people, thanks to Facebook's algorithm.
-Getting more likes can also help you build trust and credibility with your potential customers. According to a study by BrightLocal, 88% of consumers trust online reviews as much as personal recommendations. If your page and posts have a lot of likes, it means that people find your content valuable and relevant.
-Moreover, getting more likes can help you generate more traffic to your website or blog. By adding a call-to-action button or a link to your posts, you can encourage your followers to visit your site and learn more about your products or services.
-
-How to get 100 Facebook like in a day
-Now that you know why Facebook likes are important, let's see how you can get 100 of them in a day. Here are some proven strategies that you can implement right away:
-
-- Create high-quality content. The first and most important step to get more likes is to create content that your audience will love. Your content should be relevant, informative, entertaining, or inspiring. It should also be original and unique, not copied from other sources. You can use different types of content, such as images, videos, live streams, polls, quizzes, infographics, etc., to keep your followers engaged and interested.
-- Post at optimal times. The second step is to post your content at the best times for your audience. This will ensure that your posts get maximum exposure and reach. To find out when your followers are most active on Facebook, you can use the Insights tool on your page. It will show you the days and hours when your fans are online. You can then schedule your posts accordingly using tools like Buffer or Hootsuite.
-- Use hashtags. The third step is to use hashtags to increase your visibility and discoverability on Facebook. Hashtags are keywords or phrases that describe your content and help people find it. You can use up to 30 hashtags per post, but make sure they are relevant and specific to your niche. You can also use popular or trending hashtags that relate to your topic or industry.
-- Engage with your followers. The fourth step is to interact with your followers and show them that you appreciate their support. You can do this by liking, commenting, and replying to their messages and feedback. You can also ask them questions, solicit their opinions, or run contests and giveaways to encourage them to participate. By engaging with your followers, you will build a loyal and active community around your page.
-- Promote your page. The fifth and final step is to promote your page and posts to reach more people who might be interested in your content. You can do this by sharing your page and posts on other social media platforms, such as Instagram, Twitter, or LinkedIn. You can also invite your friends, family, and contacts to like your page and posts. Another option is to use Facebook ads to target your ideal audience based on their demographics, interests, behaviors, and location.
-
-Conclusion
-Getting 100 Facebook likes in a day is not impossible if you follow these simple steps. By creating high-quality content, posting at optimal times, using hashtags, engaging with your followers, and promoting your page, you can reach that goal and keep growing your audience from there.
-```
-
-
\ No newline at end of file
diff --git a/spaces/tioseFevbu/cartoon-converter/scripts/Manual De Fallas Yamaha Xtz 250 __TOP__.md b/spaces/tioseFevbu/cartoon-converter/scripts/Manual De Fallas Yamaha Xtz 250 __TOP__.md
deleted file mode 100644
index 0efde1a369dbddfd2a962120e042c331991ffed8..0000000000000000000000000000000000000000
--- a/spaces/tioseFevbu/cartoon-converter/scripts/Manual De Fallas Yamaha Xtz 250 __TOP__.md
+++ /dev/null
@@ -1,47 +0,0 @@
-
-Manual De Fallas Yamaha Xtz 250: How to Fix the Most Common Problems
-The Yamaha Xtz 250 is a versatile, dependable motorcycle that handles both asphalt and off-road riding. However, like any vehicle, it can develop faults that affect its performance or safety. In this article we show you how to identify and fix the most common problems your Yamaha Xtz 250 may present, according to the official workshop manual.
-Fault Codes
-The Yamaha Xtz 250 has a self-diagnosis system that lets you know whether there is a problem in the engine or in the sensors. To read the fault code, follow these steps:
-Manual De Fallas Yamaha Xtz 250
DOWNLOAD ✓✓✓ https://urlcod.com/2uHwSy
-
-- Turn the ignition switch off.
-- Connect the diagnostic connector located under the seat.
-- Turn the ignition switch on and watch the diagnostic indicator on the instrument panel.
-- Note the number shown on the indicator; this is the fault code.
-- Look the code up in the fault-code table in the workshop manual, or on page 311 of the linked document[^1^].
-
-Most Common Problems
-Below are some of the most common problems your Yamaha Xtz 250 may present and how to fix them.
-Fault 12: No signal received from the crankshaft position sensor
-This problem can keep the engine from starting or make it stall suddenly. The possible causes are:
-
-- Open or short circuit in the wiring.
-- Faulty crankshaft position sensor.
-- Poor connection at the sensor connector.
-
-To fix it, check the wiring and the sensor and replace them if necessary. Also make sure the connector is properly seated and clean.
-Fault 19: No signal received from the throttle position sensor
-This problem can make the engine run roughly or accelerate poorly. The possible causes are:
-
-- Open or short circuit in the wiring.
-- Faulty throttle position sensor.
-- Poor connection at the sensor connector.
-
-To fix it, check the wiring and the sensor and replace them if necessary. Also make sure the connector is properly seated and clean.
-Fault 30: Abnormal signal received from the lean angle sensor
-This problem can make the engine shut off when the motorcycle leans too far. The possible causes are:
-
-- Open or short circuit in the wiring.
-- Faulty lean angle sensor.
-- Poor connection at the sensor connector.
-- The motorcycle has been leaned over more than 65 degrees.
-
-To fix it, check the wiring and the sensor and replace them if necessary. Also make sure the connector is properly seated and clean. In addition, avoid leaning the motorcycle more than necessary.
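For quick reference, the three self-diagnosis codes covered above can be kept in a small lookup table. The sketch below is plain Python written for this article, not material from the Yamaha workshop manual; the code numbers are the ones listed above and the descriptions are just short summaries.

```python
# Hypothetical fault-code lookup for the Yamaha Xtz 250 codes discussed above.
FAULT_CODES = {
    12: "No signal received from the crankshaft position sensor",
    19: "No signal received from the throttle position sensor",
    30: "Abnormal signal received from the lean angle sensor",
}

def describe_fault(code: int) -> str:
    """Return a short description for a self-diagnosis code, if we know it."""
    return FAULT_CODES.get(code, f"Unknown code {code}: consult the workshop manual")

print(describe_fault(19))
```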
-
-
-Conclusion
-
-The Yamaha Xtz 250 is a motorcycle that performs well both in the city and in the countryside. However, like any vehicle, it can develop faults that require attention and maintenance. In this article we have shown you how to identify and fix the most common faults reported by its self-diagnosis system so you can keep riding safely.
-
-
\ No newline at end of file
diff --git a/spaces/tjburns/ask_marcus_aurelius/.venv/lib/python3.10/site-packages/pip/_vendor/pygments/formatters/terminal256.py b/spaces/tjburns/ask_marcus_aurelius/.venv/lib/python3.10/site-packages/pip/_vendor/pygments/formatters/terminal256.py
deleted file mode 100644
index 201b3c3283218f45d5cfa192a07c9e9d991eaaff..0000000000000000000000000000000000000000
--- a/spaces/tjburns/ask_marcus_aurelius/.venv/lib/python3.10/site-packages/pip/_vendor/pygments/formatters/terminal256.py
+++ /dev/null
@@ -1,338 +0,0 @@
-"""
- pygments.formatters.terminal256
- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
- Formatter for 256-color terminal output with ANSI sequences.
-
- RGB-to-XTERM color conversion routines adapted from xterm256-conv
- tool (http://frexx.de/xterm-256-notes/data/xterm256-conv2.tar.bz2)
- by Wolfgang Frisch.
-
- Formatter version 1.
-
- :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
- :license: BSD, see LICENSE for details.
-"""
-
-# TODO:
-# - Options to map style's bold/underline/italic/border attributes
-#   to some ANSI attributes (something like 'italic=underline')
-# - An option to output "style RGB to xterm RGB/index" conversion table
-# - An option to indicate that we are running in "reverse background"
-# xterm. This means that default colors are white-on-black, not
-#   black-on-white, so colors like "white background" need to be converted
-# to "white background, black foreground", etc...
-
-from pip._vendor.pygments.formatter import Formatter
-from pip._vendor.pygments.console import codes
-from pip._vendor.pygments.style import ansicolors
-
-
-__all__ = ['Terminal256Formatter', 'TerminalTrueColorFormatter']
-
-
-class EscapeSequence:
- def __init__(self, fg=None, bg=None, bold=False, underline=False, italic=False):
- self.fg = fg
- self.bg = bg
- self.bold = bold
- self.underline = underline
- self.italic = italic
-
- def escape(self, attrs):
- if len(attrs):
- return "\x1b[" + ";".join(attrs) + "m"
- return ""
-
- def color_string(self):
- attrs = []
- if self.fg is not None:
- if self.fg in ansicolors:
- esc = codes[self.fg.replace('ansi','')]
- if ';01m' in esc:
- self.bold = True
- # extract fg color code.
- attrs.append(esc[2:4])
- else:
- attrs.extend(("38", "5", "%i" % self.fg))
- if self.bg is not None:
- if self.bg in ansicolors:
- esc = codes[self.bg.replace('ansi','')]
- # extract fg color code, add 10 for bg.
- attrs.append(str(int(esc[2:4])+10))
- else:
- attrs.extend(("48", "5", "%i" % self.bg))
- if self.bold:
- attrs.append("01")
- if self.underline:
- attrs.append("04")
- if self.italic:
- attrs.append("03")
- return self.escape(attrs)
-
- def true_color_string(self):
- attrs = []
- if self.fg:
- attrs.extend(("38", "2", str(self.fg[0]), str(self.fg[1]), str(self.fg[2])))
- if self.bg:
- attrs.extend(("48", "2", str(self.bg[0]), str(self.bg[1]), str(self.bg[2])))
- if self.bold:
- attrs.append("01")
- if self.underline:
- attrs.append("04")
- if self.italic:
- attrs.append("03")
- return self.escape(attrs)
-
- def reset_string(self):
- attrs = []
- if self.fg is not None:
- attrs.append("39")
- if self.bg is not None:
- attrs.append("49")
- if self.bold or self.underline or self.italic:
- attrs.append("00")
- return self.escape(attrs)
-
-
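Before the formatter classes below, a quick illustration of what `EscapeSequence` produces. This is a minimal sketch assuming the same helper class is importable from the regular (non-vendored) Pygments package at `pygments.formatters.terminal256`; the color index 196 is an arbitrary choice.

```python
# Minimal sketch; assumes the public pygments package (not the pip-vendored copy
# deleted above) exposes the same EscapeSequence helper.
from pygments.formatters.terminal256 import EscapeSequence

seq = EscapeSequence(fg=196, bold=True)   # 196 = an arbitrary 256-color index
print(repr(seq.color_string()))           # '\x1b[38;5;196;01m'
print(repr(seq.reset_string()))           # '\x1b[39;00m'
```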
-class Terminal256Formatter(Formatter):
- """
- Format tokens with ANSI color sequences, for output in a 256-color
- terminal or console. Like in `TerminalFormatter` color sequences
- are terminated at newlines, so that paging the output works correctly.
-
- The formatter takes colors from a style defined by the `style` option
- and converts them to nearest ANSI 256-color escape sequences. Bold and
- underline attributes from the style are preserved (and displayed).
-
- .. versionadded:: 0.9
-
- .. versionchanged:: 2.2
- If the used style defines foreground colors in the form ``#ansi*``, then
- `Terminal256Formatter` will map these to non extended foreground color.
- See :ref:`AnsiTerminalStyle` for more information.
-
- .. versionchanged:: 2.4
- The ANSI color names have been updated with names that are easier to
- understand and align with colornames of other projects and terminals.
- See :ref:`this table ` for more information.
-
-
- Options accepted:
-
- `style`
- The style to use, can be a string or a Style subclass (default:
- ``'default'``).
-
- `linenos`
- Set to ``True`` to have line numbers on the terminal output as well
- (default: ``False`` = no line numbers).
- """
- name = 'Terminal256'
- aliases = ['terminal256', 'console256', '256']
- filenames = []
-
- def __init__(self, **options):
- Formatter.__init__(self, **options)
-
- self.xterm_colors = []
- self.best_match = {}
- self.style_string = {}
-
- self.usebold = 'nobold' not in options
- self.useunderline = 'nounderline' not in options
- self.useitalic = 'noitalic' not in options
-
- self._build_color_table() # build an RGB-to-256 color conversion table
- self._setup_styles() # convert selected style's colors to term. colors
-
- self.linenos = options.get('linenos', False)
- self._lineno = 0
-
- def _build_color_table(self):
- # colors 0..15: 16 basic colors
-
- self.xterm_colors.append((0x00, 0x00, 0x00)) # 0
- self.xterm_colors.append((0xcd, 0x00, 0x00)) # 1
- self.xterm_colors.append((0x00, 0xcd, 0x00)) # 2
- self.xterm_colors.append((0xcd, 0xcd, 0x00)) # 3
- self.xterm_colors.append((0x00, 0x00, 0xee)) # 4
- self.xterm_colors.append((0xcd, 0x00, 0xcd)) # 5
- self.xterm_colors.append((0x00, 0xcd, 0xcd)) # 6
- self.xterm_colors.append((0xe5, 0xe5, 0xe5)) # 7
- self.xterm_colors.append((0x7f, 0x7f, 0x7f)) # 8
- self.xterm_colors.append((0xff, 0x00, 0x00)) # 9
- self.xterm_colors.append((0x00, 0xff, 0x00)) # 10
- self.xterm_colors.append((0xff, 0xff, 0x00)) # 11
- self.xterm_colors.append((0x5c, 0x5c, 0xff)) # 12
- self.xterm_colors.append((0xff, 0x00, 0xff)) # 13
- self.xterm_colors.append((0x00, 0xff, 0xff)) # 14
- self.xterm_colors.append((0xff, 0xff, 0xff)) # 15
-
- # colors 16..232: the 6x6x6 color cube
-
- valuerange = (0x00, 0x5f, 0x87, 0xaf, 0xd7, 0xff)
-
- for i in range(217):
- r = valuerange[(i // 36) % 6]
- g = valuerange[(i // 6) % 6]
- b = valuerange[i % 6]
- self.xterm_colors.append((r, g, b))
-
- # colors 233..253: grayscale
-
- for i in range(1, 22):
- v = 8 + i * 10
- self.xterm_colors.append((v, v, v))
-
- def _closest_color(self, r, g, b):
- distance = 257*257*3 # "infinity" (>distance from #000000 to #ffffff)
- match = 0
-
- for i in range(0, 254):
- values = self.xterm_colors[i]
-
- rd = r - values[0]
- gd = g - values[1]
- bd = b - values[2]
- d = rd*rd + gd*gd + bd*bd
-
- if d < distance:
- match = i
- distance = d
- return match
-
- def _color_index(self, color):
- index = self.best_match.get(color, None)
- if color in ansicolors:
- # strip the `ansi/#ansi` part and look up code
- index = color
- self.best_match[color] = index
- if index is None:
- try:
- rgb = int(str(color), 16)
- except ValueError:
- rgb = 0
-
- r = (rgb >> 16) & 0xff
- g = (rgb >> 8) & 0xff
- b = rgb & 0xff
- index = self._closest_color(r, g, b)
- self.best_match[color] = index
- return index
-
- def _setup_styles(self):
- for ttype, ndef in self.style:
- escape = EscapeSequence()
- # get foreground from ansicolor if set
- if ndef['ansicolor']:
- escape.fg = self._color_index(ndef['ansicolor'])
- elif ndef['color']:
- escape.fg = self._color_index(ndef['color'])
- if ndef['bgansicolor']:
- escape.bg = self._color_index(ndef['bgansicolor'])
- elif ndef['bgcolor']:
- escape.bg = self._color_index(ndef['bgcolor'])
- if self.usebold and ndef['bold']:
- escape.bold = True
- if self.useunderline and ndef['underline']:
- escape.underline = True
- if self.useitalic and ndef['italic']:
- escape.italic = True
- self.style_string[str(ttype)] = (escape.color_string(),
- escape.reset_string())
-
- def _write_lineno(self, outfile):
- self._lineno += 1
- outfile.write("%s%04d: " % (self._lineno != 1 and '\n' or '', self._lineno))
-
- def format(self, tokensource, outfile):
- return Formatter.format(self, tokensource, outfile)
-
- def format_unencoded(self, tokensource, outfile):
- if self.linenos:
- self._write_lineno(outfile)
-
- for ttype, value in tokensource:
- not_found = True
- while ttype and not_found:
- try:
- # outfile.write( "<" + str(ttype) + ">" )
- on, off = self.style_string[str(ttype)]
-
- # Like TerminalFormatter, add "reset colors" escape sequence
- # on newline.
- spl = value.split('\n')
- for line in spl[:-1]:
- if line:
- outfile.write(on + line + off)
- if self.linenos:
- self._write_lineno(outfile)
- else:
- outfile.write('\n')
-
- if spl[-1]:
- outfile.write(on + spl[-1] + off)
-
- not_found = False
- # outfile.write( '#' + str(ttype) + '#' )
-
- except KeyError:
- # ottype = ttype
- ttype = ttype.parent
- # outfile.write( '!' + str(ottype) + '->' + str(ttype) + '!' )
-
- if not_found:
- outfile.write(value)
-
- if self.linenos:
- outfile.write("\n")
-
-
-
-class TerminalTrueColorFormatter(Terminal256Formatter):
- r"""
- Format tokens with ANSI color sequences, for output in a true-color
- terminal or console. Like in `TerminalFormatter` color sequences
- are terminated at newlines, so that paging the output works correctly.
-
- .. versionadded:: 2.1
-
- Options accepted:
-
- `style`
- The style to use, can be a string or a Style subclass (default:
- ``'default'``).
- """
- name = 'TerminalTrueColor'
- aliases = ['terminal16m', 'console16m', '16m']
- filenames = []
-
- def _build_color_table(self):
- pass
-
- def _color_tuple(self, color):
- try:
- rgb = int(str(color), 16)
- except ValueError:
- return None
- r = (rgb >> 16) & 0xff
- g = (rgb >> 8) & 0xff
- b = rgb & 0xff
- return (r, g, b)
-
- def _setup_styles(self):
- for ttype, ndef in self.style:
- escape = EscapeSequence()
- if ndef['color']:
- escape.fg = self._color_tuple(ndef['color'])
- if ndef['bgcolor']:
- escape.bg = self._color_tuple(ndef['bgcolor'])
- if self.usebold and ndef['bold']:
- escape.bold = True
- if self.useunderline and ndef['underline']:
- escape.underline = True
- if self.useitalic and ndef['italic']:
- escape.italic = True
- self.style_string[str(ttype)] = (escape.true_color_string(),
- escape.reset_string())
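As a usage sketch for the formatter deleted above: through the public Pygments API (rather than the `pip._vendor` copy), it plugs into `highlight()` like any other formatter. The style name and sample code below are arbitrary.

```python
# Minimal sketch using the public pygments API; Terminal256Formatter accepts the
# `style` and `linenos` options documented in the class docstring above.
from pygments import highlight
from pygments.lexers import PythonLexer
from pygments.formatters import Terminal256Formatter

source = 'print("hello, 256 colors")\n'
print(highlight(source, PythonLexer(), Terminal256Formatter(style='default', linenos=True)))
```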
diff --git a/spaces/tmaham/DS-Fusion-Express/ldm/modules/diffusionmodules/util.py b/spaces/tmaham/DS-Fusion-Express/ldm/modules/diffusionmodules/util.py
deleted file mode 100644
index a952e6c40308c33edd422da0ce6a60f47e73661b..0000000000000000000000000000000000000000
--- a/spaces/tmaham/DS-Fusion-Express/ldm/modules/diffusionmodules/util.py
+++ /dev/null
@@ -1,267 +0,0 @@
-# adopted from
-# https://github.com/openai/improved-diffusion/blob/main/improved_diffusion/gaussian_diffusion.py
-# and
-# https://github.com/lucidrains/denoising-diffusion-pytorch/blob/7706bdfc6f527f58d33f84b7b522e61e6e3164b3/denoising_diffusion_pytorch/denoising_diffusion_pytorch.py
-# and
-# https://github.com/openai/guided-diffusion/blob/0ba878e517b276c45d1195eb29f6f5f72659a05b/guided_diffusion/nn.py
-#
-# thanks!
-
-
-import os
-import math
-import torch
-import torch.nn as nn
-import numpy as np
-from einops import repeat
-
-from ldm.util import instantiate_from_config
-
-
-def make_beta_schedule(schedule, n_timestep, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3):
- if schedule == "linear":
- betas = (
- torch.linspace(linear_start ** 0.5, linear_end ** 0.5, n_timestep, dtype=torch.float64) ** 2
- )
-
- elif schedule == "cosine":
- timesteps = (
- torch.arange(n_timestep + 1, dtype=torch.float64) / n_timestep + cosine_s
- )
- alphas = timesteps / (1 + cosine_s) * np.pi / 2
- alphas = torch.cos(alphas).pow(2)
- alphas = alphas / alphas[0]
- betas = 1 - alphas[1:] / alphas[:-1]
- betas = np.clip(betas, a_min=0, a_max=0.999)
-
- elif schedule == "sqrt_linear":
- betas = torch.linspace(linear_start, linear_end, n_timestep, dtype=torch.float64)
- elif schedule == "sqrt":
- betas = torch.linspace(linear_start, linear_end, n_timestep, dtype=torch.float64) ** 0.5
- else:
- raise ValueError(f"schedule '{schedule}' unknown.")
- return betas.numpy()
-
-
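For intuition about the default `linear` branch above, here is a standalone sketch (no `ldm` import needed) that reproduces its two-line computation and prints the endpoints:

```python
# Standalone sketch of the "linear" schedule: betas are linear in sqrt-space.
import torch

n_timestep, linear_start, linear_end = 1000, 1e-4, 2e-2
betas = torch.linspace(linear_start ** 0.5, linear_end ** 0.5,
                       n_timestep, dtype=torch.float64) ** 2
print(betas.shape, float(betas[0]), float(betas[-1]))  # torch.Size([1000]) ~1e-4 ~2e-2
```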
-def make_ddim_timesteps(ddim_discr_method, num_ddim_timesteps, num_ddpm_timesteps, verbose=True):
- if ddim_discr_method == 'uniform':
- c = num_ddpm_timesteps // num_ddim_timesteps
- ddim_timesteps = np.asarray(list(range(0, num_ddpm_timesteps, c)))
- elif ddim_discr_method == 'quad':
- ddim_timesteps = ((np.linspace(0, np.sqrt(num_ddpm_timesteps * .8), num_ddim_timesteps)) ** 2).astype(int)
- else:
- raise NotImplementedError(f'There is no ddim discretization method called "{ddim_discr_method}"')
-
- # assert ddim_timesteps.shape[0] == num_ddim_timesteps
- # add one to get the final alpha values right (the ones from first scale to data during sampling)
- steps_out = ddim_timesteps + 1
- if verbose:
- print(f'Selected timesteps for ddim sampler: {steps_out}')
- return steps_out
-
-
-def make_ddim_sampling_parameters(alphacums, ddim_timesteps, eta, verbose=True):
- # select alphas for computing the variance schedule
- alphas = alphacums[ddim_timesteps]
- alphas_prev = np.asarray([alphacums[0]] + alphacums[ddim_timesteps[:-1]].tolist())
-
-    # according to the formula provided in https://arxiv.org/abs/2010.02502
- sigmas = eta * np.sqrt((1 - alphas_prev) / (1 - alphas) * (1 - alphas / alphas_prev))
- if verbose:
- print(f'Selected alphas for ddim sampler: a_t: {alphas}; a_(t-1): {alphas_prev}')
- print(f'For the chosen value of eta, which is {eta}, '
- f'this results in the following sigma_t schedule for ddim sampler {sigmas}')
- return sigmas, alphas, alphas_prev
-
-
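Spelled out, the `sigmas` computed in `make_ddim_sampling_parameters` above are the DDIM variance schedule from the referenced paper, writing the selected cumulative alphas as alpha-bar:

```latex
\sigma_t = \eta \,
  \sqrt{\frac{1 - \bar{\alpha}_{t-1}}{1 - \bar{\alpha}_t}} \,
  \sqrt{1 - \frac{\bar{\alpha}_t}{\bar{\alpha}_{t-1}}}
```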
-def betas_for_alpha_bar(num_diffusion_timesteps, alpha_bar, max_beta=0.999):
- """
- Create a beta schedule that discretizes the given alpha_t_bar function,
- which defines the cumulative product of (1-beta) over time from t = [0,1].
- :param num_diffusion_timesteps: the number of betas to produce.
- :param alpha_bar: a lambda that takes an argument t from 0 to 1 and
- produces the cumulative product of (1-beta) up to that
- part of the diffusion process.
- :param max_beta: the maximum beta to use; use values lower than 1 to
- prevent singularities.
- """
- betas = []
- for i in range(num_diffusion_timesteps):
- t1 = i / num_diffusion_timesteps
- t2 = (i + 1) / num_diffusion_timesteps
- betas.append(min(1 - alpha_bar(t2) / alpha_bar(t1), max_beta))
- return np.array(betas)
-
-
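A minimal usage sketch for `betas_for_alpha_bar`, assuming this module is importable as `ldm.modules.diffusionmodules.util` (the path in the diff header) and using the squared-cosine `alpha_bar` popularized by improved-diffusion; the 0.008/1.008 constants are that convention, not something this file fixes.

```python
# Sketch: cosine-style schedule via betas_for_alpha_bar (assumes the ldm repo
# above is on PYTHONPATH).
import math
from ldm.modules.diffusionmodules.util import betas_for_alpha_bar

betas = betas_for_alpha_bar(
    1000,
    lambda t: math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2,
)
print(betas.shape)  # (1000,)
```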
-def extract_into_tensor(a, t, x_shape):
- b, *_ = t.shape
- out = a.gather(-1, t)
- return out.reshape(b, *((1,) * (len(x_shape) - 1)))
-
-
-def checkpoint(func, inputs, params, flag):
- """
- Evaluate a function without caching intermediate activations, allowing for
- reduced memory at the expense of extra compute in the backward pass.
- :param func: the function to evaluate.
- :param inputs: the argument sequence to pass to `func`.
- :param params: a sequence of parameters `func` depends on but does not
- explicitly take as arguments.
- :param flag: if False, disable gradient checkpointing.
- """
- if flag:
- args = tuple(inputs) + tuple(params)
- return CheckpointFunction.apply(func, len(inputs), *args)
- else:
- return func(*inputs)
-
-
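A small sketch of calling `checkpoint` (same import-path assumption as above); with the flag set to True, the activations inside the wrapped module are recomputed during backward instead of being stored.

```python
# Sketch: gradient checkpointing a single Linear layer (assumes ldm is importable).
import torch
import torch.nn as nn
from ldm.modules.diffusionmodules.util import checkpoint

layer = nn.Linear(16, 16)
x = torch.randn(4, 16, requires_grad=True)

y = checkpoint(layer, (x,), tuple(layer.parameters()), True)  # flag=True
y.sum().backward()
print(x.grad.shape)  # torch.Size([4, 16])
```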
-class CheckpointFunction(torch.autograd.Function):
- @staticmethod
- def forward(ctx, run_function, length, *args):
- ctx.run_function = run_function
- ctx.input_tensors = list(args[:length])
- ctx.input_params = list(args[length:])
-
- with torch.no_grad():
- output_tensors = ctx.run_function(*ctx.input_tensors)
- return output_tensors
-
- @staticmethod
- def backward(ctx, *output_grads):
- ctx.input_tensors = [x.detach().requires_grad_(True) for x in ctx.input_tensors]
- with torch.enable_grad():
- # Fixes a bug where the first op in run_function modifies the
- # Tensor storage in place, which is not allowed for detach()'d
- # Tensors.
- shallow_copies = [x.view_as(x) for x in ctx.input_tensors]
- output_tensors = ctx.run_function(*shallow_copies)
- input_grads = torch.autograd.grad(
- output_tensors,
- ctx.input_tensors + ctx.input_params,
- output_grads,
- allow_unused=True,
- )
- del ctx.input_tensors
- del ctx.input_params
- del output_tensors
- return (None, None) + input_grads
-
-
-def timestep_embedding(timesteps, dim, max_period=10000, repeat_only=False):
- """
- Create sinusoidal timestep embeddings.
- :param timesteps: a 1-D Tensor of N indices, one per batch element.
- These may be fractional.
- :param dim: the dimension of the output.
- :param max_period: controls the minimum frequency of the embeddings.
- :return: an [N x dim] Tensor of positional embeddings.
- """
- if not repeat_only:
- half = dim // 2
- freqs = torch.exp(
- -math.log(max_period) * torch.arange(start=0, end=half, dtype=torch.float32) / half
- ).to(device=timesteps.device)
- args = timesteps[:, None].float() * freqs[None]
- embedding = torch.cat([torch.cos(args), torch.sin(args)], dim=-1)
- if dim % 2:
- embedding = torch.cat([embedding, torch.zeros_like(embedding[:, :1])], dim=-1)
- else:
- embedding = repeat(timesteps, 'b -> b d', d=dim)
- return embedding
-
-
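A quick shape check for `timestep_embedding` (again assuming the `ldm` package is importable): each timestep maps to `dim` values, cosine terms first and sine terms second.

```python
# Sketch: sinusoidal embeddings for a small batch of timesteps.
import torch
from ldm.modules.diffusionmodules.util import timestep_embedding

t = torch.tensor([0, 10, 500, 999])
emb = timestep_embedding(t, dim=128)
print(emb.shape)  # torch.Size([4, 128])
```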
-def zero_module(module):
- """
- Zero out the parameters of a module and return it.
- """
- for p in module.parameters():
- p.detach().zero_()
- return module
-
-
-def scale_module(module, scale):
- """
- Scale the parameters of a module and return it.
- """
- for p in module.parameters():
- p.detach().mul_(scale)
- return module
-
-
-def mean_flat(tensor):
- """
- Take the mean over all non-batch dimensions.
- """
- return tensor.mean(dim=list(range(1, len(tensor.shape))))
-
-
-def normalization(channels):
- """
- Make a standard normalization layer.
- :param channels: number of input channels.
- :return: an nn.Module for normalization.
- """
- return GroupNorm32(32, channels)
-
-
-# PyTorch 1.7 has SiLU, but we support PyTorch 1.5.
-class SiLU(nn.Module):
- def forward(self, x):
- return x * torch.sigmoid(x)
-
-
-class GroupNorm32(nn.GroupNorm):
- def forward(self, x):
- return super().forward(x.float()).type(x.dtype)
-
-def conv_nd(dims, *args, **kwargs):
- """
- Create a 1D, 2D, or 3D convolution module.
- """
- if dims == 1:
- return nn.Conv1d(*args, **kwargs)
- elif dims == 2:
- return nn.Conv2d(*args, **kwargs)
- elif dims == 3:
- return nn.Conv3d(*args, **kwargs)
- raise ValueError(f"unsupported dimensions: {dims}")
-
-
-def linear(*args, **kwargs):
- """
- Create a linear module.
- """
- return nn.Linear(*args, **kwargs)
-
-
-def avg_pool_nd(dims, *args, **kwargs):
- """
- Create a 1D, 2D, or 3D average pooling module.
- """
- if dims == 1:
- return nn.AvgPool1d(*args, **kwargs)
- elif dims == 2:
- return nn.AvgPool2d(*args, **kwargs)
- elif dims == 3:
- return nn.AvgPool3d(*args, **kwargs)
- raise ValueError(f"unsupported dimensions: {dims}")
-
-
-class HybridConditioner(nn.Module):
-
- def __init__(self, c_concat_config, c_crossattn_config):
- super().__init__()
- self.concat_conditioner = instantiate_from_config(c_concat_config)
- self.crossattn_conditioner = instantiate_from_config(c_crossattn_config)
-
- def forward(self, c_concat, c_crossattn):
- c_concat = self.concat_conditioner(c_concat)
- c_crossattn = self.crossattn_conditioner(c_crossattn)
- return {'c_concat': [c_concat], 'c_crossattn': [c_crossattn]}
-
-
-def noise_like(shape, device, repeat=False):
- repeat_noise = lambda: torch.randn((1, *shape[1:]), device=device).repeat(shape[0], *((1,) * (len(shape) - 1)))
- noise = lambda: torch.randn(shape, device=device)
- return repeat_noise() if repeat else noise()
\ No newline at end of file
diff --git a/spaces/tomofi/ABINet-OCR/modules/model.py b/spaces/tomofi/ABINet-OCR/modules/model.py
deleted file mode 100644
index dc19b937690f82d388fc2f9bd8127567618df5e7..0000000000000000000000000000000000000000
--- a/spaces/tomofi/ABINet-OCR/modules/model.py
+++ /dev/null
@@ -1,50 +0,0 @@
-import torch
-import torch.nn as nn
-
-from utils import CharsetMapper
-
-
-_default_tfmer_cfg = dict(d_model=512, nhead=8, d_inner=2048, # 1024
- dropout=0.1, activation='relu')
-
-class Model(nn.Module):
-
- def __init__(self, config):
- super().__init__()
- self.max_length = config.dataset_max_length + 1
- self.charset = CharsetMapper(config.dataset_charset_path, max_length=self.max_length)
-
- def load(self, source, device=None, strict=True):
- state = torch.load(source, map_location=device)
- self.load_state_dict(state['model'], strict=strict)
-
- def _get_length(self, logit, dim=-1):
-        """Greedy decoder to obtain length from logit."""
- out = (logit.argmax(dim=-1) == self.charset.null_label)
- abn = out.any(dim)
- out = ((out.cumsum(dim) == 1) & out).max(dim)[1]
- out = out + 1 # additional end token
- out = torch.where(abn, out, out.new_tensor(logit.shape[1]))
- return out
-
- @staticmethod
- def _get_padding_mask(length, max_length):
- length = length.unsqueeze(-1)
- grid = torch.arange(0, max_length, device=length.device).unsqueeze(0)
- return grid >= length
-
- @staticmethod
- def _get_square_subsequent_mask(sz, device, diagonal=0, fw=True):
- r"""Generate a square mask for the sequence. The masked positions are filled with float('-inf').
- Unmasked positions are filled with float(0.0).
- """
- mask = (torch.triu(torch.ones(sz, sz, device=device), diagonal=diagonal) == 1)
- if fw: mask = mask.transpose(0, 1)
- mask = mask.float().masked_fill(mask == 0, float('-inf')).masked_fill(mask == 1, float(0.0))
- return mask
-
- @staticmethod
- def _get_location_mask(sz, device=None):
- mask = torch.eye(sz, device=device)
- mask = mask.float().masked_fill(mask == 1, float('-inf'))
- return mask
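The two mask helpers at the end of this file are standard Transformer masks. The standalone sketch below re-implements `_get_square_subsequent_mask` for `sz=4` so it runs without the ABINet `utils` module; the values are what the method above would return with `fw=True`.

```python
# Standalone re-implementation of _get_square_subsequent_mask for illustration only.
import torch

sz = 4
mask = (torch.triu(torch.ones(sz, sz), diagonal=0) == 1).transpose(0, 1)  # fw=True
mask = mask.float().masked_fill(mask == 0, float('-inf')).masked_fill(mask == 1, 0.0)
print(mask)
# tensor([[0., -inf, -inf, -inf],
#         [0., 0., -inf, -inf],
#         [0., 0., 0., -inf],
#         [0., 0., 0., 0.]])
```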
diff --git a/spaces/tomofi/MMOCR/mmocr/models/textrecog/convertors/base.py b/spaces/tomofi/MMOCR/mmocr/models/textrecog/convertors/base.py
deleted file mode 100644
index 976299d9947dd1b3d32af37fd0ce03040b15c419..0000000000000000000000000000000000000000
--- a/spaces/tomofi/MMOCR/mmocr/models/textrecog/convertors/base.py
+++ /dev/null
@@ -1,116 +0,0 @@
-# Copyright (c) OpenMMLab. All rights reserved.
-from mmocr.models.builder import CONVERTORS
-from mmocr.utils import list_from_file
-
-
-@CONVERTORS.register_module()
-class BaseConvertor:
- """Convert between text, index and tensor for text recognize pipeline.
-
- Args:
- dict_type (str): Type of dict, should be either 'DICT36' or 'DICT90'.
- dict_file (None|str): Character dict file path. If not none,
- the dict_file is of higher priority than dict_type.
- dict_list (None|list[str]): Character list. If not none, the list
- is of higher priority than dict_type, but lower than dict_file.
- """
- start_idx = end_idx = padding_idx = 0
- unknown_idx = None
- lower = False
-
- DICT36 = tuple('0123456789abcdefghijklmnopqrstuvwxyz')
- DICT90 = tuple('0123456789abcdefghijklmnopqrstuvwxyz'
- 'ABCDEFGHIJKLMNOPQRSTUVWXYZ!"#$%&\'()'
- '*+,-./:;<=>?@[\\]_`~')
-
- def __init__(self, dict_type='DICT90', dict_file=None, dict_list=None):
- assert dict_type in ('DICT36', 'DICT90')
- assert dict_file is None or isinstance(dict_file, str)
- assert dict_list is None or isinstance(dict_list, list)
- self.idx2char = []
- if dict_file is not None:
- for line in list_from_file(dict_file):
- line = line.strip()
- if line != '':
- self.idx2char.append(line)
- elif dict_list is not None:
- self.idx2char = dict_list
- else:
- if dict_type == 'DICT36':
- self.idx2char = list(self.DICT36)
- else:
- self.idx2char = list(self.DICT90)
-
- self.char2idx = {}
- for idx, char in enumerate(self.idx2char):
- self.char2idx[char] = idx
-
- def num_classes(self):
- """Number of output classes."""
- return len(self.idx2char)
-
- def str2idx(self, strings):
- """Convert strings to indexes.
-
- Args:
- strings (list[str]): ['hello', 'world'].
- Returns:
- indexes (list[list[int]]): [[1,2,3,3,4], [5,4,6,3,7]].
- """
- assert isinstance(strings, list)
-
- indexes = []
- for string in strings:
- if self.lower:
- string = string.lower()
- index = []
- for char in string:
- char_idx = self.char2idx.get(char, self.unknown_idx)
- if char_idx is None:
-                    raise Exception(f'Character: {char} not in dict,'
- f' please check gt_label and use'
- f' custom dict file,'
- f' or set "with_unknown=True"')
- index.append(char_idx)
- indexes.append(index)
-
- return indexes
-
- def str2tensor(self, strings):
- """Convert text-string to input tensor.
-
- Args:
- strings (list[str]): ['hello', 'world'].
- Returns:
- tensors (list[torch.Tensor]): [torch.Tensor([1,2,3,3,4]),
- torch.Tensor([5,4,6,3,7])].
- """
- raise NotImplementedError
-
- def idx2str(self, indexes):
- """Convert indexes to text strings.
-
- Args:
- indexes (list[list[int]]): [[1,2,3,3,4], [5,4,6,3,7]].
- Returns:
- strings (list[str]): ['hello', 'world'].
- """
- assert isinstance(indexes, list)
-
- strings = []
- for index in indexes:
- string = [self.idx2char[i] for i in index]
- strings.append(''.join(string))
-
- return strings
-
- def tensor2idx(self, output):
- """Convert model output tensor to character indexes and scores.
- Args:
- output (tensor): The model outputs with size: N * T * C
- Returns:
- indexes (list[list[int]]): [[1,2,3,3,4], [5,4,6,3,7]].
- scores (list[list[float]]): [[0.9,0.8,0.95,0.97,0.94],
- [0.9,0.9,0.98,0.97,0.96]].
- """
- raise NotImplementedError
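A minimal round-trip sketch for the concrete parts of `BaseConvertor` (`str2idx`/`idx2str`), assuming the mmocr version this file belongs to is installed so the module path in the diff header resolves:

```python
# Sketch: text <-> index conversion with the DICT36 charset defined above.
from mmocr.models.textrecog.convertors.base import BaseConvertor

cvt = BaseConvertor(dict_type='DICT36')
print(cvt.num_classes())      # 36
indexes = cvt.str2idx(['hello'])
print(indexes)                # [[17, 14, 21, 21, 24]]
print(cvt.idx2str(indexes))   # ['hello']
```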
diff --git a/spaces/tomofi/NDLOCR/src/ndl_layout/mmdetection/configs/vfnet/vfnet_r101_fpn_mdconv_c3-c5_mstrain_2x_coco.py b/spaces/tomofi/NDLOCR/src/ndl_layout/mmdetection/configs/vfnet/vfnet_r101_fpn_mdconv_c3-c5_mstrain_2x_coco.py
deleted file mode 100644
index f8ef6ec092db2e454ca5359b6df89d31365672c0..0000000000000000000000000000000000000000
--- a/spaces/tomofi/NDLOCR/src/ndl_layout/mmdetection/configs/vfnet/vfnet_r101_fpn_mdconv_c3-c5_mstrain_2x_coco.py
+++ /dev/null
@@ -1,14 +0,0 @@
-_base_ = './vfnet_r50_fpn_mdconv_c3-c5_mstrain_2x_coco.py'
-model = dict(
- pretrained='torchvision://resnet101',
- backbone=dict(
- type='ResNet',
- depth=101,
- num_stages=4,
- out_indices=(0, 1, 2, 3),
- frozen_stages=1,
- norm_cfg=dict(type='BN', requires_grad=True),
- norm_eval=True,
- style='pytorch',
- dcn=dict(type='DCNv2', deform_groups=1, fallback_on_stride=False),
- stage_with_dcn=(False, True, True, True)))
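Configs like the one above are normally consumed through MMDetection's high-level API. Below is a hedged sketch for the MMDetection 2.x era this repo targets; the checkpoint and image paths are placeholders, not files shipped with the config.

```python
# Sketch: building and running a detector from this config with mmdet 2.x.
from mmdet.apis import init_detector, inference_detector

config = 'configs/vfnet/vfnet_r101_fpn_mdconv_c3-c5_mstrain_2x_coco.py'
checkpoint = 'checkpoints/vfnet_r101_mdconv.pth'  # placeholder path
model = init_detector(config, checkpoint, device='cpu')
result = inference_detector(model, 'demo.jpg')    # placeholder image
```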
diff --git a/spaces/tomofi/NDLOCR/src/ndl_layout/mmdetection/mmdet/core/mask/structures.py b/spaces/tomofi/NDLOCR/src/ndl_layout/mmdetection/mmdet/core/mask/structures.py
deleted file mode 100644
index d9ec5775f281ab8b76cb873e71a4edd9969ab905..0000000000000000000000000000000000000000
--- a/spaces/tomofi/NDLOCR/src/ndl_layout/mmdetection/mmdet/core/mask/structures.py
+++ /dev/null
@@ -1,1024 +0,0 @@
-from abc import ABCMeta, abstractmethod
-
-import cv2
-import mmcv
-import numpy as np
-import pycocotools.mask as maskUtils
-import torch
-from mmcv.ops.roi_align import roi_align
-
-
-class BaseInstanceMasks(metaclass=ABCMeta):
- """Base class for instance masks."""
-
- @abstractmethod
- def rescale(self, scale, interpolation='nearest'):
- """Rescale masks as large as possible while keeping the aspect ratio.
- For details can refer to `mmcv.imrescale`.
-
- Args:
- scale (tuple[int]): The maximum size (h, w) of rescaled mask.
- interpolation (str): Same as :func:`mmcv.imrescale`.
-
- Returns:
- BaseInstanceMasks: The rescaled masks.
- """
-
- @abstractmethod
- def resize(self, out_shape, interpolation='nearest'):
- """Resize masks to the given out_shape.
-
- Args:
- out_shape: Target (h, w) of resized mask.
- interpolation (str): See :func:`mmcv.imresize`.
-
- Returns:
- BaseInstanceMasks: The resized masks.
- """
-
- @abstractmethod
- def flip(self, flip_direction='horizontal'):
- """Flip masks alone the given direction.
-
- Args:
- flip_direction (str): Either 'horizontal' or 'vertical'.
-
- Returns:
- BaseInstanceMasks: The flipped masks.
- """
-
- @abstractmethod
- def pad(self, out_shape, pad_val):
- """Pad masks to the given size of (h, w).
-
- Args:
- out_shape (tuple[int]): Target (h, w) of padded mask.
- pad_val (int): The padded value.
-
- Returns:
- BaseInstanceMasks: The padded masks.
- """
-
- @abstractmethod
- def crop(self, bbox):
- """Crop each mask by the given bbox.
-
- Args:
- bbox (ndarray): Bbox in format [x1, y1, x2, y2], shape (4, ).
-
- Return:
- BaseInstanceMasks: The cropped masks.
- """
-
- @abstractmethod
- def crop_and_resize(self,
- bboxes,
- out_shape,
- inds,
- device,
- interpolation='bilinear'):
- """Crop and resize masks by the given bboxes.
-
- This function is mainly used in mask targets computation.
-        It first aligns masks to bboxes by assigned_inds, then crops the masks with the
-        assigned bboxes and resizes them to the size of (mask_h, mask_w).
-
- Args:
- bboxes (Tensor): Bboxes in format [x1, y1, x2, y2], shape (N, 4)
- out_shape (tuple[int]): Target (h, w) of resized mask
- inds (ndarray): Indexes to assign masks to each bbox,
- shape (N,) and values should be between [0, num_masks - 1].
- device (str): Device of bboxes
- interpolation (str): See `mmcv.imresize`
-
- Return:
- BaseInstanceMasks: the cropped and resized masks.
- """
-
- @abstractmethod
- def expand(self, expanded_h, expanded_w, top, left):
- """see :class:`Expand`."""
-
- @property
- @abstractmethod
- def areas(self):
- """ndarray: areas of each instance."""
-
- @abstractmethod
- def to_ndarray(self):
- """Convert masks to the format of ndarray.
-
- Return:
- ndarray: Converted masks in the format of ndarray.
- """
-
- @abstractmethod
- def to_tensor(self, dtype, device):
- """Convert masks to the format of Tensor.
-
- Args:
- dtype (str): Dtype of converted mask.
- device (torch.device): Device of converted masks.
-
- Returns:
- Tensor: Converted masks in the format of Tensor.
- """
-
- @abstractmethod
- def translate(self,
- out_shape,
- offset,
- direction='horizontal',
- fill_val=0,
- interpolation='bilinear'):
- """Translate the masks.
-
- Args:
- out_shape (tuple[int]): Shape for output mask, format (h, w).
- offset (int | float): The offset for translate.
- direction (str): The translate direction, either "horizontal"
- or "vertical".
- fill_val (int | float): Border value. Default 0.
- interpolation (str): Same as :func:`mmcv.imtranslate`.
-
- Returns:
- Translated masks.
- """
-
- def shear(self,
- out_shape,
- magnitude,
- direction='horizontal',
- border_value=0,
- interpolation='bilinear'):
- """Shear the masks.
-
- Args:
- out_shape (tuple[int]): Shape for output mask, format (h, w).
- magnitude (int | float): The magnitude used for shear.
- direction (str): The shear direction, either "horizontal"
- or "vertical".
- border_value (int | tuple[int]): Value used in case of a
- constant border. Default 0.
- interpolation (str): Same as in :func:`mmcv.imshear`.
-
- Returns:
- ndarray: Sheared masks.
- """
-
- @abstractmethod
- def rotate(self, out_shape, angle, center=None, scale=1.0, fill_val=0):
- """Rotate the masks.
-
- Args:
- out_shape (tuple[int]): Shape for output mask, format (h, w).
- angle (int | float): Rotation angle in degrees. Positive values
- mean counter-clockwise rotation.
- center (tuple[float], optional): Center point (w, h) of the
- rotation in source image. If not specified, the center of
- the image will be used.
- scale (int | float): Isotropic scale factor.
- fill_val (int | float): Border value. Default 0 for masks.
-
- Returns:
- Rotated masks.
- """
-
-
-class BitmapMasks(BaseInstanceMasks):
- """This class represents masks in the form of bitmaps.
-
- Args:
- masks (ndarray): ndarray of masks in shape (N, H, W), where N is
- the number of objects.
- height (int): height of masks
- width (int): width of masks
-
- Example:
- >>> from mmdet.core.mask.structures import * # NOQA
- >>> num_masks, H, W = 3, 32, 32
- >>> rng = np.random.RandomState(0)
- >>> masks = (rng.rand(num_masks, H, W) > 0.1).astype(np.int)
- >>> self = BitmapMasks(masks, height=H, width=W)
-
- >>> # demo crop_and_resize
- >>> num_boxes = 5
- >>> bboxes = np.array([[0, 0, 30, 10.0]] * num_boxes)
- >>> out_shape = (14, 14)
- >>> inds = torch.randint(0, len(self), size=(num_boxes,))
- >>> device = 'cpu'
- >>> interpolation = 'bilinear'
- >>> new = self.crop_and_resize(
- ... bboxes, out_shape, inds, device, interpolation)
- >>> assert len(new) == num_boxes
- >>> assert new.height, new.width == out_shape
- """
-
- def __init__(self, masks, height, width):
- self.height = height
- self.width = width
- if len(masks) == 0:
- self.masks = np.empty((0, self.height, self.width), dtype=np.uint8)
- else:
- assert isinstance(masks, (list, np.ndarray))
- if isinstance(masks, list):
- assert isinstance(masks[0], np.ndarray)
- assert masks[0].ndim == 2 # (H, W)
- else:
- assert masks.ndim == 3 # (N, H, W)
-
- self.masks = np.stack(masks).reshape(-1, height, width)
- assert self.masks.shape[1] == self.height
- assert self.masks.shape[2] == self.width
-
- def __getitem__(self, index):
- """Index the BitmapMask.
-
- Args:
- index (int | ndarray): Indices in the format of integer or ndarray.
-
- Returns:
- :obj:`BitmapMasks`: Indexed bitmap masks.
- """
- masks = self.masks[index].reshape(-1, self.height, self.width)
- return BitmapMasks(masks, self.height, self.width)
-
- def __iter__(self):
- return iter(self.masks)
-
- def __repr__(self):
- s = self.__class__.__name__ + '('
- s += f'num_masks={len(self.masks)}, '
- s += f'height={self.height}, '
- s += f'width={self.width})'
- return s
-
- def __len__(self):
- """Number of masks."""
- return len(self.masks)
-
- def rescale(self, scale, interpolation='nearest'):
- """See :func:`BaseInstanceMasks.rescale`."""
- if len(self.masks) == 0:
- new_w, new_h = mmcv.rescale_size((self.width, self.height), scale)
- rescaled_masks = np.empty((0, new_h, new_w), dtype=np.uint8)
- else:
- rescaled_masks = np.stack([
- mmcv.imrescale(mask, scale, interpolation=interpolation)
- for mask in self.masks
- ])
- height, width = rescaled_masks.shape[1:]
- return BitmapMasks(rescaled_masks, height, width)
-
- def resize(self, out_shape, interpolation='nearest'):
- """See :func:`BaseInstanceMasks.resize`."""
- if len(self.masks) == 0:
- resized_masks = np.empty((0, *out_shape), dtype=np.uint8)
- else:
- resized_masks = np.stack([
- mmcv.imresize(
- mask, out_shape[::-1], interpolation=interpolation)
- for mask in self.masks
- ])
- return BitmapMasks(resized_masks, *out_shape)
-
- def flip(self, flip_direction='horizontal'):
- """See :func:`BaseInstanceMasks.flip`."""
- assert flip_direction in ('horizontal', 'vertical', 'diagonal')
-
- if len(self.masks) == 0:
- flipped_masks = self.masks
- else:
- flipped_masks = np.stack([
- mmcv.imflip(mask, direction=flip_direction)
- for mask in self.masks
- ])
- return BitmapMasks(flipped_masks, self.height, self.width)
-
- def pad(self, out_shape, pad_val=0):
- """See :func:`BaseInstanceMasks.pad`."""
- if len(self.masks) == 0:
- padded_masks = np.empty((0, *out_shape), dtype=np.uint8)
- else:
- padded_masks = np.stack([
- mmcv.impad(mask, shape=out_shape, pad_val=pad_val)
- for mask in self.masks
- ])
- return BitmapMasks(padded_masks, *out_shape)
-
- def crop(self, bbox):
- """See :func:`BaseInstanceMasks.crop`."""
- assert isinstance(bbox, np.ndarray)
- assert bbox.ndim == 1
-
- # clip the boundary
- bbox = bbox.copy()
- bbox[0::2] = np.clip(bbox[0::2], 0, self.width)
- bbox[1::2] = np.clip(bbox[1::2], 0, self.height)
- x1, y1, x2, y2 = bbox
- w = np.maximum(x2 - x1, 1)
- h = np.maximum(y2 - y1, 1)
-
- if len(self.masks) == 0:
- cropped_masks = np.empty((0, h, w), dtype=np.uint8)
- else:
- cropped_masks = self.masks[:, y1:y1 + h, x1:x1 + w]
- return BitmapMasks(cropped_masks, h, w)
-
- def crop_and_resize(self,
- bboxes,
- out_shape,
- inds,
- device='cpu',
- interpolation='bilinear'):
- """See :func:`BaseInstanceMasks.crop_and_resize`."""
- if len(self.masks) == 0:
- empty_masks = np.empty((0, *out_shape), dtype=np.uint8)
- return BitmapMasks(empty_masks, *out_shape)
-
- # convert bboxes to tensor
- if isinstance(bboxes, np.ndarray):
- bboxes = torch.from_numpy(bboxes).to(device=device)
- if isinstance(inds, np.ndarray):
- inds = torch.from_numpy(inds).to(device=device)
-
- num_bbox = bboxes.shape[0]
- fake_inds = torch.arange(
- num_bbox, device=device).to(dtype=bboxes.dtype)[:, None]
- rois = torch.cat([fake_inds, bboxes], dim=1) # Nx5
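- # Each roi is (batch_ind, x1, y1, x2, y2); batch_ind is a simple running index
- # because the i-th roi is paired with the i-th mask selected via `inds` below.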
- rois = rois.to(device=device)
- if num_bbox > 0:
- gt_masks_th = torch.from_numpy(self.masks).to(device).index_select(
- 0, inds).to(dtype=rois.dtype)
- targets = roi_align(gt_masks_th[:, None, :, :], rois, out_shape,
- 1.0, 0, 'avg', True).squeeze(1)
- resized_masks = (targets >= 0.5).cpu().numpy()
- else:
- resized_masks = []
- return BitmapMasks(resized_masks, *out_shape)
-
- def expand(self, expanded_h, expanded_w, top, left):
- """See :func:`BaseInstanceMasks.expand`."""
- if len(self.masks) == 0:
- expanded_mask = np.empty((0, expanded_h, expanded_w),
- dtype=np.uint8)
- else:
- expanded_mask = np.zeros((len(self), expanded_h, expanded_w),
- dtype=np.uint8)
- expanded_mask[:, top:top + self.height,
- left:left + self.width] = self.masks
- return BitmapMasks(expanded_mask, expanded_h, expanded_w)
-
- def translate(self,
- out_shape,
- offset,
- direction='horizontal',
- fill_val=0,
- interpolation='bilinear'):
- """Translate the BitmapMasks.
-
- Args:
- out_shape (tuple[int]): Shape for output mask, format (h, w).
- offset (int | float): The offset for translate.
- direction (str): The translate direction, either "horizontal"
- or "vertical".
- fill_val (int | float): Border value. Default 0 for masks.
- interpolation (str): Same as :func:`mmcv.imtranslate`.
-
- Returns:
- BitmapMasks: Translated BitmapMasks.
-
- Example:
- >>> from mmdet.core.mask.structures import BitmapMasks
- >>> self = BitmapMasks.random(dtype=np.uint8)
- >>> out_shape = (32, 32)
- >>> offset = 4
- >>> direction = 'horizontal'
- >>> fill_val = 0
- >>> interpolation = 'bilinear'
- >>> # Note: there seem to be issues when:
- >>> # * out_shape is different from self's shape
- >>> # * the mask dtype is not supported by cv2.warpAffine
- >>> new = self.translate(out_shape, offset, direction, fill_val,
- ... interpolation)
- >>> assert len(new) == len(self)
- >>> assert (new.height, new.width) == out_shape
- """
- if len(self.masks) == 0:
- translated_masks = np.empty((0, *out_shape), dtype=np.uint8)
- else:
- translated_masks = mmcv.imtranslate(
- self.masks.transpose((1, 2, 0)),
- offset,
- direction,
- border_value=fill_val,
- interpolation=interpolation)
- if translated_masks.ndim == 2:
- translated_masks = translated_masks[:, :, None]
- translated_masks = translated_masks.transpose(
- (2, 0, 1)).astype(self.masks.dtype)
- return BitmapMasks(translated_masks, *out_shape)
-
- def shear(self,
- out_shape,
- magnitude,
- direction='horizontal',
- border_value=0,
- interpolation='bilinear'):
- """Shear the BitmapMasks.
-
- Args:
- out_shape (tuple[int]): Shape for output mask, format (h, w).
- magnitude (int | float): The magnitude used for shear.
- direction (str): The shear direction, either "horizontal"
- or "vertical".
- border_value (int | tuple[int]): Value used in case of a
- constant border.
- interpolation (str): Same as in :func:`mmcv.imshear`.
-
- Returns:
- BitmapMasks: The sheared masks.
- """
- if len(self.masks) == 0:
- sheared_masks = np.empty((0, *out_shape), dtype=np.uint8)
- else:
- sheared_masks = mmcv.imshear(
- self.masks.transpose((1, 2, 0)),
- magnitude,
- direction,
- border_value=border_value,
- interpolation=interpolation)
- if sheared_masks.ndim == 2:
- sheared_masks = sheared_masks[:, :, None]
- sheared_masks = sheared_masks.transpose(
- (2, 0, 1)).astype(self.masks.dtype)
- return BitmapMasks(sheared_masks, *out_shape)
-
- def rotate(self, out_shape, angle, center=None, scale=1.0, fill_val=0):
- """Rotate the BitmapMasks.
-
- Args:
- out_shape (tuple[int]): Shape for output mask, format (h, w).
- angle (int | float): Rotation angle in degrees. Positive values
- mean counter-clockwise rotation.
- center (tuple[float], optional): Center point (w, h) of the
- rotation in source image. If not specified, the center of
- the image will be used.
- scale (int | float): Isotropic scale factor.
- fill_val (int | float): Border value. Default 0 for masks.
-
- Returns:
- BitmapMasks: Rotated BitmapMasks.
- """
- if len(self.masks) == 0:
- rotated_masks = np.empty((0, *out_shape), dtype=self.masks.dtype)
- else:
- rotated_masks = mmcv.imrotate(
- self.masks.transpose((1, 2, 0)),
- angle,
- center=center,
- scale=scale,
- border_value=fill_val)
- if rotated_masks.ndim == 2:
- # case when only one mask, (h, w)
- rotated_masks = rotated_masks[:, :, None] # (h, w, 1)
- rotated_masks = rotated_masks.transpose(
- (2, 0, 1)).astype(self.masks.dtype)
- return BitmapMasks(rotated_masks, *out_shape)
-
- @property
- def areas(self):
- """See :py:attr:`BaseInstanceMasks.areas`."""
- return self.masks.sum((1, 2))
-
- def to_ndarray(self):
- """See :func:`BaseInstanceMasks.to_ndarray`."""
- return self.masks
-
- def to_tensor(self, dtype, device):
- """See :func:`BaseInstanceMasks.to_tensor`."""
- return torch.tensor(self.masks, dtype=dtype, device=device)
-
- @classmethod
- def random(cls,
- num_masks=3,
- height=32,
- width=32,
- dtype=np.uint8,
- rng=None):
- """Generate random bitmap masks for demo / testing purposes.
-
- Example:
- >>> from mmdet.core.mask.structures import BitmapMasks
- >>> self = BitmapMasks.random()
- >>> print('self = {}'.format(self))
- self = BitmapMasks(num_masks=3, height=32, width=32)
- """
- from mmdet.utils.util_random import ensure_rng
- rng = ensure_rng(rng)
- masks = (rng.rand(num_masks, height, width) > 0.1).astype(dtype)
- self = cls(masks, height=height, width=width)
- return self
-
-
-class PolygonMasks(BaseInstanceMasks):
- """This class represents masks in the form of polygons.
-
- Polygons are represented as a list with three levels. The first level of
- the list corresponds to objects, the second level to the polygons that
- compose each object, and the third level to the polygon coordinates.
-
- Args:
- masks (list[list[ndarray]]): The first level of the list
- corresponds to objects, the second level to the polys that
- compose the object, the third level to the poly coordinates
- height (int): height of masks
- width (int): width of masks
-
- Example:
- >>> from mmdet.core.mask.structures import * # NOQA
- >>> masks = [
- >>> [ np.array([0, 0, 10, 0, 10, 10., 0, 10, 0, 0]) ]
- >>> ]
- >>> height, width = 16, 16
- >>> self = PolygonMasks(masks, height, width)
-
- >>> # demo translate
- >>> new = self.translate((16, 16), 4., direction='horizontal')
- >>> assert np.all(new.masks[0][0][1::2] == masks[0][0][1::2])
- >>> assert np.all(new.masks[0][0][0::2] == masks[0][0][0::2] + 4)
-
- >>> # demo crop_and_resize
- >>> num_boxes = 3
- >>> bboxes = np.array([[0, 0, 30, 10.0]] * num_boxes)
- >>> out_shape = (16, 16)
- >>> inds = torch.randint(0, len(self), size=(num_boxes,))
- >>> device = 'cpu'
- >>> interpolation = 'bilinear'
- >>> new = self.crop_and_resize(
- ... bboxes, out_shape, inds, device, interpolation)
- >>> assert len(new) == num_boxes
- >>> assert (new.height, new.width) == out_shape
- """
-
- def __init__(self, masks, height, width):
- assert isinstance(masks, list)
- if len(masks) > 0:
- assert isinstance(masks[0], list)
- assert isinstance(masks[0][0], np.ndarray)
-
- self.height = height
- self.width = width
- self.masks = masks
-
- def __getitem__(self, index):
- """Index the polygon masks.
-
- Args:
- index (ndarray | List): The indices.
-
- Returns:
- :obj:`PolygonMasks`: The indexed polygon masks.
- """
- if isinstance(index, np.ndarray):
- index = index.tolist()
- if isinstance(index, list):
- masks = [self.masks[i] for i in index]
- else:
- try:
- masks = self.masks[index]
- except Exception:
- raise ValueError(
- f'Unsupported input of type {type(index)} for indexing!')
- if len(masks) and isinstance(masks[0], np.ndarray):
- masks = [masks] # ensure a list of three levels
- return PolygonMasks(masks, self.height, self.width)
-
- def __iter__(self):
- return iter(self.masks)
-
- def __repr__(self):
- s = self.__class__.__name__ + '('
- s += f'num_masks={len(self.masks)}, '
- s += f'height={self.height}, '
- s += f'width={self.width})'
- return s
-
- def __len__(self):
- """Number of masks."""
- return len(self.masks)
-
- def rescale(self, scale, interpolation=None):
- """see :func:`BaseInstanceMasks.rescale`"""
- new_w, new_h = mmcv.rescale_size((self.width, self.height), scale)
- if len(self.masks) == 0:
- rescaled_masks = PolygonMasks([], new_h, new_w)
- else:
- rescaled_masks = self.resize((new_h, new_w))
- return rescaled_masks
-
- def resize(self, out_shape, interpolation=None):
- """see :func:`BaseInstanceMasks.resize`"""
- if len(self.masks) == 0:
- resized_masks = PolygonMasks([], *out_shape)
- else:
- h_scale = out_shape[0] / self.height
- w_scale = out_shape[1] / self.width
- resized_masks = []
- for poly_per_obj in self.masks:
- resized_poly = []
- for p in poly_per_obj:
- p = p.copy()
- p[0::2] *= w_scale
- p[1::2] *= h_scale
- resized_poly.append(p)
- resized_masks.append(resized_poly)
- resized_masks = PolygonMasks(resized_masks, *out_shape)
- return resized_masks
-
- def flip(self, flip_direction='horizontal'):
- """see :func:`BaseInstanceMasks.flip`"""
- assert flip_direction in ('horizontal', 'vertical', 'diagonal')
- if len(self.masks) == 0:
- flipped_masks = PolygonMasks([], self.height, self.width)
- else:
- flipped_masks = []
- for poly_per_obj in self.masks:
- flipped_poly_per_obj = []
- for p in poly_per_obj:
- p = p.copy()
- if flip_direction == 'horizontal':
- p[0::2] = self.width - p[0::2]
- elif flip_direction == 'vertical':
- p[1::2] = self.height - p[1::2]
- else:
- p[0::2] = self.width - p[0::2]
- p[1::2] = self.height - p[1::2]
- flipped_poly_per_obj.append(p)
- flipped_masks.append(flipped_poly_per_obj)
- flipped_masks = PolygonMasks(flipped_masks, self.height,
- self.width)
- return flipped_masks
-
- def crop(self, bbox):
- """see :func:`BaseInstanceMasks.crop`"""
- assert isinstance(bbox, np.ndarray)
- assert bbox.ndim == 1
-
- # clip the boundary
- bbox = bbox.copy()
- bbox[0::2] = np.clip(bbox[0::2], 0, self.width)
- bbox[1::2] = np.clip(bbox[1::2], 0, self.height)
- x1, y1, x2, y2 = bbox
- w = np.maximum(x2 - x1, 1)
- h = np.maximum(y2 - y1, 1)
-
- if len(self.masks) == 0:
- cropped_masks = PolygonMasks([], h, w)
- else:
- cropped_masks = []
- for poly_per_obj in self.masks:
- cropped_poly_per_obj = []
- for p in poly_per_obj:
- # pycocotools will clip the boundary
- p = p.copy()
- p[0::2] -= bbox[0]
- p[1::2] -= bbox[1]
- cropped_poly_per_obj.append(p)
- cropped_masks.append(cropped_poly_per_obj)
- cropped_masks = PolygonMasks(cropped_masks, h, w)
- return cropped_masks
-
- def pad(self, out_shape, pad_val=0):
- """padding has no effect on polygons`"""
- return PolygonMasks(self.masks, *out_shape)
-
- def expand(self, *args, **kwargs):
- """TODO: Add expand for polygon"""
- raise NotImplementedError
-
- def crop_and_resize(self,
- bboxes,
- out_shape,
- inds,
- device='cpu',
- interpolation='bilinear'):
- """see :func:`BaseInstanceMasks.crop_and_resize`"""
- out_h, out_w = out_shape
- if len(self.masks) == 0:
- return PolygonMasks([], out_h, out_w)
-
- resized_masks = []
- for i in range(len(bboxes)):
- mask = self.masks[inds[i]]
- bbox = bboxes[i, :]
- x1, y1, x2, y2 = bbox
- w = np.maximum(x2 - x1, 1)
- h = np.maximum(y2 - y1, 1)
- h_scale = out_h / max(h, 0.1) # avoid too large scale
- w_scale = out_w / max(w, 0.1)
-
- resized_mask = []
- for p in mask:
- p = p.copy()
- # crop
- # pycocotools will clip the boundary
- p[0::2] -= bbox[0]
- p[1::2] -= bbox[1]
-
- # resize
- p[0::2] *= w_scale
- p[1::2] *= h_scale
- resized_mask.append(p)
- resized_masks.append(resized_mask)
- return PolygonMasks(resized_masks, *out_shape)
-
- def translate(self,
- out_shape,
- offset,
- direction='horizontal',
- fill_val=None,
- interpolation=None):
- """Translate the PolygonMasks.
-
- Example:
- >>> self = PolygonMasks.random(dtype=np.int64)
- >>> out_shape = (self.height, self.width)
- >>> new = self.translate(out_shape, 4., direction='horizontal')
- >>> assert np.all(new.masks[0][0][1::2] == self.masks[0][0][1::2])
- >>> assert np.all(new.masks[0][0][0::2] == self.masks[0][0][0::2] + 4) # noqa: E501
- """
- assert fill_val is None or fill_val == 0, 'fill_val is not used '\
- f'here and should be None or 0, but got {fill_val}.'
- if len(self.masks) == 0:
- translated_masks = PolygonMasks([], *out_shape)
- else:
- translated_masks = []
- for poly_per_obj in self.masks:
- translated_poly_per_obj = []
- for p in poly_per_obj:
- p = p.copy()
- if direction == 'horizontal':
- p[0::2] = np.clip(p[0::2] + offset, 0, out_shape[1])
- elif direction == 'vertical':
- p[1::2] = np.clip(p[1::2] + offset, 0, out_shape[0])
- translated_poly_per_obj.append(p)
- translated_masks.append(translated_poly_per_obj)
- translated_masks = PolygonMasks(translated_masks, *out_shape)
- return translated_masks
-
- def shear(self,
- out_shape,
- magnitude,
- direction='horizontal',
- border_value=0,
- interpolation='bilinear'):
- """See :func:`BaseInstanceMasks.shear`."""
- if len(self.masks) == 0:
- sheared_masks = PolygonMasks([], *out_shape)
- else:
- sheared_masks = []
- if direction == 'horizontal':
- shear_matrix = np.stack([[1, magnitude],
- [0, 1]]).astype(np.float32)
- elif direction == 'vertical':
- shear_matrix = np.stack([[1, 0], [magnitude,
- 1]]).astype(np.float32)
- for poly_per_obj in self.masks:
- sheared_poly = []
- for p in poly_per_obj:
- p = np.stack([p[0::2], p[1::2]], axis=0) # [2, n]
- new_coords = np.matmul(shear_matrix, p) # [2, n]
- new_coords[0, :] = np.clip(new_coords[0, :], 0,
- out_shape[1])
- new_coords[1, :] = np.clip(new_coords[1, :], 0,
- out_shape[0])
- sheared_poly.append(
- new_coords.transpose((1, 0)).reshape(-1))
- sheared_masks.append(sheared_poly)
- sheared_masks = PolygonMasks(sheared_masks, *out_shape)
- return sheared_masks
-
- def rotate(self, out_shape, angle, center=None, scale=1.0, fill_val=0):
- """See :func:`BaseInstanceMasks.rotate`."""
- if len(self.masks) == 0:
- rotated_masks = PolygonMasks([], *out_shape)
- else:
- rotated_masks = []
- rotate_matrix = cv2.getRotationMatrix2D(center, -angle, scale)
- for poly_per_obj in self.masks:
- rotated_poly = []
- for p in poly_per_obj:
- p = p.copy()
- coords = np.stack([p[0::2], p[1::2]], axis=1) # [n, 2]
- # pad 1 to convert from format [x, y] to homogeneous
- # coordinates format [x, y, 1]
- coords = np.concatenate(
- (coords, np.ones((coords.shape[0], 1), coords.dtype)),
- axis=1) # [n, 3]
- rotated_coords = np.matmul(
- rotate_matrix[None, :, :],
- coords[:, :, None])[..., 0] # [n, 2, 1] -> [n, 2]
- rotated_coords[:, 0] = np.clip(rotated_coords[:, 0], 0,
- out_shape[1])
- rotated_coords[:, 1] = np.clip(rotated_coords[:, 1], 0,
- out_shape[0])
- rotated_poly.append(rotated_coords.reshape(-1))
- rotated_masks.append(rotated_poly)
- rotated_masks = PolygonMasks(rotated_masks, *out_shape)
- return rotated_masks
-
- def to_bitmap(self):
- """convert polygon masks to bitmap masks."""
- bitmap_masks = self.to_ndarray()
- return BitmapMasks(bitmap_masks, self.height, self.width)
-
- @property
- def areas(self):
- """Compute areas of masks.
-
- This func is modified from `detectron2
- `_.
- The function only works with Polygons using the shoelace formula.
-
- Return:
- ndarray: areas of each instance
- """ # noqa: W501
- area = []
- for polygons_per_obj in self.masks:
- area_per_obj = 0
- for p in polygons_per_obj:
- area_per_obj += self._polygon_area(p[0::2], p[1::2])
- area.append(area_per_obj)
- return np.asarray(area)
-
- def _polygon_area(self, x, y):
- """Compute the area of a component of a polygon.
-
- Using the shoelace formula:
- https://stackoverflow.com/questions/24467972/calculate-area-of-polygon-given-x-y-coordinates
-
- Args:
- x (ndarray): x coordinates of the component
- y (ndarray): y coordinates of the component
-
- Return:
- float: the area of the component
- """ # noqa: 501
- return 0.5 * np.abs(
- np.dot(x, np.roll(y, 1)) - np.dot(y, np.roll(x, 1)))
-
- def to_ndarray(self):
- """Convert masks to the format of ndarray."""
- if len(self.masks) == 0:
- return np.empty((0, self.height, self.width), dtype=np.uint8)
- bitmap_masks = []
- for poly_per_obj in self.masks:
- bitmap_masks.append(
- polygon_to_bitmap(poly_per_obj, self.height, self.width))
- return np.stack(bitmap_masks)
-
- def to_tensor(self, dtype, device):
- """See :func:`BaseInstanceMasks.to_tensor`."""
- if len(self.masks) == 0:
- return torch.empty((0, self.height, self.width),
- dtype=dtype,
- device=device)
- ndarray_masks = self.to_ndarray()
- return torch.tensor(ndarray_masks, dtype=dtype, device=device)
-
- @classmethod
- def random(cls,
- num_masks=3,
- height=32,
- width=32,
- n_verts=5,
- dtype=np.float32,
- rng=None):
- """Generate random polygon masks for demo / testing purposes.
-
- Adapted from [1]_
-
- References:
- .. [1] https://gitlab.kitware.com/computer-vision/kwimage/-/blob/928cae35ca8/kwimage/structs/polygon.py#L379 # noqa: E501
-
- Example:
- >>> from mmdet.core.mask.structures import PolygonMasks
- >>> self = PolygonMasks.random()
- >>> print('self = {}'.format(self))
- """
- from mmdet.utils.util_random import ensure_rng
- rng = ensure_rng(rng)
-
- def _gen_polygon(n, irregularity, spikeyness):
- """Creates the polygon by sampling points on a circle around the
- centre. Random noise is added by varying the angular spacing
- between sequential points, and by varying the radial distance of
- each point from the centre.
-
- Based on original code by Mike Ounsworth
-
- Args:
- n (int): number of vertices
- irregularity (float): [0,1] indicating how much variance there
- is in the angular spacing of vertices. [0,1] will map to
- [0, 2pi/numberOfVerts]
- spikeyness (float): [0,1] indicating how much variance there is
- in each vertex from the circle of radius aveRadius. [0,1]
- will map to [0, aveRadius]
-
- Returns:
- a list of vertices, in CCW order.
- """
- from scipy.stats import truncnorm
- # Generate around the unit circle
- cx, cy = (0.0, 0.0)
- radius = 1
-
- tau = np.pi * 2
-
- irregularity = np.clip(irregularity, 0, 1) * 2 * np.pi / n
- spikeyness = np.clip(spikeyness, 1e-9, 1)
-
- # generate n angle steps
- lower = (tau / n) - irregularity
- upper = (tau / n) + irregularity
- angle_steps = rng.uniform(lower, upper, n)
-
- # normalize the steps so that point 0 and point n+1 are the same
- k = angle_steps.sum() / (2 * np.pi)
- angles = (angle_steps / k).cumsum() + rng.uniform(0, tau)
-
- # Convert high and low values to be wrt the standard normal range
- # https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.truncnorm.html
- low = 0
- high = 2 * radius
- mean = radius
- std = spikeyness
- a = (low - mean) / std
- b = (high - mean) / std
- tnorm = truncnorm(a=a, b=b, loc=mean, scale=std)
-
- # now generate the points
- radii = tnorm.rvs(n, random_state=rng)
- x_pts = cx + radii * np.cos(angles)
- y_pts = cy + radii * np.sin(angles)
-
- points = np.hstack([x_pts[:, None], y_pts[:, None]])
-
- # Scale to 0-1 space
- points = points - points.min(axis=0)
- points = points / points.max(axis=0)
-
- # Randomly place within 0-1 space
- points = points * (rng.rand() * .8 + .2)
- min_pt = points.min(axis=0)
- max_pt = points.max(axis=0)
-
- high = (1 - max_pt)
- low = (0 - min_pt)
- offset = (rng.rand(2) * (high - low)) + low
- points = points + offset
- return points
-
- def _order_vertices(verts):
- """
- References:
- https://stackoverflow.com/questions/1709283/how-can-i-sort-a-coordinate-list-for-a-rectangle-counterclockwise
- """
- mlat = verts.T[0].sum() / len(verts)
- mlng = verts.T[1].sum() / len(verts)
-
- tau = np.pi * 2
- angle = (np.arctan2(mlat - verts.T[0], verts.T[1] - mlng) +
- tau) % tau
- sortx = angle.argsort()
- verts = verts.take(sortx, axis=0)
- return verts
-
- # Generate a random exterior for each requested mask
- masks = []
- for _ in range(num_masks):
- exterior = _order_vertices(_gen_polygon(n_verts, 0.9, 0.9))
- exterior = (exterior * [(width, height)]).astype(dtype)
- masks.append([exterior.ravel()])
-
- self = cls(masks, height, width)
- return self
-
-
-def polygon_to_bitmap(polygons, height, width):
- """Convert masks from the form of polygons to bitmaps.
-
- Args:
- polygons (list[ndarray]): masks in polygon representation
- height (int): mask height
- width (int): mask width
-
- Return:
- ndarray: the converted masks in bitmap representation
- """
- rles = maskUtils.frPyObjects(polygons, height, width)
- rle = maskUtils.merge(rles)
- bitmap_mask = maskUtils.decode(rle).astype(bool)
- return bitmap_mask
diff --git a/spaces/tsi-org/LLaVA/docs/Customize_Component.md b/spaces/tsi-org/LLaVA/docs/Customize_Component.md
deleted file mode 100644
index e99a60879920b389799fb3a0baf1fd864ee0bccc..0000000000000000000000000000000000000000
--- a/spaces/tsi-org/LLaVA/docs/Customize_Component.md
+++ /dev/null
@@ -1,20 +0,0 @@
-# Customize Components in LLaVA
-
-This is an initial guide on how to replace the LLMs, visual encoders, etc. with your choice of components.
-
-## LLM
-
-It is quite simple to swap out LLaMA for any other LLM. You can refer to our implementation of [`llava_llama.py`](https://raw.githubusercontent.com/haotian-liu/LLaVA/main/llava/model/language_model/llava_llama.py) for an example of how to replace the LLM.
-
-Although it may look like ~100 lines of code are still needed, most of them are copied from the original `llama.py` in HF Transformers. The only difference is a few inserted lines for processing the multimodal inputs.
-
-In the `forward` function, we call `self.prepare_inputs_labels_for_multimodal` to process the multimodal inputs. This method is defined in `LlavaMetaForCausalLM`, and you just need to insert the call into the `forward` function of your LLM.
-
-In the `prepare_inputs_for_generation` function, we add `images` to `model_inputs`, because the images need to be passed to the LLM during generation.
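-
-Below is a minimal, hypothetical sketch of those two insertions. The import path and the
-argument list of `prepare_inputs_labels_for_multimodal` vary across LLaVA versions, and
-`MyBaseForCausalLM` is a placeholder for your own LLM class, so treat this as an
-illustration rather than the definitive API.
-
-```python
-from llava.model.llava_arch import LlavaMetaForCausalLM
-
-
-class MyLlavaForCausalLM(MyBaseForCausalLM, LlavaMetaForCausalLM):
-
-    def forward(self, input_ids=None, attention_mask=None, past_key_values=None,
-                labels=None, images=None, **kwargs):
-        # Insert the multimodal preprocessing before the base LLM forward.
-        (input_ids, attention_mask, past_key_values, inputs_embeds,
-         labels) = self.prepare_inputs_labels_for_multimodal(
-            input_ids, attention_mask, past_key_values, labels, images)
-        return super().forward(
-            input_ids=input_ids,
-            attention_mask=attention_mask,
-            past_key_values=past_key_values,
-            inputs_embeds=inputs_embeds,
-            labels=labels,
-            **kwargs)
-
-    def prepare_inputs_for_generation(self, input_ids, images=None, **kwargs):
-        model_inputs = super().prepare_inputs_for_generation(input_ids, **kwargs)
-        # Keep the images available at every decoding step of generate().
-        model_inputs['images'] = images
-        return model_inputs
-```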
-
-These are basically all the changes you need to make to replace the LLM.
-
-## Visual Encoder
-
-You can check out [`clip_encoder.py`](https://github.com/haotian-liu/LLaVA/blob/main/llava/model/multimodal_encoder/clip_encoder.py) to see how we implement the CLIP visual encoder.
-
diff --git a/spaces/ttt246/brain/Brain/src/rising_plugin/guardrails-config/__init__.py b/spaces/ttt246/brain/Brain/src/rising_plugin/guardrails-config/__init__.py
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/spaces/ucalyptus/DragGAN-unofficial/stylegan2/_init__.py b/spaces/ucalyptus/DragGAN-unofficial/stylegan2/_init__.py
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/spaces/ulysses115/diffsvc_test/modules/nsf_hifigan/utils.py b/spaces/ulysses115/diffsvc_test/modules/nsf_hifigan/utils.py
deleted file mode 100644
index 6ad5a37507987bd6e1200cb9241edf97989e3ead..0000000000000000000000000000000000000000
--- a/spaces/ulysses115/diffsvc_test/modules/nsf_hifigan/utils.py
+++ /dev/null
@@ -1,67 +0,0 @@
-import glob
-import os
-import matplotlib
-import torch
-from torch.nn.utils import weight_norm
-matplotlib.use("Agg")
-import matplotlib.pylab as plt
-
-
-def plot_spectrogram(spectrogram):
- fig, ax = plt.subplots(figsize=(10, 2))
- im = ax.imshow(spectrogram, aspect="auto", origin="lower",
- interpolation='none')
- plt.colorbar(im, ax=ax)
-
- fig.canvas.draw()
- plt.close()
-
- return fig
-
-
-def init_weights(m, mean=0.0, std=0.01):
- classname = m.__class__.__name__
- if classname.find("Conv") != -1:
- m.weight.data.normal_(mean, std)
-
-
-def apply_weight_norm(m):
- classname = m.__class__.__name__
- if classname.find("Conv") != -1:
- weight_norm(m)
-
-
-def get_padding(kernel_size, dilation=1):
- return int((kernel_size*dilation - dilation)/2)
-
-
-def load_checkpoint(filepath, device):
- assert os.path.isfile(filepath)
- print("Loading '{}'".format(filepath))
- checkpoint_dict = torch.load(filepath, map_location=device)
- print("Complete.")
- return checkpoint_dict
-
-
-def save_checkpoint(filepath, obj):
- print("Saving checkpoint to {}".format(filepath))
- torch.save(obj, filepath)
- print("Complete.")
-
-
-def del_old_checkpoints(cp_dir, prefix, n_models=2):
- pattern = os.path.join(cp_dir, prefix + '????????')
- cp_list = glob.glob(pattern) # get checkpoint paths
- cp_list = sorted(cp_list)  # sort by iteration
- if len(cp_list) > n_models:  # if more than n_models checkpoints are found
- for cp in cp_list[:-n_models]:  # delete the oldest checkpoints other than the latest n_models
- open(cp, 'w').close()  # empty the file contents
- os.unlink(cp)  # delete the file (moves to trash when using Colab)
-
-
-def scan_checkpoint(cp_dir, prefix):
- pattern = os.path.join(cp_dir, prefix + '????????')
- cp_list = glob.glob(pattern)
- if len(cp_list) == 0:
- return None
- return sorted(cp_list)[-1]
\ No newline at end of file
diff --git a/spaces/user238921933/stable-diffusion-webui/scripts/prompt_matrix.py b/spaces/user238921933/stable-diffusion-webui/scripts/prompt_matrix.py
deleted file mode 100644
index 51c70998866d4b0853a46e4de73d86c3d9ec9b93..0000000000000000000000000000000000000000
--- a/spaces/user238921933/stable-diffusion-webui/scripts/prompt_matrix.py
+++ /dev/null
@@ -1,111 +0,0 @@
-import math
-from collections import namedtuple
-from copy import copy
-import random
-
-import modules.scripts as scripts
-import gradio as gr
-
-from modules import images
-from modules.processing import process_images, Processed
-from modules.shared import opts, cmd_opts, state
-import modules.sd_samplers
-
-
-def draw_xy_grid(xs, ys, x_label, y_label, cell):
- res = []
-
- ver_texts = [[images.GridAnnotation(y_label(y))] for y in ys]
- hor_texts = [[images.GridAnnotation(x_label(x))] for x in xs]
-
- first_processed = None
-
- state.job_count = len(xs) * len(ys)
-
- for iy, y in enumerate(ys):
- for ix, x in enumerate(xs):
- state.job = f"{ix + iy * len(xs) + 1} out of {len(xs) * len(ys)}"
-
- processed = cell(x, y)
- if first_processed is None:
- first_processed = processed
-
- res.append(processed.images[0])
-
- grid = images.image_grid(res, rows=len(ys))
- grid = images.draw_grid_annotations(grid, res[0].width, res[0].height, hor_texts, ver_texts)
-
- first_processed.images = [grid]
-
- return first_processed
-
-
-class Script(scripts.Script):
- def title(self):
- return "Prompt matrix"
-
- def ui(self, is_img2img):
- gr.HTML('<br>')
- with gr.Row():
- with gr.Column():
- put_at_start = gr.Checkbox(label='Put variable parts at start of prompt', value=False, elem_id=self.elem_id("put_at_start"))
- different_seeds = gr.Checkbox(label='Use different seed for each picture', value=False, elem_id=self.elem_id("different_seeds"))
- with gr.Column():
- prompt_type = gr.Radio(["positive", "negative"], label="Select prompt", elem_id=self.elem_id("prompt_type"), value="positive")
- variations_delimiter = gr.Radio(["comma", "space"], label="Select joining char", elem_id=self.elem_id("variations_delimiter"), value="comma")
- with gr.Column():
- margin_size = gr.Slider(label="Grid margins (px)", minimum=0, maximum=500, value=0, step=2, elem_id=self.elem_id("margin_size"))
-
- return [put_at_start, different_seeds, prompt_type, variations_delimiter, margin_size]
-
- def run(self, p, put_at_start, different_seeds, prompt_type, variations_delimiter, margin_size):
- modules.processing.fix_seed(p)
- # Raise an error if the prompt type is not positive or negative
- if prompt_type not in ["positive", "negative"]:
- raise ValueError(f"Unknown prompt type {prompt_type}")
- # Raise error if variations delimiter is not comma or space
- if variations_delimiter not in ["comma", "space"]:
- raise ValueError(f"Unknown variations delimiter {variations_delimiter}")
-
- prompt = p.prompt if prompt_type == "positive" else p.negative_prompt
- original_prompt = prompt[0] if type(prompt) == list else prompt
- positive_prompt = p.prompt[0] if type(p.prompt) == list else p.prompt
-
- delimiter = ", " if variations_delimiter == "comma" else " "
-
- all_prompts = []
- prompt_matrix_parts = original_prompt.split("|")
- combination_count = 2 ** (len(prompt_matrix_parts) - 1)
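- # Each combination is encoded as a bitmask: bit n of combination_num decides
- # whether variable part n (prompt_matrix_parts[n + 1]) is included, while
- # prompt_matrix_parts[0] is the base prompt and is always kept.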
- for combination_num in range(combination_count):
- selected_prompts = [text.strip().strip(',') for n, text in enumerate(prompt_matrix_parts[1:]) if combination_num & (1 << n)]
-
- if put_at_start:
- selected_prompts = selected_prompts + [prompt_matrix_parts[0]]
- else:
- selected_prompts = [prompt_matrix_parts[0]] + selected_prompts
-
- all_prompts.append(delimiter.join(selected_prompts))
-
- p.n_iter = math.ceil(len(all_prompts) / p.batch_size)
- p.do_not_save_grid = True
-
- print(f"Prompt matrix will create {len(all_prompts)} images using a total of {p.n_iter} batches.")
-
- if prompt_type == "positive":
- p.prompt = all_prompts
- else:
- p.negative_prompt = all_prompts
- p.seed = [p.seed + (i if different_seeds else 0) for i in range(len(all_prompts))]
- p.prompt_for_display = positive_prompt
- processed = process_images(p)
-
- grid = images.image_grid(processed.images, p.batch_size, rows=1 << ((len(prompt_matrix_parts) - 1) // 2))
- grid = images.draw_prompt_matrix(grid, processed.images[0].width, processed.images[0].height, prompt_matrix_parts, margin_size)
- processed.images.insert(0, grid)
- processed.index_of_first_image = 1
- processed.infotexts.insert(0, processed.infotexts[0])
-
- if opts.grid_save:
- images.save_image(processed.images[0], p.outpath_grids, "prompt_matrix", extension=opts.grid_format, prompt=original_prompt, seed=processed.seed, grid=True, p=p)
-
- return processed
diff --git a/spaces/vaishanthr/Simultaneous-Segmented-Depth-Prediction/yolov8/docs/modes/export.md b/spaces/vaishanthr/Simultaneous-Segmented-Depth-Prediction/yolov8/docs/modes/export.md
deleted file mode 100644
index 54518b2961086b716ab06074336404691026cd67..0000000000000000000000000000000000000000
--- a/spaces/vaishanthr/Simultaneous-Segmented-Depth-Prediction/yolov8/docs/modes/export.md
+++ /dev/null
@@ -1,88 +0,0 @@
----
-comments: true
-description: 'Export mode: Create a deployment-ready YOLOv8 model by converting it to various formats. Export to ONNX or OpenVINO for up to 3x CPU speedup.'
-keywords: ultralytics docs, YOLOv8, export YOLOv8, YOLOv8 model deployment, exporting YOLOv8, ONNX, OpenVINO, TensorRT, CoreML, TF SavedModel, PaddlePaddle, TorchScript, ONNX format, OpenVINO format, TensorRT format, CoreML format, TF SavedModel format, PaddlePaddle format, Tencent NCNN, NCNN
----
-
-
-
-**Export mode** is used for exporting a YOLOv8 model to a format that can be used for deployment. In this mode, the
-model is converted to a format that can be used by other software applications or hardware devices. This mode is useful
-when deploying the model to production environments.
-
-!!! tip "Tip"
-
- * Export to ONNX or OpenVINO for up to 3x CPU speedup.
- * Export to TensorRT for up to 5x GPU speedup.
-
-## Usage Examples
-
-Export a YOLOv8n model to a different format like ONNX or TensorRT. See Arguments section below for a full list of
-export arguments.
-
-!!! example ""
-
- === "Python"
-
- ```python
- from ultralytics import YOLO
-
- # Load a model
- model = YOLO('yolov8n.pt') # load an official model
- model = YOLO('path/to/best.pt') # load a custom trained
-
- # Export the model
- model.export(format='onnx')
- ```
- === "CLI"
-
- ```bash
- yolo export model=yolov8n.pt format=onnx # export official model
- yolo export model=path/to/best.pt format=onnx # export custom trained model
- ```
-
-## Arguments
-
-Export settings for YOLO models are the configurations and options used to save or
-export the model for use in other environments or platforms. These settings affect the
-model's performance, size, and compatibility with different systems. Common settings
-include the format of the exported model file (e.g. ONNX, TensorFlow SavedModel), the
-device the model will run on (e.g. CPU, GPU), and the presence of additional features
-such as masks or multiple labels per box. The specific task the model is used for and
-the requirements or constraints of the target environment may also affect the export
-process. Configure these settings carefully so that the exported model is optimized for
-the intended use case; the table below lists the available arguments, and a short
-example combining several of them follows the table.
-
-| Key | Value | Description |
-|-------------|-----------------|------------------------------------------------------|
-| `format` | `'torchscript'` | format to export to |
-| `imgsz` | `640` | image size as scalar or (h, w) list, i.e. (640, 480) |
-| `keras` | `False` | use Keras for TF SavedModel export |
-| `optimize` | `False` | TorchScript: optimize for mobile |
-| `half` | `False` | FP16 quantization |
-| `int8` | `False` | INT8 quantization |
-| `dynamic` | `False` | ONNX/TensorRT: dynamic axes |
-| `simplify` | `False` | ONNX/TensorRT: simplify model |
-| `opset` | `None` | ONNX: opset version (optional, defaults to latest) |
-| `workspace` | `4` | TensorRT: workspace size (GB) |
-| `nms` | `False` | CoreML: add NMS |
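-
-For example, several of these arguments can be combined in a single export call; the
-values below are illustrative rather than recommendations:
-
-```python
-from ultralytics import YOLO
-
-model = YOLO('yolov8n.pt')
-
-# Export to ONNX at 640x640 with FP16 weights and a simplified graph
-model.export(format='onnx', imgsz=640, half=True, simplify=True)
-```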
-
-## Export Formats
-
-Available YOLOv8 export formats are in the table below. You can export to any format using the `format` argument,
-i.e. `format='onnx'` or `format='engine'`.
-
-| Format | `format` Argument | Model | Metadata | Arguments |
-|--------------------------------------------------------------------|-------------------|---------------------------|----------|-----------------------------------------------------|
-| [PyTorch](https://pytorch.org/) | - | `yolov8n.pt` | ✅ | - |
-| [TorchScript](https://pytorch.org/docs/stable/jit.html) | `torchscript` | `yolov8n.torchscript` | ✅ | `imgsz`, `optimize` |
-| [ONNX](https://onnx.ai/) | `onnx` | `yolov8n.onnx` | ✅ | `imgsz`, `half`, `dynamic`, `simplify`, `opset` |
-| [OpenVINO](https://docs.openvino.ai/latest/index.html) | `openvino` | `yolov8n_openvino_model/` | ✅ | `imgsz`, `half` |
-| [TensorRT](https://developer.nvidia.com/tensorrt) | `engine` | `yolov8n.engine` | ✅ | `imgsz`, `half`, `dynamic`, `simplify`, `workspace` |
-| [CoreML](https://github.com/apple/coremltools) | `coreml` | `yolov8n.mlmodel` | ✅ | `imgsz`, `half`, `int8`, `nms` |
-| [TF SavedModel](https://www.tensorflow.org/guide/saved_model) | `saved_model` | `yolov8n_saved_model/` | ✅ | `imgsz`, `keras` |
-| [TF GraphDef](https://www.tensorflow.org/api_docs/python/tf/Graph) | `pb` | `yolov8n.pb` | ❌ | `imgsz` |
-| [TF Lite](https://www.tensorflow.org/lite) | `tflite` | `yolov8n.tflite` | ✅ | `imgsz`, `half`, `int8` |
-| [TF Edge TPU](https://coral.ai/docs/edgetpu/models-intro/) | `edgetpu` | `yolov8n_edgetpu.tflite` | ✅ | `imgsz` |
-| [TF.js](https://www.tensorflow.org/js) | `tfjs` | `yolov8n_web_model/` | ✅ | `imgsz` |
-| [PaddlePaddle](https://github.com/PaddlePaddle) | `paddle` | `yolov8n_paddle_model/` | ✅ | `imgsz` |
-| [NCNN](https://github.com/Tencent/ncnn) | `ncnn` | `yolov8n_ncnn_model/` | ✅ | `imgsz`, `half` |
\ No newline at end of file
diff --git a/spaces/vishnusureshperumbavoor/vspbot-falcon-langchain/app.py b/spaces/vishnusureshperumbavoor/vspbot-falcon-langchain/app.py
deleted file mode 100644
index 4fa6a7e045d97fdbfde67d61c32d8f9bc41b2cfe..0000000000000000000000000000000000000000
--- a/spaces/vishnusureshperumbavoor/vspbot-falcon-langchain/app.py
+++ /dev/null
@@ -1,41 +0,0 @@
-import gradio as gr
-from langchain import PromptTemplate, LLMChain
-from langchain import HuggingFaceHub
-import os
-from dotenv import load_dotenv
-
-# Load environment variables from .env file
-load_dotenv()
-
-HUGGINGFACEHUB_API_TOKEN = os.getenv("HUGGINGFACEHUB_API_TOKEN")
-
-repo_id = "tiiuae/falcon-7b-instruct"
-llm = HuggingFaceHub(huggingfacehub_api_token=HUGGINGFACEHUB_API_TOKEN,
- repo_id=repo_id,
- model_kwargs={"temperature":0.7, "max_new_tokens":700})
-
-template = """
-You are a helpful AI assistant and provide the answer for the question asked politely.
-
-{question}
-Answer: Let's think step by step.
-"""
-prompt = PromptTemplate(template=template, input_variables=["question"])
-llm_chain = LLMChain(prompt=prompt, llm=llm)
-
-# Define the function that will be used in Gradio
-def generate_answer(question):
- answer = llm_chain.run(question)
- return answer
-
-# Create a Gradio interface
-iface = gr.Interface(
- fn=generate_answer,
- inputs=gr.inputs.Textbox(),
- outputs=gr.outputs.Textbox(),
- title="VSP Bot",
- description="Created by VSP",
-)
-
-# Launch the Gradio interface
-iface.launch()
\ No newline at end of file
diff --git a/spaces/vumichien/canvas_controlnet/annotator/uniformer/mmcv/ops/sync_bn.py b/spaces/vumichien/canvas_controlnet/annotator/uniformer/mmcv/ops/sync_bn.py
deleted file mode 100644
index c9b016fcbe860989c56cd1040034bcfa60e146d2..0000000000000000000000000000000000000000
--- a/spaces/vumichien/canvas_controlnet/annotator/uniformer/mmcv/ops/sync_bn.py
+++ /dev/null
@@ -1,279 +0,0 @@
-# Copyright (c) OpenMMLab. All rights reserved.
-import torch
-import torch.distributed as dist
-import torch.nn.functional as F
-from torch.autograd import Function
-from torch.autograd.function import once_differentiable
-from torch.nn.modules.module import Module
-from torch.nn.parameter import Parameter
-
-from annotator.uniformer.mmcv.cnn import NORM_LAYERS
-from ..utils import ext_loader
-
-ext_module = ext_loader.load_ext('_ext', [
- 'sync_bn_forward_mean', 'sync_bn_forward_var', 'sync_bn_forward_output',
- 'sync_bn_backward_param', 'sync_bn_backward_data'
-])
-
-
-class SyncBatchNormFunction(Function):
-
- @staticmethod
- def symbolic(g, input, running_mean, running_var, weight, bias, momentum,
- eps, group, group_size, stats_mode):
- return g.op(
- 'mmcv::MMCVSyncBatchNorm',
- input,
- running_mean,
- running_var,
- weight,
- bias,
- momentum_f=momentum,
- eps_f=eps,
- group_i=group,
- group_size_i=group_size,
- stats_mode=stats_mode)
-
- @staticmethod
- def forward(self, input, running_mean, running_var, weight, bias, momentum,
- eps, group, group_size, stats_mode):
- self.momentum = momentum
- self.eps = eps
- self.group = group
- self.group_size = group_size
- self.stats_mode = stats_mode
-
- assert isinstance(
- input, (torch.HalfTensor, torch.FloatTensor,
- torch.cuda.HalfTensor, torch.cuda.FloatTensor)), \
- f'only supports Half or Float Tensors, but got {input.type()}'
- output = torch.zeros_like(input)
- input3d = input.flatten(start_dim=2)
- output3d = output.view_as(input3d)
- num_channels = input3d.size(1)
-
- # ensure mean/var/norm/std are initialized as zeros
- # ``torch.empty()`` does not guarantee that
- mean = torch.zeros(
- num_channels, dtype=torch.float, device=input3d.device)
- var = torch.zeros(
- num_channels, dtype=torch.float, device=input3d.device)
- norm = torch.zeros_like(
- input3d, dtype=torch.float, device=input3d.device)
- std = torch.zeros(
- num_channels, dtype=torch.float, device=input3d.device)
-
- batch_size = input3d.size(0)
- if batch_size > 0:
- ext_module.sync_bn_forward_mean(input3d, mean)
- batch_flag = torch.ones([1], device=mean.device, dtype=mean.dtype)
- else:
- # skip updating mean and leave it as zeros when the input is empty
- batch_flag = torch.zeros([1], device=mean.device, dtype=mean.dtype)
-
- # synchronize mean and the batch flag
- vec = torch.cat([mean, batch_flag])
- if self.stats_mode == 'N':
- vec *= batch_size
- if self.group_size > 1:
- dist.all_reduce(vec, group=self.group)
- total_batch = vec[-1].detach()
- mean = vec[:num_channels]
-
- if self.stats_mode == 'default':
- mean = mean / self.group_size
- elif self.stats_mode == 'N':
- mean = mean / total_batch.clamp(min=1)
- else:
- raise NotImplementedError
-
- # leave var as zeros when the input is empty
- if batch_size > 0:
- ext_module.sync_bn_forward_var(input3d, mean, var)
-
- if self.stats_mode == 'N':
- var *= batch_size
- if self.group_size > 1:
- dist.all_reduce(var, group=self.group)
-
- if self.stats_mode == 'default':
- var /= self.group_size
- elif self.stats_mode == 'N':
- var /= total_batch.clamp(min=1)
- else:
- raise NotImplementedError
-
- # if the total batch size over all the ranks is zero,
- # we should not update the statistics in the current batch
- update_flag = total_batch.clamp(max=1)
- momentum = update_flag * self.momentum
- ext_module.sync_bn_forward_output(
- input3d,
- mean,
- var,
- weight,
- bias,
- running_mean,
- running_var,
- norm,
- std,
- output3d,
- eps=self.eps,
- momentum=momentum,
- group_size=self.group_size)
- self.save_for_backward(norm, std, weight)
- return output
-
- @staticmethod
- @once_differentiable
- def backward(self, grad_output):
- norm, std, weight = self.saved_tensors
- grad_weight = torch.zeros_like(weight)
- grad_bias = torch.zeros_like(weight)
- grad_input = torch.zeros_like(grad_output)
- grad_output3d = grad_output.flatten(start_dim=2)
- grad_input3d = grad_input.view_as(grad_output3d)
-
- batch_size = grad_input3d.size(0)
- if batch_size > 0:
- ext_module.sync_bn_backward_param(grad_output3d, norm, grad_weight,
- grad_bias)
-
- # all reduce
- if self.group_size > 1:
- dist.all_reduce(grad_weight, group=self.group)
- dist.all_reduce(grad_bias, group=self.group)
- grad_weight /= self.group_size
- grad_bias /= self.group_size
-
- if batch_size > 0:
- ext_module.sync_bn_backward_data(grad_output3d, weight,
- grad_weight, grad_bias, norm, std,
- grad_input3d)
-
- return grad_input, None, None, grad_weight, grad_bias, \
- None, None, None, None, None
-
-
-@NORM_LAYERS.register_module(name='MMSyncBN')
-class SyncBatchNorm(Module):
- """Synchronized Batch Normalization.
-
- Args:
- num_features (int): number of features/channels in the input tensor
- eps (float, optional): a value added to the denominator for numerical
- stability. Defaults to 1e-5.
- momentum (float, optional): the value used for the running_mean and
- running_var computation. Defaults to 0.1.
- affine (bool, optional): whether to use learnable affine parameters.
- Defaults to True.
- track_running_stats (bool, optional): whether to track the running
- mean and variance during training. When set to False, this
- module does not track such statistics, and initializes statistics
- buffers ``running_mean`` and ``running_var`` as ``None``. When
- these buffers are ``None``, this module always uses batch
- statistics in both training and eval modes. Defaults to True.
- group (int, optional): synchronization of stats happen within
- each process group individually. By default it is synchronization
- across the whole world. Defaults to None.
- stats_mode (str, optional): The statistical mode. Available options
- includes ``'default'`` and ``'N'``. Defaults to 'default'.
- When ``stats_mode=='default'``, it computes the overall statistics
- using those from each worker with equal weight, i.e., the
- statistics are synchronized and simply divided by ``group``. This
- mode will produce inaccurate statistics when empty tensors occur.
- When ``stats_mode=='N'``, it computes the overall statistics using
- the total number of batches in each worker, ignoring the group
- size, i.e., the statistics are synchronized and then divided by
- the total batch ``N``. This mode is beneficial when empty tensors
- occur during training, as it averages the total mean by the real
- number of batches.
- """
-
- def __init__(self,
- num_features,
- eps=1e-5,
- momentum=0.1,
- affine=True,
- track_running_stats=True,
- group=None,
- stats_mode='default'):
- super(SyncBatchNorm, self).__init__()
- self.num_features = num_features
- self.eps = eps
- self.momentum = momentum
- self.affine = affine
- self.track_running_stats = track_running_stats
- group = dist.group.WORLD if group is None else group
- self.group = group
- self.group_size = dist.get_world_size(group)
- assert stats_mode in ['default', 'N'], \
- f'"stats_mode" only accepts "default" and "N", got "{stats_mode}"'
- self.stats_mode = stats_mode
- if self.affine:
- self.weight = Parameter(torch.Tensor(num_features))
- self.bias = Parameter(torch.Tensor(num_features))
- else:
- self.register_parameter('weight', None)
- self.register_parameter('bias', None)
- if self.track_running_stats:
- self.register_buffer('running_mean', torch.zeros(num_features))
- self.register_buffer('running_var', torch.ones(num_features))
- self.register_buffer('num_batches_tracked',
- torch.tensor(0, dtype=torch.long))
- else:
- self.register_buffer('running_mean', None)
- self.register_buffer('running_var', None)
- self.register_buffer('num_batches_tracked', None)
- self.reset_parameters()
-
- def reset_running_stats(self):
- if self.track_running_stats:
- self.running_mean.zero_()
- self.running_var.fill_(1)
- self.num_batches_tracked.zero_()
-
- def reset_parameters(self):
- self.reset_running_stats()
- if self.affine:
- self.weight.data.uniform_() # pytorch use ones_()
- self.bias.data.zero_()
-
- def forward(self, input):
- if input.dim() < 2:
- raise ValueError(
- f'expected at least 2D input, got {input.dim()}D input')
- if self.momentum is None:
- exponential_average_factor = 0.0
- else:
- exponential_average_factor = self.momentum
-
- if self.training and self.track_running_stats:
- if self.num_batches_tracked is not None:
- self.num_batches_tracked += 1
- if self.momentum is None: # use cumulative moving average
- exponential_average_factor = 1.0 / float(
- self.num_batches_tracked)
- else: # use exponential moving average
- exponential_average_factor = self.momentum
-
- if self.training or not self.track_running_stats:
- return SyncBatchNormFunction.apply(
- input, self.running_mean, self.running_var, self.weight,
- self.bias, exponential_average_factor, self.eps, self.group,
- self.group_size, self.stats_mode)
- else:
- return F.batch_norm(input, self.running_mean, self.running_var,
- self.weight, self.bias, False,
- exponential_average_factor, self.eps)
-
- def __repr__(self):
- s = self.__class__.__name__
- s += f'({self.num_features}, '
- s += f'eps={self.eps}, '
- s += f'momentum={self.momentum}, '
- s += f'affine={self.affine}, '
- s += f'track_running_stats={self.track_running_stats}, '
- s += f'group_size={self.group_size},'
- s += f'stats_mode={self.stats_mode})'
- return s
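-
-
-# Illustrative usage (assumes a torch.distributed process group is initialized):
-# downstream code can request this layer by its registry name through a norm_cfg
-# dict, e.g.
-#   norm_cfg = dict(type='MMSyncBN', requires_grad=True)
-# or construct it directly, e.g. SyncBatchNorm(256, stats_mode='N'), which
-# tolerates ranks that occasionally receive empty inputs.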
diff --git a/spaces/whisper-event/leaderboard/README.md b/spaces/whisper-event/leaderboard/README.md
deleted file mode 100644
index 104cbe7ee27fcd201a2c8744735541d99355a7bb..0000000000000000000000000000000000000000
--- a/spaces/whisper-event/leaderboard/README.md
+++ /dev/null
@@ -1,12 +0,0 @@
----
-title: Leaderboard
-emoji: 📊
-colorFrom: green
-colorTo: purple
-sdk: streamlit
-sdk_version: 1.26.0
-app_file: app.py
-pinned: false
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/wonderit-safeai/tts-announcer/models.py b/spaces/wonderit-safeai/tts-announcer/models.py
deleted file mode 100644
index f5acdeb2bedd47897348407c0ae55c9a160da881..0000000000000000000000000000000000000000
--- a/spaces/wonderit-safeai/tts-announcer/models.py
+++ /dev/null
@@ -1,534 +0,0 @@
-import copy
-import math
-import torch
-from torch import nn
-from torch.nn import functional as F
-
-import commons
-import modules
-import attentions
-import monotonic_align
-
-from torch.nn import Conv1d, ConvTranspose1d, AvgPool1d, Conv2d
-from torch.nn.utils import weight_norm, remove_weight_norm, spectral_norm
-from commons import init_weights, get_padding
-
-
-class StochasticDurationPredictor(nn.Module):
- def __init__(self, in_channels, filter_channels, kernel_size, p_dropout, n_flows=4, gin_channels=0):
- super().__init__()
- filter_channels = in_channels # this needs to be removed in a future version
- self.in_channels = in_channels
- self.filter_channels = filter_channels
- self.kernel_size = kernel_size
- self.p_dropout = p_dropout
- self.n_flows = n_flows
- self.gin_channels = gin_channels
-
- self.log_flow = modules.Log()
- self.flows = nn.ModuleList()
- self.flows.append(modules.ElementwiseAffine(2))
- for i in range(n_flows):
- self.flows.append(modules.ConvFlow(2, filter_channels, kernel_size, n_layers=3))
- self.flows.append(modules.Flip())
-
- self.post_pre = nn.Conv1d(1, filter_channels, 1)
- self.post_proj = nn.Conv1d(filter_channels, filter_channels, 1)
- self.post_convs = modules.DDSConv(filter_channels, kernel_size, n_layers=3, p_dropout=p_dropout)
- self.post_flows = nn.ModuleList()
- self.post_flows.append(modules.ElementwiseAffine(2))
- for i in range(4):
- self.post_flows.append(modules.ConvFlow(2, filter_channels, kernel_size, n_layers=3))
- self.post_flows.append(modules.Flip())
-
- self.pre = nn.Conv1d(in_channels, filter_channels, 1)
- self.proj = nn.Conv1d(filter_channels, filter_channels, 1)
- self.convs = modules.DDSConv(filter_channels, kernel_size, n_layers=3, p_dropout=p_dropout)
- if gin_channels != 0:
- self.cond = nn.Conv1d(gin_channels, filter_channels, 1)
-
- def forward(self, x, x_mask, w=None, g=None, reverse=False, noise_scale=1.0):
- x = torch.detach(x)
- x = self.pre(x)
- if g is not None:
- g = torch.detach(g)
- x = x + self.cond(g)
- x = self.convs(x, x_mask)
- x = self.proj(x) * x_mask
-
- if not reverse:
- flows = self.flows
- assert w is not None
-
- logdet_tot_q = 0
- h_w = self.post_pre(w)
- h_w = self.post_convs(h_w, x_mask)
- h_w = self.post_proj(h_w) * x_mask
- e_q = torch.randn(w.size(0), 2, w.size(2)).to(device=x.device, dtype=x.dtype) * x_mask
- z_q = e_q
- for flow in self.post_flows:
- z_q, logdet_q = flow(z_q, x_mask, g=(x + h_w))
- logdet_tot_q += logdet_q
- z_u, z1 = torch.split(z_q, [1, 1], 1)
- u = torch.sigmoid(z_u) * x_mask
- z0 = (w - u) * x_mask
- logdet_tot_q += torch.sum((F.logsigmoid(z_u) + F.logsigmoid(-z_u)) * x_mask, [1,2])
- logq = torch.sum(-0.5 * (math.log(2*math.pi) + (e_q**2)) * x_mask, [1,2]) - logdet_tot_q
-
- logdet_tot = 0
- z0, logdet = self.log_flow(z0, x_mask)
- logdet_tot += logdet
- z = torch.cat([z0, z1], 1)
- for flow in flows:
- z, logdet = flow(z, x_mask, g=x, reverse=reverse)
- logdet_tot = logdet_tot + logdet
- nll = torch.sum(0.5 * (math.log(2*math.pi) + (z**2)) * x_mask, [1,2]) - logdet_tot
- return nll + logq # [b]
- else:
- flows = list(reversed(self.flows))
- flows = flows[:-2] + [flows[-1]] # remove a useless vflow
- z = torch.randn(x.size(0), 2, x.size(2)).to(device=x.device, dtype=x.dtype) * noise_scale
- for flow in flows:
- z = flow(z, x_mask, g=x, reverse=reverse)
- z0, z1 = torch.split(z, [1, 1], 1)
- logw = z0
- return logw
-
-
-class DurationPredictor(nn.Module):
- def __init__(self, in_channels, filter_channels, kernel_size, p_dropout, gin_channels=0):
- super().__init__()
-
- self.in_channels = in_channels
- self.filter_channels = filter_channels
- self.kernel_size = kernel_size
- self.p_dropout = p_dropout
- self.gin_channels = gin_channels
-
- self.drop = nn.Dropout(p_dropout)
- self.conv_1 = nn.Conv1d(in_channels, filter_channels, kernel_size, padding=kernel_size//2)
- self.norm_1 = modules.LayerNorm(filter_channels)
- self.conv_2 = nn.Conv1d(filter_channels, filter_channels, kernel_size, padding=kernel_size//2)
- self.norm_2 = modules.LayerNorm(filter_channels)
- self.proj = nn.Conv1d(filter_channels, 1, 1)
-
- if gin_channels != 0:
- self.cond = nn.Conv1d(gin_channels, in_channels, 1)
-
- def forward(self, x, x_mask, g=None):
- x = torch.detach(x)
- if g is not None:
- g = torch.detach(g)
- x = x + self.cond(g)
- x = self.conv_1(x * x_mask)
- x = torch.relu(x)
- x = self.norm_1(x)
- x = self.drop(x)
- x = self.conv_2(x * x_mask)
- x = torch.relu(x)
- x = self.norm_2(x)
- x = self.drop(x)
- x = self.proj(x * x_mask)
- return x * x_mask
-
-
-class TextEncoder(nn.Module):
- def __init__(self,
- n_vocab,
- out_channels,
- hidden_channels,
- filter_channels,
- n_heads,
- n_layers,
- kernel_size,
- p_dropout):
- super().__init__()
- self.n_vocab = n_vocab
- self.out_channels = out_channels
- self.hidden_channels = hidden_channels
- self.filter_channels = filter_channels
- self.n_heads = n_heads
- self.n_layers = n_layers
- self.kernel_size = kernel_size
- self.p_dropout = p_dropout
-
- self.emb = nn.Embedding(n_vocab, hidden_channels)
- nn.init.normal_(self.emb.weight, 0.0, hidden_channels**-0.5)
-
- self.encoder = attentions.Encoder(
- hidden_channels,
- filter_channels,
- n_heads,
- n_layers,
- kernel_size,
- p_dropout)
- self.proj= nn.Conv1d(hidden_channels, out_channels * 2, 1)
-
- def forward(self, x, x_lengths):
- x = self.emb(x) * math.sqrt(self.hidden_channels) # [b, t, h]
- x = torch.transpose(x, 1, -1) # [b, h, t]
- x_mask = torch.unsqueeze(commons.sequence_mask(x_lengths, x.size(2)), 1).to(x.dtype)
-
- x = self.encoder(x * x_mask, x_mask)
- stats = self.proj(x) * x_mask
-
- m, logs = torch.split(stats, self.out_channels, dim=1)
- return x, m, logs, x_mask
-
-
-class ResidualCouplingBlock(nn.Module):
- def __init__(self,
- channels,
- hidden_channels,
- kernel_size,
- dilation_rate,
- n_layers,
- n_flows=4,
- gin_channels=0):
- super().__init__()
- self.channels = channels
- self.hidden_channels = hidden_channels
- self.kernel_size = kernel_size
- self.dilation_rate = dilation_rate
- self.n_layers = n_layers
- self.n_flows = n_flows
- self.gin_channels = gin_channels
-
- self.flows = nn.ModuleList()
- for i in range(n_flows):
- self.flows.append(modules.ResidualCouplingLayer(channels, hidden_channels, kernel_size, dilation_rate, n_layers, gin_channels=gin_channels, mean_only=True))
- self.flows.append(modules.Flip())
-
- def forward(self, x, x_mask, g=None, reverse=False):
- if not reverse:
- for flow in self.flows:
- x, _ = flow(x, x_mask, g=g, reverse=reverse)
- else:
- for flow in reversed(self.flows):
- x = flow(x, x_mask, g=g, reverse=reverse)
- return x
-
-
-class PosteriorEncoder(nn.Module):
- def __init__(self,
- in_channels,
- out_channels,
- hidden_channels,
- kernel_size,
- dilation_rate,
- n_layers,
- gin_channels=0):
- super().__init__()
- self.in_channels = in_channels
- self.out_channels = out_channels
- self.hidden_channels = hidden_channels
- self.kernel_size = kernel_size
- self.dilation_rate = dilation_rate
- self.n_layers = n_layers
- self.gin_channels = gin_channels
-
- self.pre = nn.Conv1d(in_channels, hidden_channels, 1)
- self.enc = modules.WN(hidden_channels, kernel_size, dilation_rate, n_layers, gin_channels=gin_channels)
- self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1)
-
- def forward(self, x, x_lengths, g=None):
- x_mask = torch.unsqueeze(commons.sequence_mask(x_lengths, x.size(2)), 1).to(x.dtype)
- x = self.pre(x) * x_mask
- x = self.enc(x, x_mask, g=g)
- stats = self.proj(x) * x_mask
- m, logs = torch.split(stats, self.out_channels, dim=1)
- z = (m + torch.randn_like(m) * torch.exp(logs)) * x_mask
- return z, m, logs, x_mask
-
-
-class Generator(torch.nn.Module):
- def __init__(self, initial_channel, resblock, resblock_kernel_sizes, resblock_dilation_sizes, upsample_rates, upsample_initial_channel, upsample_kernel_sizes, gin_channels=0):
- super(Generator, self).__init__()
- self.num_kernels = len(resblock_kernel_sizes)
- self.num_upsamples = len(upsample_rates)
- self.conv_pre = Conv1d(initial_channel, upsample_initial_channel, 7, 1, padding=3)
- resblock = modules.ResBlock1 if resblock == '1' else modules.ResBlock2
-
- self.ups = nn.ModuleList()
- for i, (u, k) in enumerate(zip(upsample_rates, upsample_kernel_sizes)):
- self.ups.append(weight_norm(
- ConvTranspose1d(upsample_initial_channel//(2**i), upsample_initial_channel//(2**(i+1)),
- k, u, padding=(k-u)//2)))
-
- self.resblocks = nn.ModuleList()
- for i in range(len(self.ups)):
- ch = upsample_initial_channel//(2**(i+1))
- for j, (k, d) in enumerate(zip(resblock_kernel_sizes, resblock_dilation_sizes)):
- self.resblocks.append(resblock(ch, k, d))
-
- self.conv_post = Conv1d(ch, 1, 7, 1, padding=3, bias=False)
- self.ups.apply(init_weights)
-
- if gin_channels != 0:
- self.cond = nn.Conv1d(gin_channels, upsample_initial_channel, 1)
-
- def forward(self, x, g=None):
- x = self.conv_pre(x)
- if g is not None:
- x = x + self.cond(g)
-
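- # each upsampling stage halves the channel count and feeds a bank of parallel residual blocks whose outputs are averaged (multi-receptive-field fusion)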
- for i in range(self.num_upsamples):
- x = F.leaky_relu(x, modules.LRELU_SLOPE)
- x = self.ups[i](x)
- xs = None
- for j in range(self.num_kernels):
- if xs is None:
- xs = self.resblocks[i*self.num_kernels+j](x)
- else:
- xs += self.resblocks[i*self.num_kernels+j](x)
- x = xs / self.num_kernels
- x = F.leaky_relu(x)
- x = self.conv_post(x)
- x = torch.tanh(x)
-
- return x
-
- def remove_weight_norm(self):
- print('Removing weight norm...')
- for l in self.ups:
- remove_weight_norm(l)
- for l in self.resblocks:
- l.remove_weight_norm()
-
-
-class DiscriminatorP(torch.nn.Module):
- def __init__(self, period, kernel_size=5, stride=3, use_spectral_norm=False):
- super(DiscriminatorP, self).__init__()
- self.period = period
- self.use_spectral_norm = use_spectral_norm
- norm_f = weight_norm if use_spectral_norm == False else spectral_norm
- self.convs = nn.ModuleList([
- norm_f(Conv2d(1, 32, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))),
- norm_f(Conv2d(32, 128, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))),
- norm_f(Conv2d(128, 512, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))),
- norm_f(Conv2d(512, 1024, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))),
- norm_f(Conv2d(1024, 1024, (kernel_size, 1), 1, padding=(get_padding(kernel_size, 1), 0))),
- ])
- self.conv_post = norm_f(Conv2d(1024, 1, (3, 1), 1, padding=(1, 0)))
-
- def forward(self, x):
- fmap = []
-
- # 1d to 2d
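- # pad so t is a multiple of the period, then view as [b, c, t // period, period] so the 2D convs see the periodic structure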
- b, c, t = x.shape
- if t % self.period != 0: # pad first
- n_pad = self.period - (t % self.period)
- x = F.pad(x, (0, n_pad), "reflect")
- t = t + n_pad
- x = x.view(b, c, t // self.period, self.period)
-
- for l in self.convs:
- x = l(x)
- x = F.leaky_relu(x, modules.LRELU_SLOPE)
- fmap.append(x)
- x = self.conv_post(x)
- fmap.append(x)
- x = torch.flatten(x, 1, -1)
-
- return x, fmap
-
-
-class DiscriminatorS(torch.nn.Module):
- def __init__(self, use_spectral_norm=False):
- super(DiscriminatorS, self).__init__()
- norm_f = weight_norm if use_spectral_norm == False else spectral_norm
- self.convs = nn.ModuleList([
- norm_f(Conv1d(1, 16, 15, 1, padding=7)),
- norm_f(Conv1d(16, 64, 41, 4, groups=4, padding=20)),
- norm_f(Conv1d(64, 256, 41, 4, groups=16, padding=20)),
- norm_f(Conv1d(256, 1024, 41, 4, groups=64, padding=20)),
- norm_f(Conv1d(1024, 1024, 41, 4, groups=256, padding=20)),
- norm_f(Conv1d(1024, 1024, 5, 1, padding=2)),
- ])
- self.conv_post = norm_f(Conv1d(1024, 1, 3, 1, padding=1))
-
- def forward(self, x):
- fmap = []
-
- for l in self.convs:
- x = l(x)
- x = F.leaky_relu(x, modules.LRELU_SLOPE)
- fmap.append(x)
- x = self.conv_post(x)
- fmap.append(x)
- x = torch.flatten(x, 1, -1)
-
- return x, fmap
-
-
-class MultiPeriodDiscriminator(torch.nn.Module):
- def __init__(self, use_spectral_norm=False):
- super(MultiPeriodDiscriminator, self).__init__()
- periods = [2,3,5,7,11]
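- # one scale discriminator on the raw waveform plus one period discriminator per prime period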
-
- discs = [DiscriminatorS(use_spectral_norm=use_spectral_norm)]
- discs = discs + [DiscriminatorP(i, use_spectral_norm=use_spectral_norm) for i in periods]
- self.discriminators = nn.ModuleList(discs)
-
- def forward(self, y, y_hat):
- y_d_rs = []
- y_d_gs = []
- fmap_rs = []
- fmap_gs = []
- for i, d in enumerate(self.discriminators):
- y_d_r, fmap_r = d(y)
- y_d_g, fmap_g = d(y_hat)
- y_d_rs.append(y_d_r)
- y_d_gs.append(y_d_g)
- fmap_rs.append(fmap_r)
- fmap_gs.append(fmap_g)
-
- return y_d_rs, y_d_gs, fmap_rs, fmap_gs
-
-
-
-class SynthesizerTrn(nn.Module):
- """
- Synthesizer for Training
- """
-
- def __init__(self,
- n_vocab,
- spec_channels,
- segment_size,
- inter_channels,
- hidden_channels,
- filter_channels,
- n_heads,
- n_layers,
- kernel_size,
- p_dropout,
- resblock,
- resblock_kernel_sizes,
- resblock_dilation_sizes,
- upsample_rates,
- upsample_initial_channel,
- upsample_kernel_sizes,
- n_speakers=0,
- gin_channels=0,
- use_sdp=True,
- **kwargs):
-
- super().__init__()
- self.n_vocab = n_vocab
- self.spec_channels = spec_channels
- self.inter_channels = inter_channels
- self.hidden_channels = hidden_channels
- self.filter_channels = filter_channels
- self.n_heads = n_heads
- self.n_layers = n_layers
- self.kernel_size = kernel_size
- self.p_dropout = p_dropout
- self.resblock = resblock
- self.resblock_kernel_sizes = resblock_kernel_sizes
- self.resblock_dilation_sizes = resblock_dilation_sizes
- self.upsample_rates = upsample_rates
- self.upsample_initial_channel = upsample_initial_channel
- self.upsample_kernel_sizes = upsample_kernel_sizes
- self.segment_size = segment_size
- self.n_speakers = n_speakers
- self.gin_channels = gin_channels
-
- self.use_sdp = use_sdp
-
- self.enc_p = TextEncoder(n_vocab,
- inter_channels,
- hidden_channels,
- filter_channels,
- n_heads,
- n_layers,
- kernel_size,
- p_dropout)
- self.dec = Generator(inter_channels, resblock, resblock_kernel_sizes, resblock_dilation_sizes, upsample_rates, upsample_initial_channel, upsample_kernel_sizes, gin_channels=gin_channels)
- self.enc_q = PosteriorEncoder(spec_channels, inter_channels, hidden_channels, 5, 1, 16, gin_channels=gin_channels)
- self.flow = ResidualCouplingBlock(inter_channels, hidden_channels, 5, 1, 4, gin_channels=gin_channels)
-
- if use_sdp:
- self.dp = StochasticDurationPredictor(hidden_channels, 192, 3, 0.5, 4, gin_channels=gin_channels)
- else:
- self.dp = DurationPredictor(hidden_channels, 256, 3, 0.5, gin_channels=gin_channels)
-
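- # note: emb_g is only created when n_speakers > 1, but forward()/infer() use it whenever n_speakers > 0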
- if n_speakers > 1:
- self.emb_g = nn.Embedding(n_speakers, gin_channels)
-
- def forward(self, x, x_lengths, y, y_lengths, sid=None):
-
- x, m_p, logs_p, x_mask = self.enc_p(x, x_lengths)
- if self.n_speakers > 0:
- g = self.emb_g(sid).unsqueeze(-1) # [b, h, 1]
- else:
- g = None
-
- z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g)
- z_p = self.flow(z, y_mask, g=g)
-
- with torch.no_grad():
- # negative cross-entropy
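- # neg_cent[b, t_spec, t_text]: log-likelihood of each latent frame z_p under each text prior N(m_p, exp(logs_p)^2)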
- s_p_sq_r = torch.exp(-2 * logs_p) # [b, d, t]
- neg_cent1 = torch.sum(-0.5 * math.log(2 * math.pi) - logs_p, [1], keepdim=True) # [b, 1, t_s]
- neg_cent2 = torch.matmul(-0.5 * (z_p ** 2).transpose(1, 2), s_p_sq_r) # [b, t_t, d] x [b, d, t_s] = [b, t_t, t_s]
- neg_cent3 = torch.matmul(z_p.transpose(1, 2), (m_p * s_p_sq_r)) # [b, t_t, d] x [b, d, t_s] = [b, t_t, t_s]
- neg_cent4 = torch.sum(-0.5 * (m_p ** 2) * s_p_sq_r, [1], keepdim=True) # [b, 1, t_s]
- neg_cent = neg_cent1 + neg_cent2 + neg_cent3 + neg_cent4
-
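- # run monotonic alignment search on the masked score matrix to get a hard text-to-frame alignment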
- attn_mask = torch.unsqueeze(x_mask, 2) * torch.unsqueeze(y_mask, -1)
- attn = monotonic_align.maximum_path(neg_cent, attn_mask.squeeze(1)).unsqueeze(1).detach()
-
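- # w: duration of each input token from the hard alignment, used as the target for the duration predictor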
- w = attn.sum(2)
- if self.use_sdp:
- l_length = self.dp(x, x_mask, w, g=g)
- l_length = l_length / torch.sum(x_mask)
- else:
- logw_ = torch.log(w + 1e-6) * x_mask
- logw = self.dp(x, x_mask, g=g)
- l_length = torch.sum((logw - logw_)**2, [1,2]) / torch.sum(x_mask) # for averaging
-
- # expand the per-token prior (m_p, logs_p) to frame level using the hard alignment
- m_p = torch.matmul(attn.squeeze(1), m_p.transpose(1, 2)).transpose(1, 2)
- logs_p = torch.matmul(attn.squeeze(1), logs_p.transpose(1, 2)).transpose(1, 2)
-
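- # train the decoder on a random slice of z to keep memory usage bounded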
- z_slice, ids_slice = commons.rand_slice_segments(z, y_lengths, self.segment_size)
- o = self.dec(z_slice, g=g)
- return o, l_length, attn, ids_slice, x_mask, y_mask, (z, z_p, m_p, logs_p, m_q, logs_q)
-
- def infer(self, x, x_lengths, sid=None, noise_scale=1, length_scale=1, noise_scale_w=1., max_len=None):
- x, m_p, logs_p, x_mask = self.enc_p(x, x_lengths)
- if self.n_speakers > 0:
- g = self.emb_g(sid).unsqueeze(-1) # [b, h, 1]
- else:
- g = None
-
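- # predict log-durations; noise_scale_w controls the sampling noise of the stochastic duration predictor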
- if self.use_sdp:
- logw = self.dp(x, x_mask, g=g, reverse=True, noise_scale=noise_scale_w)
- else:
- logw = self.dp(x, x_mask, g=g)
- w = torch.exp(logw) * x_mask * length_scale
- w_ceil = torch.ceil(w)
- y_lengths = torch.clamp_min(torch.sum(w_ceil, [1, 2]), 1).long()
- y_mask = torch.unsqueeze(commons.sequence_mask(y_lengths, None), 1).to(x_mask.dtype)
- attn_mask = torch.unsqueeze(x_mask, 2) * torch.unsqueeze(y_mask, -1)
- attn = commons.generate_path(w_ceil, attn_mask)
-
- m_p = torch.matmul(attn.squeeze(1), m_p.transpose(1, 2)).transpose(1, 2) # [b, t', t], [b, t, d] -> [b, d, t']
- logs_p = torch.matmul(attn.squeeze(1), logs_p.transpose(1, 2)).transpose(1, 2) # [b, t', t], [b, t, d] -> [b, d, t']
-
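- # sample from the expanded prior, invert the flow, and decode the masked latent (optionally truncated to max_len) into a waveform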
- z_p = m_p + torch.randn_like(m_p) * torch.exp(logs_p) * noise_scale
- z = self.flow(z_p, y_mask, g=g, reverse=True)
- o = self.dec((z * y_mask)[:,:,:max_len], g=g)
- return o, attn, y_mask, (z, z_p, m_p, logs_p)
-
- def voice_conversion(self, y, y_lengths, sid_src, sid_tgt):
- assert self.n_speakers > 0, "n_speakers must be larger than 0."
- g_src = self.emb_g(sid_src).unsqueeze(-1)
- g_tgt = self.emb_g(sid_tgt).unsqueeze(-1)
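- # encode with the source speaker, map to the speaker-independent prior via the flow, then invert the flow with the target speaker embedding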
- z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g_src)
- z_p = self.flow(z, y_mask, g=g_src)
- z_hat = self.flow(z_p, y_mask, g=g_tgt, reverse=True)
- o_hat = self.dec(z_hat * y_mask, g=g_tgt)
- return o_hat, y_mask, (z, z_p, z_hat)
-
diff --git a/spaces/wuhuik/bingo/src/components/ui/select.tsx b/spaces/wuhuik/bingo/src/components/ui/select.tsx
deleted file mode 100644
index 77f12c2996f541b97663de4c9e20ab34d4ec2fac..0000000000000000000000000000000000000000
--- a/spaces/wuhuik/bingo/src/components/ui/select.tsx
+++ /dev/null
@@ -1,123 +0,0 @@
-'use client'
-
-import * as React from 'react'
-import * as SelectPrimitive from '@radix-ui/react-select'
-
-import { cn } from '@/lib/utils'
-import {
- IconArrowDown,
- IconCheck,
- IconChevronUpDown
-} from '@/components/ui/icons'
-
-const Select = SelectPrimitive.Root
-
-const SelectGroup = SelectPrimitive.Group
-
-const SelectValue = SelectPrimitive.Value
-
-const SelectTrigger = React.forwardRef<
- React.ElementRef<typeof SelectPrimitive.Trigger>,
- React.ComponentPropsWithoutRef<typeof SelectPrimitive.Trigger>
->(({ className, children, ...props }, ref) => (
-
- {children}
-
-
-
-
-))
-SelectTrigger.displayName = SelectPrimitive.Trigger.displayName
-
-const SelectContent = React.forwardRef<
- React.ElementRef<typeof SelectPrimitive.Content>,
- React.ComponentPropsWithoutRef<typeof SelectPrimitive.Content>
->(({ className, children, position = 'popper', ...props }, ref) => (
-
-
-
- {children}
-
-
-
-))
-SelectContent.displayName = SelectPrimitive.Content.displayName
-
-const SelectLabel = React.forwardRef<
- React.ElementRef<typeof SelectPrimitive.Label>,
- React.ComponentPropsWithoutRef<typeof SelectPrimitive.Label>
->(({ className, ...props }, ref) => (
-
-))
-SelectLabel.displayName = SelectPrimitive.Label.displayName
-
-const SelectItem = React.forwardRef<
- React.ElementRef<typeof SelectPrimitive.Item>,
- React.ComponentPropsWithoutRef<typeof SelectPrimitive.Item>
->(({ className, children, ...props }, ref) => (
-
-
-
-
-
-
- {children}
-
-))
-SelectItem.displayName = SelectPrimitive.Item.displayName
-
-const SelectSeparator = React.forwardRef<
- React.ElementRef<typeof SelectPrimitive.Separator>,
- React.ComponentPropsWithoutRef<typeof SelectPrimitive.Separator>
->(({ className, ...props }, ref) => (
-
-))
-SelectSeparator.displayName = SelectPrimitive.Separator.displayName
-
-export {
- Select,
- SelectGroup,
- SelectValue,
- SelectTrigger,
- SelectContent,
- SelectLabel,
- SelectItem,
- SelectSeparator
-}
diff --git a/spaces/xdecoder/Instruct-X-Decoder/xdecoder/backbone/build.py b/spaces/xdecoder/Instruct-X-Decoder/xdecoder/backbone/build.py
deleted file mode 100644
index a559fa6a010d3379ff5fcbeb43c510122988735f..0000000000000000000000000000000000000000
--- a/spaces/xdecoder/Instruct-X-Decoder/xdecoder/backbone/build.py
+++ /dev/null
@@ -1,11 +0,0 @@
-from .registry import model_entrypoints
-from .registry import is_model
-
-from .backbone import *
-
-def build_backbone(config, **kwargs):
- model_name = config['MODEL']['BACKBONE']['NAME']
- if not is_model(model_name):
- raise ValueError(f'Unknown model: {model_name}')
-
- return model_entrypoints(model_name)(config, **kwargs)
\ No newline at end of file
diff --git a/spaces/xdecoder/Instruct-X-Decoder/xdecoder/language/build.py b/spaces/xdecoder/Instruct-X-Decoder/xdecoder/language/build.py
deleted file mode 100644
index 8d9acdf9766e3bc1184c4200ef4dace3437617e4..0000000000000000000000000000000000000000
--- a/spaces/xdecoder/Instruct-X-Decoder/xdecoder/language/build.py
+++ /dev/null
@@ -1,11 +0,0 @@
-from .registry import model_entrypoints
-from .registry import is_model
-
-
-def build_language_encoder(config, **kwargs):
- model_name = config['MODEL']['TEXT']['ARCH']
-
- if not is_model(model_name):
- raise ValueError(f'Unknown model: {model_name}')
-
- return model_entrypoints(model_name)(config, **kwargs)
\ No newline at end of file
diff --git a/spaces/xfys/yolov5_tracking/trackers/strong_sort/deep/reid/scripts/main.py b/spaces/xfys/yolov5_tracking/trackers/strong_sort/deep/reid/scripts/main.py
deleted file mode 100644
index 61aa49ddd3cb8ba9c9bf1e4c281b765b8d971c10..0000000000000000000000000000000000000000
--- a/spaces/xfys/yolov5_tracking/trackers/strong_sort/deep/reid/scripts/main.py
+++ /dev/null
@@ -1,191 +0,0 @@
-import sys
-import time
-import os.path as osp
-import argparse
-import torch
-import torch.nn as nn
-
-import torchreid
-from torchreid.utils import (
- Logger, check_isfile, set_random_seed, collect_env_info,
- resume_from_checkpoint, load_pretrained_weights, compute_model_complexity
-)
-
-from default_config import (
- imagedata_kwargs, optimizer_kwargs, videodata_kwargs, engine_run_kwargs,
- get_default_config, lr_scheduler_kwargs
-)
-
-
-def build_datamanager(cfg):
- if cfg.data.type == 'image':
- return torchreid.data.ImageDataManager(**imagedata_kwargs(cfg))
- else:
- return torchreid.data.VideoDataManager(**videodata_kwargs(cfg))
-
-
-def build_engine(cfg, datamanager, model, optimizer, scheduler):
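- # select the engine class from the (data type, loss) combination: image/video x softmax/triplet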
- if cfg.data.type == 'image':
- if cfg.loss.name == 'softmax':
- engine = torchreid.engine.ImageSoftmaxEngine(
- datamanager,
- model,
- optimizer=optimizer,
- scheduler=scheduler,
- use_gpu=cfg.use_gpu,
- label_smooth=cfg.loss.softmax.label_smooth
- )
-
- else:
- engine = torchreid.engine.ImageTripletEngine(
- datamanager,
- model,
- optimizer=optimizer,
- margin=cfg.loss.triplet.margin,
- weight_t=cfg.loss.triplet.weight_t,
- weight_x=cfg.loss.triplet.weight_x,
- scheduler=scheduler,
- use_gpu=cfg.use_gpu,
- label_smooth=cfg.loss.softmax.label_smooth
- )
-
- else:
- if cfg.loss.name == 'softmax':
- engine = torchreid.engine.VideoSoftmaxEngine(
- datamanager,
- model,
- optimizer=optimizer,
- scheduler=scheduler,
- use_gpu=cfg.use_gpu,
- label_smooth=cfg.loss.softmax.label_smooth,
- pooling_method=cfg.video.pooling_method
- )
-
- else:
- engine = torchreid.engine.VideoTripletEngine(
- datamanager,
- model,
- optimizer=optimizer,
- margin=cfg.loss.triplet.margin,
- weight_t=cfg.loss.triplet.weight_t,
- weight_x=cfg.loss.triplet.weight_x,
- scheduler=scheduler,
- use_gpu=cfg.use_gpu,
- label_smooth=cfg.loss.softmax.label_smooth
- )
-
- return engine
-
-
-def reset_config(cfg, args):
- if args.root:
- cfg.data.root = args.root
- if args.sources:
- cfg.data.sources = args.sources
- if args.targets:
- cfg.data.targets = args.targets
- if args.transforms:
- cfg.data.transforms = args.transforms
-
-
-def check_cfg(cfg):
- if cfg.loss.name == 'triplet' and cfg.loss.triplet.weight_x == 0:
- assert cfg.train.fixbase_epoch == 0, \
- 'The output of classifier is not included in the computational graph'
-
-
-def main():
- parser = argparse.ArgumentParser(
- formatter_class=argparse.ArgumentDefaultsHelpFormatter
- )
- parser.add_argument(
- '--config-file', type=str, default='', help='path to config file'
- )
- parser.add_argument(
- '-s',
- '--sources',
- type=str,
- nargs='+',
- help='source datasets (delimited by space)'
- )
- parser.add_argument(
- '-t',
- '--targets',
- type=str,
- nargs='+',
- help='target datasets (delimited by space)'
- )
- parser.add_argument(
- '--transforms', type=str, nargs='+', help='data augmentation'
- )
- parser.add_argument(
- '--root', type=str, default='', help='path to data root'
- )
- parser.add_argument(
- 'opts',
- default=None,
- nargs=argparse.REMAINDER,
- help='Modify config options using the command-line'
- )
- args = parser.parse_args()
-
- cfg = get_default_config()
- cfg.use_gpu = torch.cuda.is_available()
- if args.config_file:
- cfg.merge_from_file(args.config_file)
- reset_config(cfg, args)
- cfg.merge_from_list(args.opts)
- set_random_seed(cfg.train.seed)
- check_cfg(cfg)
-
- log_name = 'test.log' if cfg.test.evaluate else 'train.log'
- log_name += time.strftime('-%Y-%m-%d-%H-%M-%S')
- sys.stdout = Logger(osp.join(cfg.data.save_dir, log_name))
-
- print('Show configuration\n{}\n'.format(cfg))
- print('Collecting env info ...')
- print('** System info **\n{}\n'.format(collect_env_info()))
-
- if cfg.use_gpu:
- torch.backends.cudnn.benchmark = True
-
- datamanager = build_datamanager(cfg)
-
- print('Building model: {}'.format(cfg.model.name))
- model = torchreid.models.build_model(
- name=cfg.model.name,
- num_classes=datamanager.num_train_pids,
- loss=cfg.loss.name,
- pretrained=cfg.model.pretrained,
- use_gpu=cfg.use_gpu
- )
- num_params, flops = compute_model_complexity(
- model, (1, 3, cfg.data.height, cfg.data.width)
- )
- print('Model complexity: params={:,} flops={:,}'.format(num_params, flops))
-
- if cfg.model.load_weights and check_isfile(cfg.model.load_weights):
- load_pretrained_weights(model, cfg.model.load_weights)
-
- if cfg.use_gpu:
- model = nn.DataParallel(model).cuda()
-
- optimizer = torchreid.optim.build_optimizer(model, **optimizer_kwargs(cfg))
- scheduler = torchreid.optim.build_lr_scheduler(
- optimizer, **lr_scheduler_kwargs(cfg)
- )
-
- if cfg.model.resume and check_isfile(cfg.model.resume):
- cfg.train.start_epoch = resume_from_checkpoint(
- cfg.model.resume, model, optimizer=optimizer, scheduler=scheduler
- )
-
- print(
- 'Building {}-engine for {}-reid'.format(cfg.loss.name, cfg.data.type)
- )
- engine = build_engine(cfg, datamanager, model, optimizer, scheduler)
- engine.run(**engine_run_kwargs(cfg))
-
-
-if __name__ == '__main__':
- main()
diff --git a/spaces/xnetba/Chat_advance/modules/models/inspurai.py b/spaces/xnetba/Chat_advance/modules/models/inspurai.py
deleted file mode 100644
index c590859fa7717d032290ccc490d22f4494541576..0000000000000000000000000000000000000000
--- a/spaces/xnetba/Chat_advance/modules/models/inspurai.py
+++ /dev/null
@@ -1,345 +0,0 @@
-# Code mainly adapted from https://github.com/Shawn-Inspur/Yuan-1.0/blob/main/yuan_api/inspurai.py
-
-import hashlib
-import json
-import os
-import time
-import uuid
-from datetime import datetime
-
-import pytz
-import requests
-
-from modules.presets import NO_APIKEY_MSG
-from modules.models.base_model import BaseLLMModel
-
-
-class Example:
- """ store some examples(input, output pairs and formats) for few-shots to prime the model."""
-
- def __init__(self, inp, out):
- self.input = inp
- self.output = out
- self.id = uuid.uuid4().hex
-
- def get_input(self):
- """return the input of the example."""
- return self.input
-
- def get_output(self):
- """Return the output of the example."""
- return self.output
-
- def get_id(self):
- """Returns the unique ID of the example."""
- return self.id
-
- def as_dict(self):
- return {
- "input": self.get_input(),
- "output": self.get_output(),
- "id": self.get_id(),
- }
-
-
-class Yuan:
- """The main class for a user to interface with the Inspur Yuan API.
- A user can set account info and add few-shot examples to the API request.
- """
-
- def __init__(self,
- engine='base_10B',
- temperature=0.9,
- max_tokens=100,
- input_prefix='',
- input_suffix='\n',
- output_prefix='答:',
- output_suffix='\n\n',
- append_output_prefix_to_query=False,
- topK=1,
- topP=0.9,
- frequencyPenalty=1.2,
- responsePenalty=1.2,
- noRepeatNgramSize=2):
-
- self.examples = {}
- self.engine = engine
- self.temperature = temperature
- self.max_tokens = max_tokens
- self.topK = topK
- self.topP = topP
- self.frequencyPenalty = frequencyPenalty
- self.responsePenalty = responsePenalty
- self.noRepeatNgramSize = noRepeatNgramSize
- self.input_prefix = input_prefix
- self.input_suffix = input_suffix
- self.output_prefix = output_prefix
- self.output_suffix = output_suffix
- self.append_output_prefix_to_query = append_output_prefix_to_query
- self.stop = (output_suffix + input_prefix).strip()
- self.api = None
-
- # if self.engine not in ['base_10B','translate','dialog']:
- # raise Exception('engine must be one of [\'base_10B\',\'translate\',\'dialog\'] ')
- def set_account(self, api_key):
- account = api_key.split('||')
- self.api = YuanAPI(user=account[0], phone=account[1])
-
- def add_example(self, ex):
- """Add an example to the object.
- Example must be an instance of the Example class."""
- assert isinstance(ex, Example), "Please create an Example object."
- self.examples[ex.get_id()] = ex
-
- def delete_example(self, id):
- """Delete example with the specific id."""
- if id in self.examples:
- del self.examples[id]
-
- def get_example(self, id):
- """Get a single example."""
- return self.examples.get(id, None)
-
- def get_all_examples(self):
- """Returns all examples as a list of dicts."""
- return {k: v.as_dict() for k, v in self.examples.items()}
-
- def get_prime_text(self):
- """Formats all examples to prime the model."""
- return "".join(
- [self.format_example(ex) for ex in self.examples.values()])
-
- def get_engine(self):
- """Returns the engine specified for the API."""
- return self.engine
-
- def get_temperature(self):
- """Returns the temperature specified for the API."""
- return self.temperature
-
- def get_max_tokens(self):
- """Returns the max tokens specified for the API."""
- return self.max_tokens
-
- def craft_query(self, prompt):
- """Creates the query for the API request."""
- q = self.get_prime_text(
- ) + self.input_prefix + prompt + self.input_suffix
- if self.append_output_prefix_to_query:
- q = q + self.output_prefix
-
- return q
-
- def format_example(self, ex):
- """Formats the input, output pair."""
- return self.input_prefix + ex.get_input(
- ) + self.input_suffix + self.output_prefix + ex.get_output(
- ) + self.output_suffix
-
- def response(self,
- query,
- engine='base_10B',
- max_tokens=20,
- temperature=0.9,
- topP=0.1,
- topK=1,
- frequencyPenalty=1.0,
- responsePenalty=1.0,
- noRepeatNgramSize=0):
- """Obtains the original result returned by the API."""
-
- if self.api is None:
- return NO_APIKEY_MSG
- try:
- # requestId = submit_request(query,temperature,topP,topK,max_tokens, engine)
- requestId = self.api.submit_request(query, temperature, topP, topK, max_tokens, engine, frequencyPenalty,
- responsePenalty, noRepeatNgramSize)
- response_text = self.api.reply_request(requestId)
- except Exception as e:
- raise e
-
- return response_text
-
- def del_special_chars(self, msg):
- special_chars = ['<unk>', '<eod>', '#', '▃', '▁', '▂', ' ']
- for char in special_chars:
- msg = msg.replace(char, '')
- return msg
-
- def submit_API(self, prompt, trun=[]):
- """Submit prompt to yuan API interface and obtain an pure text reply.
- :prompt: Question or any content a user may input.
- :return: pure text response."""
- query = self.craft_query(prompt)
- res = self.response(query, engine=self.engine,
- max_tokens=self.max_tokens,
- temperature=self.temperature,
- topP=self.topP,
- topK=self.topK,
- frequencyPenalty=self.frequencyPenalty,
- responsePenalty=self.responsePenalty,
- noRepeatNgramSize=self.noRepeatNgramSize)
- if 'resData' in res and res['resData'] != None:
- txt = res['resData']
- else:
- txt = '模型返回为空,请尝试修改输入'
- # post-processing specific to the translation model
- if self.engine == 'translate':
- txt = txt.replace(' ##', '').replace(' "', '"').replace(": ", ":").replace(" ,", ",") \
- .replace('英文:', '').replace('文:', '').replace("( ", "(").replace(" )", ")")
- else:
- txt = txt.replace(' ', '')
- txt = self.del_special_chars(txt)
-
- # truncate the model output at any of the stop strings given in trun
- if isinstance(trun, str):
- trun = [trun]
- try:
- if trun != None and isinstance(trun, list) and trun != []:
- for tr in trun:
- if tr in txt and tr != "":
- txt = txt[:txt.index(tr)]
- else:
- continue
- except:
- return txt
- return txt
-
-
-class YuanAPI:
- ACCOUNT = ''
- PHONE = ''
-
- SUBMIT_URL = "http://api.airyuan.cn:32102/v1/interface/api/infer/getRequestId?"
- REPLY_URL = "http://api.airyuan.cn:32102/v1/interface/api/result?"
-
- def __init__(self, user, phone):
- self.ACCOUNT = user
- self.PHONE = phone
-
- @staticmethod
- def code_md5(str):
- code = str.encode("utf-8")
- m = hashlib.md5()
- m.update(code)
- result = m.hexdigest()
- return result
-
- @staticmethod
- def rest_get(url, header, timeout, show_error=False):
- """Call the REST GET method."""
- try:
- response = requests.get(url, headers=header, timeout=timeout, verify=False)
- return response
- except Exception as exception:
- if show_error:
- print(exception)
- return None
-
- def header_generation(self):
- """Generate header for API request."""
- t = datetime.now(pytz.timezone("Asia/Shanghai")).strftime("%Y-%m-%d")
- token = self.code_md5(self.ACCOUNT + self.PHONE + t)
- headers = {'token': token}
- return headers
-
- def submit_request(self, query, temperature, topP, topK, max_tokens, engine, frequencyPenalty, responsePenalty,
- noRepeatNgramSize):
- """Submit query to the backend server and get requestID."""
- headers = self.header_generation()
- # url=SUBMIT_URL + "account={0}&data={1}&temperature={2}&topP={3}&topK={4}&tokensToGenerate={5}&type={6}".format(ACCOUNT,query,temperature,topP,topK,max_tokens,"api")
- # url=SUBMIT_URL + "engine={0}&account={1}&data={2}&temperature={3}&topP={4}&topK={5}&tokensToGenerate={6}" \
- # "&type={7}".format(engine,ACCOUNT,query,temperature,topP,topK, max_tokens,"api")
- url = self.SUBMIT_URL + "engine={0}&account={1}&data={2}&temperature={3}&topP={4}&topK={5}&tokensToGenerate={6}" \
- "&type={7}&frequencyPenalty={8}&responsePenalty={9}&noRepeatNgramSize={10}". \
- format(engine, self.ACCOUNT, query, temperature, topP, topK, max_tokens, "api", frequencyPenalty,
- responsePenalty, noRepeatNgramSize)
- response = self.rest_get(url, headers, 30)
- response_text = json.loads(response.text)
- if response_text["flag"]:
- requestId = response_text["resData"]
- return requestId
- else:
- raise RuntimeWarning(response_text)
-
- def reply_request(self, requestId, cycle_count=5):
- """Check reply API to get the inference response."""
- url = self.REPLY_URL + "account={0}&requestId={1}".format(self.ACCOUNT, requestId)
- headers = self.header_generation()
- response_text = {"flag": True, "resData": None}
- for i in range(cycle_count):
- response = self.rest_get(url, headers, 30, show_error=True)
- response_text = json.loads(response.text)
- if response_text["resData"] is not None:
- return response_text
- if response_text["flag"] is False and i == cycle_count - 1:
- raise RuntimeWarning(response_text)
- time.sleep(3)
- return response_text
-
-
-class Yuan_Client(BaseLLMModel):
-
- def __init__(self, model_name, api_key, user_name="", system_prompt=None):
- super().__init__(model_name=model_name, user=user_name)
- self.history = []
- self.api_key = api_key
- self.system_prompt = system_prompt
-
- self.input_prefix = ""
- self.output_prefix = ""
-
- def set_text_prefix(self, option, value):
- if option == 'input_prefix':
- self.input_prefix = value
- elif option == 'output_prefix':
- self.output_prefix = value
-
- def get_answer_at_once(self):
- # Yuan temperature is in (0, 1] while the base model uses [0, 2]; Yuan 0.9 corresponds to base 1, so convert
- temperature = self.temperature if self.temperature <= 1 else 0.9 + (self.temperature - 1) / 10
- topP = self.top_p
- topK = self.n_choices
- # max_tokens should be in [1,200]
- max_tokens = self.max_generation_token if self.max_generation_token is not None else 50
- if max_tokens > 200:
- max_tokens = 200
- stop = self.stop_sequence if self.stop_sequence is not None else []
- examples = []
- system_prompt = self.system_prompt
- if system_prompt is not None:
- lines = system_prompt.splitlines()
- # TODO: support prefixes in system prompt or settings
- """
- if lines[0].startswith('-'):
- prefixes = lines.pop()[1:].split('|')
- self.input_prefix = prefixes[0]
- if len(prefixes) > 1:
- self.output_prefix = prefixes[1]
- if len(prefixes) > 2:
- stop = prefixes[2].split(',')
- """
- for i in range(0, len(lines), 2):
- in_line = lines[i]
- out_line = lines[i + 1] if i + 1 < len(lines) else ""
- examples.append((in_line, out_line))
- yuan = Yuan(engine=self.model_name.replace('yuanai-1.0-', ''),
- temperature=temperature,
- max_tokens=max_tokens,
- topK=topK,
- topP=topP,
- input_prefix=self.input_prefix,
- input_suffix="",
- output_prefix=self.output_prefix,
- output_suffix="".join(stop),
- )
- if not self.api_key:
- return NO_APIKEY_MSG, 0
- yuan.set_account(self.api_key)
-
- for in_line, out_line in examples:
- yuan.add_example(Example(inp=in_line, out=out_line))
-
- prompt = self.history[-1]["content"]
- answer = yuan.submit_API(prompt, trun=stop)
- return answer, len(answer)
diff --git "a/spaces/xwsm/gpt/crazy_functions/\345\233\276\347\211\207\347\224\237\346\210\220.py" "b/spaces/xwsm/gpt/crazy_functions/\345\233\276\347\211\207\347\224\237\346\210\220.py"
deleted file mode 100644
index 5bf8bc4ba95864dc53f98b7335e654f58c4fed54..0000000000000000000000000000000000000000
--- "a/spaces/xwsm/gpt/crazy_functions/\345\233\276\347\211\207\347\224\237\346\210\220.py"
+++ /dev/null
@@ -1,67 +0,0 @@
-from toolbox import CatchException, update_ui, get_conf, select_api_key
-from .crazy_utils import request_gpt_model_in_new_thread_with_ui_alive
-import datetime
-
-
-def gen_image(llm_kwargs, prompt, resolution="256x256"):
- import requests, json, time, os
- from request_llm.bridge_all import model_info
-
- proxies, = get_conf('proxies')
- # Set up OpenAI API key and model
- api_key = select_api_key(llm_kwargs['api_key'], llm_kwargs['llm_model'])
- chat_endpoint = model_info[llm_kwargs['llm_model']]['endpoint']
- # 'https://api.openai.com/v1/chat/completions'
- img_endpoint = chat_endpoint.replace('chat/completions','images/generations')
- # Generate the image
- url = img_endpoint
- headers = {
- 'Authorization': f"Bearer {api_key}",
- 'Content-Type': 'application/json'
- }
- data = {
- 'prompt': prompt,
- 'n': 1,
- 'size': resolution,
- 'response_format': 'url'
- }
- response = requests.post(url, headers=headers, json=data, proxies=proxies)
- print(response.content)
- image_url = json.loads(response.content.decode('utf8'))['data'][0]['url']
-
- # save the image to a local file
- r = requests.get(image_url, proxies=proxies)
- file_path = 'gpt_log/image_gen/'
- os.makedirs(file_path, exist_ok=True)
- file_name = 'Image' + time.strftime("%Y-%m-%d-%H-%M-%S", time.localtime()) + '.png'
- with open(file_path+file_name, 'wb+') as f: f.write(r.content)
-
-
- return image_url, file_path+file_name
-
-
-
-@CatchException
-def 图片生成(prompt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
- """
- txt            Text typed by the user in the input box, e.g. a passage to translate or a path to files to process
- llm_kwargs     GPT model parameters such as temperature and top_p; usually passed through unchanged
- plugin_kwargs  Plugin parameters; currently unused
- chatbot        Handle of the chat display box, used to show output to the user
- history        Chat history (prior context)
- system_prompt  Silent system prompt passed to GPT
- web_port       Port the software is currently running on
- """
- history = [] # clear the history to avoid overflowing the input
- chatbot.append(("这是什么功能?", "[Local Message] 生成图像, 请先把模型切换至gpt-xxxx或者api2d-xxxx。如果中文效果不理想, 尝试Prompt。正在处理中 ....."))
- yield from update_ui(chatbot=chatbot, history=history) # refresh the UI right away, since the GPT request will take a while
- if ("advanced_arg" in plugin_kwargs) and (plugin_kwargs["advanced_arg"] == ""): plugin_kwargs.pop("advanced_arg")
- resolution = plugin_kwargs.get("advanced_arg", '256x256')
- image_url, image_path = gen_image(llm_kwargs, prompt, resolution)
- chatbot.append([prompt,
- f'图像中转网址: <br/>`{image_url}`<br/>' +
- f'中转网址预览: <br/><div align="center"><img src="{image_url}"></div>' +
- f'本地文件地址: <br/>`{image_path}`<br/>' +
- f'本地文件预览: <br/><div align="center"><img src="file={image_path}"></div>'
- ])
- yield from update_ui(chatbot=chatbot, history=history) # refresh the UI with the result
diff --git a/spaces/xxccc/gpt-academic/crazy_functions/test_project/cpp/longcode/jpgd.cpp b/spaces/xxccc/gpt-academic/crazy_functions/test_project/cpp/longcode/jpgd.cpp
deleted file mode 100644
index 36d06c8e9068570c3e7624895d474f33dbfe3d29..0000000000000000000000000000000000000000
--- a/spaces/xxccc/gpt-academic/crazy_functions/test_project/cpp/longcode/jpgd.cpp
+++ /dev/null
@@ -1,3276 +0,0 @@
-// jpgd.cpp - C++ class for JPEG decompression.
-// Public domain, Rich Geldreich
-// Last updated Apr. 16, 2011
-// Alex Evans: Linear memory allocator (taken from jpge.h).
-//
-// Supports progressive and baseline sequential JPEG image files, and the most common chroma subsampling factors: Y, H1V1, H2V1, H1V2, and H2V2.
-//
-// Chroma upsampling quality: H2V2 is upsampled in the frequency domain, H2V1 and H1V2 are upsampled using point sampling.
-// Chroma upsampling reference: "Fast Scheme for Image Size Change in the Compressed Domain"
-// http://vision.ai.uiuc.edu/~dugad/research/dct/index.html
-
-#include "jpgd.h"
-#include <string.h>
-
-#include <assert.h>
-// BEGIN EPIC MOD
-#define JPGD_ASSERT(x) { assert(x); CA_ASSUME(x); } (void)0
-// END EPIC MOD
-
-#ifdef _MSC_VER
-#pragma warning (disable : 4611) // warning C4611: interaction between '_setjmp' and C++ object destruction is non-portable
-#endif
-
-// Set to 1 to enable freq. domain chroma upsampling on images using H2V2 subsampling (0=faster nearest neighbor sampling).
-// This is slower, but results in higher quality on images with highly saturated colors.
-#define JPGD_SUPPORT_FREQ_DOMAIN_UPSAMPLING 1
-
-#define JPGD_TRUE (1)
-#define JPGD_FALSE (0)
-
-#define JPGD_MAX(a,b) (((a)>(b)) ? (a) : (b))
-#define JPGD_MIN(a,b) (((a)<(b)) ? (a) : (b))
-
-namespace jpgd {
-
- static inline void *jpgd_malloc(size_t nSize) { return FMemory::Malloc(nSize); }
- static inline void jpgd_free(void *p) { FMemory::Free(p); }
-
-// BEGIN EPIC MOD
-//@UE3 - use UE3 BGRA encoding instead of assuming RGBA
- // stolen from IImageWrapper.h
- enum ERGBFormatJPG
- {
- Invalid = -1,
- RGBA = 0,
- BGRA = 1,
- Gray = 2,
- };
- static ERGBFormatJPG jpg_format;
-// END EPIC MOD
-
- // DCT coefficients are stored in this sequence.
- static int g_ZAG[64] = { 0,1,8,16,9,2,3,10,17,24,32,25,18,11,4,5,12,19,26,33,40,48,41,34,27,20,13,6,7,14,21,28,35,42,49,56,57,50,43,36,29,22,15,23,30,37,44,51,58,59,52,45,38,31,39,46,53,60,61,54,47,55,62,63 };
-
- enum JPEG_MARKER
- {
- M_SOF0 = 0xC0, M_SOF1 = 0xC1, M_SOF2 = 0xC2, M_SOF3 = 0xC3, M_SOF5 = 0xC5, M_SOF6 = 0xC6, M_SOF7 = 0xC7, M_JPG = 0xC8,
- M_SOF9 = 0xC9, M_SOF10 = 0xCA, M_SOF11 = 0xCB, M_SOF13 = 0xCD, M_SOF14 = 0xCE, M_SOF15 = 0xCF, M_DHT = 0xC4, M_DAC = 0xCC,
- M_RST0 = 0xD0, M_RST1 = 0xD1, M_RST2 = 0xD2, M_RST3 = 0xD3, M_RST4 = 0xD4, M_RST5 = 0xD5, M_RST6 = 0xD6, M_RST7 = 0xD7,
- M_SOI = 0xD8, M_EOI = 0xD9, M_SOS = 0xDA, M_DQT = 0xDB, M_DNL = 0xDC, M_DRI = 0xDD, M_DHP = 0xDE, M_EXP = 0xDF,
- M_APP0 = 0xE0, M_APP15 = 0xEF, M_JPG0 = 0xF0, M_JPG13 = 0xFD, M_COM = 0xFE, M_TEM = 0x01, M_ERROR = 0x100, RST0 = 0xD0
- };
-
- enum JPEG_SUBSAMPLING { JPGD_GRAYSCALE = 0, JPGD_YH1V1, JPGD_YH2V1, JPGD_YH1V2, JPGD_YH2V2 };
-
-#define CONST_BITS 13
-#define PASS1_BITS 2
-#define SCALEDONE ((int32)1)
-
-#define FIX_0_298631336 ((int32)2446) /* FIX(0.298631336) */
-#define FIX_0_390180644 ((int32)3196) /* FIX(0.390180644) */
-#define FIX_0_541196100 ((int32)4433) /* FIX(0.541196100) */
-#define FIX_0_765366865 ((int32)6270) /* FIX(0.765366865) */
-#define FIX_0_899976223 ((int32)7373) /* FIX(0.899976223) */
-#define FIX_1_175875602 ((int32)9633) /* FIX(1.175875602) */
-#define FIX_1_501321110 ((int32)12299) /* FIX(1.501321110) */
-#define FIX_1_847759065 ((int32)15137) /* FIX(1.847759065) */
-#define FIX_1_961570560 ((int32)16069) /* FIX(1.961570560) */
-#define FIX_2_053119869 ((int32)16819) /* FIX(2.053119869) */
-#define FIX_2_562915447 ((int32)20995) /* FIX(2.562915447) */
-#define FIX_3_072711026 ((int32)25172) /* FIX(3.072711026) */
-
-#define DESCALE(x,n) (((x) + (SCALEDONE << ((n)-1))) >> (n))
-#define DESCALE_ZEROSHIFT(x,n) (((x) + (128 << (n)) + (SCALEDONE << ((n)-1))) >> (n))
-
-#define MULTIPLY(var, cnst) ((var) * (cnst))
-
-#define CLAMP(i) ((static_cast<uint>(i) > 255) ? (((~i) >> 31) & 0xFF) : (i))
-
- // Compiler creates a fast path 1D IDCT for X non-zero columns
- template <int NONZERO_COLS>
- struct Row
- {
- static void idct(int* pTemp, const jpgd_block_t* pSrc)
- {
- // ACCESS_COL() will be optimized at compile time to either an array access, or 0.
-#define ACCESS_COL(x) (((x) < NONZERO_COLS) ? (int)pSrc[x] : 0)
-
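- // even part of the 8-point fixed-point IDCT (inputs 0, 2, 4, 6)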
- const int z2 = ACCESS_COL(2), z3 = ACCESS_COL(6);
-
- const int z1 = MULTIPLY(z2 + z3, FIX_0_541196100);
- const int tmp2 = z1 + MULTIPLY(z3, - FIX_1_847759065);
- const int tmp3 = z1 + MULTIPLY(z2, FIX_0_765366865);
-
- const int tmp0 = (ACCESS_COL(0) + ACCESS_COL(4)) << CONST_BITS;
- const int tmp1 = (ACCESS_COL(0) - ACCESS_COL(4)) << CONST_BITS;
-
- const int tmp10 = tmp0 + tmp3, tmp13 = tmp0 - tmp3, tmp11 = tmp1 + tmp2, tmp12 = tmp1 - tmp2;
-
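- // odd part (inputs 1, 3, 5, 7)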
- const int atmp0 = ACCESS_COL(7), atmp1 = ACCESS_COL(5), atmp2 = ACCESS_COL(3), atmp3 = ACCESS_COL(1);
-
- const int bz1 = atmp0 + atmp3, bz2 = atmp1 + atmp2, bz3 = atmp0 + atmp2, bz4 = atmp1 + atmp3;
- const int bz5 = MULTIPLY(bz3 + bz4, FIX_1_175875602);
-
- const int az1 = MULTIPLY(bz1, - FIX_0_899976223);
- const int az2 = MULTIPLY(bz2, - FIX_2_562915447);
- const int az3 = MULTIPLY(bz3, - FIX_1_961570560) + bz5;
- const int az4 = MULTIPLY(bz4, - FIX_0_390180644) + bz5;
-
- const int btmp0 = MULTIPLY(atmp0, FIX_0_298631336) + az1 + az3;
- const int btmp1 = MULTIPLY(atmp1, FIX_2_053119869) + az2 + az4;
- const int btmp2 = MULTIPLY(atmp2, FIX_3_072711026) + az2 + az3;
- const int btmp3 = MULTIPLY(atmp3, FIX_1_501321110) + az1 + az4;
-
- pTemp[0] = DESCALE(tmp10 + btmp3, CONST_BITS-PASS1_BITS);
- pTemp[7] = DESCALE(tmp10 - btmp3, CONST_BITS-PASS1_BITS);
- pTemp[1] = DESCALE(tmp11 + btmp2, CONST_BITS-PASS1_BITS);
- pTemp[6] = DESCALE(tmp11 - btmp2, CONST_BITS-PASS1_BITS);
- pTemp[2] = DESCALE(tmp12 + btmp1, CONST_BITS-PASS1_BITS);
- pTemp[5] = DESCALE(tmp12 - btmp1, CONST_BITS-PASS1_BITS);
- pTemp[3] = DESCALE(tmp13 + btmp0, CONST_BITS-PASS1_BITS);
- pTemp[4] = DESCALE(tmp13 - btmp0, CONST_BITS-PASS1_BITS);
- }
- };
-
- template <>
- struct Row<0>
- {
- static void idct(int* pTemp, const jpgd_block_t* pSrc)
- {
-#ifdef _MSC_VER
- pTemp; pSrc;
-#endif
- }
- };
-
- template <>
- struct Row<1>
- {
- static void idct(int* pTemp, const jpgd_block_t* pSrc)
- {
- const int dcval = (pSrc[0] << PASS1_BITS);
-
- pTemp[0] = dcval;
- pTemp[1] = dcval;
- pTemp[2] = dcval;
- pTemp[3] = dcval;
- pTemp[4] = dcval;
- pTemp[5] = dcval;
- pTemp[6] = dcval;
- pTemp[7] = dcval;
- }
- };
-
- // Compiler creates a fast path 1D IDCT for X non-zero rows
- template <int NONZERO_ROWS>