from typing import Optional

import torch
import torch.utils.checkpoint
from torch import Tensor, nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss

from ...activations import ACT2FN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_outputs import (
    BaseModelOutputWithNoAttention,
    BaseModelOutputWithPoolingAndNoAttention,
    ImageClassifierOutputWithNoAttention,
)
from ...modeling_utils import PreTrainedModel
from ...utils import logging
from .configuration_regnet import RegNetConfig


logger = logging.get_logger(__name__)

# General docstring
_CONFIG_FOR_DOC = "RegNetConfig"

# Base docstring
_CHECKPOINT_FOR_DOC = "facebook/regnet-y-040"
_EXPECTED_OUTPUT_SHAPE = [1, 1088, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "facebook/regnet-y-040"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"

REGNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "facebook/regnet-y-040",
    # See all regnet models at https://huggingface.co/models?filter=regnet
]


class RegNetConvLayer(nn.Module):
    def __init__(
        self,
        in_channels: int,
        out_channels: int,
        kernel_size: int = 3,
        stride: int = 1,
        groups: int = 1,
        activation: Optional[str] = "relu",
    ):
        super().__init__()
        self.convolution = nn.Conv2d(
            in_channels,
            out_channels,
            kernel_size=kernel_size,
            stride=stride,
            padding=kernel_size // 2,
            groups=groups,
            bias=False,
        )
        self.normalization = nn.BatchNorm2d(out_channels)
        self.activation = ACT2FN[activation] if activation is not None else nn.Identity()

    def forward(self, hidden_state):
        hidden_state = self.convolution(hidden_state)
        hidden_state = self.normalization(hidden_state)
        hidden_state = self.activation(hidden_state)
        return hidden_state


class RegNetEmbeddings(nn.Module):
    def __init__(self, config: RegNetConfig):
        super().__init__()
        self.embedder = RegNetConvLayer(
            config.num_channels, config.embedding_size, kernel_size=3, stride=2, activation=config.hidden_act
        )
        self.num_channels = config.num_channels

    def forward(self, pixel_values):
        num_channels = pixel_values.shape[1]
        if num_channels != self.num_channels:
            raise ValueError(
                "Make sure that the channel dimension of the pixel values match with the one set in the configuration."
            )
        hidden_state = self.embedder(pixel_values)
        return hidden_state


class RegNetShortCut(nn.Module):
    def __init__(self, in_channels: int, out_channels: int, stride: int = 2):
        super().__init__()
        self.convolution = nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=stride, bias=False)
        self.normalization = nn.BatchNorm2d(out_channels)

    def forward(self, input: Tensor) -> Tensor:
        hidden_state = self.convolution(input)
        hidden_state = self.normalization(hidden_state)
        return hidden_state


class RegNetSELayer(nn.Module):
    def __init__(self, in_channels: int, reduced_channels: int):
        super().__init__()
        self.pooler = nn.AdaptiveAvgPool2d((1, 1))
        self.attention = nn.Sequential(
            nn.Conv2d(in_channels, reduced_channels, kernel_size=1),
            nn.ReLU(),
            nn.Conv2d(reduced_channels, in_channels, kernel_size=1),
            nn.Sigmoid(),
        )

    def forward(self, hidden_state):
        pooled = self.pooler(hidden_state)
        attention = self.attention(pooled)
        hidden_state = hidden_state * attention
        return hidden_state


class RegNetXLayer(nn.Module):
    def __init__(self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 1):
        super().__init__()
        should_apply_shortcut = in_channels != out_channels or stride != 1
        groups = max(1, out_channels // config.groups_width)
        self.shortcut = (
            RegNetShortCut(in_channels, out_channels, stride=stride) if should_apply_shortcut else nn.Identity()
        )
        self.layer = nn.Sequential(
            RegNetConvLayer(in_channels, out_channels, kernel_size=1, activation=config.hidden_act),
            RegNetConvLayer(out_channels, out_channels, stride=stride, groups=groups, activation=config.hidden_act),
            RegNetConvLayer(out_channels, out_channels, kernel_size=1, activation=None),
        )
        self.activation = ACT2FN[config.hidden_act]

    def forward(self, hidden_state):
        residual = hidden_state
        hidden_state = self.layer(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state


class RegNetYLayer(nn.Module):
    def __init__(self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 1):
        super().__init__()
        should_apply_shortcut = in_channels != out_channels or stride != 1
        groups = max(1, out_channels // config.groups_width)
        self.shortcut = (
            RegNetShortCut(in_channels, out_channels, stride=stride) if should_apply_shortcut else nn.Identity()
        )
        self.layer = nn.Sequential(
            RegNetConvLayer(in_channels, out_channels, kernel_size=1, activation=config.hidden_act),
            RegNetConvLayer(out_channels, out_channels, stride=stride, groups=groups, activation=config.hidden_act),
            RegNetSELayer(out_channels, reduced_channels=int(round(in_channels / 4))),
            RegNetConvLayer(out_channels, out_channels, kernel_size=1, activation=None),
        )
        self.activation = ACT2FN[config.hidden_act]

    def forward(self, hidden_state):
        residual = hidden_state
        hidden_state = self.layer(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state


class RegNetStage(nn.Module):
    def __init__(self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 2, depth: int = 2):
        super().__init__()
        layer = RegNetXLayer if config.layer_type == "x" else RegNetYLayer
        self.layers = nn.Sequential(
            # downsampling is done in the first layer with stride of 2
            layer(config, in_channels, out_channels, stride=stride),
            *[layer(config, out_channels, out_channels) for _ in range(depth - 1)],
        )

    def forward(self, hidden_state):
        hidden_state = self.layers(hidden_state)
        return hidden_state


class RegNetEncoder(nn.Module):
    def __init__(self, config: RegNetConfig):
        super().__init__()
        self.stages = nn.ModuleList([])
        # based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
        self.stages.append(
            RegNetStage(
                config,
                config.embedding_size,
                config.hidden_sizes[0],
                stride=2 if config.downsample_in_first_stage else 1,
                depth=config.depths[0],
            )
        )
        in_out_channels = zip(config.hidden_sizes, config.hidden_sizes[1:])
        for (in_channels, out_channels), depth in zip(in_out_channels, config.depths[1:]):
            self.stages.append(RegNetStage(config, in_channels, out_channels, depth=depth))

    def forward(
        self, hidden_state: Tensor, output_hidden_states: bool = False, return_dict: bool = True
    ) -> BaseModelOutputWithNoAttention:
        hidden_states = () if output_hidden_states else None

        for stage_module in self.stages:
            if output_hidden_states:
                hidden_states = hidden_states + (hidden_state,)

            hidden_state = stage_module(hidden_state)

        if output_hidden_states:
            hidden_states = hidden_states + (hidden_state,)

        if not return_dict:
            return tuple(v for v in [hidden_state, hidden_states] if v is not None)

        return BaseModelOutputWithNoAttention(last_hidden_state=hidden_state, hidden_states=hidden_states)


class RegNetPreTrainedModel(PreTrainedModel):
    config_class = RegNetConfig
    base_model_prefix = "regnet"
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = True

    def _init_weights(self, module):
        if isinstance(module, nn.Conv2d):
            nn.init.kaiming_normal_(module.weight, mode="fan_out", nonlinearity="relu")
        elif isinstance(module, (nn.BatchNorm2d, nn.GroupNorm)):
            nn.init.constant_(module.weight, 1)
            nn.init.constant_(module.bias, 0)

    def _set_gradient_checkpointing(self, module, value=False):
        if isinstance(module, RegNetModel):
            module.gradient_checkpointing = value


REGNET_START_DOCSTRING = r"""
    This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
    as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
    behavior.

    Parameters:
        config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""

REGNET_INPUTS_DOCSTRING = r"""
    Args:
        pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
            Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
            [`ConvNextImageProcessor.__call__`] for details.

        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple.
"""


@add_start_docstrings(
    "The bare RegNet model outputting raw features without any specific head on top.",
    REGNET_START_DOCSTRING,
)
# Copied from transformers.models.resnet.modeling_resnet.ResNetModel with RESNET->REGNET,ResNet->RegNet
class RegNetModel(RegNetPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.config = config
        self.embedder = RegNetEmbeddings(config)
        self.encoder = RegNetEncoder(config)
        self.pooler = nn.AdaptiveAvgPool2d((1, 1))
        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=BaseModelOutputWithPoolingAndNoAttention,
        config_class=_CONFIG_FOR_DOC,
        modality="vision",
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def forward(
        self, pixel_values: Tensor, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None
    ) -> BaseModelOutputWithPoolingAndNoAttention:
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        embedding_output = self.embedder(pixel_values)

        encoder_outputs = self.encoder(
            embedding_output, output_hidden_states=output_hidden_states, return_dict=return_dict
        )

        last_hidden_state = encoder_outputs[0]

        pooled_output = self.pooler(last_hidden_state)

        if not return_dict:
            return (last_hidden_state, pooled_output) + encoder_outputs[1:]

        return BaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state,
            pooler_output=pooled_output,
            hidden_states=encoder_outputs.hidden_states,
        )


@add_start_docstrings(
    """
    RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
    ImageNet.
    """,
    REGNET_START_DOCSTRING,
)
# Copied from transformers.models.resnet.modeling_resnet.ResNetForImageClassification with RESNET->REGNET,ResNet->RegNet,resnet->regnet
class RegNetForImageClassification(RegNetPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.regnet = RegNetModel(config)
        # classification head
        self.classifier = nn.Sequential(
            nn.Flatten(),
            nn.Linear(config.hidden_sizes[-1], config.num_labels) if config.num_labels > 0 else nn.Identity(),
        )
        # initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=ImageClassifierOutputWithNoAttention,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
    def forward(
        self,
        pixel_values: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> ImageClassifierOutputWithNoAttention:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.regnet(pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict)

        pooled_output = outputs.pooler_output if return_dict else outputs[1]

        logits = self.classifier(pooled_output)

        loss = None
        if labels is not None:
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"

            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)

        if not return_dict:
            output = (logits,) + outputs[2:]
            return (loss,) + output if loss is not None else output

        return ImageClassifierOutputWithNoAttention(loss=loss, logits=logits, hidden_states=outputs.hidden_states)
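# Usage sketch (not part of the original module, illustrative only): running a
# randomly initialized RegNet end to end, assuming `transformers` is installed.
#
#   from transformers import RegNetConfig, RegNetModel
#   import torch
#
#   config = RegNetConfig()
#   model = RegNetModel(config)
#   pixel_values = torch.randn(1, config.num_channels, 224, 224)
#   outputs = model(pixel_values)
#   print(outputs.last_hidden_state.shape)   # a (batch, channels, h, w) feature map
#   print(outputs.pooler_output.shape)       # globally pooled to (batch, channels, 1, 1)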
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices


logger = logging.get_logger(__name__)

NAT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "shi-labs/nat-mini-in1k-224": "https://huggingface.co/shi-labs/nat-mini-in1k-224/resolve/main/config.json",
    # See all Nat models at https://huggingface.co/models?filter=nat
}


class NatConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "nat"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        patch_size=4,
        num_channels=3,
        embed_dim=64,
        depths=[3, 4, 6, 5],
        num_heads=[2, 4, 8, 16],
        kernel_size=7,
        mlp_ratio=3.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        layer_scale_init_value=0.0,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.kernel_size = kernel_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Nat work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.layer_scale_init_value = layer_scale_init_value
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
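# Illustrative sketch (not in the original file): how `hidden_size` falls out of the
# defaults above. With embed_dim=64 and depths=[3, 4, 6, 5] (four stages), the channel
# dimension doubles at every stage transition, so hidden_size = 64 * 2 ** 3 = 512:
#
#   config = NatConfig()
#   assert config.hidden_size == int(config.embed_dim * 2 ** (len(config.depths) - 1))  # 512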
import itertools
import json
import linecache
import os
import pickle
import re
import socket
import string
from collections import Counter
from logging import getLogger
from pathlib import Path
from typing import Callable, Dict, Iterable, List

import git
import torch
from torch.utils.data import Dataset

from transformers import BartTokenizer, RagTokenizer, T5Tokenizer


def encode_line(tokenizer, line, max_length, padding_side, pad_to_max_length=True, return_tensors="pt"):
    extra_kw = {"add_prefix_space": True} if isinstance(tokenizer, BartTokenizer) and not line.startswith(" ") else {}
    tokenizer.padding_side = padding_side
    return tokenizer(
        [line],
        max_length=max_length,
        padding="max_length" if pad_to_max_length else None,
        truncation=True,
        return_tensors=return_tensors,
        add_special_tokens=True,
        **extra_kw,
    )


def trim_batch(
    input_ids,
    pad_token_id,
    attention_mask=None,
):
    """Remove columns that are populated exclusively by pad_token_id"""
    keep_column_mask = input_ids.ne(pad_token_id).any(dim=0)
    if attention_mask is None:
        return input_ids[:, keep_column_mask]
    else:
        return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])
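# Minimal sketch of what `trim_batch` does (illustrative, assumes pad_token_id=0):
#
#   import torch
#   batch = torch.tensor([[5, 6, 0, 0], [7, 0, 0, 0]])
#   trim_batch(batch, pad_token_id=0)   # tensor([[5, 6], [7, 0]])
#
# The last two columns are dropped because every row holds the pad token there;
# the second column survives because the first row still has a real token in it.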
class Seq2SeqDataset(Dataset):
    def __init__(
        self,
        tokenizer,
        data_dir,
        max_source_length,
        max_target_length,
        type_path="train",
        n_obs=None,
        src_lang=None,
        tgt_lang=None,
        prefix="",
    ):
        super().__init__()
        self.src_file = Path(data_dir).joinpath(type_path + ".source")
        self.tgt_file = Path(data_dir).joinpath(type_path + ".target")
        self.src_lens = self.get_char_lens(self.src_file)
        self.max_source_length = max_source_length
        self.max_target_length = max_target_length
        assert min(self.src_lens) > 0, f"found empty line in {self.src_file}"
        self.tokenizer = tokenizer
        self.prefix = prefix
        if n_obs is not None:
            self.src_lens = self.src_lens[:n_obs]
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang

    def __len__(self):
        return len(self.src_lens)

    def __getitem__(self, index):
        index = index + 1  # linecache starts at 1
        source_line = self.prefix + linecache.getline(str(self.src_file), index).rstrip("\n")
        tgt_line = linecache.getline(str(self.tgt_file), index).rstrip("\n")
        assert source_line, f"empty source line for index {index}"
        assert tgt_line, f"empty tgt line for index {index}"

        # Need to add eos token manually for T5
        if isinstance(self.tokenizer, T5Tokenizer):
            source_line += self.tokenizer.eos_token
            tgt_line += self.tokenizer.eos_token

        # Pad source and target to the right
        source_tokenizer = (
            self.tokenizer.question_encoder if isinstance(self.tokenizer, RagTokenizer) else self.tokenizer
        )
        target_tokenizer = self.tokenizer.generator if isinstance(self.tokenizer, RagTokenizer) else self.tokenizer

        source_inputs = encode_line(source_tokenizer, source_line, self.max_source_length, "right")
        target_inputs = encode_line(target_tokenizer, tgt_line, self.max_target_length, "right")

        source_ids = source_inputs["input_ids"].squeeze()
        target_ids = target_inputs["input_ids"].squeeze()
        src_mask = source_inputs["attention_mask"].squeeze()
        return {
            "input_ids": source_ids,
            "attention_mask": src_mask,
            "decoder_input_ids": target_ids,
        }

    @staticmethod
    def get_char_lens(data_file):
        return [len(x) for x in Path(data_file).open().readlines()]

    def collate_fn(self, batch):
        input_ids = torch.stack([x["input_ids"] for x in batch])
        masks = torch.stack([x["attention_mask"] for x in batch])
        target_ids = torch.stack([x["decoder_input_ids"] for x in batch])
        tgt_pad_token_id = (
            self.tokenizer.generator.pad_token_id
            if isinstance(self.tokenizer, RagTokenizer)
            else self.tokenizer.pad_token_id
        )
        src_pad_token_id = (
            self.tokenizer.question_encoder.pad_token_id
            if isinstance(self.tokenizer, RagTokenizer)
            else self.tokenizer.pad_token_id
        )
        y = trim_batch(target_ids, tgt_pad_token_id)
        source_ids, source_mask = trim_batch(input_ids, src_pad_token_id, attention_mask=masks)
        batch = {
            "input_ids": source_ids,
            "attention_mask": source_mask,
            "decoder_input_ids": y,
        }
        return batch


logger = getLogger(__name__)


def flatten_list(summary_ids):
    return list(itertools.chain.from_iterable(summary_ids))


def save_git_info(folder_path) -> None:
    """Save git information to output_dir/git_log.json"""
    repo_infos = get_git_info()
    save_json(repo_infos, os.path.join(folder_path, "git_log.json"))


def save_json(content, path, indent=4, **json_dump_kwargs):
    with open(path, "w") as f:
        json.dump(content, f, indent=indent, **json_dump_kwargs)


def load_json(path):
    with open(path) as f:
        return json.load(f)


def get_git_info():
    repo = git.Repo(search_parent_directories=True)
    repo_infos = {
        "repo_id": str(repo),
        "repo_sha": str(repo.head.object.hexsha),
        "repo_branch": str(repo.active_branch),
        "hostname": str(socket.gethostname()),
    }
    return repo_infos


def lmap(f: Callable, x: Iterable) -> List:
    return list(map(f, x))


def pickle_save(obj, path):
    with open(path, "wb") as f:
        return pickle.dump(obj, f)


def normalize_answer(s):
    """Lower text and remove punctuation, articles and extra whitespace."""

    def remove_articles(text):
        return re.sub(r"\b(a|an|the)\b", " ", text)

    def white_space_fix(text):
        return " ".join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s))))


def f1_score(prediction, ground_truth):
    prediction_tokens = normalize_answer(prediction).split()
    ground_truth_tokens = normalize_answer(ground_truth).split()
    common = Counter(prediction_tokens) & Counter(ground_truth_tokens)
    num_same = sum(common.values())
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(prediction_tokens)
    recall = 1.0 * num_same / len(ground_truth_tokens)
    f1 = (2 * precision * recall) / (precision + recall)
    return f1


def exact_match_score(prediction, ground_truth):
    return normalize_answer(prediction) == normalize_answer(ground_truth)


def calculate_exact_match(output_lns: List[str], reference_lns: List[str]) -> Dict:
    assert len(output_lns) == len(reference_lns)
    em = 0
    for hypo, pred in zip(output_lns, reference_lns):
        em += exact_match_score(hypo, pred)
    if len(output_lns) > 0:
        em /= len(output_lns)
    return {"em": em}


def is_rag_model(model_prefix):
    return model_prefix.startswith("rag")


def set_extra_model_params(extra_params, hparams, config):
    equivalent_param = {p: p for p in extra_params}
    # T5 models don't have `dropout` param, they have `dropout_rate` instead
    equivalent_param["dropout"] = "dropout_rate"
    for p in extra_params:
        if getattr(hparams, p, None):
            if not hasattr(config, p) and not hasattr(config, equivalent_param[p]):
                logger.info("config doesn't have a `{}` attribute".format(p))
                delattr(hparams, p)
                continue
            set_p = p if hasattr(config, p) else equivalent_param[p]
            setattr(config, set_p, getattr(hparams, p))
            delattr(hparams, p)
    return hparams, config
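# Worked example for the metric helpers above (illustrative, not in the original file):
#
#   normalize_answer("The Cat sat!")       -> "cat sat"  (case, punctuation, articles removed)
#   f1_score("the cat sat", "a cat sat")   -> 1.0        (both normalize to "cat sat")
#   exact_match_score("Cat.", "cat")       -> True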
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_torch_available,
)


_import_structure = {"configuration_vit_mae": ["VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTMAEConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_vit_mae"] = [
        "VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ViTMAEForPreTraining",
        "ViTMAELayer",
        "ViTMAEModel",
        "ViTMAEPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_vit_mae"] = [
        "TFViTMAEForPreTraining",
        "TFViTMAEModel",
        "TFViTMAEPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_vit_mae import VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMAEConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_vit_mae import (
            VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST,
            ViTMAEForPreTraining,
            ViTMAELayer,
            ViTMAEModel,
            ViTMAEPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_vit_mae import TFViTMAEForPreTraining, TFViTMAEModel, TFViTMAEPreTrainedModel

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
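# Note (illustrative, not in the original file): because the module is replaced by a
# `_LazyModule`, `import transformers.models.vit_mae` stays cheap; the torch/TF
# submodules declared in `_import_structure` are only imported the first time one of
# their attributes (e.g. `ViTMAEModel`) is actually accessed.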
def binary_recursive(decimal: int) -> str:
    """
    Take a positive integer value and return its binary equivalent.
    """
    decimal = int(decimal)
    if decimal in (0, 1):  # Exit cases for the recursion
        return str(decimal)
    div, mod = divmod(decimal, 2)
    return binary_recursive(div) + str(mod)


def main(number: str) -> str:
    """
    Take an integer value, raise ValueError for wrong inputs, call the function
    above and return the output with the prefix "0b" or "-0b" depending on sign.
    """
    number = str(number).strip()
    if not number:
        raise ValueError("No input value was provided")
    negative = "-" if number.startswith("-") else ""
    number = number.lstrip("-")
    if not number.isnumeric():
        raise ValueError("Input value is not an integer")
    return f"{negative}0b{binary_recursive(int(number))}"


if __name__ == "__main__":
    from doctest import testmod

    testmod()
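# Example outputs (illustrative):
#
#   main("10")    -> "0b1010"
#   main("-10")   -> "-0b1010"
#   main("abc")   -> ValueError: Input value is not an integer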
# This script creates a super tiny model that is useful inside tests, when we just want to test that
# the machinery works, without needing to check the quality of the outcomes.
#
# This version creates a tiny vocab first, and then a tiny model - so the outcome is truly tiny -
# all files ~60KB. As compared to taking a full-size model, reducing to the minimum its layers and
# emb dimensions, but keeping the full vocab + merges files, leading to ~3MB in total for all files.
# The latter is done by `fsmt-make-super-tiny-model.py`.
#
# It will be used then as "stas/tiny-wmt19-en-ru"

import json
import tempfile
from pathlib import Path

from transformers import FSMTConfig, FSMTForConditionalGeneration, FSMTTokenizer
from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES


mname_tiny = "tiny-wmt19-en-ru"

# Build

# borrowed from a test
vocab = [
    "l",
    "o",
    "w",
    "e",
    "r",
    "s",
    "t",
    "i",
    "d",
    "n",
    "w</w>",
    "r</w>",
    "t</w>",
    "lo",
    "low",
    "er</w>",
    "low</w>",
    "lowest</w>",
    "newer</w>",
    "wider</w>",
    "<unk>",
]
vocab_tokens = dict(zip(vocab, range(len(vocab))))
merges = ["l o 123", "lo w 1456", "e r</w> 1789", ""]

with tempfile.TemporaryDirectory() as tmpdirname:
    build_dir = Path(tmpdirname)
    src_vocab_file = build_dir / VOCAB_FILES_NAMES["src_vocab_file"]
    tgt_vocab_file = build_dir / VOCAB_FILES_NAMES["tgt_vocab_file"]
    merges_file = build_dir / VOCAB_FILES_NAMES["merges_file"]
    with open(src_vocab_file, "w") as fp:
        fp.write(json.dumps(vocab_tokens))
    with open(tgt_vocab_file, "w") as fp:
        fp.write(json.dumps(vocab_tokens))
    with open(merges_file, "w") as fp:
        fp.write("\n".join(merges))

    tokenizer = FSMTTokenizer(
        langs=["en", "ru"],
        src_vocab_size=len(vocab),
        tgt_vocab_size=len(vocab),
        src_vocab_file=src_vocab_file,
        tgt_vocab_file=tgt_vocab_file,
        merges_file=merges_file,
    )

config = FSMTConfig(
    langs=["ru", "en"],
    src_vocab_size=1000,
    tgt_vocab_size=1000,
    d_model=4,
    encoder_layers=1,
    decoder_layers=1,
    encoder_ffn_dim=4,
    decoder_ffn_dim=4,
    encoder_attention_heads=1,
    decoder_attention_heads=1,
)

tiny_model = FSMTForConditionalGeneration(config)
print(f"num of params {tiny_model.num_parameters()}")

# Test
batch = tokenizer(["Making tiny model"], return_tensors="pt")
outputs = tiny_model(**batch)

print("test output:", len(outputs.logits[0]))

# Save
tiny_model.half()  # makes it smaller
tiny_model.save_pretrained(mname_tiny)
tokenizer.save_pretrained(mname_tiny)

print(f"Generated {mname_tiny}")

# Upload
# transformers-cli upload tiny-wmt19-en-ru
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict

from ..features import ClassLabel, Features, Image
from .base import TaskTemplate


@dataclass(frozen=True)
class ImageClassification(TaskTemplate):
    task: str = field(default="image-classification", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"image": Image()})
    label_schema: ClassVar[Features] = Features({"labels": ClassLabel})
    image_column: str = "image"
    label_column: str = "labels"

    def align_with_features(self, features):
        if self.label_column not in features:
            raise ValueError(f"Column {self.label_column} is not present in features.")
        if not isinstance(features[self.label_column], ClassLabel):
            raise ValueError(f"Column {self.label_column} is not a ClassLabel.")
        task_template = copy.deepcopy(self)
        label_schema = self.label_schema.copy()
        label_schema["labels"] = features[self.label_column]
        # the dataclass is frozen, so write through __dict__
        task_template.__dict__["label_schema"] = label_schema
        return task_template

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {
            self.image_column: "image",
            self.label_column: "labels",
        }
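# Usage sketch (illustrative, not part of the original file): aligning the template
# with a dataset's features so that "labels" picks up the dataset's own ClassLabel.
#
#   from datasets import ClassLabel, Features, Image
#
#   features = Features({"image": Image(), "labels": ClassLabel(names=["cat", "dog"])})
#   task = ImageClassification().align_with_features(features)
#   task.label_schema["labels"].names   # ["cat", "dog"]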
"""simple docstring"""
from argparse import ArgumentParser
from .env import EnvironmentCommand
def _SCREAMING_SNAKE_CASE ( ) -> List[Any]:
A__ = ArgumentParser("Diffusers CLI tool" , usage="diffusers-cli <command> [<args>]" )
A__ = parser.add_subparsers(help="diffusers-cli command helpers" )
# Register commands
EnvironmentCommand.register_subcommand(lowercase_ )
# Let's go
A__ = parser.parse_args()
if not hasattr(lowercase_ , "func" ):
parser.print_help()
exit(1 )
# Run
A__ = args.func(lowercase_ )
service.run()
if __name__ == "__main__":
main()
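# Typical invocation (illustrative), once installed as the `diffusers-cli` entry point:
#
#   $ diffusers-cli env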
import argparse

import torch

from transformers import BertForMaskedLM


if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description=(
            "Extraction some layers of the full BertForMaskedLM or RObertaForMaskedLM for Transfer Learned"
            " Distillation"
        )
    )
    parser.add_argument("--model_type", default="bert", choices=["bert"])
    parser.add_argument("--model_name", default="bert-base-uncased", type=str)
    parser.add_argument("--dump_checkpoint", default="serialization_dir/tf_bert-base-uncased_0247911.pth", type=str)
    parser.add_argument("--vocab_transform", action="store_true")
    args = parser.parse_args()

    if args.model_type == "bert":
        model = BertForMaskedLM.from_pretrained(args.model_name)
        prefix = "bert"
    else:
        raise ValueError('args.model_type should be "bert".')

    state_dict = model.state_dict()
    compressed_sd = {}

    # the target keys below follow DistilBERT's parameter naming
    for w in ["word_embeddings", "position_embeddings"]:
        compressed_sd[f"distilbert.embeddings.{w}.weight"] = state_dict[f"{prefix}.embeddings.{w}.weight"]
    for w in ["weight", "bias"]:
        compressed_sd[f"distilbert.embeddings.LayerNorm.{w}"] = state_dict[f"{prefix}.embeddings.LayerNorm.{w}"]

    std_idx = 0
    for teacher_idx in [0, 2, 4, 7, 9, 11]:
        for w in ["weight", "bias"]:
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.attention.q_lin.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.attention.self.query.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.attention.k_lin.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.attention.self.key.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.attention.v_lin.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.attention.self.value.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.attention.out_lin.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.attention.output.dense.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.sa_layer_norm.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.attention.output.LayerNorm.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.ffn.lin1.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.intermediate.dense.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.ffn.lin2.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.output.dense.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.output_layer_norm.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.output.LayerNorm.{w}"
            ]
        std_idx += 1

    compressed_sd["vocab_projector.weight"] = state_dict["cls.predictions.decoder.weight"]
    compressed_sd["vocab_projector.bias"] = state_dict["cls.predictions.bias"]
    if args.vocab_transform:
        for w in ["weight", "bias"]:
            compressed_sd[f"vocab_transform.{w}"] = state_dict[f"cls.predictions.transform.dense.{w}"]
            compressed_sd[f"vocab_layer_norm.{w}"] = state_dict[f"cls.predictions.transform.LayerNorm.{w}"]

    print(f"N layers selected for distillation: {std_idx}")
    print(f"Number of params transferred for distillation: {len(compressed_sd.keys())}")

    print(f"Save transferred checkpoint to {args.dump_checkpoint}.")
    torch.save(compressed_sd, args.dump_checkpoint)
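# Follow-up sketch (not part of the original script, illustrative only): since the keys
# written above follow DistilBERT's parameter naming, the dump can be used to seed a
# student model before distillation training, e.g.:
#
#   from transformers import DistilBertForMaskedLM
#
#   student = DistilBertForMaskedLM.from_pretrained("distilbert-base-uncased")
#   student.load_state_dict(torch.load(args.dump_checkpoint), strict=False)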
import dataclasses
import json
import sys
import types
from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser, ArgumentTypeError
from copy import copy
from enum import Enum
from inspect import isclass
from pathlib import Path
from typing import Any, Callable, Dict, Iterable, List, Literal, NewType, Optional, Tuple, Union, get_type_hints

import yaml


DataClass = NewType("DataClass", Any)
DataClassType = NewType("DataClassType", Any)


def string_to_bool(v):
    if isinstance(v, bool):
        return v
    if v.lower() in ("yes", "true", "t", "y", "1"):
        return True
    elif v.lower() in ("no", "false", "f", "n", "0"):
        return False
    else:
        raise ArgumentTypeError(
            f"Truthy value expected: got {v} but expected one of yes/no, true/false, t/f, y/n, 1/0 (case insensitive)."
        )


def make_choice_type_function(choices: list) -> Callable[[str], Any]:
    str_to_choice = {str(choice): choice for choice in choices}
    return lambda arg: str_to_choice.get(arg, arg)


def HfArg(
    *,
    aliases=None,
    help=None,
    default=dataclasses.MISSING,
    default_factory=dataclasses.MISSING,
    metadata=None,
    **kwargs,
) -> dataclasses.Field:
    if metadata is None:
        # Important, don't use as default param in function signature because dict is mutable and shared across function calls
        metadata = {}
    if aliases is not None:
        metadata["aliases"] = aliases
    if help is not None:
        metadata["help"] = help
    return dataclasses.field(metadata=metadata, default=default, default_factory=default_factory, **kwargs)


class HfArgumentParser(ArgumentParser):
    dataclass_types: Iterable[DataClassType]

    def __init__(self, dataclass_types: Union[DataClassType, Iterable[DataClassType]], **kwargs):
        # To make the default appear when using --help
        if "formatter_class" not in kwargs:
            kwargs["formatter_class"] = ArgumentDefaultsHelpFormatter
        super().__init__(**kwargs)
        if dataclasses.is_dataclass(dataclass_types):
            dataclass_types = [dataclass_types]
        self.dataclass_types = list(dataclass_types)
        for dtype in self.dataclass_types:
            self._add_dataclass_arguments(dtype)

    @staticmethod
    def _parse_dataclass_field(parser: ArgumentParser, field: dataclasses.Field):
        field_name = f"--{field.name}"
        kwargs = field.metadata.copy()
        # field.metadata is not used at all by Data Classes,
        # it is provided as a third-party extension mechanism.
        if isinstance(field.type, str):
            raise RuntimeError(
                "Unresolved type detected, which should have been done with the help of "
                "`typing.get_type_hints` method by default"
            )

        aliases = kwargs.pop("aliases", [])
        if isinstance(aliases, str):
            aliases = [aliases]

        origin_type = getattr(field.type, "__origin__", field.type)
        if origin_type is Union or (hasattr(types, "UnionType") and isinstance(field.type, types.UnionType)):
            if str not in field.type.__args__ and (
                len(field.type.__args__) != 2 or type(None) not in field.type.__args__
            ):
                raise ValueError(
                    "Only `Union[X, NoneType]` (i.e., `Optional[X]`) is allowed for `Union` because"
                    " the argument parser only supports one type per argument."
                    f" Problem encountered in field '{field.name}'."
                )
            if type(None) not in field.type.__args__:
                # filter `str` in Union
                field.type = field.type.__args__[0] if field.type.__args__[1] == str else field.type.__args__[1]
                origin_type = getattr(field.type, "__origin__", field.type)
            elif bool not in field.type.__args__:
                # filter `NoneType` in Union (except for `Union[bool, NoneType]`)
                field.type = (
                    field.type.__args__[0] if isinstance(None, field.type.__args__[1]) else field.type.__args__[1]
                )
                origin_type = getattr(field.type, "__origin__", field.type)

        # A variable to store kwargs for a boolean field, if needed
        # so that we can init a `no_*` complement argument (see below)
        bool_kwargs = {}
        if origin_type is Literal or (isinstance(field.type, type) and issubclass(field.type, Enum)):
            if origin_type is Literal:
                kwargs["choices"] = field.type.__args__
            else:
                kwargs["choices"] = [x.value for x in field.type]

            kwargs["type"] = make_choice_type_function(kwargs["choices"])

            if field.default is not dataclasses.MISSING:
                kwargs["default"] = field.default
            else:
                kwargs["required"] = True
        elif field.type is bool or field.type == Optional[bool]:
            # Copy the currect kwargs to use to instantiate a `no_*` complement argument below.
            # We do not initialize it here because the `no_*` alternative must be instantiated after the real argument
            bool_kwargs = copy(kwargs)

            # Hack because type=bool in argparse does not behave as we want.
            kwargs["type"] = string_to_bool
            if field.type is bool or (field.default is not None and field.default is not dataclasses.MISSING):
                # Default value is False if we have no default when of type bool.
                default = False if field.default is dataclasses.MISSING else field.default
                # This is the value that will get picked if we don't include --field_name in any way
                kwargs["default"] = default
                # This tells argparse we accept 0 or 1 value after --field_name
                kwargs["nargs"] = "?"
                # This is the value that will get picked if we do --field_name (without value)
                kwargs["const"] = True
        elif isclass(origin_type) and issubclass(origin_type, list):
            kwargs["type"] = field.type.__args__[0]
            kwargs["nargs"] = "+"
            if field.default_factory is not dataclasses.MISSING:
                kwargs["default"] = field.default_factory()
            elif field.default is dataclasses.MISSING:
                kwargs["required"] = True
        else:
            kwargs["type"] = field.type
            if field.default is not dataclasses.MISSING:
                kwargs["default"] = field.default
            elif field.default_factory is not dataclasses.MISSING:
                kwargs["default"] = field.default_factory()
            else:
                kwargs["required"] = True
        parser.add_argument(field_name, *aliases, **kwargs)

        # Add a complement `no_*` argument for a boolean field AFTER the initial field has already been added.
        # Order is important for arguments with the same destination!
        # We use a copy of earlier kwargs because the original kwargs have changed a lot before reaching down
        # here and we do not need those changes/additional keys.
        if field.default is True and (field.type is bool or field.type == Optional[bool]):
            bool_kwargs["default"] = False
            parser.add_argument(f"--no_{field.name}", action="store_false", dest=field.name, **bool_kwargs)

    def _add_dataclass_arguments(self, dtype: DataClassType):
        if hasattr(dtype, "_argument_group_name"):
            parser = self.add_argument_group(dtype._argument_group_name)
        else:
            parser = self

        try:
            type_hints: Dict[str, type] = get_type_hints(dtype)
        except NameError:
            raise RuntimeError(
                f"Type resolution failed for {dtype}. Try declaring the class in global scope or "
                "removing line of `from __future__ import annotations` which opts in Postponed "
                "Evaluation of Annotations (PEP 563)"
            )
        except TypeError as ex:
            # Remove this block when we drop Python 3.9 support
            if sys.version_info[:2] < (3, 10) and "unsupported operand type(s) for |" in str(ex):
                python_version = ".".join(map(str, sys.version_info[:3]))
                raise RuntimeError(
                    f"Type resolution failed for {dtype} on Python {python_version}. Try removing "
                    "line of `from __future__ import annotations` which opts in union types as "
                    "`X | Y` (PEP 604) via Postponed Evaluation of Annotations (PEP 563). To "
                    "support Python versions that lower than 3.10, you need to use "
                    "`typing.Union[X, Y]` instead of `X | Y` and `typing.Optional[X]` instead of "
                    "`X | None`."
                ) from ex
            raise

        for field in dataclasses.fields(dtype):
            if not field.init:
                continue
            field.type = type_hints[field.name]
            self._parse_dataclass_field(parser, field)

    def parse_args_into_dataclasses(
        self,
        args=None,
        return_remaining_strings=False,
        look_for_args_file=True,
        args_filename=None,
        args_file_flag=None,
    ) -> Tuple[DataClass, ...]:
        if args_file_flag or args_filename or (look_for_args_file and len(sys.argv)):
            args_files = []

            if args_filename:
                args_files.append(Path(args_filename))
            elif look_for_args_file and len(sys.argv):
                args_files.append(Path(sys.argv[0]).with_suffix(".args"))

            # args files specified via command line flag should overwrite default args files so we add them last
            if args_file_flag:
                # Create special parser just to extract the args_file_flag values
                args_file_parser = ArgumentParser()
                args_file_parser.add_argument(args_file_flag, type=str, action="append")

                # Use only remaining args for further parsing (remove the args_file_flag)
                cfg, args = args_file_parser.parse_known_args(args=args)
                cmd_args_file_paths = vars(cfg).get(args_file_flag.lstrip("-"), None)

                if cmd_args_file_paths:
                    args_files.extend([Path(p) for p in cmd_args_file_paths])

            file_args = []
            for args_file in args_files:
                if args_file.exists():
                    file_args += args_file.read_text().split()

            # in case of duplicate arguments the last one has precedence
            # args specified via the command line should overwrite args from files, so we add them last
            args = file_args + args if args is not None else file_args + sys.argv[1:]
        namespace, remaining_args = self.parse_known_args(args=args)
        outputs = []
        for dtype in self.dataclass_types:
            keys = {f.name for f in dataclasses.fields(dtype) if f.init}
            inputs = {k: v for k, v in vars(namespace).items() if k in keys}
            for k in keys:
                delattr(namespace, k)
            obj = dtype(**inputs)
            outputs.append(obj)
        if len(namespace.__dict__) > 0:
            # additional namespace.
            outputs.append(namespace)
        if return_remaining_strings:
            return (*outputs, remaining_args)
        else:
            if remaining_args:
                raise ValueError(f"Some specified arguments are not used by the HfArgumentParser: {remaining_args}")

            return (*outputs,)

    def parse_dict(self, args: Dict[str, Any], allow_extra_keys: bool = False) -> Tuple[DataClass, ...]:
        unused_keys = set(args.keys())
        outputs = []
        for dtype in self.dataclass_types:
            keys = {f.name for f in dataclasses.fields(dtype) if f.init}
            inputs = {k: v for k, v in args.items() if k in keys}
            unused_keys.difference_update(inputs.keys())
            obj = dtype(**inputs)
            outputs.append(obj)
        if not allow_extra_keys and unused_keys:
            raise ValueError(f"Some keys are not used by the HfArgumentParser: {sorted(unused_keys)}")
        return tuple(outputs)

    def parse_json_file(self, json_file: str, allow_extra_keys: bool = False) -> Tuple[DataClass, ...]:
        with open(Path(json_file), encoding="utf-8") as open_json_file:
            data = json.loads(open_json_file.read())
        outputs = self.parse_dict(data, allow_extra_keys=allow_extra_keys)
        return tuple(outputs)

    def parse_yaml_file(self, yaml_file: str, allow_extra_keys: bool = False) -> Tuple[DataClass, ...]:
        outputs = self.parse_dict(yaml.safe_load(Path(yaml_file).read_text()), allow_extra_keys=allow_extra_keys)
        return tuple(outputs)
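# Usage sketch (illustrative, not in the original module):
#
#   @dataclasses.dataclass
#   class TrainingArgs:
#       learning_rate: float = 3e-4
#       do_eval: bool = False
#
#   parser = HfArgumentParser(TrainingArgs)
#   (training_args,) = parser.parse_args_into_dataclasses(["--learning_rate", "1e-4", "--do_eval"])
#   # training_args.learning_rate == 1e-4; training_args.do_eval is True, because a bool
#   # field gets nargs="?" with const=True, so the bare flag counts as "true".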
from __future__ import annotations


def generate_sum_of_subsets_soln(nums: list[int], max_sum: int) -> list[list[int]]:
    result: list[list[int]] = []
    path: list[int] = []
    num_index = 0
    remaining_nums_sum = sum(nums)
    create_state_space_tree(nums, max_sum, num_index, path, result, remaining_nums_sum)
    return result


def create_state_space_tree(
    nums: list[int],
    max_sum: int,
    num_index: int,
    path: list[int],
    result: list[list[int]],
    remaining_nums_sum: int,
) -> None:
    """
    Creates a state space tree and iterates through each branch using DFS,
    backtracking when a node is no longer branchable (the partial sum already
    exceeds max_sum, or the remaining numbers cannot reach it).
    """
    if sum(path) > max_sum or (remaining_nums_sum + sum(path)) < max_sum:
        return
    if sum(path) == max_sum:
        result.append(path)
        return
    for index in range(num_index, len(nums)):
        create_state_space_tree(
            nums,
            max_sum,
            index + 1,
            [*path, nums[index]],
            result,
            remaining_nums_sum - nums[index],
        )


nums = [3, 34, 4, 12, 5, 2]
max_sum = 9
result = generate_sum_of_subsets_soln(nums, max_sum)
print(*result)
from typing import Any


def viterbi(
    observations_space: list,
    states_space: list,
    initial_probabilities: dict,
    transition_probabilities: dict,
    emission_probabilities: dict,
) -> list:
    _validation(
        observations_space,
        states_space,
        initial_probabilities,
        transition_probabilities,
        emission_probabilities,
    )
    # Creates data structures and fill initial step
    probabilities: dict = {}
    pointers: dict = {}
    for state in states_space:
        observation = observations_space[0]
        probabilities[(state, observation)] = (
            initial_probabilities[state] * emission_probabilities[state][observation]
        )
        pointers[(state, observation)] = None

    # Fills the data structure with the probabilities of
    # different transitions and pointers to previous states
    for o in range(1, len(observations_space)):
        observation = observations_space[o]
        prior_observation = observations_space[o - 1]
        for state in states_space:
            # Calculates the argmax for probability function
            arg_max = ""
            max_probability = -1
            for k_state in states_space:
                probability = (
                    probabilities[(k_state, prior_observation)]
                    * transition_probabilities[k_state][state]
                    * emission_probabilities[state][observation]
                )
                if probability > max_probability:
                    max_probability = probability
                    arg_max = k_state

            # Update probabilities and pointers dicts
            probabilities[(state, observation)] = (
                probabilities[(arg_max, prior_observation)]
                * transition_probabilities[arg_max][state]
                * emission_probabilities[state][observation]
            )
            pointers[(state, observation)] = arg_max

    # The final observation
    final_observation = observations_space[len(observations_space) - 1]

    # argmax for given final observation
    arg_max = ""
    max_probability = -1
    for k_state in states_space:
        probability = probabilities[(k_state, final_observation)]
        if probability > max_probability:
            max_probability = probability
            arg_max = k_state
    last_state = arg_max

    # Process pointers backwards
    previous = last_state
    result = []
    for o in range(len(observations_space) - 1, -1, -1):
        result.append(previous)
        previous = pointers[previous, observations_space[o]]
    result.reverse()
    return result


def _validation(
    observations_space: Any,
    states_space: Any,
    initial_probabilities: Any,
    transition_probabilities: Any,
    emission_probabilities: Any,
) -> None:
    _validate_not_empty(
        observations_space,
        states_space,
        initial_probabilities,
        transition_probabilities,
        emission_probabilities,
    )
    _validate_lists(observations_space, states_space)
    _validate_dicts(initial_probabilities, transition_probabilities, emission_probabilities)


def _validate_not_empty(
    observations_space: Any,
    states_space: Any,
    initial_probabilities: Any,
    transition_probabilities: Any,
    emission_probabilities: Any,
) -> None:
    if not all(
        [
            observations_space,
            states_space,
            initial_probabilities,
            transition_probabilities,
            emission_probabilities,
        ]
    ):
        raise ValueError("There's an empty parameter")


def _validate_lists(observations_space: Any, states_space: Any) -> None:
    _validate_list(observations_space, "observations_space")
    _validate_list(states_space, "states_space")


def _validate_list(_object: Any, var_name: str) -> None:
    if not isinstance(_object, list):
        msg = f"{var_name} must be a list"
        raise ValueError(msg)
    else:
        for x in _object:
            if not isinstance(x, str):
                msg = f"{var_name} must be a list of strings"
                raise ValueError(msg)


def _validate_dicts(
    initial_probabilities: Any,
    transition_probabilities: Any,
    emission_probabilities: Any,
) -> None:
    _validate_dict(initial_probabilities, "initial_probabilities", float)
    _validate_nested_dict(transition_probabilities, "transition_probabilities")
    _validate_nested_dict(emission_probabilities, "emission_probabilities")


def _validate_nested_dict(_object: Any, var_name: str) -> None:
    _validate_dict(_object, var_name, dict)
    for x in _object.values():
        _validate_dict(x, var_name, float, True)


def _validate_dict(_object: Any, var_name: str, value_type: type, nested: bool = False) -> None:
    if not isinstance(_object, dict):
        msg = f"{var_name} must be a dict"
        raise ValueError(msg)
    if not all(isinstance(x, str) for x in _object):
        msg = f"{var_name} all keys must be strings"
        raise ValueError(msg)
    if not all(isinstance(x, value_type) for x in _object.values()):
        nested_text = "nested dictionary " if nested else ""
        msg = f"{var_name} {nested_text}all values must be {value_type.__name__}"
        raise ValueError(msg)


if __name__ == "__main__":
    from doctest import testmod

    testmod()
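# Tiny worked example (illustrative): the classic healthy/sick HMM.
#
#   observations = ["normal", "cold", "dizzy"]
#   states = ["healthy", "sick"]
#   initial = {"healthy": 0.6, "sick": 0.4}
#   transition = {"healthy": {"healthy": 0.7, "sick": 0.3}, "sick": {"healthy": 0.4, "sick": 0.6}}
#   emission = {
#       "healthy": {"normal": 0.5, "cold": 0.4, "dizzy": 0.1},
#       "sick": {"normal": 0.1, "cold": 0.3, "dizzy": 0.6},
#   }
#   viterbi(observations, states, initial, transition, emission)
#   # -> ["healthy", "healthy", "sick"]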
"""simple docstring"""
def lowercase ( _snake_case : int , _snake_case : int ) ->int:
"""simple docstring"""
return int((input_a, input_a).count(0 ) != 0 )
def lowercase ( ) ->None:
"""simple docstring"""
assert nand_gate(0 , 0 ) == 1
assert nand_gate(0 , 1 ) == 1
assert nand_gate(1 , 0 ) == 1
assert nand_gate(1 , 1 ) == 0
if __name__ == "__main__":
print(nand_gate(0, 0))
print(nand_gate(0, 1))
print(nand_gate(1, 0))
print(nand_gate(1, 1))
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import XGLMConfig, XGLMTokenizer, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.xglm.modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
)
@require_tf
class __UpperCamelCase :
lowercase : Union[str, Any] =XGLMConfig
lowercase : Optional[Any] ={}
lowercase : Optional[int] ='gelu'
def __init__( self, lowerCAmelCase, lowerCAmelCase=14, lowerCAmelCase=7, lowerCAmelCase=True, lowerCAmelCase=True, lowerCAmelCase=True, lowerCAmelCase=99, lowerCAmelCase=32, lowerCAmelCase=2, lowerCAmelCase=4, lowerCAmelCase=37, lowerCAmelCase="gelu", lowerCAmelCase=0.1, lowerCAmelCase=0.1, lowerCAmelCase=512, lowerCAmelCase=0.0_2, ):
"""simple docstring"""
lowerCamelCase_ =parent
lowerCamelCase_ =batch_size
lowerCamelCase_ =seq_length
lowerCamelCase_ =is_training
lowerCamelCase_ =use_input_mask
lowerCamelCase_ =use_labels
lowerCamelCase_ =vocab_size
lowerCamelCase_ =d_model
lowerCamelCase_ =num_hidden_layers
lowerCamelCase_ =num_attention_heads
lowerCamelCase_ =ffn_dim
lowerCamelCase_ =activation_function
lowerCamelCase_ =activation_dropout
lowerCamelCase_ =attention_dropout
lowerCamelCase_ =max_position_embeddings
lowerCamelCase_ =initializer_range
lowerCamelCase_ =None
lowerCamelCase_ =0
lowerCamelCase_ =2
lowerCamelCase_ =1
def lowercase__ ( self ):
"""simple docstring"""
return XGLMConfig.from_pretrained('''facebook/xglm-564M''' )
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =tf.clip_by_value(
ids_tensor([self.batch_size, self.seq_length], self.vocab_size ), clip_value_min=0, clip_value_max=3 )
lowerCamelCase_ =None
if self.use_input_mask:
lowerCamelCase_ =random_attention_mask([self.batch_size, self.seq_length] )
lowerCamelCase_ =self.get_config()
lowerCamelCase_ =floats_tensor([self.num_hidden_layers, self.num_attention_heads], 2 )
return (
config,
input_ids,
input_mask,
head_mask,
)
def lowercase__ ( self ):
"""simple docstring"""
return XGLMConfig(
vocab_size=self.vocab_size, d_model=self.hidden_size, num_layers=self.num_hidden_layers, attention_heads=self.num_attention_heads, ffn_dim=self.ffn_dim, activation_function=self.activation_function, activation_dropout=self.activation_dropout, attention_dropout=self.attention_dropout, max_position_embeddings=self.max_position_embeddings, initializer_range=self.initializer_range, use_cache=lowerCAmelCase, bos_token_id=self.bos_token_id, eos_token_id=self.eos_token_id, pad_token_id=self.pad_token_id, return_dict=lowerCAmelCase, )
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =self.prepare_config_and_inputs()
(
(
lowerCamelCase_
), (
lowerCamelCase_
), (
lowerCamelCase_
), (
lowerCamelCase_
),
) =config_and_inputs
lowerCamelCase_ ={
'''input_ids''': input_ids,
'''head_mask''': head_mask,
}
return config, inputs_dict
@require_tf
class __UpperCamelCase ( lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ):
lowercase : int =(TFXGLMModel, TFXGLMForCausalLM) if is_tf_available() else ()
lowercase : Optional[Any] =(TFXGLMForCausalLM,) if is_tf_available() else ()
lowercase : Tuple =(
{'feature-extraction': TFXGLMModel, 'text-generation': TFXGLMForCausalLM} if is_tf_available() else {}
)
lowercase : Optional[Any] =False
lowercase : Optional[Any] =False
lowercase : Optional[int] =False
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =TFXGLMModelTester(self )
lowerCamelCase_ =ConfigTester(self, config_class=lowerCAmelCase, n_embd=37 )
def lowercase__ ( self ):
"""simple docstring"""
self.config_tester.run_common_tests()
@slow
def lowercase__ ( self ):
"""simple docstring"""
for model_name in TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase_ =TFXGLMModel.from_pretrained(lowerCAmelCase )
self.assertIsNotNone(lowerCAmelCase )
@unittest.skip(reason='''Currently, model embeddings are going to undergo a major refactor.''' )
def lowercase__ ( self ):
"""simple docstring"""
super().test_resize_token_embeddings()
@require_tf
class __UpperCamelCase ( unittest.TestCase ):
@slow
def lowercase__ ( self, lowerCAmelCase=True ):
"""simple docstring"""
lowerCamelCase_ =TFXGLMForCausalLM.from_pretrained('''facebook/xglm-564M''' )
lowerCamelCase_ =tf.convert_to_tensor([[2, 268, 9_865]], dtype=tf.intaa ) # The dog
# </s> The dog is a very friendly dog. He is very affectionate and loves to play with other
# fmt: off
lowerCamelCase_ =[2, 268, 9_865, 67, 11, 1_988, 57_252, 9_865, 5, 984, 67, 1_988, 213_838, 1_658, 53, 70_446, 33, 6_657, 278, 1_581]
# fmt: on
lowerCamelCase_ =model.generate(lowerCAmelCase, do_sample=lowerCAmelCase, num_beams=1 )
if verify_outputs:
self.assertListEqual(output_ids[0].numpy().tolist(), lowerCAmelCase )
@slow
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =XGLMTokenizer.from_pretrained('''facebook/xglm-564M''' )
lowerCamelCase_ =TFXGLMForCausalLM.from_pretrained('''facebook/xglm-564M''' )
tf.random.set_seed(0 )
lowerCamelCase_ =tokenizer('''Today is a nice day and''', return_tensors='''tf''' )
lowerCamelCase_ =tokenized.input_ids
# forces the generation to happen on CPU, to avoid GPU-related quirks (and assure same output regardless of the available devices)
with tf.device(''':/CPU:0''' ):
lowerCamelCase_ =model.generate(lowerCAmelCase, do_sample=lowerCAmelCase, seed=[7, 0] )
lowerCamelCase_ =tokenizer.decode(output_ids[0], skip_special_tokens=lowerCAmelCase )
lowerCamelCase_ =(
'''Today is a nice day and warm evening here over Southern Alberta!! Today when they closed schools due'''
)
self.assertEqual(lowerCAmelCase, lowerCAmelCase )
@slow
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =TFXGLMForCausalLM.from_pretrained('''facebook/xglm-564M''' )
lowerCamelCase_ =XGLMTokenizer.from_pretrained('''facebook/xglm-564M''' )
lowerCamelCase_ ='''left'''
# use different length sentences to test batching
lowerCamelCase_ =[
'''This is an extremelly long sentence that only exists to test the ability of the model to cope with '''
'''left-padding, such as in batched generation. The output for the sequence below should be the same '''
'''regardless of whether left padding is applied or not. When''',
'''Hello, my dog is a little''',
]
        inputs = tokenizer(sentences, return_tensors='tf', padding=True )
        input_ids = inputs['input_ids']
        outputs = model.generate(input_ids=input_ids, attention_mask=inputs['attention_mask'], max_new_tokens=12 )
        inputs_non_padded = tokenizer(sentences[0], return_tensors='tf' ).input_ids
        output_non_padded = model.generate(input_ids=inputs_non_padded, max_new_tokens=12 )
        inputs_padded = tokenizer(sentences[1], return_tensors='tf' ).input_ids
        output_padded = model.generate(input_ids=inputs_padded, max_new_tokens=12 )
        batch_out_sentence = tokenizer.batch_decode(outputs, skip_special_tokens=True )
        non_padded_sentence = tokenizer.decode(output_non_padded[0], skip_special_tokens=True )
        padded_sentence = tokenizer.decode(output_padded[0], skip_special_tokens=True )
        expected_output_sentence = [
'''This is an extremelly long sentence that only exists to test the ability of the model to cope with '''
'''left-padding, such as in batched generation. The output for the sequence below should be the same '''
'''regardless of whether left padding is applied or not. When left padding is applied, the sequence will be '''
'''a single''',
'''Hello, my dog is a little bit of a shy one, but he is very friendly''',
]
        self.assertListEqual(expected_output_sentence, batch_out_sentence )
        self.assertListEqual(expected_output_sentence, [non_padded_sentence, padded_sentence] )
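# ---------------------------------------------------------------------------
# Illustrative sketch (editorial addition, not part of the original test
# suite): the invariance the batching test above relies on. For a decoder-only
# LM, left padding plus an attention mask should reproduce the unpadded
# generations; `model` and `tokenizer` are assumed to be any compatible TF
# causal LM and its tokenizer.
def check_left_padding_invariance(model, tokenizer, sentences, max_new_tokens=12):
    tokenizer.padding_side = "left"
    batch = tokenizer(sentences, return_tensors="tf", padding=True)
    batched = model.generate(
        input_ids=batch["input_ids"], attention_mask=batch["attention_mask"], max_new_tokens=max_new_tokens
    )
    singles = [
        model.generate(input_ids=tokenizer(s, return_tensors="tf").input_ids, max_new_tokens=max_new_tokens)
        for s in sentences
    ]
    batched_text = tokenizer.batch_decode(batched, skip_special_tokens=True)
    single_text = [tokenizer.decode(o[0], skip_special_tokens=True) for o in singles]
    return batched_text == single_text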
| 75 | 0 |
'''simple docstring'''
import os
import pytest
from transformers.dynamic_module_utils import get_imports
TOP_LEVEL_IMPORT = "\nimport os\n"
IMPORT_IN_FUNCTION = "\ndef foo():\n import os\n return False\n"
DEEPLY_NESTED_IMPORT = "\ndef foo():\n def bar():\n if True:\n import os\n return False\n return bar()\n"
TOP_LEVEL_TRY_IMPORT = "\nimport os\n\ntry:\n import bar\nexcept ImportError:\n raise ValueError()\n"
TRY_IMPORT_IN_FUNCTION = "\nimport os\n\ndef foo():\n try:\n import bar\n except ImportError:\n raise ValueError()\n"
MULTIPLE_EXCEPTS_IMPORT = "\nimport os\n\ntry:\n import bar\nexcept (ImportError, AttributeError):\n raise ValueError()\n"
EXCEPT_AS_IMPORT = "\nimport os\n\ntry:\n import bar\nexcept ImportError as e:\n raise ValueError()\n"
GENERIC_EXCEPT_IMPORT = "\nimport os\n\ntry:\n import bar\nexcept:\n raise ValueError()\n"
MULTILINE_TRY_IMPORT = "\nimport os\n\ntry:\n import bar\n import baz\nexcept ImportError:\n raise ValueError()\n"
MULTILINE_BOTH_IMPORT = "\nimport os\n\ntry:\n import bar\n import baz\nexcept ImportError:\n x = 1\n raise ValueError()\n"
CASES = [
TOP_LEVEL_IMPORT,
IMPORT_IN_FUNCTION,
DEEPLY_NESTED_IMPORT,
TOP_LEVEL_TRY_IMPORT,
GENERIC_EXCEPT_IMPORT,
MULTILINE_TRY_IMPORT,
MULTILINE_BOTH_IMPORT,
MULTIPLE_EXCEPTS_IMPORT,
EXCEPT_AS_IMPORT,
TRY_IMPORT_IN_FUNCTION,
]
@pytest.mark.parametrize("case" , CASES )
def test_import_parsing( tmp_path , case ) -> None:
    tmp_file_path = os.path.join(tmp_path , "test_file.py" )
    with open(tmp_file_path , "w" ) as _tmp_file:
        _tmp_file.write(case )
    parsed_imports = get_imports(tmp_file_path )
assert parsed_imports == ["os"]
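# ---------------------------------------------------------------------------
# Illustrative sketch (editorial addition; an assumption, not transformers'
# actual implementation): one way an import collector can skip imports guarded
# by try/except, which is the behaviour the parametrized cases above assert
# (only the unguarded "os" import is reported).
import ast
def collect_required_imports(source: str) -> list:
    tree = ast.parse(source)
    guarded = set()
    for node in ast.walk(tree):
        if isinstance(node, ast.Try):
            for child in ast.walk(node):
                if isinstance(child, (ast.Import, ast.ImportFrom)):
                    guarded.add(child)
    imports = []
    for node in ast.walk(tree):
        if isinstance(node, ast.Import) and node not in guarded:
            imports.extend(alias.name.split(".")[0] for alias in node.names)
        elif isinstance(node, ast.ImportFrom) and node not in guarded and node.module:
            imports.append(node.module.split(".")[0])
    return sorted(set(imports))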
| 356 |
'''simple docstring'''
import os
import numpy
import onnx
def _is_equal_tensor_proto( a , b ) -> bool:
    name_a = a.name
    name_b = b.name
    # compare the protos with the (possibly different) names blanked out
    a.name = ""
    b.name = ""
    res = a == b
    a.name = name_a
    b.name = name_b
    return res
def _node_replace_input_with( node_proto , name , new_name ) -> None:
    for i, input_name in enumerate(node_proto.input ):
        if input_name == name:
            node_proto.input.insert(i , new_name )
            node_proto.input.pop(i + 1 )
    if node_proto.op_type == "If":
        _graph_replace_input_with(node_proto.attribute[0].g , name , new_name )
        _graph_replace_input_with(node_proto.attribute[1].g , name , new_name )
    if node_proto.op_type == "Loop":
        _graph_replace_input_with(node_proto.attribute[0].g , name , new_name )
def _graph_replace_input_with( graph_proto , name , new_name ) -> None:
    for n in graph_proto.node:
        _node_replace_input_with(n , name , new_name )
def _remove_dup_initializers_from_model( model , model_without_ext , ind_to_replace ) -> None:
    inits_with_data = list(model.graph.initializer )
    inits = list(model_without_ext.graph.initializer )
    for i, ref_i in ind_to_replace:
        assert inits_with_data[i].name == inits[i].name
        assert inits_with_data[ref_i].name == inits[ref_i].name
        assert i > ref_i
        name_i = inits[i].name
        name_ref = inits[ref_i].name
        model_without_ext.graph.initializer.remove(inits[i] )
        # for n in model.graph.node:
        _graph_replace_input_with(model_without_ext.graph , name_i , name_ref )
def remove_dup_initializers( onnx_file_path ) -> str:
    model_file_folder = os.path.dirname(onnx_file_path )
    model_file_name = os.path.basename(onnx_file_path )
    model = onnx.load(os.path.join(model_file_folder , model_file_name ) )
    inits = list(model.graph.initializer )
    dup_set = set()
    dup_map = {}
    ind_to_replace = []
    total_reduced_size = 0
    for i in range(len(inits ) ):
        if i in dup_set:
            continue
        for j in range(i + 1 , len(inits ) ):
            if j in dup_set:
                continue
            if _is_equal_tensor_proto(inits[i] , inits[j] ):
                dup_set.add(i )
                dup_set.add(j )
                dtype = inits[j].data_type
                mem_size = numpy.prod(inits[j].dims )
                if dtype == 1:
                    mem_size *= 4
                elif dtype == 6:
                    mem_size *= 4
                elif dtype == 7 or dtype == 11:
                    mem_size *= 8
                else:
                    print("unexpected data type: " , dtype )
                total_reduced_size += mem_size
                name_i = inits[i].name
                name_j = inits[j].name
                if name_i in dup_map:
                    dup_map[name_i].append(name_j )
                else:
                    dup_map[name_i] = [name_j]
                ind_to_replace.append((j, i) )
    print("total reduced size: " , total_reduced_size / 1024 / 1024 / 1024 , "GB" )
    ind_to_replace = sorted(ind_to_replace )
    _remove_dup_initializers_from_model(model , model , ind_to_replace )
    new_model_name = "optimized_" + model_file_name
    new_model_path = os.path.join(model_file_folder , new_model_name )
    onnx.save(model , new_model_path )
    return new_model_path
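# Illustrative usage sketch (editorial addition; the path is a placeholder):
# optimized_path = remove_dup_initializers("./onnx/encoder.onnx")
# slim_model = onnx.load(optimized_path)
# Duplicate weight tensors (common after exporting tied embeddings) are found
# with _is_equal_tensor_proto, every consumer is re-pointed at the surviving
# copy, and the graph is saved again under an "optimized_" prefix.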
| 21 | 0 |
import multiprocessing
from typing import TYPE_CHECKING, Optional, Union
from .. import Dataset, Features, config
from ..formatting import query_table
from ..packaged_modules.sql.sql import Sql
from ..utils import logging
from .abc import AbstractDatasetInputStream
if TYPE_CHECKING:
    import sqlite3
import sqlalchemy
class SqlDatasetReader (AbstractDatasetInputStream ):
"""simple docstring"""
def __init__( self : Optional[Any] , __magic_name__ : Union[str, "sqlalchemy.sql.Selectable"] , __magic_name__ : Union[str, "sqlalchemy.engine.Connection", "sqlalchemy.engine.Engine", "sqlite3.Connection"] , __magic_name__ : Optional[Features] = None , __magic_name__ : str = None , __magic_name__ : bool = False , **__magic_name__ : Dict , ) -> List[Any]:
super().__init__(features=__magic_name__ , cache_dir=__magic_name__ , keep_in_memory=__magic_name__ , **__magic_name__ )
SCREAMING_SNAKE_CASE_ = Sql(
cache_dir=__magic_name__ , features=__magic_name__ , sql=__magic_name__ , con=__magic_name__ , **__magic_name__ , )
    def read( self ) -> Dataset:
SCREAMING_SNAKE_CASE_ = None
SCREAMING_SNAKE_CASE_ = None
SCREAMING_SNAKE_CASE_ = None
SCREAMING_SNAKE_CASE_ = None
self.builder.download_and_prepare(
download_config=__magic_name__ , download_mode=__magic_name__ , verification_mode=__magic_name__ , base_path=__magic_name__ , )
# Build dataset for splits
SCREAMING_SNAKE_CASE_ = self.builder.as_dataset(
split="train" , verification_mode=__magic_name__ , in_memory=self.keep_in_memory )
return dataset
class SqlDatasetWriter :
"""simple docstring"""
def __init__( self : str , __magic_name__ : Dataset , __magic_name__ : str , __magic_name__ : Union[str, "sqlalchemy.engine.Connection", "sqlalchemy.engine.Engine", "sqlite3.Connection"] , __magic_name__ : Optional[int] = None , __magic_name__ : Optional[int] = None , **__magic_name__ : Dict , ) -> Tuple:
if num_proc is not None and num_proc <= 0:
raise ValueError(F'''num_proc {num_proc} must be an integer > 0.''' )
SCREAMING_SNAKE_CASE_ = dataset
SCREAMING_SNAKE_CASE_ = name
SCREAMING_SNAKE_CASE_ = con
SCREAMING_SNAKE_CASE_ = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
SCREAMING_SNAKE_CASE_ = num_proc
SCREAMING_SNAKE_CASE_ = to_sql_kwargs
    def write( self ) -> int:
SCREAMING_SNAKE_CASE_ = self.to_sql_kwargs.pop("sql" , __magic_name__ )
SCREAMING_SNAKE_CASE_ = self.to_sql_kwargs.pop("con" , __magic_name__ )
SCREAMING_SNAKE_CASE_ = self.to_sql_kwargs.pop("index" , __magic_name__ )
SCREAMING_SNAKE_CASE_ = self._write(index=__magic_name__ , **self.to_sql_kwargs )
return written
    def _batch_sql( self , args ) -> int:
        offset, index, to_sql_kwargs = args
        to_sql_kwargs = {**to_sql_kwargs, "if_exists": "append"} if offset > 0 else to_sql_kwargs
        batch = query_table(
            table=self.dataset.data , key=slice(offset , offset + self.batch_size ) , indices=self.dataset._indices , )
        df = batch.to_pandas()
        num_rows = df.to_sql(self.name , self.con , index=index , **to_sql_kwargs )
        return num_rows or len(df )
def __A ( self : Tuple , __magic_name__ : str , **__magic_name__ : Any ) -> int:
SCREAMING_SNAKE_CASE_ = 0
if self.num_proc is None or self.num_proc == 1:
for offset in logging.tqdm(
range(0 , len(self.dataset ) , self.batch_size ) , unit="ba" , disable=not logging.is_progress_bar_enabled() , desc="Creating SQL from Arrow format" , ):
written += self._batch_sql((offset, index, to_sql_kwargs) )
else:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = len(self.dataset ), self.batch_size
with multiprocessing.Pool(self.num_proc ) as pool:
for num_rows in logging.tqdm(
pool.imap(
self._batch_sql , [(offset, index, to_sql_kwargs) for offset in range(0 , __magic_name__ , __magic_name__ )] , ) , total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size , unit="ba" , disable=not logging.is_progress_bar_enabled() , desc="Creating SQL from Arrow format" , ):
written += num_rows
return written
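# Illustrative usage sketch (editorial addition; the dataset values are made
# up): round-trip a small Dataset through an in-memory SQLite table with the
# classes above.
# import sqlite3
# from datasets import Dataset
# ds = Dataset.from_dict({"text": ["a", "b"], "label": [0, 1]})
# con = sqlite3.connect(":memory:")
# SqlDatasetWriter(ds, "my_table", con).write()
# ds_back = SqlDatasetReader("SELECT * FROM my_table", con).read()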
| 118 |
import argparse
import logging
from collections import namedtuple
import torch
from model_bertabs import BertAbsSummarizer
from models.model_builder import AbsSummarizer # The authors' implementation
from transformers import BertTokenizer
logging.basicConfig(level=logging.INFO)
A : Optional[int] = logging.getLogger(__name__)
A : int = "Hello world! cécé herlolip"
A : List[Any] = namedtuple(
"BertAbsConfig",
[
"temp_dir",
"large",
"use_bert_emb",
"finetune_bert",
"encoder",
"share_emb",
"max_pos",
"enc_layers",
"enc_hidden_size",
"enc_heads",
"enc_ff_size",
"enc_dropout",
"dec_layers",
"dec_hidden_size",
"dec_heads",
"dec_ff_size",
"dec_dropout",
],
)
def convert_bertabs_checkpoints( path_to_checkpoints , pytorch_dump_folder_path ):
    config = BertAbsConfig(
        temp_dir="." , finetune_bert=False , large=False , share_emb=True , use_bert_emb=False , encoder="bert" , max_pos=5_1_2 , enc_layers=6 , enc_hidden_size=5_1_2 , enc_heads=8 , enc_ff_size=5_1_2 , enc_dropout=0.2 , dec_layers=6 , dec_hidden_size=7_6_8 , dec_heads=8 , dec_ff_size=2_0_4_8 , dec_dropout=0.2 , )
    original_checkpoint = torch.load(path_to_checkpoints , lambda storage , loc : storage )
    original = AbsSummarizer(config , torch.device("cpu" ) , original_checkpoint )
    original.eval()
    new_model = BertAbsSummarizer(config , torch.device("cpu" ) )
    new_model.eval()
# -------------------
# Convert the weights
# -------------------
logging.info("convert the model" )
new_model.bert.load_state_dict(original.bert.state_dict() )
new_model.decoder.load_state_dict(original.decoder.state_dict() )
new_model.generator.load_state_dict(original.generator.state_dict() )
# ----------------------------------
    # Make sure the outputs are identical
# ----------------------------------
logging.info("Make sure that the models' outputs are identical" )
    tokenizer = BertTokenizer.from_pretrained("bert-base-uncased" )
    # prepare the model inputs
    encoder_input_ids = tokenizer.encode("This is sample éàalj'-." )
    encoder_input_ids.extend([tokenizer.pad_token_id] * (5_1_2 - len(encoder_input_ids )) )
    encoder_input_ids = torch.tensor(encoder_input_ids ).unsqueeze(0 )
    decoder_input_ids = tokenizer.encode("This is sample 3 éàalj'-." )
    decoder_input_ids.extend([tokenizer.pad_token_id] * (5_1_2 - len(decoder_input_ids )) )
    decoder_input_ids = torch.tensor(decoder_input_ids ).unsqueeze(0 )
# failsafe to make sure the weights reset does not affect the
# loaded weights.
assert torch.max(torch.abs(original.generator[0].weight - new_model.generator[0].weight ) ) == 0
# forward pass
    src = encoder_input_ids
    tgt = decoder_input_ids
    segs = token_type_ids = None
    clss = None
    mask_src = encoder_attention_mask = None
    mask_tgt = decoder_attention_mask = None
    mask_cls = None
    # The original model does not apply the generator layer immediately but rather in
    # the beam search (where it combines softmax + linear layer). Since we already
    # apply the softmax in our generation process we only apply the linear layer here.
# We make sure that the outputs of the full stack are identical
    output_original_model = original(src , tgt , segs , clss , mask_src , mask_tgt , mask_cls )[0]
    output_original_generator = original.generator(output_original_model )
    output_converted_model = new_model(
        encoder_input_ids , decoder_input_ids , token_type_ids , encoder_attention_mask , decoder_attention_mask )[0]
    output_converted_generator = new_model.generator(output_converted_model )
    max_absolute_diff = torch.max(torch.abs(output_converted_model - output_original_model ) ).item()
    print("Maximum absolute difference between model outputs: {:.2f}".format(max_absolute_diff ) )
    max_absolute_diff = torch.max(torch.abs(output_converted_generator - output_original_generator ) ).item()
    print("Maximum absolute difference between generator outputs: {:.2f}".format(max_absolute_diff ) )
    are_identical = torch.allclose(output_converted_generator , output_original_generator , atol=1E-3 )
if are_identical:
logging.info("all weights are equal up to 1e-3" )
else:
raise ValueError("the weights are different. The new model is likely different from the original one." )
# The model has been saved with torch.save(model) and this is bound to the exact
# directory structure. We save the state_dict instead.
logging.info("saving the model's state dictionary" )
torch.save(
new_model.state_dict() , "./bertabs-finetuned-cnndm-extractive-abstractive-summarization/pytorch_model.bin" )
if __name__ == "__main__":
A : Any = argparse.ArgumentParser()
parser.add_argument(
"--bertabs_checkpoint_path",
default=None,
type=str,
required=True,
help="Path the official PyTorch dump.",
)
parser.add_argument(
"--pytorch_dump_folder_path",
default=None,
type=str,
required=True,
help="Path to the output PyTorch model.",
)
A : int = parser.parse_args()
convert_bertabs_checkpoints(
args.bertabs_checkpoint_path,
args.pytorch_dump_folder_path,
)
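# Illustrative invocation (editorial sketch; the script name and paths are
# placeholders):
#     python convert_bertabs_original_pytorch_checkpoint.py \
#         --bertabs_checkpoint_path ./bertabs_cnndm_final.pt \
#         --pytorch_dump_folder_path ./bertabs-converted
# The script loads the authors' checkpoint, copies the BERT encoder, decoder
# and generator weights into the Hugging Face model, and aborts unless the two
# stacks agree to within 1e-3 on a sample input.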
| 118 | 1 |
from __future__ import annotations
from scipy.special import comb # type: ignore
class BezierCurve :
    """simple docstring"""
    def __init__( self , list_of_points : list[tuple[float, float]]):
        self.list_of_points = list_of_points
        # Degree determines the flexibility of the curve.
        # Degree = 1 will produce a straight line.
        self.degree = len(list_of_points) - 1
    def basis_function( self , t : float) -> list[float]:
        assert 0 <= t <= 1, "Time t must be between 0 and 1."
        output_values: list[float] = []
        for i in range(len(self.list_of_points)):
            # basis function for each i
            output_values.append(
                comb(self.degree , i) * ((1 - t) ** (self.degree - i)) * (t**i))
        # the basis must sum up to 1 for it to produce a valid Bezier curve.
        assert round(sum(output_values) , 5) == 1
        return output_values
    def bezier_curve_function( self , t : float) -> tuple[float, float]:
        assert 0 <= t <= 1, "Time t must be between 0 and 1."
        basis_function = self.basis_function(t)
        x = 0.0
        y = 0.0
        for i in range(len(self.list_of_points)):
            # For all points, sum up the product of i-th basis function and i-th point.
            x += basis_function[i] * self.list_of_points[i][0]
            y += basis_function[i] * self.list_of_points[i][1]
        return (x, y)
    def plot_curve( self , step_size : float = 0.01):
        from matplotlib import pyplot as plt  # type: ignore
        to_plot_x: list[float] = []  # x coordinates of points to plot
        to_plot_y: list[float] = []  # y coordinates of points to plot
        t = 0.0
        while t <= 1:
            value = self.bezier_curve_function(t)
            to_plot_x.append(value[0])
            to_plot_y.append(value[1])
            t += step_size
        x = [i[0] for i in self.list_of_points]
        y = [i[1] for i in self.list_of_points]
        plt.plot(
            to_plot_x , to_plot_y , color="blue" , label="Curve of Degree " + str(self.degree) , )
        plt.scatter(x , y , color="red" , label="Control Points")
        plt.legend()
        plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod()
BezierCurve([(1, 2), (3, 5)]).plot_curve() # degree 1
BezierCurve([(0, 0), (5, 5), (5, 0)]).plot_curve() # degree 2
BezierCurve([(0, 0), (5, 5), (5, 0), (2.5, -2.5)]).plot_curve() # degree 3
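# Worked example (editorial addition, hand-checked): for the degree-2 curve
# through (0, 0), (5, 5), (5, 0), the Bernstein weights at t = 0.5 are
# [0.25, 0.5, 0.25], so
#     BezierCurve([(0, 0), (5, 5), (5, 0)]).bezier_curve_function(0.5)
# returns (0.25*0 + 0.5*5 + 0.25*5, 0.25*0 + 0.5*5 + 0.25*0) == (3.75, 2.5).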
| 371 |
from __future__ import annotations
from collections import deque
from collections.abc import Iterator
from dataclasses import dataclass
@dataclass
class Edge :
    """simple docstring"""
    destination_vertex: int
    weight: int
class AdjacencyList :
    """simple docstring"""
    def __init__( self , size : int):
        self._graph: list[list[Edge]] = [[] for _ in range(size)]
        self._size = size
    def __getitem__( self , vertex : int) -> Iterator[Edge]:
        return iter(self._graph[vertex])
    @property
    def size( self ):
        return self._size
    def add_edge( self , from_vertex : int , to_vertex : int , weight : int):
        if weight not in (0, 1):
            raise ValueError("Edge weight must be either 0 or 1.")
        if to_vertex < 0 or to_vertex >= self.size:
            raise ValueError("Vertex indexes must be in [0; size).")
        self._graph[from_vertex].append(Edge(to_vertex , weight))
    def get_shortest_path( self , start_vertex : int , finish_vertex : int) -> int:
        queue = deque([start_vertex])
        distances: list[int | None] = [None] * self.size
        distances[start_vertex] = 0
        while queue:
            current_vertex = queue.popleft()
            current_distance = distances[current_vertex]
            if current_distance is None:
                continue
            for edge in self[current_vertex]:
                new_distance = current_distance + edge.weight
                dest_vertex_distance = distances[edge.destination_vertex]
                if (
                    isinstance(dest_vertex_distance , int)
                    and new_distance >= dest_vertex_distance
                ):
                    continue
                distances[edge.destination_vertex] = new_distance
                # zero-weight edges go to the front of the deque, unit-weight to the back
                if edge.weight == 0:
                    queue.appendleft(edge.destination_vertex)
                else:
                    queue.append(edge.destination_vertex)
        if distances[finish_vertex] is None:
            raise ValueError("No path from start_vertex to finish_vertex.")
        return distances[finish_vertex]
if __name__ == "__main__":
import doctest
doctest.testmod()
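# Illustrative usage sketch (editorial addition): a triangle where the direct
# 0 -> 2 edge costs 1, but the detour through vertex 1 uses two zero-weight
# edges, so the deque-based search above returns 0.
# g = AdjacencyList(3)
# g.add_edge(0, 1, 0)
# g.add_edge(1, 2, 0)
# g.add_edge(0, 2, 1)
# assert g.get_shortest_path(0, 2) == 0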
| 127 | 0 |
'''simple docstring'''
import math
import sys
def read_file_binary(file_path: str ) -> str:
    """simple docstring"""
    result = ""
    try:
        with open(file_path , "rb" ) as binary_file:
            data = binary_file.read()
        for dat in data:
            curr_byte = f"{dat:08b}"
            result += curr_byte
        return result
    except OSError:
        print("File not accessible" )
        sys.exit()
def decompress_data(data_bits: str ) -> str:
    """simple docstring"""
    lexicon = {"0": "0", "1": "1"}
    result, curr_string = "", ""
    index = len(lexicon )
    for i in range(len(data_bits ) ):
        curr_string += data_bits[i]
        if curr_string not in lexicon:
            continue
        last_match_id = lexicon[curr_string]
        result += last_match_id
        lexicon[curr_string] = last_match_id + "0"
        # when the next index reaches a power of two, the code width grows:
        # rebuild every key with a leading "0"
        if math.log2(index ).is_integer():
            new_lex = {}
            for curr_key in list(lexicon ):
                new_lex["0" + curr_key] = lexicon.pop(curr_key )
            lexicon = new_lex
        lexicon[bin(index )[2:]] = last_match_id + "1"
        index += 1
        curr_string = ""
    return result
def write_file_binary(file_path: str , to_write: str ) -> None:
    """simple docstring"""
    byte_length = 8
    try:
        with open(file_path , "wb" ) as opened_file:
            result_byte_array = [
                to_write[i : i + byte_length]
                for i in range(0 , len(to_write ) , byte_length )
            ]
            if len(result_byte_array[-1] ) % byte_length == 0:
                result_byte_array.append("10000000" )
            else:
                result_byte_array[-1] += "1" + "0" * (
                    byte_length - len(result_byte_array[-1] ) - 1
                )
            for elem in result_byte_array[:-1]:
                opened_file.write(int(elem , 2 ).to_bytes(1 , byteorder="big" ) )
    except OSError:
        print("File not accessible" )
        sys.exit()
def remove_prefix(data_bits: str ) -> str:
    """simple docstring"""
    counter = 0
    for letter in data_bits:
        if letter == "1":
            break
        counter += 1
    data_bits = data_bits[counter:]
    data_bits = data_bits[counter + 1 :]
    return data_bits
def compress(source_path: str , destination_path: str ) -> None:
    """simple docstring"""
    # Note: despite its name, this entry point performs LZW decompression.
    data_bits = read_file_binary(source_path )
    data_bits = remove_prefix(data_bits )
    decompressed = decompress_data(data_bits )
    write_file_binary(destination_path , decompressed )
if __name__ == "__main__":
compress(sys.argv[1], sys.argv[2])
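# Illustrative invocation (editorial note; the script name and paths are
# placeholders):
#     python lempel_ziv_decompress.py compressed.bin restored.bin
# read_file_binary turns every input byte into its 8-bit string, remove_prefix
# strips the length header emitted by the matching compressor, and
# decompress_data replays the LZW dictionary growth, doubling the code width
# whenever the next index reaches a power of two, to rebuild the data.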
| 271 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
A__ = logging.get_logger(__name__)
A__ = {
'''distilbert-base-uncased''': '''https://huggingface.co/distilbert-base-uncased/resolve/main/config.json''',
'''distilbert-base-uncased-distilled-squad''': (
'''https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/config.json'''
),
'''distilbert-base-cased''': '''https://huggingface.co/distilbert-base-cased/resolve/main/config.json''',
'''distilbert-base-cased-distilled-squad''': (
'''https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/config.json'''
),
'''distilbert-base-german-cased''': '''https://huggingface.co/distilbert-base-german-cased/resolve/main/config.json''',
'''distilbert-base-multilingual-cased''': (
'''https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/config.json'''
),
'''distilbert-base-uncased-finetuned-sst-2-english''': (
'''https://huggingface.co/distilbert-base-uncased-finetuned-sst-2-english/resolve/main/config.json'''
),
}
class DistilBertConfig( PretrainedConfig ):
    model_type = "distilbert"
    attribute_map = {
        "hidden_size": "dim",
        "num_attention_heads": "n_heads",
        "num_hidden_layers": "n_layers",
    }
    def __init__( self ,vocab_size=3_0_5_2_2 ,max_position_embeddings=5_1_2 ,sinusoidal_pos_embds=False ,n_layers=6 ,n_heads=1_2 ,dim=7_6_8 ,hidden_dim=4 * 7_6_8 ,dropout=0.1 ,attention_dropout=0.1 ,activation="gelu" ,initializer_range=0.02 ,qa_dropout=0.1 ,seq_classif_dropout=0.2 ,pad_token_id=0 ,**kwargs ,):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.sinusoidal_pos_embds = sinusoidal_pos_embds
        self.n_layers = n_layers
        self.n_heads = n_heads
        self.dim = dim
        self.hidden_dim = hidden_dim
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation = activation
        self.initializer_range = initializer_range
        self.qa_dropout = qa_dropout
        self.seq_classif_dropout = seq_classif_dropout
        super().__init__(**kwargs ,pad_token_id=pad_token_id )
class DistilBertOnnxConfig( OnnxConfig ):
    @property
    def inputs( self ):
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ] )
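# Illustrative usage sketch (editorial addition): the attribute_map above lets
# callers read the standard config names as aliases of the DistilBERT-specific
# ones.
# config = DistilBertConfig(n_layers=6, n_heads=12, dim=768)
# assert config.num_hidden_layers == config.n_layers == 6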
| 230 | 0 |
"""simple docstring"""
class MaxFenwickTree :
    """simple docstring"""
    def __init__( self , size : int )->None:
        self.size = size
        self.arr = [0] * size
        self.tree = [0] * size
    @staticmethod
    def get_next( index : int )->int:
        return index | (index + 1)
    @staticmethod
    def get_prev( index : int )->int:
        return (index & (index + 1)) - 1
    def update( self , index : int , value : int )->None:
        self.arr[index] = value
        while index < self.size:
            current_left_border = self.get_prev(index ) + 1
            if current_left_border == index:
                self.tree[index] = value
            else:
                # recompute the node as max(new value, max over the rest of its range)
                self.tree[index] = max(value , self.query(current_left_border , index ) )
            index = self.get_next(index )
    def query( self , left : int , right : int )->int:
        right -= 1  # because the right boundary is exclusive
        result = 0
        while left <= right:
            current_left = self.get_prev(right )
            if left <= current_left:
                result = max(result , self.tree[right] )
                right = current_left
            else:
                result = max(result , self.arr[right] )
                right -= 1
        return result
if __name__ == "__main__":
import doctest
doctest.testmod()
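# Worked example of the index arithmetic above (editorial addition):
# get_next(5) = 5 | 6 = 0b101 | 0b110 = 0b111 = 7, and
# get_prev(5) = (5 & 6) - 1 = 0b100 - 1 = 3, so tree[5] summarises the slice
# arr[4:6] and queries walk these boundaries right-to-left.
assert (5 | (5 + 1)) == 7 and ((5 & (5 + 1)) - 1) == 3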
| 326 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__A : Optional[int] = {"configuration_mmbt": ["MMBTConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A : int = ["MMBTForClassification", "MMBTModel", "ModalEmbeddings"]
if TYPE_CHECKING:
from .configuration_mmbt import MMBTConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mmbt import MMBTForClassification, MMBTModel, ModalEmbeddings
else:
import sys
__A : str = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 326 | 1 |
'''simple docstring'''
from __future__ import annotations
class Node :
    def __init__( self , data=None ) -> None:
        '''simple docstring'''
        self.data = data
        self.next = None
    def __repr__( self ) -> str:
        '''simple docstring'''
        string_rep = []
        temp = self
        while temp:
            string_rep.append(f'{temp.data}' )
            temp = temp.next
        return "->".join(string_rep )
def make_linked_list( elements_list ) -> Node:
    if not elements_list:
        raise Exception("The Elements List is empty" )
    current = head = Node(elements_list[0] )
    for i in range(1, len(elements_list ) ):
        current.next = Node(elements_list[i] )
        current = current.next
    return head
def print_reverse( head_node ) -> None:
    if head_node is not None and isinstance(head_node, Node ):
        print_reverse(head_node.next )
        print(head_node.data )
def main() -> None:
    from doctest import testmod
    testmod()
    linked_list = make_linked_list([14, 52, 14, 12, 43] )
    print("Linked List:" )
    print(linked_list )
    print("Elements in Reverse:" )
    print_reverse(linked_list )
if __name__ == "__main__":
    main()
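# Editorial sketch (not in the original): print_reverse recurses once per
# node, so very long lists can exceed Python's recursion limit. An iterative
# variant with an explicit stack avoids that:
def print_reverse_iterative(head_node: Node) -> None:
    stack = []
    while head_node:
        stack.append(head_node.data)
        head_node = head_node.next
    while stack:
        print(stack.pop())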
| 162 |
'''simple docstring'''
from functools import lru_cache
def unique_prime_factors( n ) -> set:
    i = 2
    factors = set()
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.add(i )
    if n > 1:
        factors.add(n )
    return factors
@lru_cache
def upf_len( num ) -> int:
    return len(unique_prime_factors(num ) )
def equality( iterable ) -> bool:
    return len(set(iterable ) ) in (0, 1)
def run( n ) -> list:
    base = 2
    while True:
        # Increment each value of a generated range
        group = [base + i for i in range(n )]
        # Run elements through out unique_prime_factors function
        # Append our target number to the end.
        checker = [upf_len(x ) for x in group]
        checker.append(n )
        # If all numbers in the list are equal, return the group variable.
        if equality(checker ):
            return group
        # Increment our base variable by 1
        base += 1
def solution( n = 4 ) -> int:
    results = run(n )
    return results[0] if len(results ) else None
if __name__ == "__main__":
print(solution())
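# Worked check (editorial addition, hand-verified): solution(2) == 14, since
# 14 = 2 * 7 and 15 = 3 * 5 form the first consecutive pair with two distinct
# prime factors each, and solution(3) == 644 because 644 = 2**2 * 7 * 23,
# 645 = 3 * 5 * 43 and 646 = 2 * 17 * 19.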
| 162 | 1 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_nllb import NllbTokenizer
else:
_lowerCamelCase : Union[str, Any] = None
_lowerCamelCase : str = logging.get_logger(__name__)
_lowerCamelCase : Dict = {'''vocab_file''': '''sentencepiece.bpe.model''', '''tokenizer_file''': '''tokenizer.json'''}
_lowerCamelCase : List[Any] = {
'''vocab_file''': {
'''facebook/nllb-200-distilled-600M''': (
'''https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/sentencepiece.bpe.model'''
),
},
'''tokenizer_file''': {
'''facebook/nllb-200-distilled-600M''': (
'''https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/tokenizer.json'''
),
},
}
_lowerCamelCase : Optional[int] = {
'''facebook/nllb-large-en-ro''': 1_0_2_4,
'''facebook/nllb-200-distilled-600M''': 1_0_2_4,
}
# fmt: off
_lowerCamelCase : Optional[Any] = ['''ace_Arab''', '''ace_Latn''', '''acm_Arab''', '''acq_Arab''', '''aeb_Arab''', '''afr_Latn''', '''ajp_Arab''', '''aka_Latn''', '''amh_Ethi''', '''apc_Arab''', '''arb_Arab''', '''ars_Arab''', '''ary_Arab''', '''arz_Arab''', '''asm_Beng''', '''ast_Latn''', '''awa_Deva''', '''ayr_Latn''', '''azb_Arab''', '''azj_Latn''', '''bak_Cyrl''', '''bam_Latn''', '''ban_Latn''', '''bel_Cyrl''', '''bem_Latn''', '''ben_Beng''', '''bho_Deva''', '''bjn_Arab''', '''bjn_Latn''', '''bod_Tibt''', '''bos_Latn''', '''bug_Latn''', '''bul_Cyrl''', '''cat_Latn''', '''ceb_Latn''', '''ces_Latn''', '''cjk_Latn''', '''ckb_Arab''', '''crh_Latn''', '''cym_Latn''', '''dan_Latn''', '''deu_Latn''', '''dik_Latn''', '''dyu_Latn''', '''dzo_Tibt''', '''ell_Grek''', '''eng_Latn''', '''epo_Latn''', '''est_Latn''', '''eus_Latn''', '''ewe_Latn''', '''fao_Latn''', '''pes_Arab''', '''fij_Latn''', '''fin_Latn''', '''fon_Latn''', '''fra_Latn''', '''fur_Latn''', '''fuv_Latn''', '''gla_Latn''', '''gle_Latn''', '''glg_Latn''', '''grn_Latn''', '''guj_Gujr''', '''hat_Latn''', '''hau_Latn''', '''heb_Hebr''', '''hin_Deva''', '''hne_Deva''', '''hrv_Latn''', '''hun_Latn''', '''hye_Armn''', '''ibo_Latn''', '''ilo_Latn''', '''ind_Latn''', '''isl_Latn''', '''ita_Latn''', '''jav_Latn''', '''jpn_Jpan''', '''kab_Latn''', '''kac_Latn''', '''kam_Latn''', '''kan_Knda''', '''kas_Arab''', '''kas_Deva''', '''kat_Geor''', '''knc_Arab''', '''knc_Latn''', '''kaz_Cyrl''', '''kbp_Latn''', '''kea_Latn''', '''khm_Khmr''', '''kik_Latn''', '''kin_Latn''', '''kir_Cyrl''', '''kmb_Latn''', '''kon_Latn''', '''kor_Hang''', '''kmr_Latn''', '''lao_Laoo''', '''lvs_Latn''', '''lij_Latn''', '''lim_Latn''', '''lin_Latn''', '''lit_Latn''', '''lmo_Latn''', '''ltg_Latn''', '''ltz_Latn''', '''lua_Latn''', '''lug_Latn''', '''luo_Latn''', '''lus_Latn''', '''mag_Deva''', '''mai_Deva''', '''mal_Mlym''', '''mar_Deva''', '''min_Latn''', '''mkd_Cyrl''', '''plt_Latn''', '''mlt_Latn''', '''mni_Beng''', '''khk_Cyrl''', '''mos_Latn''', '''mri_Latn''', '''zsm_Latn''', '''mya_Mymr''', '''nld_Latn''', '''nno_Latn''', '''nob_Latn''', '''npi_Deva''', '''nso_Latn''', '''nus_Latn''', '''nya_Latn''', '''oci_Latn''', '''gaz_Latn''', '''ory_Orya''', '''pag_Latn''', '''pan_Guru''', '''pap_Latn''', '''pol_Latn''', '''por_Latn''', '''prs_Arab''', '''pbt_Arab''', '''quy_Latn''', '''ron_Latn''', '''run_Latn''', '''rus_Cyrl''', '''sag_Latn''', '''san_Deva''', '''sat_Beng''', '''scn_Latn''', '''shn_Mymr''', '''sin_Sinh''', '''slk_Latn''', '''slv_Latn''', '''smo_Latn''', '''sna_Latn''', '''snd_Arab''', '''som_Latn''', '''sot_Latn''', '''spa_Latn''', '''als_Latn''', '''srd_Latn''', '''srp_Cyrl''', '''ssw_Latn''', '''sun_Latn''', '''swe_Latn''', '''swh_Latn''', '''szl_Latn''', '''tam_Taml''', '''tat_Cyrl''', '''tel_Telu''', '''tgk_Cyrl''', '''tgl_Latn''', '''tha_Thai''', '''tir_Ethi''', '''taq_Latn''', '''taq_Tfng''', '''tpi_Latn''', '''tsn_Latn''', '''tso_Latn''', '''tuk_Latn''', '''tum_Latn''', '''tur_Latn''', '''twi_Latn''', '''tzm_Tfng''', '''uig_Arab''', '''ukr_Cyrl''', '''umb_Latn''', '''urd_Arab''', '''uzn_Latn''', '''vec_Latn''', '''vie_Latn''', '''war_Latn''', '''wol_Latn''', '''xho_Latn''', '''ydd_Hebr''', '''yor_Latn''', '''yue_Hant''', '''zho_Hans''', '''zho_Hant''', '''zul_Latn''']
class NllbTokenizerFast (PreTrainedTokenizerFast ):
    """simple docstring"""
    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = NllbTokenizer
    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []
def __init__( self : Optional[int], _UpperCAmelCase : Optional[int]=None, _UpperCAmelCase : str=None, _UpperCAmelCase : Optional[Any]="<s>", _UpperCAmelCase : Dict="</s>", _UpperCAmelCase : Optional[int]="</s>", _UpperCAmelCase : str="<s>", _UpperCAmelCase : Any="<unk>", _UpperCAmelCase : Dict="<pad>", _UpperCAmelCase : Optional[int]="<mask>", _UpperCAmelCase : Union[str, Any]=None, _UpperCAmelCase : Dict=None, _UpperCAmelCase : List[str]=None, _UpperCAmelCase : Any=False, **_UpperCAmelCase : List[Any], ) -> int:
"""simple docstring"""
# Mask token behave like a normal word, i.e. include the space before it
SCREAMING_SNAKE_CASE__ : List[str] = AddedToken(_UpperCAmelCase, lstrip=_UpperCAmelCase, rstrip=_UpperCAmelCase ) if isinstance(_UpperCAmelCase, _UpperCAmelCase ) else mask_token
SCREAMING_SNAKE_CASE__ : List[Any] = legacy_behaviour
super().__init__(
vocab_file=_UpperCAmelCase, tokenizer_file=_UpperCAmelCase, bos_token=_UpperCAmelCase, eos_token=_UpperCAmelCase, sep_token=_UpperCAmelCase, cls_token=_UpperCAmelCase, unk_token=_UpperCAmelCase, pad_token=_UpperCAmelCase, mask_token=_UpperCAmelCase, src_lang=_UpperCAmelCase, tgt_lang=_UpperCAmelCase, additional_special_tokens=_UpperCAmelCase, legacy_behaviour=_UpperCAmelCase, **_UpperCAmelCase, )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = vocab_file
SCREAMING_SNAKE_CASE__ : Tuple = False if not self.vocab_file else True
SCREAMING_SNAKE_CASE__ : int = FAIRSEQ_LANGUAGE_CODES.copy()
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
_additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in _additional_special_tokens] )
self.add_special_tokens({"additional_special_tokens": _additional_special_tokens} )
SCREAMING_SNAKE_CASE__ : List[str] = {
lang_code: self.convert_tokens_to_ids(_UpperCAmelCase ) for lang_code in FAIRSEQ_LANGUAGE_CODES
}
SCREAMING_SNAKE_CASE__ : List[str] = src_lang if src_lang is not None else "eng_Latn"
SCREAMING_SNAKE_CASE__ : int = self.convert_tokens_to_ids(self._src_lang )
SCREAMING_SNAKE_CASE__ : Optional[Any] = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
    @property
    def src_lang( self ) -> str:
        """simple docstring"""
        return self._src_lang
    @src_lang.setter
    def src_lang( self , new_src_lang : str ) -> None:
        """simple docstring"""
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang )
def A_ ( self : int, _UpperCAmelCase : List[int], _UpperCAmelCase : Optional[List[int]] = None ) -> List[int]:
"""simple docstring"""
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def A_ ( self : List[str], _UpperCAmelCase : List[int], _UpperCAmelCase : Optional[List[int]] = None ) -> List[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Union[str, Any] = [self.sep_token_id]
SCREAMING_SNAKE_CASE__ : Any = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def A_ ( self : Optional[Any], _UpperCAmelCase : Optional[Any], _UpperCAmelCase : str, _UpperCAmelCase : Optional[str], _UpperCAmelCase : Optional[str], **_UpperCAmelCase : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
if src_lang is None or tgt_lang is None:
raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model" )
SCREAMING_SNAKE_CASE__ : Any = src_lang
SCREAMING_SNAKE_CASE__ : List[str] = self(_UpperCAmelCase, add_special_tokens=_UpperCAmelCase, return_tensors=_UpperCAmelCase, **_UpperCAmelCase )
SCREAMING_SNAKE_CASE__ : Any = self.convert_tokens_to_ids(_UpperCAmelCase )
SCREAMING_SNAKE_CASE__ : Optional[Any] = tgt_lang_id
return inputs
    def prepare_seq2seq_batch( self, src_texts : List[str], src_lang : str = "eng_Latn", tgt_texts : Optional[List[str]] = None, tgt_lang : str = "fra_Latn", **kwargs, ) -> BatchEncoding:
        """simple docstring"""
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs )
def A_ ( self : str ) -> List[Any]:
"""simple docstring"""
return self.set_src_lang_special_tokens(self.src_lang )
def A_ ( self : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def A_ ( self : str, _UpperCAmelCase : Dict ) -> None:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Dict = self.convert_tokens_to_ids(_UpperCAmelCase )
if self.legacy_behaviour:
SCREAMING_SNAKE_CASE__ : Any = []
SCREAMING_SNAKE_CASE__ : int = [self.eos_token_id, self.cur_lang_code]
else:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = [self.cur_lang_code]
SCREAMING_SNAKE_CASE__ : Dict = [self.eos_token_id]
SCREAMING_SNAKE_CASE__ : str = self.convert_ids_to_tokens(self.prefix_tokens )
SCREAMING_SNAKE_CASE__ : int = self.convert_ids_to_tokens(self.suffix_tokens )
SCREAMING_SNAKE_CASE__ : List[Any] = processors.TemplateProcessing(
single=prefix_tokens_str + ["$A"] + suffix_tokens_str, pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str, special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens ) ), )
def A_ ( self : Tuple, _UpperCAmelCase : str ) -> None:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Tuple = self.convert_tokens_to_ids(_UpperCAmelCase )
if self.legacy_behaviour:
SCREAMING_SNAKE_CASE__ : str = []
SCREAMING_SNAKE_CASE__ : Union[str, Any] = [self.eos_token_id, self.cur_lang_code]
else:
SCREAMING_SNAKE_CASE__ : int = [self.cur_lang_code]
SCREAMING_SNAKE_CASE__ : Tuple = [self.eos_token_id]
SCREAMING_SNAKE_CASE__ : Tuple = self.convert_ids_to_tokens(self.prefix_tokens )
SCREAMING_SNAKE_CASE__ : Dict = self.convert_ids_to_tokens(self.suffix_tokens )
SCREAMING_SNAKE_CASE__ : Optional[int] = processors.TemplateProcessing(
single=prefix_tokens_str + ["$A"] + suffix_tokens_str, pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str, special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens ) ), )
    def save_vocabulary( self, save_directory : str, filename_prefix : Optional[str] = None ) -> Tuple[str]:
        """simple docstring"""
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer." )
        if not os.path.isdir(save_directory ):
            logger.error(F'''Vocabulary path ({save_directory}) should be a directory.''' )
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ):
            copyfile(self.vocab_file, out_vocab_file )
        return (out_vocab_file,)
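# Illustrative usage sketch (editorial addition; downloading the checkpoint is
# required, and the sentence is made up):
# tok = NllbTokenizerFast.from_pretrained(
#     "facebook/nllb-200-distilled-600M", src_lang="eng_Latn", tgt_lang="fra_Latn"
# )
# batch = tok("Hello world", return_tensors="pt")
# With legacy_behaviour=False (the default above), the source language code is
# prepended and </s> appended, so the ids start with the eng_Latn token.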
| 191 |
cache: dict[tuple[int, int, int], int] = {}
def _calculate( days : int , absent : int , late : int ) -> int:
    '''simple docstring'''
    if late == 3 or absent == 2:
        return 0
    # if we have no days left, and have not failed any other rules,
    # we have a prize string
    if days == 0:
        return 1
    # No easy solution, so now we need to do the recursive calculation
    # First, check if the combination is already in the cache, and
    # if yes, return the stored value from there since we already
    # know the number of possible prize strings from this point on
    key = (days, absent, late)
    if key in cache:
        return cache[key]
    # now we calculate the three possible ways that can unfold from
    # this point on, depending on our attendance today
    # 1) if we are late (but not absent), the "absent" counter stays as
    # it is, but the "late" counter increases by one
    state_late = _calculate(days - 1 , absent , late + 1 )
    # 2) if we are absent, the "absent" counter increases by 1, and the
    # "late" counter resets to 0
    state_absent = _calculate(days - 1 , absent + 1 , 0 )
    # 3) if we are on time, this resets the "late" counter and keeps the
    # absent counter
    state_ontime = _calculate(days - 1 , absent , 0 )
    prizestrings = state_late + state_absent + state_ontime
    cache[key] = prizestrings
    return prizestrings
def solution( days : int = 30 ) -> int:
    '''simple docstring'''
    return _calculate(days , absent=0 , late=0 )
if __name__ == "__main__":
print(solution())
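# Worked example (editorial addition, hand-computed): with a(n) counting
# O/L-only strings that avoid "LLL" (a = 1, 2, 4, 7, 13, ...), the number of
# prize strings over n days is a(n) plus one term per position of the single
# allowed absence:
#     f(4) = a(4) + sum(a(k) * a(3 - k) for k in range(4)) = 13 + 30 = 43
# and indeed _calculate(4, absent=0, late=0) == 43.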
| 191 | 1 |
def solution( n : int = 1_000 ) -> int:
    '''simple docstring'''
    product = -1
    candidate = 0
    for a in range(1 , n // 3 ):
        # Solving the two equations a**2+b**2=c**2 and a+b+c=N eliminating c
        b = (n * n - 2 * a * n) // (2 * n - 2 * a)
        c = n - a - b
        if c * c == (a * a + b * b):
            candidate = a * b * c
            if candidate >= product:
                product = candidate
    return product
if __name__ == "__main__":
print(F'{solution() = }')
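# Worked example (editorial addition, hand-checked): for n = 12 the loop finds
# a = 3, b = (144 - 72) // (24 - 6) = 4, c = 5, and 3**2 + 4**2 == 5**2, so
# solution(12) == 3 * 4 * 5 == 60.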
| 29 |
import unittest
from transformers import JukeboxTokenizer
from transformers.testing_utils import require_torch
class JukeboxTokenizationTest( unittest.TestCase ):
    tokenizer_class = JukeboxTokenizer
    metas = {
"""artist""": """Zac Brown Band""",
"""genres""": """Country""",
"""lyrics""": """I met a traveller from an antique land,
Who said \"Two vast and trunkless legs of stone
Stand in the desert. . . . Near them, on the sand,
Half sunk a shattered visage lies, whose frown,
And wrinkled lip, and sneer of cold command,
Tell that its sculptor well those passions read
Which yet survive, stamped on these lifeless things,
The hand that mocked them, and the heart that fed;
And on the pedestal, these words appear:
My name is Ozymandias, King of Kings;
Look on my Works, ye Mighty, and despair!
Nothing beside remains. Round the decay
Of that colossal Wreck, boundless and bare
The lone and level sands stretch far away
""",
}
@require_torch
    def test_1b_lyrics_tokenizer( self ):
"""simple docstring"""
import torch
        tokenizer = JukeboxTokenizer.from_pretrained('openai/jukebox-1b-lyrics')
        tokens = tokenizer(**self.metas)['input_ids']
# fmt: off
        EXPECTED_OUTPUT = [
torch.tensor([[
0, 0, 0, 71_69, 5_07, 9, 76, 39, 31, 46, 76, 27,
76, 46, 44, 27, 48, 31, 38, 38, 31, 44, 76, 32,
44, 41, 39, 76, 27, 40, 76, 27, 40, 46, 35, 43,
47, 31, 76, 38, 27, 40, 30, 64, 78, 76, 76, 76,
76, 76, 76, 76, 76, 23, 34, 41, 76, 45, 27, 35,
30, 76, 71, 20, 49, 41, 76, 48, 27, 45, 46, 76,
27, 40, 30, 76, 46, 44, 47, 40, 37, 38, 31, 45,
45, 76, 38, 31, 33, 45, 76, 41, 32, 76, 45, 46,
41, 40, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76,
19, 46, 27, 40, 30, 76, 35, 40, 76, 46, 34, 31,
76, 30, 31, 45, 31, 44, 46, 63, 76, 63, 76, 63,
76, 63, 76, 14, 31, 27, 44, 76, 46, 34, 31, 39,
64, 76, 41, 40, 76, 46, 34, 31, 76, 45, 27, 40,
30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 8,
27, 38, 32, 76, 45, 47, 40, 37, 76, 27, 76, 45,
34, 27, 46, 46, 31, 44, 31, 30, 76, 48, 35, 45,
27, 33, 31, 76, 38, 35, 31, 45, 64, 76, 49, 34,
41, 45, 31, 76, 32, 44, 41, 49, 40, 64, 78, 76,
76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76, 49,
44, 35, 40, 37, 38, 31, 30, 76, 38, 35, 42, 64,
76, 27, 40, 30, 76, 45, 40, 31, 31, 44, 76, 41,
32, 76, 29, 41, 38, 30, 76, 29, 41, 39, 39, 27,
40, 30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76,
20, 31, 38, 38, 76, 46, 34, 27, 46, 76, 35, 46,
45, 76, 45, 29, 47, 38, 42, 46, 41, 44, 76, 49,
31, 38, 38, 76, 46, 34, 41, 45, 31, 76, 42, 27,
45, 45, 35, 41, 40, 45, 76, 44, 31, 27, 30, 78,
76, 76, 76, 76, 76, 76, 76, 76, 23, 34, 35, 29,
34, 76, 51, 31, 46, 76, 45, 47, 44, 48, 35, 48,
31, 64, 76, 45, 46, 27, 39, 42, 31, 30, 76, 41,
40, 76, 46, 34, 31, 45, 31, 76, 38, 35, 32, 31,
38, 31, 45, 45, 76, 46, 34, 35, 40, 33, 45, 64,
78, 76, 76, 76, 76, 76, 76, 76, 76, 20, 34, 31,
76, 34, 27, 40, 30, 76, 46, 34, 27, 46, 76, 39,
41, 29, 37, 31, 30, 76, 46, 34, 31, 39, 64, 76,
27, 40, 30, 76, 46, 34, 31, 76, 34, 31, 27, 44,
46, 76, 46, 34, 27, 46, 76, 32, 31, 30, 66, 78,
76, 76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76,
41, 40, 76, 46, 34, 31, 76, 42, 31, 30, 31, 45,
46, 27, 38, 64, 76, 46, 34, 31, 45, 31, 76, 49,
41, 44, 30, 45, 76, 27, 42, 42, 31, 27, 44, 65,
78, 76, 76, 76, 76, 76, 76, 76, 76, 13, 51, 76,
40, 27, 39, 31, 76, 35, 45, 76, 15, 52, 51, 39,
27, 40, 30, 35, 27, 45, 64, 76, 11, 35, 40, 33,
76, 41, 32, 76, 11, 35, 40, 33, 45, 66, 78, 76,
76, 76, 76, 76, 76, 76, 76, 12, 41, 41, 37, 76,
41, 40, 76, 39, 51, 76, 23, 41, 44, 37, 45, 64,
76, 51, 31, 76, 13, 35, 33, 34, 46, 51, 64, 76,
27, 40, 30, 76, 30, 31, 45, 42, 27, 35, 44, 67,
78, 76, 76, 76, 76, 76, 76, 76, 76, 14, 41, 46,
34, 35, 40, 33, 76, 28, 31, 45, 35, 30, 31, 76,
44, 31, 39, 27, 35, 40, 45, 63, 76, 18, 41, 47,
40, 30, 76, 46, 34, 31, 76, 30, 31, 29, 27, 51,
78, 76, 76, 76, 76, 76, 76, 76, 76, 15, 32, 76,
46, 34, 27, 46, 76, 29, 41, 38, 41, 45, 45, 27,
38, 76, 23, 44, 31, 29, 37, 64, 76, 28, 41, 47,
40, 30, 38, 31, 45, 45, 76, 27, 40, 30, 76, 28,
27, 44, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76,
20, 34, 31, 76, 38, 41, 40, 31, 76, 27, 40, 30,
76, 38, 31, 48, 31, 38, 76, 45, 27, 40, 30, 45,
76, 45, 46, 44, 31, 46, 29, 34, 76, 32, 27, 44,
76, 27, 49, 27, 51, 78, 76, 76, 76, 76, 76, 76,
76, 76]]),
torch.tensor([[0, 0, 0, 10_69, 11]]),
torch.tensor([[0, 0, 0, 10_69, 11]]),
]
# fmt: on
self.assertTrue(torch.allclose(tokens[0], EXPECTED_OUTPUT[0]))
self.assertTrue(torch.allclose(tokens[1], EXPECTED_OUTPUT[1]))
self.assertTrue(torch.allclose(tokens[2], EXPECTED_OUTPUT[2]))
@require_torch
    def test_5b_lyrics_tokenizer( self ):
"""simple docstring"""
import torch
        tokenizer = JukeboxTokenizer.from_pretrained('openai/jukebox-5b-lyrics')
        tokens = tokenizer(**self.metas)['input_ids']
# fmt: off
        EXPECTED_OUTPUT = [
torch.tensor([[
0, 0, 0, 10_69, 11, -1, -1, -1, -1, 9, 77, 39,
31, 46, 77, 27, 77, 46, 44, 27, 48, 31, 38, 38,
31, 44, 77, 32, 44, 41, 39, 77, 27, 40, 77, 27,
40, 46, 35, 43, 47, 31, 77, 38, 27, 40, 30, 64,
79, 77, 77, 77, 77, 77, 77, 77, 77, 23, 34, 41,
77, 45, 27, 35, 30, 77, 72, 20, 49, 41, 77, 48,
27, 45, 46, 77, 27, 40, 30, 77, 46, 44, 47, 40,
37, 38, 31, 45, 45, 77, 38, 31, 33, 45, 77, 41,
32, 77, 45, 46, 41, 40, 31, 79, 77, 77, 77, 77,
77, 77, 77, 77, 19, 46, 27, 40, 30, 77, 35, 40,
77, 46, 34, 31, 77, 30, 31, 45, 31, 44, 46, 63,
77, 63, 77, 63, 77, 63, 77, 14, 31, 27, 44, 77,
46, 34, 31, 39, 64, 77, 41, 40, 77, 46, 34, 31,
77, 45, 27, 40, 30, 64, 79, 77, 77, 77, 77, 77,
77, 77, 77, 8, 27, 38, 32, 77, 45, 47, 40, 37,
77, 27, 77, 45, 34, 27, 46, 46, 31, 44, 31, 30,
77, 48, 35, 45, 27, 33, 31, 77, 38, 35, 31, 45,
64, 77, 49, 34, 41, 45, 31, 77, 32, 44, 41, 49,
40, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 1,
40, 30, 77, 49, 44, 35, 40, 37, 38, 31, 30, 77,
38, 35, 42, 64, 77, 27, 40, 30, 77, 45, 40, 31,
31, 44, 77, 41, 32, 77, 29, 41, 38, 30, 77, 29,
41, 39, 39, 27, 40, 30, 64, 79, 77, 77, 77, 77,
77, 77, 77, 77, 20, 31, 38, 38, 77, 46, 34, 27,
46, 77, 35, 46, 45, 77, 45, 29, 47, 38, 42, 46,
41, 44, 77, 49, 31, 38, 38, 77, 46, 34, 41, 45,
31, 77, 42, 27, 45, 45, 35, 41, 40, 45, 77, 44,
31, 27, 30, 79, 77, 77, 77, 77, 77, 77, 77, 77,
23, 34, 35, 29, 34, 77, 51, 31, 46, 77, 45, 47,
44, 48, 35, 48, 31, 64, 77, 45, 46, 27, 39, 42,
31, 30, 77, 41, 40, 77, 46, 34, 31, 45, 31, 77,
38, 35, 32, 31, 38, 31, 45, 45, 77, 46, 34, 35,
40, 33, 45, 64, 79, 77, 77, 77, 77, 77, 77, 77,
77, 20, 34, 31, 77, 34, 27, 40, 30, 77, 46, 34,
27, 46, 77, 39, 41, 29, 37, 31, 30, 77, 46, 34,
31, 39, 64, 77, 27, 40, 30, 77, 46, 34, 31, 77,
34, 31, 27, 44, 46, 77, 46, 34, 27, 46, 77, 32,
31, 30, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77,
1, 40, 30, 77, 41, 40, 77, 46, 34, 31, 77, 42,
31, 30, 31, 45, 46, 27, 38, 64, 77, 46, 34, 31,
45, 31, 77, 49, 41, 44, 30, 45, 77, 27, 42, 42,
31, 27, 44, 65, 79, 77, 77, 77, 77, 77, 77, 77,
77, 13, 51, 77, 40, 27, 39, 31, 77, 35, 45, 77,
15, 52, 51, 39, 27, 40, 30, 35, 27, 45, 64, 77,
11, 35, 40, 33, 77, 41, 32, 77, 11, 35, 40, 33,
45, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77, 12,
41, 41, 37, 77, 41, 40, 77, 39, 51, 77, 23, 41,
44, 37, 45, 64, 77, 51, 31, 77, 13, 35, 33, 34,
46, 51, 64, 77, 27, 40, 30, 77, 30, 31, 45, 42,
27, 35, 44, 67, 79, 77, 77, 77, 77, 77, 77, 77,
77, 14, 41, 46, 34, 35, 40, 33, 77, 28, 31, 45,
35, 30, 31, 77, 44, 31, 39, 27, 35, 40, 45, 63,
77, 18, 41, 47, 40, 30, 77, 46, 34, 31, 77, 30,
31, 29, 27, 51, 79, 77, 77, 77, 77, 77, 77, 77,
77, 15, 32, 77, 46, 34, 27, 46, 77, 29, 41, 38,
41, 45, 45, 27, 38, 77, 23, 44, 31, 29, 37, 64,
77, 28, 41, 47, 40, 30, 38, 31, 45, 45, 77, 27,
40, 30, 77, 28, 27, 44, 31, 79, 77, 77, 77, 77,
77, 77, 77, 77, 20, 34, 31, 77, 38, 41, 40, 31,
77, 27, 40, 30, 77, 38, 31, 48, 31, 38, 77, 45,
27, 40, 30, 45, 77, 45, 46, 44, 31, 46, 29, 34,
77, 32, 27, 44, 77, 27, 49, 27, 51, 79, 77, 77,
77, 77, 77, 77, 77, 77]]),
torch.tensor([[0, 0, 0, 10_69, 11, -1, -1, -1, -1]]),
torch.tensor([[0, 0, 0, 10_69, 11, -1, -1, -1, -1]]),
]
# fmt: on
self.assertTrue(torch.allclose(tokens[0], EXPECTED_OUTPUT[0]))
self.assertTrue(torch.allclose(tokens[1], EXPECTED_OUTPUT[1]))
self.assertTrue(torch.allclose(tokens[2], EXPECTED_OUTPUT[2]))
| 21 | 0 |
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.auto import AutoModelForSeqaSeqLM, AutoTokenizer
from .base import PipelineTool
UpperCAmelCase__ = {
'Acehnese Arabic': 'ace_Arab',
'Acehnese Latin': 'ace_Latn',
'Mesopotamian Arabic': 'acm_Arab',
'Ta\'izzi-Adeni Arabic': 'acq_Arab',
'Tunisian Arabic': 'aeb_Arab',
'Afrikaans': 'afr_Latn',
'South Levantine Arabic': 'ajp_Arab',
'Akan': 'aka_Latn',
'Amharic': 'amh_Ethi',
'North Levantine Arabic': 'apc_Arab',
'Modern Standard Arabic': 'arb_Arab',
'Modern Standard Arabic Romanized': 'arb_Latn',
'Najdi Arabic': 'ars_Arab',
'Moroccan Arabic': 'ary_Arab',
'Egyptian Arabic': 'arz_Arab',
'Assamese': 'asm_Beng',
'Asturian': 'ast_Latn',
'Awadhi': 'awa_Deva',
'Central Aymara': 'ayr_Latn',
'South Azerbaijani': 'azb_Arab',
'North Azerbaijani': 'azj_Latn',
'Bashkir': 'bak_Cyrl',
'Bambara': 'bam_Latn',
'Balinese': 'ban_Latn',
'Belarusian': 'bel_Cyrl',
'Bemba': 'bem_Latn',
'Bengali': 'ben_Beng',
'Bhojpuri': 'bho_Deva',
'Banjar Arabic': 'bjn_Arab',
'Banjar Latin': 'bjn_Latn',
'Standard Tibetan': 'bod_Tibt',
'Bosnian': 'bos_Latn',
'Buginese': 'bug_Latn',
'Bulgarian': 'bul_Cyrl',
'Catalan': 'cat_Latn',
'Cebuano': 'ceb_Latn',
'Czech': 'ces_Latn',
'Chokwe': 'cjk_Latn',
'Central Kurdish': 'ckb_Arab',
'Crimean Tatar': 'crh_Latn',
'Welsh': 'cym_Latn',
'Danish': 'dan_Latn',
'German': 'deu_Latn',
'Southwestern Dinka': 'dik_Latn',
'Dyula': 'dyu_Latn',
'Dzongkha': 'dzo_Tibt',
'Greek': 'ell_Grek',
'English': 'eng_Latn',
'Esperanto': 'epo_Latn',
'Estonian': 'est_Latn',
'Basque': 'eus_Latn',
'Ewe': 'ewe_Latn',
'Faroese': 'fao_Latn',
'Fijian': 'fij_Latn',
'Finnish': 'fin_Latn',
'Fon': 'fon_Latn',
'French': 'fra_Latn',
'Friulian': 'fur_Latn',
'Nigerian Fulfulde': 'fuv_Latn',
'Scottish Gaelic': 'gla_Latn',
'Irish': 'gle_Latn',
'Galician': 'glg_Latn',
'Guarani': 'grn_Latn',
'Gujarati': 'guj_Gujr',
'Haitian Creole': 'hat_Latn',
'Hausa': 'hau_Latn',
'Hebrew': 'heb_Hebr',
'Hindi': 'hin_Deva',
'Chhattisgarhi': 'hne_Deva',
'Croatian': 'hrv_Latn',
'Hungarian': 'hun_Latn',
'Armenian': 'hye_Armn',
'Igbo': 'ibo_Latn',
'Ilocano': 'ilo_Latn',
'Indonesian': 'ind_Latn',
'Icelandic': 'isl_Latn',
'Italian': 'ita_Latn',
'Javanese': 'jav_Latn',
'Japanese': 'jpn_Jpan',
'Kabyle': 'kab_Latn',
'Jingpho': 'kac_Latn',
'Kamba': 'kam_Latn',
'Kannada': 'kan_Knda',
'Kashmiri Arabic': 'kas_Arab',
'Kashmiri Devanagari': 'kas_Deva',
'Georgian': 'kat_Geor',
'Central Kanuri Arabic': 'knc_Arab',
'Central Kanuri Latin': 'knc_Latn',
'Kazakh': 'kaz_Cyrl',
'Kabiyè': 'kbp_Latn',
'Kabuverdianu': 'kea_Latn',
'Khmer': 'khm_Khmr',
'Kikuyu': 'kik_Latn',
'Kinyarwanda': 'kin_Latn',
'Kyrgyz': 'kir_Cyrl',
'Kimbundu': 'kmb_Latn',
'Northern Kurdish': 'kmr_Latn',
'Kikongo': 'kon_Latn',
'Korean': 'kor_Hang',
'Lao': 'lao_Laoo',
'Ligurian': 'lij_Latn',
'Limburgish': 'lim_Latn',
'Lingala': 'lin_Latn',
'Lithuanian': 'lit_Latn',
'Lombard': 'lmo_Latn',
'Latgalian': 'ltg_Latn',
'Luxembourgish': 'ltz_Latn',
'Luba-Kasai': 'lua_Latn',
'Ganda': 'lug_Latn',
'Luo': 'luo_Latn',
'Mizo': 'lus_Latn',
'Standard Latvian': 'lvs_Latn',
'Magahi': 'mag_Deva',
'Maithili': 'mai_Deva',
'Malayalam': 'mal_Mlym',
'Marathi': 'mar_Deva',
'Minangkabau Arabic ': 'min_Arab',
'Minangkabau Latin': 'min_Latn',
'Macedonian': 'mkd_Cyrl',
'Plateau Malagasy': 'plt_Latn',
'Maltese': 'mlt_Latn',
'Meitei Bengali': 'mni_Beng',
'Halh Mongolian': 'khk_Cyrl',
'Mossi': 'mos_Latn',
'Maori': 'mri_Latn',
'Burmese': 'mya_Mymr',
'Dutch': 'nld_Latn',
'Norwegian Nynorsk': 'nno_Latn',
'Norwegian Bokmål': 'nob_Latn',
'Nepali': 'npi_Deva',
'Northern Sotho': 'nso_Latn',
'Nuer': 'nus_Latn',
'Nyanja': 'nya_Latn',
'Occitan': 'oci_Latn',
'West Central Oromo': 'gaz_Latn',
'Odia': 'ory_Orya',
'Pangasinan': 'pag_Latn',
'Eastern Panjabi': 'pan_Guru',
'Papiamento': 'pap_Latn',
'Western Persian': 'pes_Arab',
'Polish': 'pol_Latn',
'Portuguese': 'por_Latn',
'Dari': 'prs_Arab',
'Southern Pashto': 'pbt_Arab',
'Ayacucho Quechua': 'quy_Latn',
'Romanian': 'ron_Latn',
'Rundi': 'run_Latn',
'Russian': 'rus_Cyrl',
'Sango': 'sag_Latn',
'Sanskrit': 'san_Deva',
'Santali': 'sat_Olck',
'Sicilian': 'scn_Latn',
'Shan': 'shn_Mymr',
'Sinhala': 'sin_Sinh',
'Slovak': 'slk_Latn',
'Slovenian': 'slv_Latn',
'Samoan': 'smo_Latn',
'Shona': 'sna_Latn',
'Sindhi': 'snd_Arab',
'Somali': 'som_Latn',
'Southern Sotho': 'sot_Latn',
'Spanish': 'spa_Latn',
'Tosk Albanian': 'als_Latn',
'Sardinian': 'srd_Latn',
'Serbian': 'srp_Cyrl',
'Swati': 'ssw_Latn',
'Sundanese': 'sun_Latn',
'Swedish': 'swe_Latn',
'Swahili': 'swh_Latn',
'Silesian': 'szl_Latn',
'Tamil': 'tam_Taml',
'Tatar': 'tat_Cyrl',
'Telugu': 'tel_Telu',
'Tajik': 'tgk_Cyrl',
'Tagalog': 'tgl_Latn',
'Thai': 'tha_Thai',
'Tigrinya': 'tir_Ethi',
'Tamasheq Latin': 'taq_Latn',
'Tamasheq Tifinagh': 'taq_Tfng',
'Tok Pisin': 'tpi_Latn',
'Tswana': 'tsn_Latn',
'Tsonga': 'tso_Latn',
'Turkmen': 'tuk_Latn',
'Tumbuka': 'tum_Latn',
'Turkish': 'tur_Latn',
'Twi': 'twi_Latn',
'Central Atlas Tamazight': 'tzm_Tfng',
'Uyghur': 'uig_Arab',
'Ukrainian': 'ukr_Cyrl',
'Umbundu': 'umb_Latn',
'Urdu': 'urd_Arab',
'Northern Uzbek': 'uzn_Latn',
'Venetian': 'vec_Latn',
'Vietnamese': 'vie_Latn',
'Waray': 'war_Latn',
'Wolof': 'wol_Latn',
'Xhosa': 'xho_Latn',
'Eastern Yiddish': 'ydd_Hebr',
'Yoruba': 'yor_Latn',
'Yue Chinese': 'yue_Hant',
'Chinese Simplified': 'zho_Hans',
'Chinese Traditional': 'zho_Hant',
'Standard Malay': 'zsm_Latn',
'Zulu': 'zul_Latn',
}
class TranslationTool( PipelineTool ):
    default_checkpoint = "facebook/nllb-200-distilled-600M"
    description = (
        "This is a tool that translates text from a language to another. It takes three inputs: `text`, which should "
        "be the text to translate, `src_lang`, which should be the language of the text to translate and `tgt_lang`, "
        "which should be the language for the desired output language. Both `src_lang` and `tgt_lang` are written in "
        "plain English, such as 'Romanian', or 'Albanian'. It returns the text translated in `tgt_lang`."
    )
    name = "translator"
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSeqaSeqLM
    lang_to_code = LANGUAGE_CODES
    inputs = ["text", "text", "text"]
    outputs = ["text"]
def lowercase ( self : Union[str, Any] , _lowerCamelCase : str , _lowerCamelCase : Any , _lowerCamelCase : Dict ):
if src_lang not in self.lang_to_code:
raise ValueError(f'''{src_lang} is not a supported language.''' )
if tgt_lang not in self.lang_to_code:
raise ValueError(f'''{tgt_lang} is not a supported language.''' )
_snake_case = self.lang_to_code[src_lang]
_snake_case = self.lang_to_code[tgt_lang]
return self.pre_processor._build_translation_inputs(
_lowerCamelCase , return_tensors='''pt''' , src_lang=_lowerCamelCase , tgt_lang=_lowerCamelCase )
def lowercase ( self : Tuple , _lowerCamelCase : Optional[Any] ):
return self.model.generate(**_lowerCamelCase )
def lowercase ( self : Optional[Any] , _lowerCamelCase : str ):
return self.post_processor.decode(outputs[0].tolist() , skip_special_tokens=_lowerCamelCase )
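# A hedged sketch of what the tool above does with plain transformers calls. The
# checkpoint, the `_build_translation_inputs` call, and the decode step all come
# from the class body; the standard `AutoTokenizer`/`AutoModelForSeq2SeqLM` names
# and the sample sentence are assumptions added for illustration.
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer

checkpoint = "facebook/nllb-200-distilled-600M"
tokenizer = AutoTokenizer.from_pretrained(checkpoint)
model = AutoModelForSeq2SeqLM.from_pretrained(checkpoint)
inputs = tokenizer._build_translation_inputs(
    "Ciao, come stai?",
    return_tensors="pt",
    src_lang=LANGUAGE_CODES["Italian"],      # 'ita_Latn'
    tgt_lang=LANGUAGE_CODES["Portuguese"],   # 'por_Latn'
)
outputs = model.generate(**inputs)
print(tokenizer.decode(outputs[0].tolist(), skip_special_tokens=True))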
| 40 |
"""simple docstring"""
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import MaskaFormerConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskaFormerForUniversalSegmentation, MaskaFormerModel
if is_vision_available():
from transformers import MaskaFormerImageProcessor
if is_vision_available():
from PIL import Image
class lowerCAmelCase__ :
def __init__( self : List[Any] , _lowerCamelCase : Optional[int] , _lowerCamelCase : List[Any]=2 , _lowerCamelCase : List[str]=True , _lowerCamelCase : Optional[Any]=False , _lowerCamelCase : Optional[Any]=10 , _lowerCamelCase : Dict=3 , _lowerCamelCase : Optional[int]=32 * 8 , _lowerCamelCase : Optional[int]=32 * 8 , _lowerCamelCase : Dict=4 , _lowerCamelCase : Optional[int]=64 , ):
_snake_case = parent
_snake_case = batch_size
_snake_case = is_training
_snake_case = use_auxiliary_loss
_snake_case = num_queries
_snake_case = num_channels
_snake_case = min_size
_snake_case = max_size
_snake_case = num_labels
_snake_case = hidden_dim
_snake_case = hidden_dim
def lowercase ( self : List[str] ):
_snake_case = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to(
_lowerCamelCase )
_snake_case = torch.ones([self.batch_size, self.min_size, self.max_size] , device=_lowerCamelCase )
_snake_case = (
torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=_lowerCamelCase ) > 0.5
).float()
_snake_case = (torch.rand((self.batch_size, self.num_labels) , device=_lowerCamelCase ) > 0.5).long()
_snake_case = self.get_config()
return config, pixel_values, pixel_mask, mask_labels, class_labels
def lowercase ( self : Optional[Any] ):
_snake_case = MaskaFormerConfig(
hidden_size=self.hidden_dim , )
_snake_case = self.num_queries
_snake_case = self.num_labels
_snake_case = [1, 1, 1, 1]
_snake_case = self.num_channels
_snake_case = 64
_snake_case = 128
_snake_case = self.hidden_dim
_snake_case = self.hidden_dim
_snake_case = self.hidden_dim
return config
def lowercase ( self : Any ):
_snake_case , _snake_case , _snake_case , _snake_case , _snake_case = self.prepare_config_and_inputs()
_snake_case = {'''pixel_values''': pixel_values, '''pixel_mask''': pixel_mask}
return config, inputs_dict
def lowercase ( self : Union[str, Any] , _lowerCamelCase : List[Any] , _lowerCamelCase : int ):
_snake_case = output.encoder_hidden_states
_snake_case = output.pixel_decoder_hidden_states
_snake_case = output.transformer_decoder_hidden_states
self.parent.assertTrue(len(_lowerCamelCase ) , len(config.backbone_config.depths ) )
self.parent.assertTrue(len(_lowerCamelCase ) , len(config.backbone_config.depths ) )
self.parent.assertTrue(len(_lowerCamelCase ) , config.decoder_layers )
def lowercase ( self : List[Any] , _lowerCamelCase : Union[str, Any] , _lowerCamelCase : Any , _lowerCamelCase : List[Any] , _lowerCamelCase : Dict=False ):
with torch.no_grad():
_snake_case = MaskaFormerModel(config=_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
_snake_case = model(pixel_values=_lowerCamelCase , pixel_mask=_lowerCamelCase )
_snake_case = model(_lowerCamelCase , output_hidden_states=_lowerCamelCase )
self.parent.assertEqual(
output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.hidden_dim) , )
# let's ensure the other two hidden state exists
self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(output.encoder_last_hidden_state is not None )
if output_hidden_states:
self.check_output_hidden_state(_lowerCamelCase , _lowerCamelCase )
def lowercase ( self : int , _lowerCamelCase : Dict , _lowerCamelCase : List[Any] , _lowerCamelCase : Any , _lowerCamelCase : str , _lowerCamelCase : Union[str, Any] ):
_snake_case = MaskaFormerForUniversalSegmentation(config=_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
def comm_check_on_output(_lowerCamelCase : List[str] ):
# let's still check that all the required stuff is there
self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.encoder_last_hidden_state is not None )
# okay, now we need to check the logits shape
# due to the encoder compression, masks have a //4 spatial size
self.parent.assertEqual(
result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , )
# + 1 for null class
self.parent.assertEqual(
result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1) )
with torch.no_grad():
_snake_case = model(pixel_values=_lowerCamelCase , pixel_mask=_lowerCamelCase )
_snake_case = model(_lowerCamelCase )
comm_check_on_output(_lowerCamelCase )
_snake_case = model(
pixel_values=_lowerCamelCase , pixel_mask=_lowerCamelCase , mask_labels=_lowerCamelCase , class_labels=_lowerCamelCase )
comm_check_on_output(_lowerCamelCase )
self.parent.assertTrue(result.loss is not None )
self.parent.assertEqual(result.loss.shape , torch.Size([1] ) )
@require_torch
class lowerCAmelCase__ ( A_ , A_ , unittest.TestCase ):
__a = (MaskaFormerModel, MaskaFormerForUniversalSegmentation) if is_torch_available() else ()
__a = {"""feature-extraction""": MaskaFormerModel} if is_torch_available() else {}
__a = False
__a = False
__a = False
__a = False
def lowercase ( self : int ):
_snake_case = MaskaFormerModelTester(self )
_snake_case = ConfigTester(self , config_class=_lowerCamelCase , has_text_modality=_lowerCamelCase )
def lowercase ( self : Dict ):
self.config_tester.run_common_tests()
def lowercase ( self : Tuple ):
_snake_case , _snake_case = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskaformer_model(_lowerCamelCase , **_lowerCamelCase , output_hidden_states=_lowerCamelCase )
def lowercase ( self : Any ):
_snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_maskaformer_instance_segmentation_head_model(*_lowerCamelCase )
@unittest.skip(reason='''Mask2Former does not use inputs_embeds''' )
def lowercase ( self : Optional[int] ):
pass
@unittest.skip(reason='''Mask2Former does not have a get_input_embeddings method''' )
def lowercase ( self : Dict ):
pass
@unittest.skip(reason='''Mask2Former is not a generative model''' )
def lowercase ( self : int ):
pass
@unittest.skip(reason='''Mask2Former does not use token embeddings''' )
def lowercase ( self : List[str] ):
pass
@require_torch_multi_gpu
@unittest.skip(
reason='''Mask2Former has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`''' )
def lowercase ( self : Optional[int] ):
pass
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def lowercase ( self : str ):
pass
def lowercase ( self : Optional[int] ):
_snake_case , _snake_case = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_snake_case = model_class(_lowerCamelCase )
_snake_case = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_snake_case = [*signature.parameters.keys()]
_snake_case = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , _lowerCamelCase )
@slow
def lowercase ( self : Optional[int] ):
for model_name in ["facebook/mask2former-swin-small-coco-instance"]:
_snake_case = MaskaFormerModel.from_pretrained(_lowerCamelCase )
self.assertIsNotNone(_lowerCamelCase )
def lowercase ( self : List[Any] ):
_snake_case = (self.model_tester.min_size,) * 2
_snake_case = {
'''pixel_values''': torch.randn((2, 3, *size) , device=_lowerCamelCase ),
'''mask_labels''': torch.randn((2, 10, *size) , device=_lowerCamelCase ),
'''class_labels''': torch.zeros(2 , 10 , device=_lowerCamelCase ).long(),
}
_snake_case = self.model_tester.get_config()
_snake_case = MaskaFormerForUniversalSegmentation(_lowerCamelCase ).to(_lowerCamelCase )
_snake_case = model(**_lowerCamelCase )
self.assertTrue(outputs.loss is not None )
def lowercase ( self : Union[str, Any] ):
_snake_case , _snake_case = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskaformer_model(_lowerCamelCase , **_lowerCamelCase , output_hidden_states=_lowerCamelCase )
def lowercase ( self : str ):
_snake_case , _snake_case = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_snake_case = model_class(_lowerCamelCase ).to(_lowerCamelCase )
_snake_case = model(**_lowerCamelCase , output_attentions=_lowerCamelCase )
self.assertTrue(outputs.attentions is not None )
def lowercase ( self : str ):
if not self.model_tester.is_training:
return
_snake_case = self.all_model_classes[1]
_snake_case , _snake_case , _snake_case , _snake_case , _snake_case = self.model_tester.prepare_config_and_inputs()
_snake_case = model_class(_lowerCamelCase )
model.to(_lowerCamelCase )
model.train()
_snake_case = model(_lowerCamelCase , mask_labels=_lowerCamelCase , class_labels=_lowerCamelCase ).loss
loss.backward()
def lowercase ( self : Optional[int] ):
_snake_case = self.all_model_classes[1]
_snake_case , _snake_case , _snake_case , _snake_case , _snake_case = self.model_tester.prepare_config_and_inputs()
_snake_case = True
_snake_case = True
_snake_case = model_class(_lowerCamelCase ).to(_lowerCamelCase )
model.train()
_snake_case = model(_lowerCamelCase , mask_labels=_lowerCamelCase , class_labels=_lowerCamelCase )
_snake_case = outputs.encoder_hidden_states[0]
encoder_hidden_states.retain_grad()
_snake_case = outputs.pixel_decoder_hidden_states[0]
pixel_decoder_hidden_states.retain_grad()
_snake_case = outputs.transformer_decoder_hidden_states[0]
transformer_decoder_hidden_states.retain_grad()
_snake_case = outputs.attentions[0]
attentions.retain_grad()
outputs.loss.backward(retain_graph=_lowerCamelCase )
self.assertIsNotNone(encoder_hidden_states.grad )
self.assertIsNotNone(pixel_decoder_hidden_states.grad )
self.assertIsNotNone(transformer_decoder_hidden_states.grad )
self.assertIsNotNone(attentions.grad )
UpperCAmelCase__ = 1e-4
def _UpperCAmelCase ( ) -> Tuple:
_snake_case = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_vision
@slow
class lowerCAmelCase__ ( unittest.TestCase ):
@cached_property
def lowercase ( self : Optional[Any] ):
return "facebook/mask2former-swin-small-coco-instance"
@cached_property
def lowercase ( self : int ):
return MaskaFormerImageProcessor.from_pretrained(self.model_checkpoints ) if is_vision_available() else None
def lowercase ( self : Any ):
_snake_case = MaskaFormerModel.from_pretrained(self.model_checkpoints ).to(_lowerCamelCase )
_snake_case = self.default_image_processor
_snake_case = prepare_img()
_snake_case = image_processor(_lowerCamelCase , return_tensors='''pt''' ).to(_lowerCamelCase )
_snake_case = inputs['''pixel_values'''].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(_lowerCamelCase , (1, 3, 384, 384) )
with torch.no_grad():
_snake_case = model(**_lowerCamelCase )
_snake_case = torch.tensor(
[[-0.2_7_9_0, -1.0_7_1_7, -1.1_6_6_8], [-0.5_1_2_8, -0.3_1_2_8, -0.4_9_8_7], [-0.5_8_3_2, 0.1_9_7_1, -0.0_1_9_7]] ).to(_lowerCamelCase )
self.assertTrue(
torch.allclose(
outputs.encoder_last_hidden_state[0, 0, :3, :3] , _lowerCamelCase , atol=_lowerCamelCase ) )
_snake_case = torch.tensor(
[[0.8_9_7_3, 1.1_8_4_7, 1.1_7_7_6], [1.1_9_3_4, 1.5_0_4_0, 1.5_1_2_8], [1.1_1_5_3, 1.4_4_8_6, 1.4_9_5_1]] ).to(_lowerCamelCase )
self.assertTrue(
torch.allclose(
outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , _lowerCamelCase , atol=_lowerCamelCase ) )
_snake_case = torch.tensor(
[[2.1_1_5_2, 1.7_0_0_0, -0.8_6_0_3], [1.5_8_0_8, 1.8_0_0_4, -0.9_3_5_3], [1.6_0_4_3, 1.7_4_9_5, -0.5_9_9_9]] ).to(_lowerCamelCase )
self.assertTrue(
torch.allclose(
outputs.transformer_decoder_last_hidden_state[0, :3, :3] , _lowerCamelCase , atol=_lowerCamelCase ) )
def lowercase ( self : str ):
_snake_case = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(_lowerCamelCase ).eval()
_snake_case = self.default_image_processor
_snake_case = prepare_img()
_snake_case = image_processor(_lowerCamelCase , return_tensors='''pt''' ).to(_lowerCamelCase )
_snake_case = inputs['''pixel_values'''].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(_lowerCamelCase , (1, 3, 384, 384) )
with torch.no_grad():
_snake_case = model(**_lowerCamelCase )
# masks_queries_logits
_snake_case = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape , (1, model.config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) )
_snake_case = [
[-8.7_8_3_9, -9.0_0_5_6, -8.8_1_2_1],
[-7.4_1_0_4, -7.0_3_1_3, -6.5_4_0_1],
[-6.6_1_0_5, -6.3_4_2_7, -6.4_6_7_5],
]
_snake_case = torch.tensor(_lowerCamelCase ).to(_lowerCamelCase )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , _lowerCamelCase , atol=_lowerCamelCase ) )
# class_queries_logits
_snake_case = outputs.class_queries_logits
self.assertEqual(class_queries_logits.shape , (1, model.config.num_queries, model.config.num_labels + 1) )
_snake_case = torch.tensor(
[
[1.8_3_2_4, -8.0_8_3_5, -4.1_9_2_2],
[0.8_4_5_0, -9.0_0_5_0, -3.6_0_5_3],
[0.3_0_4_5, -7.7_2_9_3, -3.0_2_7_5],
] ).to(_lowerCamelCase )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , _lowerCamelCase , atol=_lowerCamelCase ) )
def lowercase ( self : Optional[int] ):
_snake_case = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(_lowerCamelCase ).eval()
_snake_case = self.default_image_processor
_snake_case = image_processor(
[np.zeros((3, 800, 1333) ), np.zeros((3, 800, 1333) )] , segmentation_maps=[np.zeros((384, 384) ).astype(np.floataa ), np.zeros((384, 384) ).astype(np.floataa )] , return_tensors='''pt''' , )
_snake_case = inputs['''pixel_values'''].to(_lowerCamelCase )
_snake_case = [el.to(_lowerCamelCase ) for el in inputs['''mask_labels''']]
_snake_case = [el.to(_lowerCamelCase ) for el in inputs['''class_labels''']]
with torch.no_grad():
_snake_case = model(**_lowerCamelCase )
self.assertTrue(outputs.loss is not None )
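# The shape assertions above pin down Mask2Former's output geometry: mask logits
# are predicted at 1/4 of the input resolution, and class logits carry one extra
# slot for the "no object" null class. A model-free sketch of that arithmetic:
def expected_output_shapes(batch_size, num_queries, num_labels, height, width):
    masks_queries_logits = (batch_size, num_queries, height // 4, width // 4)
    class_queries_logits = (batch_size, num_queries, num_labels + 1)
    return masks_queries_logits, class_queries_logits

assert expected_output_shapes(2, 10, 4, 256, 256) == ((2, 10, 64, 64), (2, 10, 5))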
| 40 | 1 |
import gc
import random
import unittest
import numpy as np
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModel,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableUnCLIPImgaImgPipeline, UNetaDConditionModel
from diffusers.pipelines.pipeline_utils import DiffusionPipeline
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import (
enable_full_determinism,
floats_tensor,
load_image,
load_numpy,
require_torch_gpu,
skip_mps,
slow,
torch_device,
)
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class a__ ( A__ , A__ , A__ , unittest.TestCase ):
A = StableUnCLIPImgaImgPipeline
A = TEXT_GUIDED_IMAGE_VARIATION_PARAMS
A = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
A = frozenset(
[] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
A = frozenset([] )
def __UpperCamelCase ( self : Union[str, Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[int] = 32
SCREAMING_SNAKE_CASE_ : Dict = embedder_hidden_size
# image encoding components
SCREAMING_SNAKE_CASE_ : List[Any] = CLIPImageProcessor(crop_size=32,size=32 )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE_ : List[str] = CLIPVisionModelWithProjection(
CLIPVisionConfig(
hidden_size=_A,projection_dim=_A,num_hidden_layers=5,num_attention_heads=4,image_size=32,intermediate_size=37,patch_size=1,) )
# regular denoising components
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE_ : List[str] = StableUnCLIPImageNormalizer(embedding_dim=_A )
SCREAMING_SNAKE_CASE_ : List[str] = DDPMScheduler(beta_schedule="squaredcos_cap_v2" )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE_ : Dict = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE_ : List[str] = CLIPTextModel(
CLIPTextConfig(
bos_token_id=0,eos_token_id=2,hidden_size=_A,projection_dim=32,intermediate_size=37,layer_norm_eps=1E-05,num_attention_heads=4,num_hidden_layers=5,pad_token_id=1,vocab_size=1000,) )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE_ : Tuple = UNetaDConditionModel(
sample_size=32,in_channels=4,out_channels=4,down_block_types=("CrossAttnDownBlock2D", "DownBlock2D"),up_block_types=("UpBlock2D", "CrossAttnUpBlock2D"),block_out_channels=(32, 64),attention_head_dim=(2, 4),class_embed_type="projection",projection_class_embeddings_input_dim=embedder_projection_dim * 2,cross_attention_dim=_A,layers_per_block=1,upcast_attention=_A,use_linear_projection=_A,)
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE_ : Tuple = DDIMScheduler(
beta_schedule="scaled_linear",beta_start=0.00085,beta_end=0.012,prediction_type="v_prediction",set_alpha_to_one=_A,steps_offset=1,)
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE_ : Any = AutoencoderKL()
SCREAMING_SNAKE_CASE_ : str = {
# image encoding components
"feature_extractor": feature_extractor,
"image_encoder": image_encoder.eval(),
# image noising components
"image_normalizer": image_normalizer.eval(),
"image_noising_scheduler": image_noising_scheduler,
# regular denoising components
"tokenizer": tokenizer,
"text_encoder": text_encoder.eval(),
"unet": unet.eval(),
"scheduler": scheduler,
"vae": vae.eval(),
}
return components
def __UpperCamelCase ( self : int,_A : str,_A : Optional[int]=0,_A : Union[str, Any]=True ):
"""simple docstring"""
if str(_A ).startswith("mps" ):
SCREAMING_SNAKE_CASE_ : int = torch.manual_seed(_A )
else:
SCREAMING_SNAKE_CASE_ : int = torch.Generator(device=_A ).manual_seed(_A )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = floats_tensor((1, 3, 32, 32),rng=random.Random(_A ) ).to(_A )
if pil_image:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = input_image * 0.5 + 0.5
SCREAMING_SNAKE_CASE_ : Union[str, Any] = input_image.clamp(0,1 )
SCREAMING_SNAKE_CASE_ : int = input_image.cpu().permute(0,2,3,1 ).float().numpy()
SCREAMING_SNAKE_CASE_ : Union[str, Any] = DiffusionPipeline.numpy_to_pil(_A )[0]
return {
"prompt": "An anime racoon running a marathon",
"image": input_image,
"generator": generator,
"num_inference_steps": 2,
"output_type": "np",
}
@skip_mps
def __UpperCamelCase ( self : Optional[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = "cpu" # ensure determinism for the device-dependent torch.Generator
SCREAMING_SNAKE_CASE_ : List[str] = self.get_dummy_components()
SCREAMING_SNAKE_CASE_ : Dict = StableUnCLIPImgaImgPipeline(**_A )
SCREAMING_SNAKE_CASE_ : Optional[Any] = sd_pipe.to(_A )
sd_pipe.set_progress_bar_config(disable=_A )
SCREAMING_SNAKE_CASE_ : List[str] = self.get_dummy_inputs(_A )
inputs.update({"image_embeds": None} )
SCREAMING_SNAKE_CASE_ : Dict = sd_pipe(**_A ).images
SCREAMING_SNAKE_CASE_ : List[Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
SCREAMING_SNAKE_CASE_ : Optional[Any] = np.array([0.3872, 0.7224, 0.5601, 0.4741, 0.6872, 0.5814, 0.4636, 0.3867, 0.5078] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def __UpperCamelCase ( self : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[int] = torch_device in ["cpu", "mps"]
self._test_attention_slicing_forward_pass(test_max_difference=_A )
def __UpperCamelCase ( self : Union[str, Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : str = torch_device in ["cpu", "mps"]
self._test_inference_batch_single_identical(test_max_difference=_A )
@unittest.skipIf(
torch_device != "cuda" or not is_xformers_available(),reason="XFormers attention is only available with CUDA and `xformers` installed",)
def __UpperCamelCase ( self : int ):
"""simple docstring"""
self._test_xformers_attention_forwardGenerator_pass(test_max_difference=_A )
@slow
@require_torch_gpu
class a__ ( unittest.TestCase ):
def __UpperCamelCase ( self : str ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __UpperCamelCase ( self : List[str] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[int] = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png" )
SCREAMING_SNAKE_CASE_ : Dict = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_img2img_anime_turtle_fp16.npy" )
SCREAMING_SNAKE_CASE_ : Optional[Any] = StableUnCLIPImgaImgPipeline.from_pretrained(
"fusing/stable-unclip-2-1-l-img2img",torch_dtype=torch.floataa )
pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
# stable unclip will oom when integration tests are run on a V100,
# so turn on memory savings
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
SCREAMING_SNAKE_CASE_ : Tuple = torch.Generator(device="cpu" ).manual_seed(0 )
SCREAMING_SNAKE_CASE_ : Optional[int] = pipe(_A,"anime turle",generator=_A,output_type="np" )
SCREAMING_SNAKE_CASE_ : Any = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(_A,_A )
def __UpperCamelCase ( self : Optional[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png" )
SCREAMING_SNAKE_CASE_ : Tuple = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_h_img2img_anime_turtle_fp16.npy" )
SCREAMING_SNAKE_CASE_ : Tuple = StableUnCLIPImgaImgPipeline.from_pretrained(
"fusing/stable-unclip-2-1-h-img2img",torch_dtype=torch.floataa )
pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
# stable unclip will oom when integration tests are run on a V100,
# so turn on memory savings
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
SCREAMING_SNAKE_CASE_ : Dict = torch.Generator(device="cpu" ).manual_seed(0 )
SCREAMING_SNAKE_CASE_ : Optional[int] = pipe(_A,"anime turle",generator=_A,output_type="np" )
SCREAMING_SNAKE_CASE_ : Optional[int] = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(_A,_A )
def __UpperCamelCase ( self : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png" )
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
SCREAMING_SNAKE_CASE_ : int = StableUnCLIPImgaImgPipeline.from_pretrained(
"fusing/stable-unclip-2-1-h-img2img",torch_dtype=torch.floataa )
SCREAMING_SNAKE_CASE_ : int = pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
SCREAMING_SNAKE_CASE_ : Optional[int] = pipe(
_A,"anime turtle",num_inference_steps=2,output_type="np",)
SCREAMING_SNAKE_CASE_ : List[Any] = torch.cuda.max_memory_allocated()
# make sure that less than 7 GB is allocated
assert mem_bytes < 7 * 10**9
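# The last test above follows the usual CUDA peak-memory recipe: reset the
# allocator statistics, run the workload, then read the high-water mark. A
# hedged, standalone helper capturing that pattern (requires a CUDA device;
# the helper name and structure are illustrative, not part of the test suite):
import torch

def peak_cuda_memory_bytes(fn) -> int:
    """Run fn() and return the peak CUDA memory it allocated, in bytes."""
    torch.cuda.empty_cache()
    torch.cuda.reset_max_memory_allocated()
    torch.cuda.reset_peak_memory_stats()
    fn()
    return torch.cuda.max_memory_allocated()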
| 18 |
def solution(n: int = 4_00_00_00) -> int:
    """Return the sum of the even-valued Fibonacci terms that do not exceed n."""
    fib = [0, 1]
    i = 0
    while fib[i] <= n:
        fib.append(fib[i] + fib[i + 1])
        if fib[i + 2] > n:
            break
        i += 1
    total = 0
    for j in range(len(fib) - 1):
        if fib[j] % 2 == 0:
            total += fib[j]
    return total
if __name__ == "__main__":
print(f'''{solution() = }''')
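# An equivalent constant-memory variant (a hedged addition, not part of the
# original solution). It walks only the even terms, using the identity
# E(k) = 4*E(k-1) + E(k-2), which holds because every third Fibonacci number
# is even (the parity pattern of the sequence repeats odd, odd, even).
def solution_even_steps(n: int = 4_00_00_00) -> int:
    e_prev, e = 2, 8
    total = 0
    while e_prev <= n:
        total += e_prev
        e_prev, e = e, 4 * e + e_prev
    return total

assert solution_even_steps(100) == 2 + 8 + 34  # even Fibonacci terms up to 100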
| 127 | 0 |
"""simple docstring"""
import math
import flax.linen as nn
import jax.numpy as jnp
def get_sinusoidal_embeddings(
    timesteps: jnp.ndarray,
    embedding_dim: int,
    freq_shift: float = 1,
    min_timescale: float = 1,
    max_timescale: float = 1.0e4,
    flip_sin_to_cos: bool = False,
    scale: float = 1.0,
) -> jnp.ndarray:
    """Build sinusoidal timestep embeddings, one row per timestep."""
    assert timesteps.ndim == 1, "Timesteps should be a 1d-array"
    assert embedding_dim % 2 == 0, f"Embedding dimension {embedding_dim} should be even"
    num_timescales = float(embedding_dim // 2)
    log_timescale_increment = math.log(max_timescale / min_timescale) / (num_timescales - freq_shift)
    inv_timescales = min_timescale * jnp.exp(jnp.arange(num_timescales, dtype=jnp.float32) * -log_timescale_increment)
    emb = jnp.expand_dims(timesteps, 1) * jnp.expand_dims(inv_timescales, 0)
    # scale embeddings
    scaled_time = scale * emb
    if flip_sin_to_cos:
        signal = jnp.concatenate([jnp.cos(scaled_time), jnp.sin(scaled_time)], axis=1)
    else:
        signal = jnp.concatenate([jnp.sin(scaled_time), jnp.cos(scaled_time)], axis=1)
    signal = jnp.reshape(signal, [jnp.shape(timesteps)[0], embedding_dim])
    return signal


class FlaxTimestepEmbedding(nn.Module):
    """Learned two-layer MLP applied on top of the sinusoidal timestep embedding."""

    time_embed_dim: int = 32
    dtype: jnp.dtype = jnp.float32

    @nn.compact
    def __call__(self, temb):
        temb = nn.Dense(self.time_embed_dim, dtype=self.dtype, name="linear_1")(temb)
        temb = nn.silu(temb)
        temb = nn.Dense(self.time_embed_dim, dtype=self.dtype, name="linear_2")(temb)
        return temb


class FlaxTimesteps(nn.Module):
    """Wrapper module producing sinusoidal timestep embeddings."""

    dim: int = 32
    flip_sin_to_cos: bool = False
    freq_shift: float = 1

    @nn.compact
    def __call__(self, timesteps):
        return get_sinusoidal_embeddings(
            timesteps, embedding_dim=self.dim, flip_sin_to_cos=self.flip_sin_to_cos, freq_shift=self.freq_shift
        )
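# A small hedged usage check for the helper above (requires jax; the output
# shape follows directly from the reshape at the end of the function):
if __name__ == "__main__":
    emb = get_sinusoidal_embeddings(jnp.arange(4), embedding_dim=8)
    assert emb.shape == (4, 8)  # one row per timestep: sin half, then cos half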
| 353 |
"""simple docstring"""
def check_cycle(graph: dict) -> bool:
    """Return True if the directed graph (an adjacency dict) contains a cycle."""
    # Keep track of all visited nodes
    visited: set[int] = set()
    # To detect a back edge, keep track of vertices currently in the recursion stack
    rec_stk: set[int] = set()
    return any(
        node not in visited and depth_first_search(graph, node, visited, rec_stk)
        for node in graph
    )


def depth_first_search(graph: dict, vertex: int, visited: set, rec_stk: set) -> bool:
    """Recur over vertex's neighbours; a neighbour already on the stack is a back edge."""
    visited.add(vertex)
    rec_stk.add(vertex)
    for node in graph[vertex]:
        if node not in visited:
            if depth_first_search(graph, node, visited, rec_stk):
                return True
        elif node in rec_stk:
            return True
    # The node needs to be removed from recursion stack before function ends
    rec_stk.remove(vertex)
    return False
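# Hedged sanity checks for the functions above: a directed 3-cycle is detected,
# a directed acyclic graph is not.
assert check_cycle({0: [1], 1: [2], 2: [0]}) is True
assert check_cycle({0: [1, 2], 1: [2], 2: []}) is False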
if __name__ == "__main__":
from doctest import testmod
testmod()
| 76 | 0 |
def base16_encode(data: bytes) -> str:
    """Encode bytes as an uppercase base16 (hex) string."""
    return "".join([hex(byte)[2:].zfill(2).upper() for byte in list(data)])


def base16_decode(data: str) -> bytes:
    """Decode an uppercase base16 string back into bytes."""
    # Check data validity, following RFC3548
    # https://www.ietf.org/rfc/rfc3548.txt
    if (len(data) % 2) != 0:
        raise ValueError(
            "Base16 encoded data is invalid:\nData does not have an even number of hex digits."
        )
    # Check the character set - the standard base16 alphabet
    # is uppercase according to RFC3548 section 6
    if not set(data) <= set("0123456789ABCDEF"):
        raise ValueError(
            "Base16 encoded data is invalid:\nData is not uppercase hex or it contains invalid characters."
        )
    # For every two hexadecimal digits (= a byte), turn it into an integer.
    # Then, string the result together into bytes, and return it.
    return bytes(int(data[i] + data[i + 1], 16) for i in range(0, len(data), 2))
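# Hedged round-trip check for the two codecs above (values computed from the
# ASCII table; uppercase hex per RFC 3548 section 6):
assert base16_encode(b"Hello World!") == "48656C6C6F20576F726C6421"
assert base16_decode("48656C6C6F20576F726C6421") == b"Hello World!"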
if __name__ == "__main__":
import doctest
doctest.testmod()
| 326 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SwiftFormerConfig,
SwiftFormerForImageClassification,
ViTImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
_UpperCamelCase = logging.get_logger(__name__)
_UpperCamelCase = torch.device('''cpu''')
def lowerCAmelCase__( ) -> Any:
__snake_case : List[Any] = "http://images.cocodataset.org/val2017/000000039769.jpg"
__snake_case : Optional[int] = Image.open(requests.get(lowercase , stream=lowercase ).raw )
return im
def lowerCAmelCase__( lowercase : Dict ) -> List[Any]:
if swiftformer_name == "swiftformer_xs":
return torch.tensor([-2.1_703E00, 2.1_107E00, -2.0_811E00, 8.8_685E-01, 2.4_360E-01] )
elif swiftformer_name == "swiftformer_s":
return torch.tensor([3.9_636E-01, 2.3_478E-01, -1.6_963E00, -1.7_381E00, -8.6_337E-01] )
elif swiftformer_name == "swiftformer_l1":
return torch.tensor([-4.2_768E-01, -4.7_429E-01, -1.0_897E00, -1.0_248E00, 3.5_523E-02] )
elif swiftformer_name == "swiftformer_l3":
return torch.tensor([-2.5_330E-01, 2.4_211E-01, -6.0_185E-01, -8.2_789E-01, -6.0_446E-02] )
def lowerCAmelCase__( lowercase : Tuple , lowercase : Union[str, Any] , lowercase : Union[str, Any] ) -> List[Any]:
__snake_case : List[Any] = dct.pop(lowercase )
__snake_case : List[Any] = val
def lowerCAmelCase__( lowercase : Union[str, Any] ) -> Tuple:
__snake_case : Optional[Any] = []
for k in state_dict.keys():
__snake_case : Union[str, Any] = k
if ".pwconv" in k:
__snake_case : Any = k_new.replace(".pwconv" , ".point_wise_conv" )
if ".dwconv" in k:
__snake_case : List[Any] = k_new.replace(".dwconv" , ".depth_wise_conv" )
if ".Proj." in k:
__snake_case : Optional[int] = k_new.replace(".Proj." , ".proj." )
if "patch_embed" in k_new:
__snake_case : int = k_new.replace("patch_embed" , "swiftformer.patch_embed.patch_embedding" )
if "network" in k_new:
__snake_case : int = k_new.split("." )
if ls[2].isdigit():
__snake_case : List[Any] = "swiftformer.encoder.network." + ls[1] + ".blocks." + ls[2] + "." + ".".join(ls[3:] )
else:
__snake_case : Optional[int] = k_new.replace("network" , "swiftformer.encoder.network" )
rename_keys.append((k, k_new) )
return rename_keys
@torch.no_grad()
def lowerCAmelCase__( lowercase : List[Any] , lowercase : Optional[Any] , lowercase : List[str] ) -> Union[str, Any]:
__snake_case : List[str] = SwiftFormerConfig()
# dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
__snake_case : Tuple = 1000
__snake_case : Any = "huggingface/label-files"
__snake_case : int = "imagenet-1k-id2label.json"
__snake_case : Dict = json.load(open(hf_hub_download(lowercase , lowercase , repo_type="dataset" ) , "r" ) )
__snake_case : str = {int(lowercase ): v for k, v in idalabel.items()}
__snake_case : int = idalabel
__snake_case : Optional[int] = {v: k for k, v in idalabel.items()}
# size of the architecture
if swiftformer_name == "swiftformer_xs":
__snake_case : Optional[Any] = [3, 3, 6, 4]
__snake_case : Optional[int] = [48, 56, 112, 220]
elif swiftformer_name == "swiftformer_s":
__snake_case : List[str] = [3, 3, 9, 6]
__snake_case : Optional[Any] = [48, 64, 168, 224]
elif swiftformer_name == "swiftformer_l1":
__snake_case : Optional[int] = [4, 3, 10, 5]
__snake_case : Dict = [48, 96, 192, 384]
elif swiftformer_name == "swiftformer_l3":
__snake_case : str = [4, 4, 12, 6]
__snake_case : Optional[Any] = [64, 128, 320, 512]
# load state_dict of original model, remove and rename some keys
if original_ckpt:
if original_ckpt.startswith("https" ):
__snake_case : Optional[Any] = torch.hub.load_state_dict_from_url(lowercase , map_location="cpu" , check_hash=lowercase )
else:
__snake_case : Tuple = torch.load(lowercase , map_location="cpu" )
__snake_case : Optional[int] = checkpoint
__snake_case : Any = create_rename_keys(lowercase )
for rename_key_src, rename_key_dest in rename_keys:
rename_key(lowercase , lowercase , lowercase )
# load HuggingFace model
__snake_case : Tuple = SwiftFormerForImageClassification(lowercase ).eval()
hf_model.load_state_dict(lowercase )
# prepare test inputs
__snake_case : Optional[Any] = prepare_img()
__snake_case : str = ViTImageProcessor.from_pretrained("preprocessor_config" )
__snake_case : Optional[int] = processor(images=lowercase , return_tensors="pt" )
# compare outputs from both models
__snake_case : str = get_expected_output(lowercase )
__snake_case : Optional[int] = hf_model(inputs["pixel_values"] ).logits
assert hf_logits.shape == torch.Size([1, 1000] )
assert torch.allclose(hf_logits[0, 0:5] , lowercase , atol=1E-3 )
Path(lowercase ).mkdir(exist_ok=lowercase )
print(f"""Saving model {swiftformer_name} to {pytorch_dump_folder_path}""" )
hf_model.save_pretrained(lowercase )
if __name__ == "__main__":
_UpperCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--swiftformer_name''',
default='''swiftformer_xs''',
choices=['''swiftformer_xs''', '''swiftformer_s''', '''swiftformer_l1''', '''swiftformer_l3'''],
type=str,
help='''Name of the SwiftFormer model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default='''./converted_outputs/''',
type=str,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument('''--original_ckpt''', default=None, type=str, help='''Path to the original model checkpoint.''')
_UpperCamelCase = parser.parse_args()
convert_swiftformer_checkpoint(args.swiftformer_name, args.pytorch_dump_folder_path, args.original_ckpt)
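# The conversion above boils down to popping each checkpoint key and re-inserting
# its value under the Hugging Face name. A self-contained toy version of that
# mechanic (a hedged illustration; real state dicts hold tensors, not ints):
def rename_state_dict_key(state_dict: dict, old: str, new: str) -> None:
    state_dict[new] = state_dict.pop(old)

toy = {"patch_embed.proj.weight": 0}
rename_state_dict_key(toy, "patch_embed.proj.weight", "swiftformer.patch_embed.patch_embedding.proj.weight")
assert "swiftformer.patch_embed.patch_embedding.proj.weight" in toy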
| 326 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase_ = logging.get_logger(__name__)
lowerCamelCase_ = {
'''transfo-xl-wt103''': '''https://huggingface.co/transfo-xl-wt103/resolve/main/config.json''',
}
class __lowerCamelCase ( __snake_case ):
lowerCamelCase_ : str = 'transfo-xl'
lowerCamelCase_ : Union[str, Any] = ['mems']
lowerCamelCase_ : Any = {
'n_token': 'vocab_size',
'hidden_size': 'd_model',
'num_attention_heads': 'n_head',
'num_hidden_layers': 'n_layer',
}
def __init__( self , lowerCamelCase=267735 , lowerCamelCase=[20000, 40000, 200000] , lowerCamelCase=1024 , lowerCamelCase=1024 , lowerCamelCase=16 , lowerCamelCase=64 , lowerCamelCase=4096 , lowerCamelCase=4 , lowerCamelCase=False , lowerCamelCase=18 , lowerCamelCase=1600 , lowerCamelCase=1000 , lowerCamelCase=True , lowerCamelCase=True , lowerCamelCase=0 , lowerCamelCase=-1 , lowerCamelCase=True , lowerCamelCase=0.1 , lowerCamelCase=0.0 , lowerCamelCase=True , lowerCamelCase="normal" , lowerCamelCase=0.01 , lowerCamelCase=0.01 , lowerCamelCase=0.02 , lowerCamelCase=1e-5 , lowerCamelCase=0 , **lowerCamelCase , ) -> Any:
snake_case_ = vocab_size
snake_case_ = []
self.cutoffs.extend(lowerCamelCase )
if proj_share_all_but_first:
snake_case_ = [False] + [True] * len(self.cutoffs )
else:
snake_case_ = [False] + [False] * len(self.cutoffs )
snake_case_ = d_model
snake_case_ = d_embed
snake_case_ = d_head
snake_case_ = d_inner
snake_case_ = div_val
snake_case_ = pre_lnorm
snake_case_ = n_layer
snake_case_ = n_head
snake_case_ = mem_len
snake_case_ = same_length
snake_case_ = attn_type
snake_case_ = clamp_len
snake_case_ = sample_softmax
snake_case_ = adaptive
snake_case_ = dropout
snake_case_ = dropatt
snake_case_ = untie_r
snake_case_ = init
snake_case_ = init_range
snake_case_ = proj_init_std
snake_case_ = init_std
snake_case_ = layer_norm_epsilon
super().__init__(eos_token_id=lowerCamelCase , **lowerCamelCase )
@property
def lowerCAmelCase_ ( self ) -> Any:
# Message copied from Transformer-XL documentation
logger.info(f'''The model {self.model_type} is one of the few models that has no sequence length limit.''' )
return -1
@max_position_embeddings.setter
def lowerCAmelCase_ ( self , lowerCamelCase ) -> List[Any]:
# Message copied from Transformer-XL documentation
raise NotImplementedError(
f'''The model {self.model_type} is one of the few models that has no sequence length limit.''' ) | 34 |
import unittest
from transformers import GPTNeoXJapaneseConfig, is_torch_available
from transformers.models.gpt_neox_japanese.tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import GPTNeoXJapaneseForCausalLM, GPTNeoXJapaneseModel
class __lowerCamelCase :
def __init__( self , lowerCamelCase , lowerCamelCase=13 , lowerCamelCase=7 , lowerCamelCase=True , lowerCamelCase=True , lowerCamelCase=True , lowerCamelCase=True , lowerCamelCase=99 , lowerCamelCase=32 , lowerCamelCase=5 , lowerCamelCase=4 , lowerCamelCase=4 , lowerCamelCase="gelu" , lowerCamelCase=0.0 , lowerCamelCase=0.1 , lowerCamelCase=True , lowerCamelCase=512 , lowerCamelCase=16 , lowerCamelCase=2 , lowerCamelCase=0.02 , lowerCamelCase=3 , lowerCamelCase=4 , lowerCamelCase=None , ) -> Union[str, Any]:
snake_case_ = parent
snake_case_ = batch_size
snake_case_ = seq_length
snake_case_ = is_training
snake_case_ = use_input_mask
snake_case_ = use_token_type_ids
snake_case_ = use_labels
snake_case_ = vocab_size
snake_case_ = hidden_size
snake_case_ = num_hidden_layers
snake_case_ = num_attention_heads
snake_case_ = intermediate_multiple_size
snake_case_ = hidden_act
snake_case_ = hidden_dropout
snake_case_ = attention_dropout
snake_case_ = weight_tying
snake_case_ = max_position_embeddings
snake_case_ = type_vocab_size
snake_case_ = type_sequence_label_size
snake_case_ = initializer_range
snake_case_ = num_labels
snake_case_ = num_choices
snake_case_ = scope
def lowerCAmelCase_ ( self ) -> str:
snake_case_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
snake_case_ = None
if self.use_input_mask:
snake_case_ = random_attention_mask([self.batch_size, self.seq_length] )
snake_case_ = None
if self.use_labels:
snake_case_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
snake_case_ = self.get_config()
return config, input_ids, input_mask, token_labels
def lowerCAmelCase_ ( self ) -> Optional[int]:
return GPTNeoXJapaneseConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_multiple_size=self.intermediate_multiple_size , hidden_act=self.hidden_act , hidden_dropout=self.hidden_dropout , attention_dropout=self.attention_dropout , weight_tying=self.weight_tying , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=lowerCamelCase , initializer_range=self.initializer_range , )
def lowerCAmelCase_ ( self ) -> int:
snake_case_ , snake_case_ , snake_case_ , snake_case_ = self.prepare_config_and_inputs()
snake_case_ = True
return config, input_ids, input_mask, token_labels
def lowerCAmelCase_ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase ) -> Optional[int]:
snake_case_ = GPTNeoXJapaneseModel(config=lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
snake_case_ = model(lowerCamelCase , attention_mask=lowerCamelCase )
snake_case_ = model(lowerCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowerCAmelCase_ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase ) -> Optional[int]:
snake_case_ = True
snake_case_ = GPTNeoXJapaneseModel(lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
snake_case_ = model(lowerCamelCase , attention_mask=lowerCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowerCAmelCase_ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ) -> Optional[Any]:
snake_case_ = GPTNeoXJapaneseForCausalLM(config=lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
snake_case_ = model(lowerCamelCase , attention_mask=lowerCamelCase , labels=lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowerCAmelCase_ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase ) -> Tuple:
snake_case_ = True
snake_case_ = GPTNeoXJapaneseForCausalLM(config=lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
# first forward pass
snake_case_ = model(lowerCamelCase , attention_mask=lowerCamelCase , use_cache=lowerCamelCase )
snake_case_ = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
snake_case_ = ids_tensor((self.batch_size, 3) , config.vocab_size )
snake_case_ = ids_tensor((self.batch_size, 3) , vocab_size=2 )
# append to next input_ids and
snake_case_ = torch.cat([input_ids, next_tokens] , dim=-1 )
snake_case_ = torch.cat([input_mask, next_mask] , dim=-1 )
snake_case_ = model(lowerCamelCase , attention_mask=lowerCamelCase , output_hidden_states=lowerCamelCase )
snake_case_ = output_from_no_past["""hidden_states"""][0]
snake_case_ = model(
lowerCamelCase , attention_mask=lowerCamelCase , past_key_values=lowerCamelCase , output_hidden_states=lowerCamelCase , )["""hidden_states"""][0]
# select random slice
snake_case_ = ids_tensor((1,) , output_from_past.shape[-1] ).item()
snake_case_ = output_from_no_past[:, -3:, random_slice_idx].detach()
snake_case_ = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(lowerCamelCase , lowerCamelCase , atol=1e-3 ) )
def lowerCAmelCase_ ( self ) -> Union[str, Any]:
snake_case_ = self.prepare_config_and_inputs()
snake_case_ , snake_case_ , snake_case_ , snake_case_ = config_and_inputs
snake_case_ = {"""input_ids""": input_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_torch
class __lowerCamelCase ( __snake_case , __snake_case , unittest.TestCase ):
lowerCamelCase_ : Any = (GPTNeoXJapaneseModel, GPTNeoXJapaneseForCausalLM) if is_torch_available() else ()
lowerCamelCase_ : str = (GPTNeoXJapaneseForCausalLM,) if is_torch_available() else ()
lowerCamelCase_ : Any = (
{'feature-extraction': GPTNeoXJapaneseModel, 'text-generation': GPTNeoXJapaneseForCausalLM}
if is_torch_available()
else {}
)
lowerCamelCase_ : Tuple = False
lowerCamelCase_ : Dict = False
lowerCamelCase_ : Tuple = False
lowerCamelCase_ : Optional[int] = False
def lowerCAmelCase_ ( self ) -> Tuple:
snake_case_ = GPTNeoXJapaneseModelTester(self )
snake_case_ = ConfigTester(self , config_class=lowerCamelCase , hidden_size=37 )
def lowerCAmelCase_ ( self ) -> str:
self.config_tester.run_common_tests()
def lowerCAmelCase_ ( self ) -> str:
snake_case_ , snake_case_ , snake_case_ , snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(lowerCamelCase , lowerCamelCase , lowerCamelCase )
def lowerCAmelCase_ ( self ) -> Tuple:
snake_case_ , snake_case_ , snake_case_ , snake_case_ = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(lowerCamelCase , lowerCamelCase , lowerCamelCase )
def lowerCAmelCase_ ( self ) -> Tuple:
# This regression test was failing with PyTorch < 1.3
snake_case_ , snake_case_ , snake_case_ , snake_case_ = self.model_tester.prepare_config_and_inputs_for_decoder()
snake_case_ = None
self.model_tester.create_and_check_model_as_decoder(lowerCamelCase , lowerCamelCase , lowerCamelCase )
def lowerCAmelCase_ ( self ) -> Dict:
snake_case_ , snake_case_ , snake_case_ , snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_past_large_inputs(lowerCamelCase , lowerCamelCase , lowerCamelCase )
def lowerCAmelCase_ ( self ) -> Dict:
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_causal_lm(*lowerCamelCase )
@slow
def lowerCAmelCase_ ( self ) -> Any:
snake_case_ = """abeja/gpt-neox-japanese-2.7b"""
snake_case_ = ["""データサイエンティストとは、""", """100年後に必要とされる会社は、""", """フルリモートの環境で働くために必要なことは、""", """国境の長いトンネルを抜けると""", """美味しい日本食といえば、"""]
snake_case_ = [
"""データサイエンティストとは、データを分析し、ビジネスに役立つ知見を導き出す専門家のことです。""",
"""100年後に必要とされる会社は、「人」が中心の会社です。""",
"""フルリモートの環境で働くために必要なことは、「自分の時間をコントロールする」ことです。""",
"""国境の長いトンネルを抜けると、そこは雪国だった。""",
"""美味しい日本食といえば、やっぱりお寿司ですよね。""",
]
snake_case_ = GPTNeoXJapaneseTokenizer.from_pretrained(lowerCamelCase )
snake_case_ = GPTNeoXJapaneseForCausalLM.from_pretrained(lowerCamelCase )
snake_case_ = []
for prompt in prompts:
snake_case_ = tokenizer(lowerCamelCase , return_tensors="""pt""" ).input_ids
snake_case_ = model.generate(lowerCamelCase , max_length=50 )
snake_case_ = tokenizer.batch_decode(lowerCamelCase , skip_special_tokens=lowerCamelCase )
predicted_outputs += generated_string
self.assertListEqual(lowerCamelCase , lowerCamelCase ) | 34 | 1 |
"""simple docstring"""
import d4rl  # noqa
import gym
import tqdm
from diffusers.experimental import ValueGuidedRLPipeline
lowerCamelCase_ = {
"n_samples": 6_4,
"horizon": 3_2,
"num_inference_steps": 2_0,
"n_guide_steps": 2, # can set to 0 for faster sampling, does not use value network
"scale_grad_by_std": True,
"scale": 0.1,
"eta": 0.0,
"t_grad_cutoff": 2,
"device": "cpu",
}
if __name__ == "__main__":
lowerCamelCase_ = "hopper-medium-v2"
lowerCamelCase_ = gym.make(env_name)
lowerCamelCase_ = ValueGuidedRLPipeline.from_pretrained(
"bglick13/hopper-medium-v2-value-function-hor32",
env=env,
)
env.seed(0)
lowerCamelCase_ = env.reset()
lowerCamelCase_ = 0
lowerCamelCase_ = 0
lowerCamelCase_ = 1_0_0_0
lowerCamelCase_ = [obs.copy()]
try:
for t in tqdm.tqdm(range(T)):
# call the policy
lowerCamelCase_ = pipeline(obs, planning_horizon=3_2)
# execute action in environment
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = env.step(denorm_actions)
lowerCamelCase_ = env.get_normalized_score(total_reward)
# update return
total_reward += reward
total_score += score
print(
f'Step: {t}, Reward: {reward}, Total Reward: {total_reward}, Score: {score}, Total Score:'
f' {total_score}'
)
# save observations for rendering
rollout.append(next_observation.copy())
lowerCamelCase_ = next_observation
except KeyboardInterrupt:
pass
print(f'Total reward: {total_reward}') | 191 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowerCamelCase_ = {
"configuration_blip_2": [
"BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP",
"Blip2Config",
"Blip2QFormerConfig",
"Blip2VisionConfig",
],
"processing_blip_2": ["Blip2Processor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase_ = [
"BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST",
"Blip2Model",
"Blip2QFormerModel",
"Blip2PreTrainedModel",
"Blip2ForConditionalGeneration",
"Blip2VisionModel",
]
if TYPE_CHECKING:
from .configuration_blip_a import (
BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlipaConfig,
BlipaQFormerConfig,
BlipaVisionConfig,
)
from .processing_blip_a import BlipaProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blip_a import (
BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST,
BlipaForConditionalGeneration,
BlipaModel,
BlipaPreTrainedModel,
BlipaQFormerModel,
BlipaVisionModel,
)
else:
import sys
lowerCamelCase_ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) | 191 | 1 |
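# The module above uses the lazy-import pattern: public names are declared in
# `_import_structure`, type checkers see real imports under TYPE_CHECKING, and
# at runtime the module is swapped for a proxy that imports submodules on first
# attribute access. A minimal hedged sketch of the idea (simplified: the real
# `_LazyModule` also handles plain module attributes, `__dir__`, and pickling):
import importlib
import types

class MinimalLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._import_structure = import_structure

    def __getattr__(self, attr):
        for submodule, names in self._import_structure.items():
            if attr in names:
                module = importlib.import_module(f".{submodule}", self.__name__)
                return getattr(module, attr)
        raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")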
'''simple docstring'''
from argparse import ArgumentParser, Namespace
from typing import Any, List, Optional
from ..pipelines import Pipeline, get_supported_tasks, pipeline
from ..utils import logging
from . import BaseTransformersCLICommand
try:
from fastapi import Body, FastAPI, HTTPException
from fastapi.routing import APIRoute
from pydantic import BaseModel
from starlette.responses import JSONResponse
from uvicorn import run
    _serve_dependencies_installed = True
except (ImportError, AttributeError):
_lowerCamelCase : Optional[Any] = object
    def Body(*args, **kwargs) -> None:
        """Fallback stub for fastapi.Body when FastAPI is not installed."""
        pass

    _serve_dependencies_installed = False
_lowerCamelCase : List[str] = logging.get_logger('transformers-cli/serving')
def __a ( UpperCAmelCase ) ->Optional[Any]:
"""simple docstring"""
A = pipeline(
task=args.task , model=args.model if args.model else None , config=args.config , tokenizer=args.tokenizer , device=args.device , )
return ServeCommand(UpperCAmelCase , args.host , args.port , args.workers )
class __UpperCAmelCase ( A__ ):
'''simple docstring'''
__lowerCAmelCase = 42
class __UpperCAmelCase ( A__ ):
'''simple docstring'''
__lowerCAmelCase = 42
__lowerCAmelCase = 42
class __UpperCAmelCase ( A__ ):
'''simple docstring'''
__lowerCAmelCase = 42
class __UpperCAmelCase ( A__ ):
'''simple docstring'''
__lowerCAmelCase = 42
class __UpperCAmelCase ( A__ ):
'''simple docstring'''
@staticmethod
def A (_lowerCAmelCase : ArgumentParser ):
A = parser.add_parser(
"""serve""" , help="""CLI tool to run inference requests through REST and GraphQL endpoints.""" )
serve_parser.add_argument(
"""--task""" , type=_lowerCAmelCase , choices=get_supported_tasks() , help="""The task to run the pipeline on""" , )
serve_parser.add_argument("""--host""" , type=_lowerCAmelCase , default="""localhost""" , help="""Interface the server will listen on.""" )
serve_parser.add_argument("""--port""" , type=_lowerCAmelCase , default=8888 , help="""Port the serving will listen to.""" )
serve_parser.add_argument("""--workers""" , type=_lowerCAmelCase , default=1 , help="""Number of http workers""" )
serve_parser.add_argument("""--model""" , type=_lowerCAmelCase , help="""Model's name or path to stored model.""" )
serve_parser.add_argument("""--config""" , type=_lowerCAmelCase , help="""Model's config name or path to stored model.""" )
serve_parser.add_argument("""--tokenizer""" , type=_lowerCAmelCase , help="""Tokenizer name to use.""" )
serve_parser.add_argument(
"""--device""" , type=_lowerCAmelCase , default=-1 , help="""Indicate the device to run onto, -1 indicates CPU, >= 0 indicates GPU (default: -1)""" , )
serve_parser.set_defaults(func=_lowerCAmelCase )
def __init__(self : List[Any] , _lowerCAmelCase : Pipeline , _lowerCAmelCase : str , _lowerCAmelCase : int , _lowerCAmelCase : int ):
A = pipeline
A = host
A = port
A = workers
if not _serve_dependencies_installed:
raise RuntimeError(
"""Using serve command requires FastAPI and uvicorn. """
"""Please install transformers with [serving]: pip install \"transformers[serving]\"."""
"""Or install FastAPI and uvicorn separately.""" )
else:
logger.info(F"""Serving model over {host}:{port}""" )
A = FastAPI(
routes=[
APIRoute(
"""/""" , self.model_info , response_model=_lowerCAmelCase , response_class=_lowerCAmelCase , methods=["""GET"""] , ),
APIRoute(
"""/tokenize""" , self.tokenize , response_model=_lowerCAmelCase , response_class=_lowerCAmelCase , methods=["""POST"""] , ),
APIRoute(
"""/detokenize""" , self.detokenize , response_model=_lowerCAmelCase , response_class=_lowerCAmelCase , methods=["""POST"""] , ),
APIRoute(
"""/forward""" , self.forward , response_model=_lowerCAmelCase , response_class=_lowerCAmelCase , methods=["""POST"""] , ),
] , timeout=600 , )
def A (self : int ):
run(self._app , host=self.host , port=self.port , workers=self.workers )
def A (self : List[Any] ):
return ServeModelInfoResult(infos=vars(self._pipeline.model.config ) )
def A (self : int , _lowerCAmelCase : str = Body(_lowerCAmelCase , embed=_lowerCAmelCase ) , _lowerCAmelCase : bool = Body(_lowerCAmelCase , embed=_lowerCAmelCase ) ):
try:
A = self._pipeline.tokenizer.tokenize(_lowerCAmelCase )
if return_ids:
A = self._pipeline.tokenizer.convert_tokens_to_ids(_lowerCAmelCase )
return ServeTokenizeResult(tokens=_lowerCAmelCase , tokens_ids=_lowerCAmelCase )
else:
return ServeTokenizeResult(tokens=_lowerCAmelCase )
except Exception as e:
raise HTTPException(status_code=500 , detail={"""model""": """""", """error""": str(_lowerCAmelCase )} )
def A (self : Any , _lowerCAmelCase : List[int] = Body(_lowerCAmelCase , embed=_lowerCAmelCase ) , _lowerCAmelCase : bool = Body(_lowerCAmelCase , embed=_lowerCAmelCase ) , _lowerCAmelCase : bool = Body(_lowerCAmelCase , embed=_lowerCAmelCase ) , ):
try:
A = self._pipeline.tokenizer.decode(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
return ServeDeTokenizeResult(model="""""" , text=_lowerCAmelCase )
except Exception as e:
raise HTTPException(status_code=500 , detail={"""model""": """""", """error""": str(_lowerCAmelCase )} )
async def A (self : str , _lowerCAmelCase : Optional[int]=Body(_lowerCAmelCase , embed=_lowerCAmelCase ) ):
# Check we don't have empty string
if len(_lowerCAmelCase ) == 0:
return ServeForwardResult(output=[] , attention=[] )
try:
# Forward through the model
A = self._pipeline(_lowerCAmelCase )
return ServeForwardResult(output=_lowerCAmelCase )
except Exception as e:
raise HTTPException(500 , {"""error""": str(_lowerCAmelCase )} )
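# Once running (e.g. `transformers-cli serve --task text-classification`), the
# routes registered above can be exercised over HTTP. A hedged sketch with
# `requests`, assuming the upstream FastAPI field names `text_input` and
# `return_ids` and the default host/port set in register_subcommand:
import requests

resp = requests.post(
    "http://localhost:8888/tokenize",
    json={"text_input": "Hello world", "return_ids": True},
)
print(resp.json())  # -> {"tokens": [...], "tokens_ids": [...]}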
| 337 |
'''simple docstring'''
import heapq as hq
import math
from collections.abc import Iterator
class Vertex:
    """A graph vertex: an id, a key/parent for Prim's algorithm, and weighted edges."""

    def __init__(self, id_):
        self.id = str(id_)
        self.key = None
        self.pi = None
        self.neighbors = []
        self.edges = {}  # {vertex:distance}

    def __lt__(self, other):
        return self.key < other.key

    def __repr__(self):
        return self.id

    def add_neighbor(self, vertex):
        self.neighbors.append(vertex)

    def add_edge(self, vertex, weight):
        self.edges[vertex.id] = weight


def connect(graph, a, b, edge):
    # add the neighbors:
    graph[a - 1].add_neighbor(graph[b - 1])
    graph[b - 1].add_neighbor(graph[a - 1])
    # add the edges:
    graph[a - 1].add_edge(graph[b - 1], edge)
    graph[b - 1].add_edge(graph[a - 1], edge)


def prim(graph: list, root: Vertex) -> list:
    """Prim's algorithm with a linear minimum scan; returns the MST edge list."""
    a = []
    for u in graph:
        u.key = math.inf
        u.pi = None
    root.key = 0
    q = graph[:]
    while q:
        u = min(q)
        q.remove(u)
        for v in u.neighbors:
            if (v in q) and (u.edges[v.id] < v.key):
                v.pi = u
                v.key = u.edges[v.id]
    for i in range(1, len(graph)):
        a.append((int(graph[i].id) + 1, int(graph[i].pi.id) + 1))
    return a


def prim_heap(graph: list, root: Vertex) -> Iterator[tuple]:
    """Prim's algorithm backed by a binary heap; yields MST edges lazily."""
    for u in graph:
        u.key = math.inf
        u.pi = None
    root.key = 0
    h = list(graph)
    hq.heapify(h)
    while h:
        u = hq.heappop(h)
        for v in u.neighbors:
            if (v in h) and (u.edges[v.id] < v.key):
                v.pi = u
                v.key = u.edges[v.id]
                hq.heapify(h)
    for i in range(1, len(graph)):
        yield (int(graph[i].id) + 1, int(graph[i].pi.id) + 1)


def test_vector() -> None:
    """simple docstring"""
if __name__ == "__main__":
import doctest
doctest.testmod()
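# A hedged demonstration of the functions above on a weighted triangle; from
# vertex 1 the MST keeps the 10- and 1-weight edges, reported as
# (child, parent) pairs with 1-based ids.
if __name__ == "__main__":
    g = [Vertex(i) for i in range(3)]
    connect(g, 1, 2, 15)
    connect(g, 1, 3, 10)
    connect(g, 2, 3, 1)
    assert prim(g, g[0]) == [(2, 3), (3, 1)]
    assert list(prim_heap(g, g[0])) == [(2, 3), (3, 1)]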
| 337 | 1 |
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.speecht5 import SpeechT5ForTextToSpeech, SpeechT5HifiGan, SpeechT5Processor
from ..utils import is_datasets_available
from .base import PipelineTool
if is_datasets_available():
from datasets import load_dataset
class TextToSpeechTool(PipelineTool):
    default_checkpoint = "microsoft/speecht5_tts"
    description = (
        "This is a tool that reads an English text out loud. It takes an input named `text` which should contain the "
        "text to read (in English) and returns a waveform object containing the sound."
    )
    name = "text_reader"
    pre_processor_class = SpeechT5Processor
    model_class = SpeechT5ForTextToSpeech
    post_processor_class = SpeechT5HifiGan

    inputs = ["text"]
    outputs = ["audio"]

    def setup(self):
        if self.post_processor is None:
            self.post_processor = "microsoft/speecht5_hifigan"
        super().setup()

    def encode(self, text, speaker_embeddings=None):
        inputs = self.pre_processor(text=text, return_tensors="pt", truncation=True)

        if speaker_embeddings is None:
            if not is_datasets_available():
                raise ImportError("Datasets needs to be installed if not passing speaker embeddings.")

            embeddings_dataset = load_dataset("Matthijs/cmu-arctic-xvectors", split="validation")
            speaker_embeddings = torch.tensor(embeddings_dataset[7305]["xvector"]).unsqueeze(0)

        return {"input_ids": inputs["input_ids"], "speaker_embeddings": speaker_embeddings}

    def forward(self, inputs):
        with torch.no_grad():
            return self.model.generate_speech(**inputs)

    def decode(self, outputs):
        with torch.no_grad():
            return self.post_processor(outputs).cpu().detach()
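# A minimal usage sketch (assumption: an environment with the agents/tools API
# of transformers; the default speaker embedding is fetched from the
# Matthijs/cmu-arctic-xvectors dataset on first use):
#
#   tool = TextToSpeechTool()
#   waveform = tool("Hello, this text will be read out loud.")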
| 40 |
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..models.auto import AutoProcessor
from ..models.vision_encoder_decoder import VisionEncoderDecoderModel
from ..utils import is_vision_available
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class DocumentQuestionAnsweringTool(PipelineTool):
    default_checkpoint = "naver-clova-ix/donut-base-finetuned-docvqa"
    description = (
        "This is a tool that answers a question about a document (pdf). It takes an input named `document` which "
        "should be the document containing the information, as well as a `question` that is the question about the "
        "document. It returns a text that contains the answer to the question."
    )
    name = "document_qa"
    pre_processor_class = AutoProcessor
    model_class = VisionEncoderDecoderModel

    inputs = ["image", "text"]
    outputs = ["text"]

    def __init__(self, *args, **kwargs):
        if not is_vision_available():
            raise ValueError("Pillow must be installed to use the DocumentQuestionAnsweringTool.")
        super().__init__(*args, **kwargs)

    def encode(self, document: "Image", question: str):
        task_prompt = "<s_docvqa><s_question>{user_input}</s_question><s_answer>"
        prompt = task_prompt.replace("{user_input}", question)
        decoder_input_ids = self.pre_processor.tokenizer(
            prompt, add_special_tokens=False, return_tensors="pt"
        ).input_ids
        pixel_values = self.pre_processor(document, return_tensors="pt").pixel_values

        return {"decoder_input_ids": decoder_input_ids, "pixel_values": pixel_values}

    def forward(self, inputs):
        return self.model.generate(
            inputs["pixel_values"].to(self.device),
            decoder_input_ids=inputs["decoder_input_ids"].to(self.device),
            max_length=self.model.decoder.config.max_position_embeddings,
            early_stopping=True,
            pad_token_id=self.pre_processor.tokenizer.pad_token_id,
            eos_token_id=self.pre_processor.tokenizer.eos_token_id,
            use_cache=True,
            num_beams=1,
            bad_words_ids=[[self.pre_processor.tokenizer.unk_token_id]],
            return_dict_in_generate=True,
        ).sequences

    def decode(self, outputs):
        sequence = self.pre_processor.batch_decode(outputs)[0]
        sequence = sequence.replace(self.pre_processor.tokenizer.eos_token, "")
        sequence = sequence.replace(self.pre_processor.tokenizer.pad_token, "")
        sequence = re.sub(r"<.*?>", "", sequence, count=1).strip()  # remove first task start token
        sequence = self.pre_processor.token2json(sequence)

        return sequence["answer"]
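# A minimal usage sketch (assumption: `document` is a PIL.Image of a rendered
# page; Pillow must be installed):
#
#   from PIL import Image
#   tool = DocumentQuestionAnsweringTool()
#   answer = tool(document=Image.open("invoice.png"), question="What is the total?")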
| 40 | 1 |
"""simple docstring"""
import warnings
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

MVP_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "RUCAIBox/mvp": "https://huggingface.co/RUCAIBox/mvp/resolve/main/config.json",
}
class MvpConfig(PretrainedConfig):
    model_type = "mvp"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=50267,
        max_position_embeddings=1024,
        encoder_layers=12,
        encoder_ffn_dim=4096,
        encoder_attention_heads=16,
        decoder_layers=12,
        decoder_ffn_dim=4096,
        decoder_attention_heads=16,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        activation_function="gelu",
        d_model=1024,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        classifier_dropout=0.0,
        scale_embedding=False,
        use_cache=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        is_encoder_decoder=True,
        decoder_start_token_id=2,
        forced_eos_token_id=2,
        use_prompt=False,
        prompt_length=100,
        prompt_mid_dim=800,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.classifier_dropout = classifier_dropout
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.use_prompt = use_prompt
        self.prompt_length = prompt_length
        self.prompt_mid_dim = prompt_mid_dim

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            **kwargs,
        )

        if self.forced_bos_token_id is None and kwargs.get("force_bos_token_to_be_generated", False):
            self.forced_bos_token_id = self.bos_token_id
            warnings.warn(
                f"Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. "
                "The config can simply be saved and uploaded again to be fixed."
            )
| 358 |
import argparse
import json
import os

import fairseq
import torch
from torch import nn

from transformers import (
    Speech2Text2Config,
    Speech2Text2ForCausalLM,
    Speech2Text2Tokenizer,
    SpeechEncoderDecoderConfig,
    SpeechEncoderDecoderModel,
    Wav2Vec2Config,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2Model,
    logging,
)


logging.set_verbosity_info()
logger = logging.get_logger(__name__)

MAPPING = {
    "post_extract_proj": "feature_projection.projection",
    "encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
    "self_attn.k_proj": "encoder.layers.*.attention.k_proj",
    "self_attn.v_proj": "encoder.layers.*.attention.v_proj",
    "self_attn.q_proj": "encoder.layers.*.attention.q_proj",
    "self_attn.out_proj": "encoder.layers.*.attention.out_proj",
    "self_attn_layer_norm": "encoder.layers.*.layer_norm",
    "fc1": "encoder.layers.*.feed_forward.intermediate_dense",
    "fc2": "encoder.layers.*.feed_forward.output_dense",
    "final_layer_norm": "encoder.layers.*.final_layer_norm",
    "encoder.layer_norm": "encoder.layer_norm",
    "w2v_model.layer_norm": "feature_projection.layer_norm",
    "quantizer.weight_proj": "quantizer.weight_proj",
    "quantizer.vars": "quantizer.codevectors",
    "project_q": "project_q",
    "final_proj": "project_hid",
    "w2v_encoder.proj": "lm_head",
    "mask_emb": "masked_spec_embed",
}
TOP_LEVEL_KEYS = [
    "lm_head",
    "quantizer.weight_proj",
    "quantizer.codevectors",
    "project_q",
    "project_hid",
]


def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
        f" {value.shape} for {full_name}"
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")


def recursively_load_weights_wav2vec2(fairseq_model, hf_model):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.feature_extractor

    # if encoder has different dim to decoder -> use proj_weight
    proj_weight = None

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name, value, feature_extractor, unused_weights, hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        elif name.split(".")[0] == "proj":
            proj_weight = fairseq_model.proj
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")

    return proj_weight


def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)


def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer


def create_vocab_dict(dict_path):
    with open(dict_path, "r", encoding="utf-8") as f:
        lines = f.readlines()
        words = [line.split(" ")[0] for line in lines]

    num_words = len(words)

    vocab_dict = {
        "<s>": 0,
        "<pad>": 1,
        "</s>": 2,
        "<unk>": 3,
    }

    vocab_dict.update(dict(zip(words, range(4, num_words + 4))))
    return vocab_dict


@torch.no_grad()
def convert_wav2vec2_checkpoint(
    checkpoint_path,
    pytorch_dump_folder_path,
    dict_path,
    encoder_config_path,
    decoder_config_path,
    vocab_size,
    num_decoder_layers,
):
    encoder_config = Wav2Vec2Config.from_pretrained(encoder_config_path)
    decoder_config = Speech2Text2Config.from_pretrained(
        decoder_config_path, vocab_size=vocab_size, decoder_layers=num_decoder_layers, do_stable_layer_norm=True
    )

    feature_extractor = Wav2Vec2FeatureExtractor(
        feature_size=1, sampling_rate=16000, padding_value=0, do_normalize=True, return_attention_mask=True,
    )

    model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
        [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
    )
    model = model[0].eval()

    # set weights for wav2vec2 encoder
    hf_encoder = Wav2Vec2Model(encoder_config)
    projection_layer = recursively_load_weights_wav2vec2(model.encoder, hf_encoder)

    hf_decoder = Speech2Text2ForCausalLM(decoder_config)
    missing_keys, unexpected_keys = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict(), strict=False)

    # set output linear layer
    unexpected_keys.remove("embed_out")
    hf_decoder.lm_head.weight = nn.Parameter(model.decoder.embed_out.detach())

    # layer norm is init to identity matrix so leaving it is fine
    logger.warning(f"The following keys are missing when loading the decoder weights: {missing_keys}")
    logger.warning(f"The following keys are unexpected when loading the decoder weights: {unexpected_keys}")

    hf_wav2vec = SpeechEncoderDecoderModel(encoder=hf_encoder, decoder=hf_decoder)
    hf_wav2vec.config.tie_word_embeddings = False

    # add projection layer
    hf_wav2vec.enc_to_dec_proj.weight = nn.Parameter(projection_layer.weight)
    hf_wav2vec.enc_to_dec_proj.bias = nn.Parameter(projection_layer.bias)

    vocab_dict = create_vocab_dict(dict_path)

    with open(os.path.join(pytorch_dump_folder_path, "vocab.json"), "w") as fp:
        json.dump(vocab_dict, fp)

    tokenizer = Speech2Text2Tokenizer(os.path.join(pytorch_dump_folder_path, "vocab.json"))
    tokenizer.save_pretrained(pytorch_dump_folder_path)

    config = hf_wav2vec.config.to_dict()
    config["pad_token_id"] = tokenizer.pad_token_id
    config["bos_token_id"] = tokenizer.bos_token_id
    config["eos_token_id"] = tokenizer.eos_token_id
    config["tokenizer_class"] = "speech_to_text_2"
    config["feature_extractor_type"] = "wav2vec2"

    hf_wav2vec.config = SpeechEncoderDecoderConfig.from_dict(config)

    hf_wav2vec.save_pretrained(pytorch_dump_folder_path)
    feature_extractor.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
    parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
    parser.add_argument(
        "--encoder_config_path",
        default="facebook/wav2vec2-large-lv60",
        type=str,
        help="Path to hf encoder wav2vec2 checkpoint config",
    )
    parser.add_argument(
        "--decoder_config_path",
        default="facebook/s2t-small-mustc-en-fr-st",
        type=str,
        help="Path to hf decoder s2t checkpoint config",
    )
    parser.add_argument("--vocab_size", default=10224, type=int, help="Vocab size of decoder")
    parser.add_argument("--num_decoder_layers", default=7, type=int, help="Number of decoder layers")

    args = parser.parse_args()
    convert_wav2vec2_checkpoint(
        args.checkpoint_path,
        args.pytorch_dump_folder_path,
        args.dict_path,
        encoder_config_path=args.encoder_config_path,
        decoder_config_path=args.decoder_config_path,
        vocab_size=args.vocab_size,
        num_decoder_layers=args.num_decoder_layers,
    )
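# An invocation sketch (assumptions: the script file name, and a locally
# available fairseq wav2vec2 seq2seq checkpoint with its fairseq `dict.txt`;
# the flags themselves are the ones declared in the argparse block above):
#
#   python convert_wav2vec2_checkpoint.py \
#       --checkpoint_path /path/to/checkpoint.pt \
#       --pytorch_dump_folder_path ./converted \
#       --dict_path /path/to/dict.txt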
| 159 | 0 |
def twos_complement(number: int) -> str:
    """Return the two's complement representation of a negative integer as a binary string."""
    if number > 0:
        raise ValueError("input must be a negative integer")
    binary_number_length = len(bin(number)[3:])
    twos_complement_number = bin(abs(number) - (1 << binary_number_length))[3:]
    twos_complement_number = (
        (
            "1"
            + "0" * (binary_number_length - len(twos_complement_number))
            + twos_complement_number
        )
        if number < 0
        else "0"
    )
    return "0b" + twos_complement_number
if __name__ == "__main__":
import doctest
doctest.testmod()
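# Quick worked example: for -5 the magnitude needs 3 bits (bin(-5) == '-0b101'),
# abs(-5) - 2**3 == -3 gives the low bits '11', and a sign bit is prepended:
#
#   >>> twos_complement(-5)
#   '0b1011'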
| 90 |
import os


def solution():
    """Returns the total of all the name scores in the file p022_names.txt."""
    with open(os.path.dirname(__file__) + "/p022_names.txt") as file:
        names = str(file.readlines()[0])
        names = names.replace('"', "").split(",")

    names.sort()

    name_score = 0
    total_score = 0

    for i, name in enumerate(names):
        for letter in name:
            name_score += ord(letter) - 64

        total_score += (i + 1) * name_score
        name_score = 0
    return total_score
if __name__ == "__main__":
print(solution()) | 76 | 0 |
import logging
import sys
from dataclasses import dataclass, field
from typing import Any, Dict, List, Optional, Union

import librosa
import torch
from datasets import DatasetDict, load_dataset
from packaging import version
from torch import nn

from transformers import (
    HfArgumentParser,
    Trainer,
    TrainingArguments,
    Wav2Vec2Config,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2ForPreTraining,
    is_apex_available,
    trainer_utils,
)
from transformers.models.wav2vec2.modeling_wav2vec2 import _compute_mask_indices


if is_apex_available():
    from apex import amp

if version.parse(version.parse(torch.__version__).base_version) >= version.parse("1.6"):
    _is_native_amp_available = True
    from torch.cuda.amp import autocast

logger = logging.getLogger(__name__)


@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    freeze_feature_extractor: Optional[bool] = field(
        default=True, metadata={"help": "Whether to freeze the feature extractor layers of the model."}
    )
    verbose_logging: Optional[bool] = field(
        default=False,
        metadata={"help": "Whether to log verbose messages or not."},
    )
    max_gumbel_temperature: Optional[float] = field(
        default=2.0, metadata={"help": "Maximum temperature for gumbel softmax."}
    )
    min_gumbel_temperature: Optional[float] = field(
        default=0.5, metadata={"help": "Minimum temperature for gumbel softmax."}
    )
    gumbel_temperature_decay: Optional[float] = field(
        default=0.999995, metadata={"help": "Decay of gumbel temperature during training."}
    )


def configure_logger(model_args: "ModelArguments", training_args: TrainingArguments):
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )
    logging_level = logging.WARNING
    if model_args.verbose_logging:
        logging_level = logging.DEBUG
    elif trainer_utils.is_main_process(training_args.local_rank):
        logging_level = logging.INFO
    logger.setLevel(logging_level)


@dataclass
class DataTrainingArguments:
    dataset_name: str = field(
        default=None, metadata={"help": "The name of the dataset to use (via the datasets library)."}
    )
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    train_split_name: Optional[str] = field(
        default="train",
        metadata={
            "help": "The name of the training data set split to use (via the datasets library). Defaults to 'train'"
        },
    )
    validation_split_name: Optional[str] = field(
        default="validation",
        metadata={
            "help": (
                "The name of the validation data set split to use (via the datasets library). Defaults to 'validation'"
            )
        },
    )
    speech_file_column: Optional[str] = field(
        default="file",
        metadata={"help": "Column in the dataset that contains speech file path. Defaults to 'file'"},
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached preprocessed datasets or not."}
    )
    validation_split_percentage: Optional[int] = field(
        default=1,
        metadata={
            "help": "The percentage of the train set used as validation set in case there's no validation split"
        },
    )
    preprocessing_num_workers: Optional[int] = field(
        default=None,
        metadata={"help": "The number of processes to use for the preprocessing."},
    )
    max_duration_in_seconds: Optional[float] = field(
        default=20.0, metadata={"help": "Filter audio files that are longer than `max_duration_in_seconds` seconds"}
    )


@dataclass
class DataCollatorForWav2Vec2Pretraining:
    model: Wav2Vec2ForPreTraining
    feature_extractor: Wav2Vec2FeatureExtractor
    padding: Union[bool, str] = "longest"
    pad_to_multiple_of: Optional[int] = None
    max_length: Optional[int] = None

    def __call__(self, features: List[Dict[str, Union[List[int], torch.Tensor]]]) -> Dict[str, torch.Tensor]:
        # reformat list to dict and set to pytorch format
        batch = self.feature_extractor.pad(
            features,
            max_length=self.max_length,
            padding=self.padding,
            pad_to_multiple_of=self.pad_to_multiple_of,
            return_tensors="pt",
        )
        mask_indices_seq_length = self.model._get_feat_extract_output_lengths(batch["input_values"].shape[-1])

        batch_size = batch["input_values"].shape[0]

        # make sure that no loss is computed on padded inputs
        if batch["attention_mask"] is not None:
            # compute real output lengths according to convolution formula
            output_lengths = self.model._get_feat_extract_output_lengths(batch["attention_mask"].sum(-1)).to(
                torch.long
            )
            attention_mask = torch.zeros(
                (batch_size, mask_indices_seq_length), dtype=torch.long, device=batch["input_values"].device
            )
            # these two operations makes sure that all values
            # before the output lengths indices are attended to
            attention_mask[
                (torch.arange(attention_mask.shape[0], device=batch["input_values"].device), output_lengths - 1)
            ] = 1
            attention_mask = attention_mask.flip([-1]).cumsum(-1).flip([-1]).bool()

        # sample randomly masked indices
        batch["mask_time_indices"] = _compute_mask_indices(
            (batch_size, mask_indices_seq_length),
            self.model.config.mask_time_prob,
            self.model.config.mask_time_length,
            attention_mask=attention_mask,
            min_masks=2,
        )

        return batch


class Wav2Vec2PreTrainer(Trainer):
    def __init__(self, *args, max_gumbel_temp=1, min_gumbel_temp=0, gumbel_temp_decay=1.0, **kwargs):
        super().__init__(*args, **kwargs)
        self.num_update_step = 0
        self.max_gumbel_temp = max_gumbel_temp
        self.min_gumbel_temp = min_gumbel_temp
        self.gumbel_temp_decay = gumbel_temp_decay

    def training_step(self, model: nn.Module, inputs: Dict[str, Union[torch.Tensor, Any]]) -> torch.Tensor:
        model.train()
        inputs = self._prepare_inputs(inputs)

        if self.use_amp:
            with autocast():
                loss = self.compute_loss(model, inputs)
        else:
            loss = self.compute_loss(model, inputs)

        if self.args.n_gpu > 1 or self.deepspeed:
            if model.module.config.ctc_loss_reduction == "mean":
                loss = loss.mean()
            elif model.module.config.ctc_loss_reduction == "sum":
                loss = loss.sum() / (inputs["mask_time_indices"]).sum()
            else:
                raise ValueError(f"{model.config.ctc_loss_reduction} is not valid. Choose one of ['mean', 'sum']")

        if self.args.gradient_accumulation_steps > 1:
            loss = loss / self.args.gradient_accumulation_steps

        if self.use_amp:
            self.scaler.scale(loss).backward()
        elif self.use_apex:
            with amp.scale_loss(loss, self.optimizer) as scaled_loss:
                scaled_loss.backward()
        elif self.deepspeed:
            self.deepspeed.backward(loss)
        else:
            loss.backward()

        self.num_update_step += 1
        # make sure gumbel softmax temperature is decayed
        if self.args.n_gpu > 1 or self.deepspeed:
            model.module.set_gumbel_temperature(
                max(self.max_gumbel_temp * self.gumbel_temp_decay**self.num_update_step, self.min_gumbel_temp)
            )
        else:
            model.set_gumbel_temperature(
                max(self.max_gumbel_temp * self.gumbel_temp_decay**self.num_update_step, self.min_gumbel_temp)
            )

        return loss.detach()


def main():
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()
    configure_logger(model_args, training_args)

    # Downloading and loading a dataset from the hub.
    datasets = load_dataset(data_args.dataset_name, data_args.dataset_config_name, cache_dir=model_args.cache_dir)

    if "validation" not in datasets.keys():
        # make sure only "validation" and "train" keys remain
        datasets = DatasetDict()
        datasets["validation"] = load_dataset(
            data_args.dataset_name,
            data_args.dataset_config_name,
            split=f"{data_args.train_split_name}[:{data_args.validation_split_percentage}%]",
            cache_dir=model_args.cache_dir,
        )
        datasets["train"] = load_dataset(
            data_args.dataset_name,
            data_args.dataset_config_name,
            split=f"{data_args.train_split_name}[{data_args.validation_split_percentage}%:]",
            cache_dir=model_args.cache_dir,
        )
    else:
        # make sure only "validation" and "train" keys remain
        datasets = DatasetDict()
        datasets["validation"] = load_dataset(
            data_args.dataset_name, data_args.dataset_config_name, split="validation", cache_dir=model_args.cache_dir,
        )
        datasets["train"] = load_dataset(
            data_args.dataset_name,
            data_args.dataset_config_name,
            split=f"{data_args.train_split_name}",
            cache_dir=model_args.cache_dir,
        )

    # only normalized-inputs-training is supported
    feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(
        model_args.model_name_or_path, cache_dir=model_args.cache_dir, do_normalize=True
    )

    def prepare_dataset(batch):
        # check that all files have the correct sampling rate
        batch["speech"], _ = librosa.load(batch[data_args.speech_file_column], sr=feature_extractor.sampling_rate)
        return batch

    # load audio files into numpy arrays
    vectorized_datasets = datasets.map(
        prepare_dataset, num_proc=data_args.preprocessing_num_workers, remove_columns=datasets["train"].column_names
    )

    # filter audio files that are too long
    vectorized_datasets = vectorized_datasets.filter(
        lambda data: len(data["speech"]) < int(data_args.max_duration_in_seconds * feature_extractor.sampling_rate)
    )

    def normalize(batch):
        return feature_extractor(batch["speech"], sampling_rate=feature_extractor.sampling_rate)

    # normalize and transform to `BatchFeatures`
    vectorized_datasets = vectorized_datasets.map(
        normalize,
        batched=True,
        num_proc=data_args.preprocessing_num_workers,
        load_from_cache_file=not data_args.overwrite_cache,
        remove_columns=vectorized_datasets["train"].column_names,
    )

    # pretraining is only supported for "newer" stable layer norm architecture
    # apply_spec_augment has to be True, mask_feature_prob has to be 0.0
    config = Wav2Vec2Config.from_pretrained(
        model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        gradient_checkpointing=training_args.gradient_checkpointing,
    )

    if not config.do_stable_layer_norm or config.feat_extract_norm != "layer":
        raise ValueError(
            "PreTraining is only supported for ``config.do_stable_layer_norm=True`` and"
            " ``config.feat_extract_norm='layer'``"
        )

    model = Wav2Vec2ForPreTraining(config)

    data_collator = DataCollatorForWav2Vec2Pretraining(model=model, feature_extractor=feature_extractor)

    trainer = Wav2Vec2PreTrainer(
        model=model,
        data_collator=data_collator,
        args=training_args,
        train_dataset=vectorized_datasets["train"],
        eval_dataset=vectorized_datasets["validation"],
        tokenizer=feature_extractor,
        max_gumbel_temp=model_args.max_gumbel_temperature,
        min_gumbel_temp=model_args.min_gumbel_temperature,
        gumbel_temp_decay=model_args.gumbel_temperature_decay,
    )
    trainer.train()


if __name__ == "__main__":
    main()
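# A launch sketch (assumptions: the script file name, a dataset on the Hub with
# a `file` column of audio paths, and a wav2vec2 config with
# do_stable_layer_norm=True; `--output_dir` comes from TrainingArguments):
#
#   python run_pretrain.py \
#       --model_name_or_path facebook/wav2vec2-large-lv60 \
#       --dataset_name <your_dataset> \
#       --output_dir ./wav2vec2-pretrained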
| 250 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "sayakpaul/vit-msn-base": "https://huggingface.co/sayakpaul/vit-msn-base/resolve/main/config.json",
    # See all ViT MSN models at https://huggingface.co/models?filter=vit_msn
}


class ViTMSNConfig(PretrainedConfig):
    model_type = "vit_msn"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-06,
        image_size=224,
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
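# A minimal usage sketch: instantiate the configuration and a randomly
# initialized model from it.
#
#   from transformers import ViTMSNModel
#   config = ViTMSNConfig(image_size=224, patch_size=16)
#   model = ViTMSNModel(config)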
| 250 | 1 |
'''simple docstring'''
from argparse import ArgumentParser
from datasets.commands.convert import ConvertCommand
from datasets.commands.dummy_data import DummyDataCommand
from datasets.commands.env import EnvironmentCommand
from datasets.commands.run_beam import RunBeamCommand
from datasets.commands.test import TestCommand
from datasets.utils.logging import set_verbosity_info
def parse_unknown_args(unknown_args):
    return {key.lstrip("-"): value for key, value in zip(unknown_args[::2], unknown_args[1::2])}


def main():
    parser = ArgumentParser("HuggingFace Datasets CLI tool", usage="datasets-cli <command> [<args>]", allow_abbrev=False)
    commands_parser = parser.add_subparsers(help="datasets-cli command helpers")
    set_verbosity_info()

    # Register commands
    ConvertCommand.register_subcommand(commands_parser)
    EnvironmentCommand.register_subcommand(commands_parser)
    TestCommand.register_subcommand(commands_parser)
    RunBeamCommand.register_subcommand(commands_parser)
    DummyDataCommand.register_subcommand(commands_parser)

    # Parse args
    args, unknown_args = parser.parse_known_args()
    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)
    kwargs = parse_unknown_args(unknown_args)

    # Run
    service = args.func(args, **kwargs)
    service.run()
if __name__ == "__main__":
main()
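# An invocation sketch of the CLI defined above:
#
#   datasets-cli env
#   datasets-cli test ./my_dataset --save_infos --all_configs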
| 34 |
'''simple docstring'''
from datetime import datetime
import requests
from bs4 import BeautifulSoup
if __name__ == "__main__":
    url = input("Enter image url: ").strip()
    print(f"Downloading image from {url} ...")
    soup = BeautifulSoup(requests.get(url).content, "html.parser")
    # The image URL is in the content field of the first meta tag with property og:image
    image_url = soup.find("meta", {"property": "og:image"})["content"]
    image_data = requests.get(image_url).content
    file_name = f"{datetime.now():%Y-%m-%d_%H:%M:%S}.jpg"
    with open(file_name, "wb") as fp:
        fp.write(image_data)
    print(f"Done. Image saved to disk as {file_name}.")
| 34 | 1 |
"""simple docstring"""
import inspect
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNet2DModel, VQModel
from ...schedulers import DDIMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class LDMPipeline(DiffusionPipeline):
    def __init__(self, vqvae: VQModel, unet: UNet2DModel, scheduler: DDIMScheduler):
        super().__init__()
        self.register_modules(vqvae=vqvae, unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        eta: float = 0.0,
        num_inference_steps: int = 50,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        **kwargs,
    ) -> Union[Tuple, ImagePipelineOutput]:
        latents = randn_tensor(
            (batch_size, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size),
            generator=generator,
        )
        latents = latents.to(self.device)

        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma

        self.scheduler.set_timesteps(num_inference_steps)

        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
        extra_kwargs = {}
        if accepts_eta:
            extra_kwargs["eta"] = eta

        for t in self.progress_bar(self.scheduler.timesteps):
            latent_model_input = self.scheduler.scale_model_input(latents, t)
            # predict the noise residual
            noise_prediction = self.unet(latent_model_input, t).sample
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_prediction, t, latents, **extra_kwargs).prev_sample

        # decode the image latents with the VAE
        image = self.vqvae.decode(latents).sample

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
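# A minimal usage sketch (assumption: a checkpoint such as
# CompVis/ldm-celebahq-256 that bundles a VQModel, a UNet2DModel and a
# compatible scheduler):
#
#   pipe = LDMPipeline.from_pretrained("CompVis/ldm-celebahq-256")
#   image = pipe(num_inference_steps=50).images[0]
#   image.save("sample.png")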
| 357 |
"""simple docstring"""
def solution(n: int = 10) -> str:
    """Return the last `n` digits of the non-Mersenne prime 28433 * 2**7830457 + 1."""
    if not isinstance(n, int) or n < 0:
        raise ValueError("Invalid input")
    modulus = 10**n
    number = 28433 * (pow(2, 7830457, modulus)) + 1
    return str(number % modulus)
if __name__ == "__main__":
from doctest import testmod
testmod()
print(F"""{solution(1_0) = }""")
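# Sanity check sketch: the final digit is cheap to verify by hand, because
# pow(2, 7830457, 10) only tracks the last digit (2, 4, 8, 6 cycle of period 4,
# and 7830457 % 4 == 1 gives 2, so 28433 * 2 + 1 ends in 7):
#
#   >>> solution(1)
#   '7'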
| 205 | 0 |
import argparse
from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection
from diffusers import UnCLIPImageVariationPipeline, UnCLIPPipeline
if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")

    parser.add_argument(
        "--txt2img_unclip",
        default="kakaobrain/karlo-v1-alpha",
        type=str,
        required=False,
        help="The pretrained txt2img unclip.",
    )

    args = parser.parse_args()

    txt2img = UnCLIPPipeline.from_pretrained(args.txt2img_unclip)

    feature_extractor = CLIPImageProcessor()
    image_encoder = CLIPVisionModelWithProjection.from_pretrained("openai/clip-vit-large-patch14")

    img2img = UnCLIPImageVariationPipeline(
        decoder=txt2img.decoder,
        text_encoder=txt2img.text_encoder,
        tokenizer=txt2img.tokenizer,
        text_proj=txt2img.text_proj,
        feature_extractor=feature_extractor,
        image_encoder=image_encoder,
        super_res_first=txt2img.super_res_first,
        super_res_last=txt2img.super_res_last,
        decoder_scheduler=txt2img.decoder_scheduler,
        super_res_scheduler=txt2img.super_res_scheduler,
    )

    img2img.save_pretrained(args.dump_path)
| 337 |
from __future__ import annotations
def average(nums: list[float]) -> float:
    """Return the mean of a non-empty list of numbers."""
    if not nums:
        raise ValueError("List is empty")
    return sum(nums) / len(nums)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 337 | 1 |
"""simple docstring"""
import json
from typing import Dict, List, Optional, Tuple, Union
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding, EncodedInput
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_led import LEDTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json",
    },
    "merges_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt",
    },
    "tokenizer_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "allenai/led-base-16384": 16_384,
}


class LEDTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = LEDTokenizer
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        trim_offsets=True,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            trim_offsets=trim_offsets,
            **kwargs,
        )
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

        # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
        tokenizer_component = "post_processor"
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())

            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state["sep"] = tuple(state["sep"])
            if "cls" in state:
                state["cls"] = tuple(state["cls"])

            changes_to_apply = False
            if state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
                state["add_prefix_space"] = add_prefix_space
                changes_to_apply = True
            if state.get("trim_offsets", trim_offsets) != trim_offsets:
                state["trim_offsets"] = trim_offsets
                changes_to_apply = True

            if changes_to_apply:
                component_class = getattr(processors, state.pop("type"))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)

    @property
    # Copied from transformers.models.bart.tokenization_bart_fast.BartTokenizerFast.mask_token with BART->LED
    def mask_token(self) -> str:
        if self._mask_token is None:
            if self.verbose:
                logger.error("Using mask_token, but it is not set yet.")
            return None
        return str(self._mask_token)

    @mask_token.setter
    def mask_token(self, value):
        value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = value

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)

        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs."
            )

        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)

        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs."
            )

        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output
        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def _pad(
        self,
        encoded_inputs: Union[Dict[str, EncodedInput], BatchEncoding],
        max_length: Optional[int] = None,
        padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
    ) -> dict:
        encoded_inputs = super()._pad(
            encoded_inputs=encoded_inputs,
            max_length=max_length,
            padding_strategy=padding_strategy,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask,
        )

        # Load from model defaults
        if return_attention_mask is None:
            return_attention_mask = "attention_mask" in self.model_input_names

        if return_attention_mask and "global_attention_mask" in encoded_inputs:
            required_input = encoded_inputs[self.model_input_names[0]]
            # `global_attention_mask` need to have the same length as other (sequential) inputs.
            needs_to_be_padded = len(encoded_inputs["global_attention_mask"]) != len(required_input)

            if needs_to_be_padded:
                difference = len(required_input) - len(encoded_inputs["global_attention_mask"])

                if self.padding_side == "right":
                    # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
                    encoded_inputs["global_attention_mask"] = (
                        encoded_inputs["global_attention_mask"] + [-1] * difference
                    )
                elif self.padding_side == "left":
                    encoded_inputs["global_attention_mask"] = [-1] * difference + encoded_inputs[
                        "global_attention_mask"
                    ]
                else:
                    raise ValueError("Invalid padding strategy:" + str(self.padding_side))

        return encoded_inputs
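# A minimal usage sketch: mark the first token for global attention and let the
# overridden `_pad` above pad `global_attention_mask` with -1 alongside the
# other inputs.
#
#   tok = LEDTokenizerFast.from_pretrained("allenai/led-base-16384")
#   enc = tok("Summarize this document.")
#   enc["global_attention_mask"] = [1] + [0] * (len(enc["input_ids"]) - 1)
#   padded = tok.pad(enc, padding="max_length", max_length=32)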
| 244 |
"""simple docstring"""
import inspect
from typing import Optional, Union
import numpy as np
import PIL
import torch
from torch.nn import functional as F
from torchvision import transforms
from transformers import CLIPFeatureExtractor, CLIPModel, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
DPMSolverMultistepScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
    UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.utils import (
PIL_INTERPOLATION,
randn_tensor,
)
def a__ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
"""simple docstring"""
if isinstance(_SCREAMING_SNAKE_CASE , torch.Tensor ):
return image
elif isinstance(_SCREAMING_SNAKE_CASE , PIL.Image.Image ):
UpperCamelCase = [image]
if isinstance(image[0] , PIL.Image.Image ):
UpperCamelCase = [np.array(i.resize((w, h) , resample=PIL_INTERPOLATION["lanczos"] ) )[None, :] for i in image]
UpperCamelCase = np.concatenate(_SCREAMING_SNAKE_CASE , axis=0 )
UpperCamelCase = np.array(_SCREAMING_SNAKE_CASE ).astype(np.floataa ) / 2_55.0
UpperCamelCase = image.transpose(0 , 3 , 1 , 2 )
UpperCamelCase = 2.0 * image - 1.0
UpperCamelCase = torch.from_numpy(_SCREAMING_SNAKE_CASE )
elif isinstance(image[0] , torch.Tensor ):
UpperCamelCase = torch.cat(_SCREAMING_SNAKE_CASE , dim=0 )
return image
def a__ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=0.99_95 ):
"""simple docstring"""
if not isinstance(_SCREAMING_SNAKE_CASE , np.ndarray ):
UpperCamelCase = True
UpperCamelCase = va.device
UpperCamelCase = va.cpu().numpy()
UpperCamelCase = va.cpu().numpy()
UpperCamelCase = np.sum(va * va / (np.linalg.norm(_SCREAMING_SNAKE_CASE ) * np.linalg.norm(_SCREAMING_SNAKE_CASE )) )
if np.abs(_SCREAMING_SNAKE_CASE ) > DOT_THRESHOLD:
UpperCamelCase = (1 - t) * va + t * va
else:
UpperCamelCase = np.arccos(_SCREAMING_SNAKE_CASE )
UpperCamelCase = np.sin(_SCREAMING_SNAKE_CASE )
UpperCamelCase = theta_a * t
UpperCamelCase = np.sin(_SCREAMING_SNAKE_CASE )
UpperCamelCase = np.sin(theta_a - theta_t ) / sin_theta_a
UpperCamelCase = sin_theta_t / sin_theta_a
UpperCamelCase = sa * va + sa * va
if inputs_are_torch:
UpperCamelCase = torch.from_numpy(_SCREAMING_SNAKE_CASE ).to(_SCREAMING_SNAKE_CASE )
return va
def a__ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
"""simple docstring"""
UpperCamelCase = F.normalize(_SCREAMING_SNAKE_CASE , dim=-1 )
UpperCamelCase = F.normalize(_SCREAMING_SNAKE_CASE , dim=-1 )
return (x - y).norm(dim=-1 ).div(2 ).arcsin().pow(2 ).mul(2 )
def a__ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
"""simple docstring"""
for param in model.parameters():
UpperCamelCase = value
class _lowerCamelCase ( _lowercase ):
def __init__(self , __a , __a , __a , __a , __a , __a , __a , __a=None , __a=None , __a=None , ) -> Tuple:
super().__init__()
self.register_modules(
vae=__a , text_encoder=__a , clip_model=__a , tokenizer=__a , unet=__a , scheduler=__a , feature_extractor=__a , coca_model=__a , coca_tokenizer=__a , coca_transform=__a , )
UpperCamelCase = (
feature_extractor.size
if isinstance(feature_extractor.size , __a )
else feature_extractor.size["shortest_edge"]
)
UpperCamelCase = transforms.Normalize(mean=feature_extractor.image_mean , std=feature_extractor.image_std )
set_requires_grad(self.text_encoder , __a )
set_requires_grad(self.clip_model , __a )
def snake_case_ (self , __a = "auto" ) -> List[Any]:
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
UpperCamelCase = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(__a )
def snake_case_ (self ) -> Optional[int]:
self.enable_attention_slicing(__a )
def snake_case_ (self ) -> int:
set_requires_grad(self.vae , __a )
def snake_case_ (self ) -> Optional[Any]:
set_requires_grad(self.vae , __a )
def snake_case_ (self ) -> Optional[int]:
set_requires_grad(self.unet , __a )
def snake_case_ (self ) -> str:
set_requires_grad(self.unet , __a )
def snake_case_ (self , __a , __a , __a ) -> str:
# get the original timestep using init_timestep
UpperCamelCase = min(int(num_inference_steps * strength ) , __a )
UpperCamelCase = max(num_inference_steps - init_timestep , 0 )
UpperCamelCase = self.scheduler.timesteps[t_start:]
return timesteps, num_inference_steps - t_start
def snake_case_ (self , __a , __a , __a , __a , __a , __a=None ) -> Tuple:
if not isinstance(__a , torch.Tensor ):
raise ValueError(F"`image` has to be of type `torch.Tensor` but is {type(__a )}" )
UpperCamelCase = image.to(device=__a , dtype=__a )
if isinstance(__a , __a ):
UpperCamelCase = [
self.vae.encode(image[i : i + 1] ).latent_dist.sample(generator[i] ) for i in range(__a )
]
UpperCamelCase = torch.cat(__a , dim=0 )
else:
UpperCamelCase = self.vae.encode(__a ).latent_dist.sample(__a )
# Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor
UpperCamelCase = 0.18215 * init_latents
UpperCamelCase = init_latents.repeat_interleave(__a , dim=0 )
UpperCamelCase = randn_tensor(init_latents.shape , generator=__a , device=__a , dtype=__a )
# get latents
UpperCamelCase = self.scheduler.add_noise(__a , __a , __a )
UpperCamelCase = init_latents
return latents
def snake_case_ (self , __a ) -> Union[str, Any]:
UpperCamelCase = self.coca_transform(__a ).unsqueeze(0 )
with torch.no_grad(), torch.cuda.amp.autocast():
UpperCamelCase = self.coca_model.generate(transformed_image.to(device=self.device , dtype=self.coca_model.dtype ) )
UpperCamelCase = self.coca_tokenizer.decode(generated[0].cpu().numpy() )
return generated.split("<end_of_text>" )[0].replace("<start_of_text>" , "" ).rstrip(" .," )
def snake_case_ (self , __a , __a ) -> Union[str, Any]:
UpperCamelCase = self.feature_extractor.preprocess(__a )
UpperCamelCase = torch.from_numpy(clip_image_input["pixel_values"][0] ).unsqueeze(0 ).to(self.device ).half()
UpperCamelCase = self.clip_model.get_image_features(__a )
UpperCamelCase = image_embeddings_clip / image_embeddings_clip.norm(p=2 , dim=-1 , keepdim=__a )
UpperCamelCase = image_embeddings_clip.repeat_interleave(__a , dim=0 )
return image_embeddings_clip
@torch.enable_grad()
def snake_case_ (self , __a , __a , __a , __a , __a , __a , __a , ) -> List[str]:
UpperCamelCase = latents.detach().requires_grad_()
UpperCamelCase = self.scheduler.scale_model_input(__a , __a )
# predict the noise residual
UpperCamelCase = self.unet(__a , __a , encoder_hidden_states=__a ).sample
if isinstance(self.scheduler , (PNDMScheduler, DDIMScheduler, DPMSolverMultistepScheduler) ):
UpperCamelCase = self.scheduler.alphas_cumprod[timestep]
UpperCamelCase = 1 - alpha_prod_t
# compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
UpperCamelCase = (latents - beta_prod_t ** 0.5 * noise_pred) / alpha_prod_t ** 0.5
UpperCamelCase = torch.sqrt(__a )
UpperCamelCase = pred_original_sample * (fac) + latents * (1 - fac)
elif isinstance(self.scheduler , __a ):
UpperCamelCase = self.scheduler.sigmas[index]
UpperCamelCase = latents - sigma * noise_pred
else:
raise ValueError(F"scheduler type {type(self.scheduler )} not supported" )
# Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor
UpperCamelCase = 1 / 0.18215 * sample
UpperCamelCase = self.vae.decode(__a ).sample
UpperCamelCase = (image / 2 + 0.5).clamp(0 , 1 )
UpperCamelCase = transforms.Resize(self.feature_extractor_size )(__a )
UpperCamelCase = self.normalize(__a ).to(latents.dtype )
UpperCamelCase = self.clip_model.get_image_features(__a )
UpperCamelCase = image_embeddings_clip / image_embeddings_clip.norm(p=2 , dim=-1 , keepdim=__a )
UpperCamelCase = spherical_dist_loss(__a , __a ).mean() * clip_guidance_scale
UpperCamelCase = -torch.autograd.grad(__a , __a )[0]
if isinstance(self.scheduler , __a ):
UpperCamelCase = latents.detach() + grads * (sigma**2)
UpperCamelCase = noise_pred_original
else:
UpperCamelCase = noise_pred_original - torch.sqrt(__a ) * grads
return noise_pred, latents
@torch.no_grad()
def __call__(self , __a , __a , __a = None , __a = None , __a = 5_12 , __a = 5_12 , __a = 0.6 , __a = 50 , __a = 7.5 , __a = 1 , __a = 0.0 , __a = 1_00 , __a = None , __a = "pil" , __a = True , __a = 0.8 , __a = 0.1 , __a = 0.1 , ) -> List[Any]:
if isinstance(__a , __a ) and len(__a ) != batch_size:
raise ValueError(F"You have passed {batch_size} batch_size, but only {len(__a )} generators." )
if height % 8 != 0 or width % 8 != 0:
raise ValueError(F"`height` and `width` have to be divisible by 8 but are {height} and {width}." )
if isinstance(__a , torch.Generator ) and batch_size > 1:
UpperCamelCase = [generator] + [None] * (batch_size - 1)
UpperCamelCase = [
("model", self.coca_model is None),
("tokenizer", self.coca_tokenizer is None),
("transform", self.coca_transform is None),
]
UpperCamelCase = [x[0] for x in coca_is_none if x[1]]
UpperCamelCase = ", ".join(__a )
# generate prompts with coca model if prompt is None
if content_prompt is None:
if len(__a ):
raise ValueError(
F"Content prompt is None and CoCa [{coca_is_none_str}] is None."
F"Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline." )
UpperCamelCase = self.get_image_description(__a )
if style_prompt is None:
if len(__a ):
raise ValueError(
F"Style prompt is None and CoCa [{coca_is_none_str}] is None."
F" Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline." )
UpperCamelCase = self.get_image_description(__a )
# get prompt text embeddings for content and style
UpperCamelCase = self.tokenizer(
__a , padding="max_length" , max_length=self.tokenizer.model_max_length , truncation=__a , return_tensors="pt" , )
UpperCamelCase = self.text_encoder(content_text_input.input_ids.to(self.device ) )[0]
UpperCamelCase = self.tokenizer(
__a , padding="max_length" , max_length=self.tokenizer.model_max_length , truncation=__a , return_tensors="pt" , )
UpperCamelCase = self.text_encoder(style_text_input.input_ids.to(self.device ) )[0]
UpperCamelCase = slerp(__a , __a , __a )
# duplicate text embeddings for each generation per prompt
UpperCamelCase = text_embeddings.repeat_interleave(__a , dim=0 )
# set timesteps
UpperCamelCase = "offset" in set(inspect.signature(self.scheduler.set_timesteps ).parameters.keys() )
UpperCamelCase = {}
if accepts_offset:
UpperCamelCase = 1
self.scheduler.set_timesteps(__a , **__a )
# Some schedulers like PNDM have timesteps as arrays
# It's more optimized to move all timesteps to correct device beforehand
self.scheduler.timesteps.to(self.device )
UpperCamelCase , UpperCamelCase = self.get_timesteps(__a , __a , self.device )
UpperCamelCase = timesteps[:1].repeat(__a )
# Preprocess image
UpperCamelCase = preprocess(__a , __a , __a )
UpperCamelCase = self.prepare_latents(
__a , __a , __a , text_embeddings.dtype , self.device , __a )
UpperCamelCase = preprocess(__a , __a , __a )
UpperCamelCase = self.prepare_latents(
__a , __a , __a , text_embeddings.dtype , self.device , __a )
UpperCamelCase = slerp(__a , __a , __a )
if clip_guidance_scale > 0:
UpperCamelCase = self.get_clip_image_embeddings(__a , __a )
UpperCamelCase = self.get_clip_image_embeddings(__a , __a )
UpperCamelCase = slerp(
__a , __a , __a )
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
UpperCamelCase = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
UpperCamelCase = content_text_input.input_ids.shape[-1]
UpperCamelCase = self.tokenizer([""] , padding="max_length" , max_length=__a , return_tensors="pt" )
UpperCamelCase = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# duplicate unconditional embeddings for each generation per prompt
UpperCamelCase = uncond_embeddings.repeat_interleave(__a , dim=0 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
UpperCamelCase = torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
UpperCamelCase = (batch_size, self.unet.config.in_channels, height // 8, width // 8)
UpperCamelCase = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not work reproducibly on mps
UpperCamelCase = torch.randn(__a , generator=__a , device="cpu" , dtype=__a ).to(
self.device )
else:
UpperCamelCase = torch.randn(__a , generator=__a , device=self.device , dtype=__a )
else:
if latents.shape != latents_shape:
raise ValueError(F"Unexpected latents shape, got {latents.shape}, expected {latents_shape}" )
UpperCamelCase = latents.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
UpperCamelCase = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
UpperCamelCase = "eta" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
UpperCamelCase = {}
if accepts_eta:
UpperCamelCase = eta
# check if the scheduler accepts generator
UpperCamelCase = "generator" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
if accepts_generator:
UpperCamelCase = generator
with self.progress_bar(total=__a ):
for i, t in enumerate(__a ):
# expand the latents if we are doing classifier free guidance
UpperCamelCase = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
UpperCamelCase = self.scheduler.scale_model_input(__a , __a )
# predict the noise residual
UpperCamelCase = self.unet(__a , __a , encoder_hidden_states=__a ).sample
# perform classifier free guidance
if do_classifier_free_guidance:
UpperCamelCase , UpperCamelCase = noise_pred.chunk(2 )
UpperCamelCase = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# perform clip guidance
if clip_guidance_scale > 0:
UpperCamelCase = (
text_embeddings.chunk(2 )[1] if do_classifier_free_guidance else text_embeddings
)
UpperCamelCase , UpperCamelCase = self.cond_fn(
__a , __a , __a , __a , __a , __a , __a , )
# compute the previous noisy sample x_t -> x_t-1
UpperCamelCase = self.scheduler.step(__a , __a , __a , **__a ).prev_sample
# Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor
UpperCamelCase = 1 / 0.18215 * latents
UpperCamelCase = self.vae.decode(__a ).sample
UpperCamelCase = (image / 2 + 0.5).clamp(0 , 1 )
UpperCamelCase = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
UpperCamelCase = self.numpy_to_pil(__a )
if not return_dict:
return (image, None)
return StableDiffusionPipelineOutput(images=__a , nsfw_content_detected=__a )
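# The `slerp` calls above rely on a helper defined elsewhere in this file. A minimal
# sketch of such a helper is shown below for reference; the signature slerp(t, v0, v1)
# and the 0.9995 dot-product threshold are assumptions, not taken from this file.
def _slerp_sketch(t , v0 , v1 , dot_threshold=0.9995 ):
    # Spherical linear interpolation between two tensors; falls back to plain linear
    # interpolation when the inputs are nearly parallel.
    dot = torch.sum(v0 * v1 ) / (torch.norm(v0 ) * torch.norm(v1 ) )
    if torch.abs(dot ) > dot_threshold:
        return (1 - t) * v0 + t * v1
    theta_0 = torch.acos(dot )
    theta_t = theta_0 * t
    sin_theta_0 = torch.sin(theta_0 )
    s0 = torch.sin(theta_0 - theta_t ) / sin_theta_0
    s1 = torch.sin(theta_t ) / sin_theta_0
    return s0 * v0 + s1 * v1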
| 244 | 1 |
'''simple docstring'''
import inspect
import re
from hashlib import sha256
from typing import Dict, List
from .arrow import arrow
from .audiofolder import audiofolder
from .csv import csv
from .imagefolder import imagefolder
from .json import json
from .pandas import pandas
from .parquet import parquet
from .sql import sql # noqa F401
from .text import text
def _hash_python_lines(lines: List[str] ) -> str:
    '''simple docstring'''
    filtered_lines = []
    for line in lines:
        line = re.sub(r"""#.*""" , """""" , line ) # remove comments
        if line:
            filtered_lines.append(line )
    full_str = """\n""".join(filtered_lines )
    # Make a hash from all this code
    full_bytes = full_str.encode("""utf-8""" )
    return sha256(full_bytes ).hexdigest()
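# Quick illustration (example values, not part of the module): pure-comment lines are
# stripped before hashing, so both inputs below yield the same digest.
assert _hash_python_lines(["# a comment", "x = 1"] ) == _hash_python_lines(["x = 1"] )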
# get importable module names and hash for caching
__lowercase = {
'''csv''': (csv.__name__, _hash_python_lines(inspect.getsource(csv).splitlines())),
'''json''': (json.__name__, _hash_python_lines(inspect.getsource(json).splitlines())),
'''pandas''': (pandas.__name__, _hash_python_lines(inspect.getsource(pandas).splitlines())),
'''parquet''': (parquet.__name__, _hash_python_lines(inspect.getsource(parquet).splitlines())),
'''arrow''': (arrow.__name__, _hash_python_lines(inspect.getsource(arrow).splitlines())),
'''text''': (text.__name__, _hash_python_lines(inspect.getsource(text).splitlines())),
'''imagefolder''': (imagefolder.__name__, _hash_python_lines(inspect.getsource(imagefolder).splitlines())),
'''audiofolder''': (audiofolder.__name__, _hash_python_lines(inspect.getsource(audiofolder).splitlines())),
}
# Used to infer the module to use based on the data files extensions
__lowercase = {
'''.csv''': ('''csv''', {}),
'''.tsv''': ('''csv''', {'''sep''': '''\t'''}),
'''.json''': ('''json''', {}),
'''.jsonl''': ('''json''', {}),
'''.parquet''': ('''parquet''', {}),
'''.arrow''': ('''arrow''', {}),
'''.txt''': ('''text''', {}),
}
_EXTENSION_TO_MODULE.update({ext: ('''imagefolder''', {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ('''imagefolder''', {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext: ('''audiofolder''', {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ('''audiofolder''', {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
__lowercase = {'''imagefolder''', '''audiofolder'''}
# Used to filter data files based on extensions given a module name
__lowercase = {}
for _ext, (_module, _) in _EXTENSION_TO_MODULE.items():
_MODULE_TO_EXTENSIONS.setdefault(_module, []).append(_ext)
_MODULE_TO_EXTENSIONS["imagefolder"].append('''.zip''')
_MODULE_TO_EXTENSIONS["audiofolder"].append('''.zip''')
| 272 |
from __future__ import annotations
from collections import namedtuple
def electric_power(voltage: float , current: float , power: float ) -> tuple:
    '''simple docstring'''
    result = namedtuple("result" , "name value" )
if (voltage, current, power).count(0 ) != 1:
raise ValueError("Only one argument must be 0" )
elif power < 0:
raise ValueError(
"Power cannot be negative in any electrical/electronics system" )
elif voltage == 0:
return result("voltage" , power / current )
elif current == 0:
return result("current" , power / voltage )
elif power == 0:
return result("power" , float(round(abs(voltage * current ) , 2 ) ) )
else:
raise ValueError("Exactly one argument must be 0" )
if __name__ == "__main__":
import doctest
doctest.testmod()
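    # Illustrative checks (example values; exactly one argument is 0):
    print(electric_power(voltage=0, current=2, power=4))      # result(name='voltage', value=2.0)
    print(electric_power(voltage=2.0, current=3.0, power=0))  # result(name='power', value=6.0)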
| 159 | 0 |
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
EulerAncestralDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionPanoramaPipeline,
    UNet2DConditionModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
@skip_mps
class StableDiffusionPanoramaPipelineFastTests(PipelineLatentTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    """simple docstring"""
    pipeline_class = StableDiffusionPanoramaPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    def get_dummy_components(self ):
        """simple docstring"""
        torch.manual_seed(0 )
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64) , layers_per_block=1 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , )
        scheduler = DDIMScheduler()
        torch.manual_seed(0 )
        vae = AutoencoderKL(
            block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , )
        torch.manual_seed(0 )
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
        text_encoder = CLIPTextModel(text_encoder_config )
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components
    def get_dummy_inputs(self , device , seed=0 ):
        """simple docstring"""
        generator = torch.manual_seed(seed )
        inputs = {
            "prompt": "a photo of the dolomites",
            "generator": generator,
            # Setting height and width to None to prevent OOMs on CPU.
            "height": None,
            "width": None,
            "num_inference_steps": 1,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs
    def test_stable_diffusion_panorama_default_case(self ):
        """simple docstring"""
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionPanoramaPipeline(**components )
        sd_pipe = sd_pipe.to(device )
        sd_pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs(device )
        image = sd_pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.6186, 0.5374, 0.4915, 0.4135, 0.4114, 0.4563, 0.5128, 0.4977, 0.4757] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
    def test_inference_batch_consistent(self ):
"""simple docstring"""
super().test_inference_batch_consistent(batch_sizes=[1, 2] )
    def test_inference_batch_single_identical(self ):
"""simple docstring"""
super().test_inference_batch_single_identical(batch_size=2 , expected_max_diff=3.2_5E-3 )
    def test_stable_diffusion_panorama_negative_prompt(self ):
        """simple docstring"""
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionPanoramaPipeline(**components )
        sd_pipe = sd_pipe.to(device )
        sd_pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs(device )
        negative_prompt = "french fries"
        output = sd_pipe(**inputs , negative_prompt=negative_prompt )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.6187, 0.5375, 0.4915, 0.4136, 0.4114, 0.4563, 0.5128, 0.4976, 0.4757] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
    def test_stable_diffusion_panorama_views_batch(self ):
        """simple docstring"""
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionPanoramaPipeline(**components )
        sd_pipe = sd_pipe.to(device )
        sd_pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs(device )
        output = sd_pipe(**inputs , view_batch_size=2 )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.6187, 0.5375, 0.4915, 0.4136, 0.4114, 0.4563, 0.5128, 0.4976, 0.4757] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
    def test_stable_diffusion_panorama_euler(self ):
        """simple docstring"""
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components["scheduler"] = EulerAncestralDiscreteScheduler(
            beta_start=0.0_0085 , beta_end=0.012 , beta_schedule="scaled_linear" )
        sd_pipe = StableDiffusionPanoramaPipeline(**components )
        sd_pipe = sd_pipe.to(device )
        sd_pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs(device )
        image = sd_pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.4024, 0.6510, 0.4901, 0.5378, 0.5813, 0.5622, 0.4795, 0.4467, 0.4952] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
    def test_stable_diffusion_panorama_pndm(self ):
        """simple docstring"""
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components["scheduler"] = PNDMScheduler(
            beta_start=0.0_0085 , beta_end=0.012 , beta_schedule="scaled_linear" , skip_prk_steps=True )
        sd_pipe = StableDiffusionPanoramaPipeline(**components )
        sd_pipe = sd_pipe.to(device )
        sd_pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs(device )
        image = sd_pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.6391, 0.6291, 0.4861, 0.5134, 0.5552, 0.4578, 0.5032, 0.5023, 0.4539] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch_gpu
class StableDiffusionPanoramaSlowTests(unittest.TestCase ):
    """simple docstring"""
    def tearDown(self ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    def get_inputs(self , seed=0 ):
        """simple docstring"""
        generator = torch.manual_seed(seed )
        inputs = {
"prompt": "a photo of the dolomites",
"generator": generator,
"num_inference_steps": 3,
"guidance_scale": 7.5,
"output_type": "numpy",
}
return inputs
    def test_stable_diffusion_panorama_default(self ):
        """simple docstring"""
        model_ckpt = "stabilityai/stable-diffusion-2-base"
        scheduler = DDIMScheduler.from_pretrained(model_ckpt , subfolder="scheduler" )
        pipe = StableDiffusionPanoramaPipeline.from_pretrained(model_ckpt , scheduler=scheduler , safety_checker=None )
        pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        pipe.enable_attention_slicing()
        inputs = self.get_inputs()
        image = pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1].flatten()
        assert image.shape == (1, 512, 2048, 3)
        expected_slice = np.array(
[
0.3696_8392,
0.2702_5372,
0.3244_6766,
0.2837_9387,
0.3636_3274,
0.3073_3347,
0.2710_0027,
0.2705_4125,
0.2553_6096,
] )
assert np.abs(expected_slice - image_slice ).max() < 1E-2
    def test_stable_diffusion_panorama_k_lms(self ):
        """simple docstring"""
        pipe = StableDiffusionPanoramaPipeline.from_pretrained(
            "stabilityai/stable-diffusion-2-base" , safety_checker=None )
        pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
        pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        pipe.enable_attention_slicing()
        inputs = self.get_inputs()
        image = pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1].flatten()
        assert image.shape == (1, 512, 2048, 3)
        expected_slice = np.array(
[
[
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
]
] )
assert np.abs(expected_slice - image_slice ).max() < 1E-3
    def test_stable_diffusion_panorama_intermediate_state(self ):
        """simple docstring"""
        number_of_steps = 0
        def callback_fn(step , timestep , latents ) -> None:
            callback_fn.has_been_called = True
            nonlocal number_of_steps
            number_of_steps += 1
            if step == 1:
                latents = latents.detach().cpu().numpy()
                assert latents.shape == (1, 4, 64, 256)
                latents_slice = latents[0, -3:, -3:, -1]
                expected_slice = np.array(
[
0.1868_1869,
0.3390_7816,
0.536_1276,
0.1443_2865,
-0.0285_6611,
-0.7394_1123,
0.2339_7987,
0.4732_2682,
-0.3782_3164,
] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5E-2
            elif step == 2:
                latents = latents.detach().cpu().numpy()
                assert latents.shape == (1, 4, 64, 256)
                latents_slice = latents[0, -3:, -3:, -1]
                expected_slice = np.array(
[
0.1853_9645,
0.3398_7248,
0.537_8559,
0.1443_7142,
-0.0245_5261,
-0.733_8317,
0.2399_0755,
0.4735_6272,
-0.378_6505,
] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5E-2
        callback_fn.has_been_called = False
        model_ckpt = "stabilityai/stable-diffusion-2-base"
        scheduler = DDIMScheduler.from_pretrained(model_ckpt , subfolder="scheduler" )
        pipe = StableDiffusionPanoramaPipeline.from_pretrained(model_ckpt , scheduler=scheduler , safety_checker=None )
        pipe = pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        pipe.enable_attention_slicing()
        inputs = self.get_inputs()
        pipe(**inputs , callback=callback_fn , callback_steps=1 )
assert callback_fn.has_been_called
assert number_of_steps == 3
    def test_stable_diffusion_panorama_pipeline_with_sequential_cpu_offloading(self ):
        """simple docstring"""
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()
        model_ckpt = "stabilityai/stable-diffusion-2-base"
        scheduler = DDIMScheduler.from_pretrained(model_ckpt , subfolder="scheduler" )
        pipe = StableDiffusionPanoramaPipeline.from_pretrained(model_ckpt , scheduler=scheduler , safety_checker=None )
        pipe = pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        pipe.enable_attention_slicing(1 )
        pipe.enable_sequential_cpu_offload()
        inputs = self.get_inputs()
        _ = pipe(**inputs )
        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 5.5 GB is allocated
        assert mem_bytes < 5.5 * 10**9
| 71 |
import os
def solution() -> int:
    """simple docstring"""
    with open(os.path.dirname(__file__ ) + "/grid.txt" ) as f:
        l = []  # noqa: E741
        for _ in range(20 ):
            l.append([int(x ) for x in f.readline().split()] )
    maximum = 0
    # right
    for i in range(20 ):
        for j in range(17 ):
            temp = l[i][j] * l[i][j + 1] * l[i][j + 2] * l[i][j + 3]
            if temp > maximum:
                maximum = temp
    # down
    for i in range(17 ):
        for j in range(20 ):
            temp = l[i][j] * l[i + 1][j] * l[i + 2][j] * l[i + 3][j]
            if temp > maximum:
                maximum = temp
    # diagonal 1
    for i in range(17 ):
        for j in range(17 ):
            temp = l[i][j] * l[i + 1][j + 1] * l[i + 2][j + 2] * l[i + 3][j + 3]
            if temp > maximum:
                maximum = temp
    # diagonal 2
    for i in range(17 ):
        for j in range(3 , 20 ):
            temp = l[i][j] * l[i + 1][j - 1] * l[i + 2][j - 2] * l[i + 3][j - 3]
            if temp > maximum:
                maximum = temp
    return maximum
if __name__ == "__main__":
print(solution())
| 71 | 1 |
'''simple docstring'''
from __future__ import annotations
def average(nums: list[int | float] ) -> float:
    if not nums:
        raise ValueError("List is empty" )
    return sum(nums ) / len(nums )
if __name__ == "__main__":
import doctest
doctest.testmod()
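    # Illustrative check (example values):
    assert average([1, 2, 3] ) == 2.0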
| 250 |
'''simple docstring'''
import unittest
from transformers import PegasusConfig, PegasusTokenizer, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
    os.environ['XLA_PYTHON_CLIENT_ALLOCATOR'] = 'platform'
import jax
import jax.numpy as jnp
import numpy as np
from transformers import FlaxPegasusForConditionalGeneration, FlaxPegasusModel
@require_flax
class FlaxPegasusModelTester:
    config_cls = PegasusConfig
    config_updates = {}
    hidden_act = 'gelu'
    def __init__(self , parent , batch_size=13 , seq_length=7 , is_training=True , use_labels=False , vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=20 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , ):
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
    def prepare_config_and_inputs_for_common(self ):
        """simple docstring"""
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ).clip(3 , self.vocab_size )
        eos_tensor = np.expand_dims(np.array([self.eos_token_id] * self.batch_size ) , 1 )
        input_ids = np.concatenate([input_ids, eos_tensor] , axis=1 )
        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        config = self.config_cls(
            vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
        inputs_dict = prepare_pegasus_inputs_dict(config , input_ids , decoder_input_ids )
        return config, inputs_dict
    def check_use_cache_forward(self , model_class_name , config , inputs_dict ):
        """simple docstring"""
        max_decoder_length = 20
        model = model_class_name(config )
        encoder_outputs = model.encode(inputs_dict["input_ids"] )
        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )
        past_key_values = model.init_cache(decoder_input_ids.shape[0] , max_decoder_length , encoder_outputs )
        decoder_attention_mask = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype="i4" )
        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
        outputs_cache = model.decode(
            decoder_input_ids[:, :-1] , encoder_outputs , decoder_attention_mask=decoder_attention_mask , past_key_values=past_key_values , decoder_position_ids=decoder_position_ids , )
        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="i4" )
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:] , encoder_outputs , decoder_attention_mask=decoder_attention_mask , past_key_values=outputs_cache.past_key_values , decoder_position_ids=decoder_position_ids , )
        outputs = model.decode(decoder_input_ids , encoder_outputs )
        diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
        self.parent.assertTrue(diff < 1E-3 , msg=f'''Max diff is {diff}''' )
    def check_use_cache_forward_with_attn_mask(self , model_class_name , config , inputs_dict ):
        """simple docstring"""
        max_decoder_length = 20
        model = model_class_name(config )
        encoder_outputs = model.encode(inputs_dict["input_ids"] )
        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )
        decoder_attention_mask_cache = jnp.concatenate(
            [
                decoder_attention_mask,
                jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ),
            ] , axis=-1 , )
        past_key_values = model.init_cache(decoder_input_ids.shape[0] , max_decoder_length , encoder_outputs )
        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
        outputs_cache = model.decode(
            decoder_input_ids[:, :-1] , encoder_outputs , decoder_attention_mask=decoder_attention_mask_cache , past_key_values=past_key_values , decoder_position_ids=decoder_position_ids , )
        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="i4" )
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:] , encoder_outputs , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=decoder_attention_mask_cache , decoder_position_ids=decoder_position_ids , )
        outputs = model.decode(decoder_input_ids , encoder_outputs , decoder_attention_mask=decoder_attention_mask )
        diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
        self.parent.assertTrue(diff < 1E-3 , msg=f'''Max diff is {diff}''' )
def prepare_pegasus_inputs_dict(config , input_ids , decoder_input_ids , attention_mask=None , decoder_attention_mask=None , ) -> dict:
    if attention_mask is None:
        attention_mask = np.not_equal(input_ids , config.pad_token_id ).astype(np.int8 )
    if decoder_attention_mask is None:
        decoder_attention_mask = np.concatenate(
            [
                np.ones(decoder_input_ids[:, :1].shape , dtype=np.int8 ),
                np.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ).astype(np.int8 ),
            ] , axis=-1 , )
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
    }
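# Illustration (example shapes, not from the test file): for decoder_input_ids of shape
# (2, 5) containing no pad tokens, the derived decoder_attention_mask is all ones of
# shape (2, 5); the first position is always forced to 1 regardless of padding.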
@require_flax
class FlaxPegasusModelTest(FlaxModelTesterMixin , unittest.TestCase ):
    all_model_classes = (
        (
            FlaxPegasusForConditionalGeneration,
            FlaxPegasusModel,
        )
        if is_flax_available()
        else ()
    )
    all_generative_model_classes = (FlaxPegasusForConditionalGeneration,) if is_flax_available() else ()
    is_encoder_decoder = True
    test_pruning = False
    test_head_masking = False
    test_onnx = False
    def setUp(self ):
        """simple docstring"""
        self.model_tester = FlaxPegasusModelTester(self )
        self.config_tester = ConfigTester(self , config_class=PegasusConfig )
    def test_config(self ):
"""simple docstring"""
self.config_tester.run_common_tests()
    def test_use_cache_forward(self ):
        """simple docstring"""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward(model_class , config , inputs_dict )
    def test_use_cache_forward_with_attn_mask(self ):
        """simple docstring"""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward_with_attn_mask(model_class , config , inputs_dict )
    def test_encode(self ):
        """simple docstring"""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__ ):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict , model_class )
                model = model_class(config )
                @jax.jit
                def encode_jitted(input_ids , attention_mask=None , **kwargs ):
                    return model.encode(input_ids=input_ids , attention_mask=attention_mask )
                with self.subTest("JIT Enabled" ):
                    jitted_outputs = encode_jitted(**prepared_inputs_dict ).to_tuple()
                with self.subTest("JIT Disabled" ):
                    with jax.disable_jit():
                        outputs = encode_jitted(**prepared_inputs_dict ).to_tuple()
                self.assertEqual(len(jitted_outputs ) , len(outputs ) )
                for jitted_output, output in zip(jitted_outputs , outputs ):
                    self.assertEqual(jitted_output.shape , output.shape )
    def test_decode(self ):
        """simple docstring"""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__ ):
                model = model_class(config )
                encoder_outputs = model.encode(inputs_dict["input_ids"] , inputs_dict["attention_mask"] )
                prepared_inputs_dict = {
                    "decoder_input_ids": inputs_dict["decoder_input_ids"],
                    "decoder_attention_mask": inputs_dict["decoder_attention_mask"],
                    "encoder_outputs": encoder_outputs,
                }
                @jax.jit
                def decode_jitted(decoder_input_ids , decoder_attention_mask , encoder_outputs ):
                    return model.decode(
                        decoder_input_ids=decoder_input_ids , decoder_attention_mask=decoder_attention_mask , encoder_outputs=encoder_outputs , )
                with self.subTest("JIT Enabled" ):
                    jitted_outputs = decode_jitted(**prepared_inputs_dict ).to_tuple()
                with self.subTest("JIT Disabled" ):
                    with jax.disable_jit():
                        outputs = decode_jitted(**prepared_inputs_dict ).to_tuple()
                self.assertEqual(len(jitted_outputs ) , len(outputs ) )
                for jitted_output, output in zip(jitted_outputs , outputs ):
                    self.assertEqual(jitted_output.shape , output.shape )
@slow
    def test_model_from_pretrained(self ):
        """simple docstring"""
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("google/pegasus-large" , from_pt=True )
            input_ids = np.ones((1, 1) )
            outputs = model(input_ids )
            self.assertIsNotNone(outputs )
@slow
    def test_pegasus_xsum_summary(self ):
        """simple docstring"""
        model = FlaxPegasusForConditionalGeneration.from_pretrained("google/pegasus-xsum" )
        tokenizer = PegasusTokenizer.from_pretrained("google/pegasus-xsum" )
        src_text = [
" PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.",
" The London trio are up for best UK act and best album, as well as getting two nominations in the best song category.\"We got told like this morning 'Oh I think you're nominated'\", said Dappy.\"And I was like 'Oh yeah, which one?' And now we've got nominated for four awards. I mean, wow!\"Bandmate Fazer added: \"We thought it's best of us to come down and mingle with everyone and say hello to the cameras. And now we find we've got four nominations.\"The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn't be too disappointed if they didn't win this time around.\"At the end of the day we're grateful to be where we are in our careers.\"If it don't happen then it don't happen - live to fight another day and keep on making albums and hits for the fans.\"Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers' All These Things That I've Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year's Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border.\"We just done Edinburgh the other day,\" said Dappy.\"We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!\" ",
]
        tgt_text = [
"California's largest electricity provider has turned off power to hundreds of thousands of customers.",
"Pop group N-Dubz have revealed they were surprised to get four nominations for this year's Mobo Awards.",
]
        inputs = tokenizer(src_text , return_tensors="np" , truncation=True , max_length=512 , padding=True )
        translated_tokens = model.generate(**inputs , num_beams=2 ).sequences
        decoded = tokenizer.batch_decode(translated_tokens , skip_special_tokens=True )
assert tgt_text == decoded
| 250 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
'configuration_transfo_xl': ['TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP', 'TransfoXLConfig'],
'tokenization_transfo_xl': ['TransfoXLCorpus', 'TransfoXLTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_transfo_xl"] = [
'TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST',
'AdaptiveEmbedding',
'TransfoXLForSequenceClassification',
'TransfoXLLMHeadModel',
'TransfoXLModel',
'TransfoXLPreTrainedModel',
'load_tf_weights_in_transfo_xl',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_transfo_xl"] = [
'TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFAdaptiveEmbedding',
'TFTransfoXLForSequenceClassification',
'TFTransfoXLLMHeadModel',
'TFTransfoXLMainLayer',
'TFTransfoXLModel',
'TFTransfoXLPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_transfo_xl import TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP, TransfoXLConfig
from .tokenization_transfo_xl import TransfoXLCorpus, TransfoXLTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_transfo_xl import (
TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
AdaptiveEmbedding,
TransfoXLForSequenceClassification,
TransfoXLLMHeadModel,
TransfoXLModel,
TransfoXLPreTrainedModel,
load_tf_weights_in_transfo_xl,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_transfo_xl import (
TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFAdaptiveEmbedding,
TFTransfoXLForSequenceClassification,
TFTransfoXLLMHeadModel,
TFTransfoXLMainLayer,
TFTransfoXLModel,
TFTransfoXLPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
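# Note (hedged): once the lazy module replaces this module in `sys.modules`, an import such as
# `from transformers.models.transfo_xl import TransfoXLConfig` resolves through _import_structure
# and only loads the heavy torch/tensorflow submodules on first attribute access.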
| 86 |
"""simple docstring"""
from __future__ import annotations
import sys
from collections import deque
from typing import Generic, TypeVar
_lowercase : List[Any] = TypeVar('T')
class LRUCache(Generic[T] ):
    dq_store: deque[T]  # Cache store of keys
    key_reference: set[T]  # References of the keys in cache
    _MAX_CAPACITY: int = 10  # Maximum capacity of cache
    def __init__(self , n: int ):
        self.dq_store = deque()
        self.key_reference = set()
        if not n:
            LRUCache._MAX_CAPACITY = sys.maxsize
        elif n < 0:
            raise ValueError('''n should be an integer greater than 0.''' )
        else:
            LRUCache._MAX_CAPACITY = n
    def refer(self , x: T ):
        if x not in self.key_reference:
            if len(self.dq_store ) == LRUCache._MAX_CAPACITY:
                last_element = self.dq_store.pop()
                self.key_reference.remove(last_element )
        else:
            self.dq_store.remove(x )
        self.dq_store.appendleft(x )
        self.key_reference.add(x )
    def display(self ):
        for k in self.dq_store:
            print(k )
    def __repr__(self ):
        return F'''LRUCache({self._MAX_CAPACITY}) => {list(self.dq_store )}'''
if __name__ == "__main__":
import doctest
doctest.testmod()
_lowercase : LRUCache[str | int] = LRUCache(4)
lru_cache.refer('A')
lru_cache.refer(2)
lru_cache.refer(3)
lru_cache.refer('A')
lru_cache.refer(4)
lru_cache.refer(5)
lru_cache.display()
print(lru_cache)
assert str(lru_cache) == "LRUCache(4) => [5, 4, 'A', 3]"
| 86 | 1 |
"""simple docstring"""
from __future__ import annotations
def fractional_knapsack(value: list[int] , weight: list[int] , capacity: int ) -> tuple[float, list[float]]:
    index = list(range(len(value ) ) )
    ratio = [v / w for v, w in zip(value , weight )]
    index.sort(key=lambda i: ratio[i] , reverse=True )
    max_value: float = 0
    fractions: list[float] = [0] * len(value )
    for i in index:
        if weight[i] <= capacity:
            fractions[i] = 1
            max_value += value[i]
            capacity -= weight[i]
        else:
            fractions[i] = capacity / weight[i]
            max_value += value[i] * capacity / weight[i]
            break
    return max_value, fractions
if __name__ == "__main__":
import doctest
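    # Illustrative run (example values): items worth [60, 100, 120] with weights
    # [10, 20, 30] and capacity 50 give the classic optimum of 240.0.
    print(fractional_knapsack([60, 100, 120] , [10, 20, 30] , 50 ) )  # (240.0, [1, 1, 0.666...])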
    doctest.testmod()
| 160 |
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import torch
class TorchFormatter(TensorFormatter[Mapping, "torch.Tensor", Mapping] ):
    def __init__(self , features=None , **torch_tensor_kwargs ):
        '''simple docstring'''
        super().__init__(features=features )
        self.torch_tensor_kwargs = torch_tensor_kwargs
        import torch  # noqa import torch at initialization
    def _consolidate(self , column ):
        '''simple docstring'''
        import torch
        if isinstance(column , list ) and column:
            if all(
                isinstance(x , torch.Tensor ) and x.shape == column[0].shape and x.dtype == column[0].dtype
                for x in column ):
                return torch.stack(column )
        return column
    def _tensorize(self , value ):
        '''simple docstring'''
        import torch
        if isinstance(value , (str, bytes, type(None )) ):
            return value
        elif isinstance(value , (np.character, np.ndarray) ) and np.issubdtype(value.dtype , np.character ):
            return value.tolist()
        default_dtype = {}
        if isinstance(value , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.integer ):
            default_dtype = {'dtype': torch.int64}
        elif isinstance(value , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.floating ):
            default_dtype = {'dtype': torch.float32}
        elif config.PIL_AVAILABLE and "PIL" in sys.modules:
            import PIL.Image
            if isinstance(value , PIL.Image.Image ):
                value = np.asarray(value )
        return torch.tensor(value , **{**default_dtype, **self.torch_tensor_kwargs} )
    def _recursive_tensorize(self , data_struct ):
        '''simple docstring'''
        import torch
        # support for torch, tf, jax etc.
        if hasattr(data_struct , '__array__' ) and not isinstance(data_struct , torch.Tensor ):
            data_struct = data_struct.__array__()
        # support for nested types like struct of list of struct
        if isinstance(data_struct , np.ndarray ):
            if data_struct.dtype == object:  # torch tensors cannot be instantiated from an array of objects
                return self._consolidate([self._recursive_tensorize(substruct ) for substruct in data_struct] )
        elif isinstance(data_struct , (list, tuple) ):
            return self._consolidate([self._recursive_tensorize(substruct ) for substruct in data_struct] )
        return self._tensorize(data_struct )
    def recursive_tensorize(self , data_struct ):
        '''simple docstring'''
        return map_nested(self._recursive_tensorize , data_struct , map_list=False )
    def format_row(self , pa_table ) -> Mapping:
        '''simple docstring'''
        row = self.numpy_arrow_extractor().extract_row(pa_table )
        row = self.python_features_decoder.decode_row(row )
        return self.recursive_tensorize(row )
def A__ ( self , lowerCAmelCase ) -> "torch.Tensor":
'''simple docstring'''
_lowercase =self.numpy_arrow_extractor().extract_column(lowerCAmelCase )
_lowercase =self.python_features_decoder.decode_column(lowerCAmelCase , pa_table.column_names[0] )
_lowercase =self.recursive_tensorize(lowerCAmelCase )
_lowercase =self._consolidate(lowerCAmelCase )
return column
    def format_batch(self , pa_table ) -> Mapping:
        '''simple docstring'''
        batch = self.numpy_arrow_extractor().extract_batch(pa_table )
        batch = self.python_features_decoder.decode_batch(batch )
        batch = self.recursive_tensorize(batch )
        for column_name in batch:
            batch[column_name] = self._consolidate(batch[column_name] )
        return batch
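# Illustration (example values): `_consolidate` stacks a column of equal-shape, equal-dtype
# tensors, e.g. [tensor([1, 2]), tensor([3, 4])] -> tensor([[1, 2], [3, 4]]); ragged or
# mixed-dtype columns are returned unchanged as a list.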
| 205 | 0 |
'''simple docstring'''
import math
import qiskit
def quantum_full_adder(input_1: int = 1 , input_2: int = 1 , carry_in: int = 1 ) -> qiskit.result.counts.Counts:
    '''simple docstring'''
    if (
        isinstance(input_1 , str )
        or isinstance(input_2 , str )
        or isinstance(carry_in , str )
    ):
        raise TypeError('inputs must be integers.' )
    if (input_1 < 0) or (input_2 < 0) or (carry_in < 0):
        raise ValueError('inputs must be positive.' )
    if (
        (math.floor(input_1 ) != input_1)
        or (math.floor(input_2 ) != input_2)
        or (math.floor(carry_in ) != carry_in)
    ):
        raise ValueError('inputs must be exact integers.' )
    if (input_1 > 2) or (input_2 > 2) or (carry_in > 2):
        raise ValueError('inputs must be less or equal to 2.' )
    # build registers
    quantum_r = qiskit.QuantumRegister(4 , 'qr' )
    classic_r = qiskit.ClassicalRegister(2 , 'cr' )
    # list the entries
    entry = [input_1, input_2, carry_in]
    quantum_circuit = qiskit.QuantumCircuit(quantum_r , classic_r )
    for i in range(0 , 3 ):
        if entry[i] == 2:
            quantum_circuit.h(i )  # for hadamard entries
        elif entry[i] == 1:
            quantum_circuit.x(i )  # for 1 entries
        elif entry[i] == 0:
            quantum_circuit.i(i )  # for 0 entries
    # build the circuit
    quantum_circuit.ccx(0 , 1 , 3 )  # ccx = toffoli gate
    quantum_circuit.cx(0 , 1 )
    quantum_circuit.ccx(1 , 2 , 3 )
    quantum_circuit.cx(1 , 2 )
    quantum_circuit.cx(0 , 1 )
    quantum_circuit.measure([2, 3] , classic_r )  # measure the last two qbits
    backend = qiskit.Aer.get_backend('aer_simulator' )
    job = qiskit.execute(quantum_circuit , backend , shots=10_00 )
    return job.result().get_counts(quantum_circuit )
if __name__ == "__main__":
print(f'''Total sum count for state is: {quantum_full_adder(1, 1, 1)}''')
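    # With the default inputs (1 + 1 + 1 = 0b11), every shot should measure '11'
    # (sum bit 1, carry-out 1), assuming a local Aer simulator is available.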
| 104 |
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from .feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from .utils import PaddingStrategy, TensorType, is_tf_tensor, is_torch_tensor, logging, to_numpy
a_ : List[str] = logging.get_logger(__name__)
class SequenceFeatureExtractor(FeatureExtractionMixin ):
    def __init__(self , feature_size , sampling_rate , padding_value , **kwargs ):
        self.feature_size = feature_size
        self.sampling_rate = sampling_rate
        self.padding_value = padding_value
        self.padding_side = kwargs.pop('padding_side' , 'right' )
        self.return_attention_mask = kwargs.pop('return_attention_mask' , True )
        super().__init__(**kwargs )
    def pad(self , processed_features , padding=True , max_length=None , truncation=False , pad_to_multiple_of=None , return_attention_mask=None , return_tensors=None , ) -> BatchFeature:
        # If we have a list of dicts, let's convert it in a dict of lists
        # We do this to allow using this method as a collate_fn function in PyTorch Dataloader
        if isinstance(processed_features , (list, tuple) ) and isinstance(processed_features[0] , (dict, BatchFeature) ):
            processed_features = {
                key: [example[key] for example in processed_features] for key in processed_features[0].keys()
            }
        # The model's main input name, usually `input_values`, has to be passed for padding
        if self.model_input_names[0] not in processed_features:
            raise ValueError(
                'You should supply an instance of `transformers.BatchFeature` or list of `transformers.BatchFeature`'
                f' to this method that includes {self.model_input_names[0]}, but you provided'
                f' {list(processed_features.keys() )}' )
        required_input = processed_features[self.model_input_names[0]]
        return_attention_mask = (
            return_attention_mask if return_attention_mask is not None else self.return_attention_mask
        )
        if len(required_input ) == 0:
            if return_attention_mask:
                processed_features['attention_mask'] = []
            return processed_features
        # If we have PyTorch/TF tensors or lists as inputs, we cast them as Numpy arrays
        # and rebuild them afterwards if no return_tensors is specified
        # Note that we lose the specific device the tensor may be on for PyTorch
        first_element = required_input[0]
        if isinstance(first_element , (list, tuple) ):
            # first_element might be an empty list/tuple in some edge cases so we grab the first non empty element.
            index = 0
            while len(required_input[index] ) == 0:
                index += 1
            if index < len(required_input ):
                first_element = required_input[index][0]
        if return_tensors is None:
            if is_tf_tensor(first_element ):
                return_tensors = 'tf'
            elif is_torch_tensor(first_element ):
                return_tensors = 'pt'
            elif isinstance(first_element , (int, float, list, tuple, np.ndarray) ):
                return_tensors = 'np'
            else:
                raise ValueError(
                    f'type of {first_element} unknown: {type(first_element )}. '
                    'Should be one of a python, numpy, pytorch or tensorflow object.' )
        for key, value in processed_features.items():
            if isinstance(value[0] , (int, float) ):
                processed_features[key] = to_numpy(value )
            else:
                processed_features[key] = [to_numpy(v ) for v in value]
        # Convert padding_strategy in PaddingStrategy
        padding_strategy = self._get_padding_strategies(padding=padding , max_length=max_length )
        required_input = processed_features[self.model_input_names[0]]
        batch_size = len(required_input )
        if not all(len(v ) == batch_size for v in processed_features.values() ):
            raise ValueError('Some items in the output dictionary have a different batch size than others.' )
        truncated_inputs = []
        for i in range(batch_size ):
            inputs = {k: v[i] for k, v in processed_features.items()}
            # truncation
            inputs_slice = self._truncate(
                inputs , max_length=max_length , pad_to_multiple_of=pad_to_multiple_of , truncation=truncation , )
            truncated_inputs.append(inputs_slice )
        if padding_strategy == PaddingStrategy.LONGEST:
            # make sure that `max_length` cannot be longer than the longest truncated length
            max_length = max(len(input_slice[self.model_input_names[0]] ) for input_slice in truncated_inputs )
            padding_strategy = PaddingStrategy.MAX_LENGTH
        batch_outputs = {}
        for i in range(batch_size ):
            # padding
            outputs = self._pad(
                truncated_inputs[i] , max_length=max_length , padding_strategy=padding_strategy , pad_to_multiple_of=pad_to_multiple_of , return_attention_mask=return_attention_mask , )
            for key, value in outputs.items():
                if key not in batch_outputs:
                    batch_outputs[key] = []
                if value.dtype is np.dtype(np.float64 ):
                    value = value.astype(np.float32 )
                batch_outputs[key].append(value )
        return BatchFeature(batch_outputs , tensor_type=return_tensors )
    def _pad(self , processed_features , max_length=None , padding_strategy=PaddingStrategy.DO_NOT_PAD , pad_to_multiple_of=None , return_attention_mask=None , ) -> dict:
        required_input = processed_features[self.model_input_names[0]]
        if padding_strategy == PaddingStrategy.LONGEST:
            max_length = len(required_input )
        if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
            max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
        needs_to_be_padded = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(required_input ) < max_length
        if return_attention_mask and "attention_mask" not in processed_features:
            processed_features['attention_mask'] = np.ones(len(required_input ) , dtype=np.int32 )
        if needs_to_be_padded:
            difference = max_length - len(required_input )
            if self.padding_side == "right":
                if return_attention_mask:
                    processed_features['attention_mask'] = np.pad(
                        processed_features['attention_mask'] , (0, difference) )
                padding_shape = ((0, difference), (0, 0)) if self.feature_size > 1 else (0, difference)
                processed_features[self.model_input_names[0]] = np.pad(
                    required_input , padding_shape , 'constant' , constant_values=self.padding_value )
            elif self.padding_side == "left":
                if return_attention_mask:
                    processed_features['attention_mask'] = np.pad(
                        processed_features['attention_mask'] , (difference, 0) )
                padding_shape = ((difference, 0), (0, 0)) if self.feature_size > 1 else (difference, 0)
                processed_features[self.model_input_names[0]] = np.pad(
                    required_input , padding_shape , 'constant' , constant_values=self.padding_value )
            else:
                raise ValueError('Invalid padding strategy:' + str(self.padding_side ) )
        return processed_features
    def _truncate(self , processed_features , max_length=None , pad_to_multiple_of=None , truncation=None , ):
        if not truncation:
            return processed_features
        elif truncation and max_length is None:
            raise ValueError('When setting ``truncation=True``, make sure that ``max_length`` is defined.' )
        required_input = processed_features[self.model_input_names[0]]
        # find `max_length` that fits `pad_to_multiple_of`
        if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
            max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
        needs_to_be_truncated = len(required_input ) > max_length
        if needs_to_be_truncated:
            processed_features[self.model_input_names[0]] = processed_features[self.model_input_names[0]][:max_length]
            if "attention_mask" in processed_features:
                processed_features['attention_mask'] = processed_features['attention_mask'][:max_length]
        return processed_features
    def _get_padding_strategies(self , padding=False , max_length=None ):
        # Get padding strategy
        if padding is not False:
            if padding is True:
                padding_strategy = PaddingStrategy.LONGEST  # Default to pad to the longest sequence in the batch
            elif not isinstance(padding , PaddingStrategy ):
                padding_strategy = PaddingStrategy(padding )
            elif isinstance(padding , PaddingStrategy ):
                padding_strategy = padding
        else:
            padding_strategy = PaddingStrategy.DO_NOT_PAD
        # Set max length if needed
        if max_length is None:
            if padding_strategy == PaddingStrategy.MAX_LENGTH:
                raise ValueError(
                    f'When setting ``padding={PaddingStrategy.MAX_LENGTH}``, make sure that max_length is defined' )
        # Test if we have a padding value
        if padding_strategy != PaddingStrategy.DO_NOT_PAD and (self.padding_value is None):
            raise ValueError(
                'Asking to pad but the feature_extractor does not have a padding value. Please select a value to use'
                ' as `padding_value`. For example: `feature_extractor.padding_value = 0.0`.' )
        return padding_strategy
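# Usage sketch (hypothetical values; concrete feature extractors subclass the class above):
#
#   features = {"input_values": [[0.1, 0.2, 0.3], [0.4, 0.5]]}
#   batch = feature_extractor.pad(features, padding=True, return_tensors="np")
#   # -> "input_values" padded to shape (2, 3) with `padding_value`, plus an
#   #    "attention_mask" of shape (2, 3) marking the real (unpadded) samples.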
| 104 | 1 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.auto import AutoModelForSequenceClassification, AutoTokenizer
from .base import PipelineTool
class TextClassificationTool(PipelineTool ):
    """simple docstring"""
    default_checkpoint = """facebook/bart-large-mnli"""
    description = (
        """This is a tool that classifies an English text using provided labels. It takes two inputs: `text`, which """
        """should be the text to classify, and `labels`, which should be the list of labels to use for classification. """
        """It returns the most likely label in the list of provided `labels` for the input text."""
    )
    name = """text_classifier"""
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSequenceClassification
    inputs = ["""text""", ["""text"""]]
    outputs = ["""text"""]
    def setup(self ):
        super().setup()
        config = self.model.config
        self.entailment_id = -1
        for idx, label in config.id2label.items():
            if label.lower().startswith("""entail""" ):
                self.entailment_id = int(idx )
        if self.entailment_id == -1:
            raise ValueError("""Could not determine the entailment ID from the model config, please pass it at init.""" )
    def encode(self , text , labels ):
        self._labels = labels
        return self.pre_processor(
            [text] * len(labels ) , [F"This example is {label}" for label in labels] , return_tensors="""pt""" , padding="""max_length""" , )
    def decode(self , outputs ):
        logits = outputs.logits
        label_id = torch.argmax(logits[:, 2] ).item()
        return self._labels[label_id]
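# Hypothetical usage sketch (labels are illustrative; in practice the tool is invoked by
# the transformers Agents runtime rather than constructed directly):
#
#   classifier = TextClassificationTool()
#   classifier("This new album is fantastic" , labels=["positive", "negative"] )  # -> "positive"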
| 244 |
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_tf_available
from transformers.testing_utils import require_tf
if is_tf_available():
import tensorflow as tf
from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments
@require_tf
class TFBenchmarkTest(unittest.TestCase ):
    """simple docstring"""
    def check_results_dict_not_empty(self , results ):
        for model_result in results.values():
            for batch_size, sequence_length in zip(model_result["bs"] , model_result["ss"] ):
                result = model_result["result"][batch_size][sequence_length]
                self.assertIsNotNone(result )
    def test_inference_no_configs_eager(self ):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID] , training=False , inference=True , sequence_lengths=[8] , batch_sizes=[1] , eager_mode=True , multi_process=False , )
        benchmark = TensorFlowBenchmark(benchmark_args )
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result )
        self.check_results_dict_not_empty(results.memory_inference_result )
    def test_inference_no_configs_only_pretrain(self ):
        MODEL_ID = "sgugger/tiny-distilbert-classification"
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID] , training=False , inference=True , sequence_lengths=[8] , batch_sizes=[1] , multi_process=False , only_pretrain_model=True , )
        benchmark = TensorFlowBenchmark(benchmark_args )
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result )
        self.check_results_dict_not_empty(results.memory_inference_result )
    def test_inference_no_configs(self ):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID] , training=False , inference=True , sequence_lengths=[8] , batch_sizes=[1] , multi_process=False , )
        benchmark = TensorFlowBenchmark(benchmark_args )
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result )
        self.check_results_dict_not_empty(results.memory_inference_result )
    def test_inference_with_configs_eager(self ):
        MODEL_ID = "sshleifer/tiny-gpt2"
        config = AutoConfig.from_pretrained(MODEL_ID )
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID] , training=False , inference=True , sequence_lengths=[8] , batch_sizes=[1] , eager_mode=True , multi_process=False , )
        benchmark = TensorFlowBenchmark(benchmark_args , [config] )
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result )
        self.check_results_dict_not_empty(results.memory_inference_result )
    def test_inference_with_configs(self ):
        MODEL_ID = "sshleifer/tiny-gpt2"
        config = AutoConfig.from_pretrained(MODEL_ID )
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID] , training=False , inference=True , sequence_lengths=[8] , batch_sizes=[1] , multi_process=False , )
        benchmark = TensorFlowBenchmark(benchmark_args , [config] )
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result )
        self.check_results_dict_not_empty(results.memory_inference_result )
    def test_train_no_configs(self ):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID] , training=True , inference=False , sequence_lengths=[8] , batch_sizes=[1] , multi_process=False , )
        benchmark = TensorFlowBenchmark(benchmark_args )
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result )
        self.check_results_dict_not_empty(results.memory_train_result )
    def test_train_with_configs(self ):
        MODEL_ID = "sshleifer/tiny-gpt2"
        config = AutoConfig.from_pretrained(MODEL_ID )
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID] , training=True , inference=False , sequence_lengths=[8] , batch_sizes=[1] , multi_process=False , )
        benchmark = TensorFlowBenchmark(benchmark_args , [config] )
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result )
        self.check_results_dict_not_empty(results.memory_train_result )
    def test_inference_encoder_decoder_with_configs(self ):
        MODEL_ID = "patrickvonplaten/t5-tiny-random"
        config = AutoConfig.from_pretrained(MODEL_ID )
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID] , training=False , inference=True , sequence_lengths=[8] , batch_sizes=[1] , multi_process=False , )
        benchmark = TensorFlowBenchmark(benchmark_args , configs=[config] )
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result )
        self.check_results_dict_not_empty(results.memory_inference_result )
    @unittest.skipIf(is_tf_available() and len(tf.config.list_physical_devices("GPU" ) ) == 0 , "Cannot do xla on CPU." )
    def test_inference_no_configs_xla(self ):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID] , training=False , inference=True , sequence_lengths=[8] , batch_sizes=[1] , use_xla=True , multi_process=False , )
        benchmark = TensorFlowBenchmark(benchmark_args )
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result )
        self.check_results_dict_not_empty(results.memory_inference_result )
def UpperCAmelCase_ (self ):
UpperCamelCase__ = """sshleifer/tiny-gpt2"""
with tempfile.TemporaryDirectory() as tmp_dir:
UpperCamelCase__ = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , inference=SCREAMING_SNAKE_CASE_ , save_to_csv=SCREAMING_SNAKE_CASE_ , sequence_lengths=[8] , batch_sizes=[1] , inference_time_csv_file=os.path.join(SCREAMING_SNAKE_CASE_ , """inf_time.csv""" ) , inference_memory_csv_file=os.path.join(SCREAMING_SNAKE_CASE_ , """inf_mem.csv""" ) , env_info_csv_file=os.path.join(SCREAMING_SNAKE_CASE_ , """env.csv""" ) , multi_process=SCREAMING_SNAKE_CASE_ , )
UpperCamelCase__ = TensorFlowBenchmark(SCREAMING_SNAKE_CASE_ )
benchmark.run()
self.assertTrue(Path(os.path.join(SCREAMING_SNAKE_CASE_ , """inf_time.csv""" ) ).exists() )
self.assertTrue(Path(os.path.join(SCREAMING_SNAKE_CASE_ , """inf_mem.csv""" ) ).exists() )
self.assertTrue(Path(os.path.join(SCREAMING_SNAKE_CASE_ , """env.csv""" ) ).exists() )
def UpperCAmelCase_ (self ):
UpperCamelCase__ = """sshleifer/tiny-gpt2"""
def _check_summary_is_not_empty(SCREAMING_SNAKE_CASE_ ):
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE_ , """sequential""" ) )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE_ , """cumulative""" ) )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE_ , """current""" ) )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE_ , """total""" ) )
with tempfile.TemporaryDirectory() as tmp_dir:
UpperCamelCase__ = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , inference=SCREAMING_SNAKE_CASE_ , sequence_lengths=[8] , batch_sizes=[1] , log_filename=os.path.join(SCREAMING_SNAKE_CASE_ , """log.txt""" ) , log_print=SCREAMING_SNAKE_CASE_ , trace_memory_line_by_line=SCREAMING_SNAKE_CASE_ , eager_mode=SCREAMING_SNAKE_CASE_ , multi_process=SCREAMING_SNAKE_CASE_ , )
UpperCamelCase__ = TensorFlowBenchmark(SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = benchmark.run()
_check_summary_is_not_empty(result.inference_summary )
self.assertTrue(Path(os.path.join(SCREAMING_SNAKE_CASE_ , """log.txt""" ) ).exists() )
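
# Illustrative only: the benchmark API the tests above exercise, driven as a standalone
# script. It relies on the same imports as this test module; the tiny model id is the one
# the tests use, and timings on it are meaningless beyond checking that the plumbing works.
if __name__ == "__main__":
    benchmark_args = TensorFlowBenchmarkArguments(
        models=["sshleifer/tiny-gpt2"],
        inference=True,
        training=False,
        sequence_lengths=[8],
        batch_sizes=[1],
        multi_process=False,
    )
    results = TensorFlowBenchmark(benchmark_args).run()
    # time_inference_result maps model name -> batch size -> sequence length -> seconds
    print(results.time_inference_result)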
| 244 | 1 |
"""simple docstring"""
import csv
from collections import defaultdict
from dataclasses import dataclass, field
from typing import List, Optional
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.ticker import ScalarFormatter
from transformers import HfArgumentParser
def list_field(default=None, metadata=None):
    return field(default_factory=lambda: default, metadata=metadata)
@dataclass
class PlotArguments:
'''simple docstring'''
    csv_file: str = field(
        metadata={"help": "The csv file to plot."},
    )
    plot_along_batch: bool = field(
        default=False,
        metadata={"help": "Whether to plot along batch size or sequence length. Defaults to sequence length."},
    )
    is_time: bool = field(
        default=False,
        metadata={"help": "Whether the csv file has time results or memory results. Defaults to memory results."},
    )
    no_log_scale: bool = field(
        default=False,
        metadata={"help": "Disable logarithmic scale when plotting"},
    )
    is_train: bool = field(
        default=False,
        metadata={
            "help": "Whether the csv file has training results or inference results. Defaults to inference results."
        },
    )
    figure_png_file: Optional[str] = field(
        default=None,
        metadata={"help": "Filename under which the plot will be saved. If unused no plot is saved."},
    )
    short_model_names: Optional[List[str]] = list_field(
        default=None, metadata={"help": "List of model names that are used instead of the ones in the csv file."}
    )
def can_convert_to_int(string):
    try:
        int(string)
        return True
    except ValueError:
        return False


def can_convert_to_float(string):
    try:
        float(string)
        return True
    except ValueError:
        return False
class Plot:
'''simple docstring'''
    def __init__(self, args):
        self.args = args
        self.result_dict = defaultdict(lambda: {"bsz": [], "seq_len": [], "result": {}})

        with open(self.args.csv_file, newline="") as csv_file:
            reader = csv.DictReader(csv_file)
            for row in reader:
                model_name = row["model"]
                self.result_dict[model_name]["bsz"].append(int(row["batch_size"]))
                self.result_dict[model_name]["seq_len"].append(int(row["sequence_length"]))
                if can_convert_to_int(row["result"]):
                    # value is not None
                    self.result_dict[model_name]["result"][
                        (int(row["batch_size"]), int(row["sequence_length"]))
                    ] = int(row["result"])
                elif can_convert_to_float(row["result"]):
                    # value is not None
                    self.result_dict[model_name]["result"][
                        (int(row["batch_size"]), int(row["sequence_length"]))
                    ] = float(row["result"])
    def plot(self):
        fig, ax = plt.subplots()
        title_str = "Time usage" if self.args.is_time else "Memory usage"
        title_str = title_str + " for training" if self.args.is_train else title_str + " for inference"

        if not self.args.no_log_scale:
            # set logarithm scales
            ax.set_xscale("log")
            ax.set_yscale("log")

        for axis in [ax.xaxis, ax.yaxis]:
            axis.set_major_formatter(ScalarFormatter())

        for model_name_idx, model_name in enumerate(self.result_dict.keys()):
            batch_sizes = sorted(set(self.result_dict[model_name]["bsz"]))
            sequence_lengths = sorted(set(self.result_dict[model_name]["seq_len"]))
            results = self.result_dict[model_name]["result"]

            (x_axis_array, inner_loop_array) = (
                (batch_sizes, sequence_lengths) if self.args.plot_along_batch else (sequence_lengths, batch_sizes)
            )

            label_model_name = (
                model_name if self.args.short_model_names is None else self.args.short_model_names[model_name_idx]
            )

            for inner_loop_value in inner_loop_array:
                if self.args.plot_along_batch:
                    y_axis_array = np.asarray(
                        [results[(x, inner_loop_value)] for x in x_axis_array if (x, inner_loop_value) in results],
                        dtype=int,
                    )
                else:
                    y_axis_array = np.asarray(
                        [results[(inner_loop_value, x)] for x in x_axis_array if (inner_loop_value, x) in results],
                        dtype=np.float32,
                    )

                (x_axis_label, inner_loop_label) = (
                    ("batch_size", "len") if self.args.plot_along_batch else ("in #tokens", "bsz")
                )

                x_axis_array = np.asarray(x_axis_array, int)[: len(y_axis_array)]
                plt.scatter(
                    x_axis_array, y_axis_array, label=f"{label_model_name} - {inner_loop_label}: {inner_loop_value}"
                )
                plt.plot(x_axis_array, y_axis_array, "--")

            title_str += f" {label_model_name} vs."

        title_str = title_str[:-4]
        y_axis_label = "Time in s" if self.args.is_time else "Memory in MB"

        # plot
        plt.title(title_str)
        plt.xlabel(x_axis_label)
        plt.ylabel(y_axis_label)
        plt.legend()

        if self.args.figure_png_file is not None:
            plt.savefig(self.args.figure_png_file)
        else:
            plt.show()
def main():
    parser = HfArgumentParser(PlotArguments)
    plot_args = parser.parse_args_into_dataclasses()[0]
    plot = Plot(args=plot_args)
    plot.plot()
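
# Descriptive note: the reader above expects csv files shaped like the ones the benchmark
# utilities write with their `*_csv_file` options, one row per measurement with `model`,
# `batch_size`, `sequence_length` and `result` columns, for example:
#
#   model,batch_size,sequence_length,result
#   sshleifer/tiny-gpt2,1,8,0.006
#
# (the model id and the value are illustrative).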
if __name__ == "__main__":
main()
| 11 |
"""simple docstring"""
import torch
from diffusers import UnCLIPScheduler
from .test_schedulers import SchedulerCommonTest
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__UpperCAmelCase : List[str] = (UnCLIPScheduler,)
def __UpperCAmelCase ( self , **_a ):
__a = {
'''num_train_timesteps''': 1_000,
'''variance_type''': '''fixed_small_log''',
'''clip_sample''': True,
'''clip_sample_range''': 1.0,
'''prediction_type''': '''epsilon''',
}
config.update(**_a )
return config
def __UpperCAmelCase ( self ):
for timesteps in [1, 5, 100, 1_000]:
self.check_over_configs(num_train_timesteps=_a )
def __UpperCAmelCase ( self ):
for variance in ["fixed_small_log", "learned_range"]:
self.check_over_configs(variance_type=_a )
def __UpperCAmelCase ( self ):
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=_a )
def __UpperCAmelCase ( self ):
for clip_sample_range in [1, 5, 10, 20]:
self.check_over_configs(clip_sample_range=_a )
def __UpperCAmelCase ( self ):
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(prediction_type=_a )
def __UpperCAmelCase ( self ):
for time_step in [0, 500, 999]:
for prev_timestep in [None, 5, 100, 250, 500, 750]:
if prev_timestep is not None and prev_timestep >= time_step:
continue
self.check_over_forward(time_step=_a , prev_timestep=_a )
def __UpperCAmelCase ( self ):
__a = self.scheduler_classes[0]
__a = self.get_scheduler_config(variance_type='''fixed_small_log''' )
__a = scheduler_class(**_a )
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 1.0_000E-10 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.054_9625 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.999_4987 ) ) < 1E-5
def __UpperCAmelCase ( self ):
__a = self.scheduler_classes[0]
__a = self.get_scheduler_config(variance_type='''learned_range''' )
__a = scheduler_class(**_a )
__a = 0.5
assert scheduler._get_variance(1 , predicted_variance=_a ) - -10.171_2790 < 1E-5
assert scheduler._get_variance(487 , predicted_variance=_a ) - -5.799_8052 < 1E-5
assert scheduler._get_variance(999 , predicted_variance=_a ) - -0.001_0011 < 1E-5
def __UpperCAmelCase ( self ):
__a = self.scheduler_classes[0]
__a = self.get_scheduler_config()
__a = scheduler_class(**_a )
__a = scheduler.timesteps
__a = self.dummy_model()
__a = self.dummy_sample_deter
__a = torch.manual_seed(0 )
for i, t in enumerate(_a ):
# 1. predict noise residual
__a = model(_a , _a )
# 2. predict previous mean of sample x_t-1
__a = scheduler.step(_a , _a , _a , generator=_a ).prev_sample
__a = pred_prev_sample
__a = torch.sum(torch.abs(_a ) )
__a = torch.mean(torch.abs(_a ) )
assert abs(result_sum.item() - 252.268_2495 ) < 1E-2
assert abs(result_mean.item() - 0.328_4743 ) < 1E-3
def __UpperCAmelCase ( self ):
__a = self.scheduler_classes[0]
__a = self.get_scheduler_config()
__a = scheduler_class(**_a )
scheduler.set_timesteps(25 )
__a = scheduler.timesteps
__a = self.dummy_model()
__a = self.dummy_sample_deter
__a = torch.manual_seed(0 )
for i, t in enumerate(_a ):
# 1. predict noise residual
__a = model(_a , _a )
if i + 1 == timesteps.shape[0]:
__a = None
else:
__a = timesteps[i + 1]
# 2. predict previous mean of sample x_t-1
__a = scheduler.step(
_a , _a , _a , prev_timestep=_a , generator=_a ).prev_sample
__a = pred_prev_sample
__a = torch.sum(torch.abs(_a ) )
__a = torch.mean(torch.abs(_a ) )
assert abs(result_sum.item() - 258.204_4983 ) < 1E-2
assert abs(result_mean.item() - 0.336_2038 ) < 1E-3
def __UpperCAmelCase ( self ):
pass
def __UpperCAmelCase ( self ):
pass
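
# Illustrative only: the denoising pattern the full-loop tests check, outside the test
# harness. The "model" is a stand-in returning random residuals; a real pipeline would
# call a trained UNet here.
if __name__ == "__main__":
    scheduler = UnCLIPScheduler(num_train_timesteps=1_000, variance_type="fixed_small_log")
    sample = torch.randn(1, 3, 8, 8)
    generator = torch.manual_seed(0)
    for t in scheduler.timesteps:
        residual = torch.randn_like(sample)  # stand-in for model(sample, t)
        sample = scheduler.step(residual, t, sample, generator=generator).prev_sample
    print(sample.abs().mean())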
| 11 | 1 |
from __future__ import annotations
import unittest
from transformers import XGLMConfig, XGLMTokenizer, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.xglm.modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
)
@require_tf
class __A :
"""simple docstring"""
UpperCamelCase__ : int =XGLMConfig
UpperCamelCase__ : Optional[Any] ={}
UpperCamelCase__ : List[str] ="""gelu"""
def __init__( self , lowerCamelCase__ , lowerCamelCase__=14 , lowerCamelCase__=7 , lowerCamelCase__=True , lowerCamelCase__=True , lowerCamelCase__=True , lowerCamelCase__=99 , lowerCamelCase__=32 , lowerCamelCase__=2 , lowerCamelCase__=4 , lowerCamelCase__=37 , lowerCamelCase__="gelu" , lowerCamelCase__=0.1 , lowerCamelCase__=0.1 , lowerCamelCase__=512 , lowerCamelCase__=0.02 , ):
"""simple docstring"""
__UpperCamelCase : Tuple =parent
__UpperCamelCase : List[str] =batch_size
__UpperCamelCase : str =seq_length
__UpperCamelCase : Dict =is_training
__UpperCamelCase : Tuple =use_input_mask
__UpperCamelCase : List[Any] =use_labels
__UpperCamelCase : Any =vocab_size
__UpperCamelCase : List[Any] =d_model
__UpperCamelCase : Optional[int] =num_hidden_layers
__UpperCamelCase : List[str] =num_attention_heads
__UpperCamelCase : Optional[int] =ffn_dim
__UpperCamelCase : str =activation_function
__UpperCamelCase : Any =activation_dropout
__UpperCamelCase : Optional[int] =attention_dropout
__UpperCamelCase : Optional[int] =max_position_embeddings
__UpperCamelCase : Any =initializer_range
__UpperCamelCase : Dict =None
__UpperCamelCase : Optional[int] =0
__UpperCamelCase : Optional[Any] =2
__UpperCamelCase : str =1
def __lowercase ( self ):
"""simple docstring"""
return XGLMConfig.from_pretrained('facebook/xglm-564M' )
def __lowercase ( self ):
"""simple docstring"""
__UpperCamelCase : List[Any] =tf.clip_by_value(
ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) , clip_value_min=0 , clip_value_max=3 )
__UpperCamelCase : Union[str, Any] =None
if self.use_input_mask:
__UpperCamelCase : Dict =random_attention_mask([self.batch_size, self.seq_length] )
__UpperCamelCase : Any =self.get_config()
__UpperCamelCase : Optional[Any] =floats_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 )
return (
config,
input_ids,
input_mask,
head_mask,
)
def __lowercase ( self ):
"""simple docstring"""
return XGLMConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , num_layers=self.num_hidden_layers , attention_heads=self.num_attention_heads , ffn_dim=self.ffn_dim , activation_function=self.activation_function , activation_dropout=self.activation_dropout , attention_dropout=self.attention_dropout , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , use_cache=lowerCamelCase__ , bos_token_id=self.bos_token_id , eos_token_id=self.eos_token_id , pad_token_id=self.pad_token_id , return_dict=lowerCamelCase__ , )
def __lowercase ( self ):
"""simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()

        (
            config,
            input_ids,
            input_mask,
            head_mask,
        ) = config_and_inputs

        inputs_dict = {
            "input_ids": input_ids,
            "head_mask": head_mask,
        }

        return config, inputs_dict
@require_tf
class __A ( a , a , unittest.TestCase ):
"""simple docstring"""
UpperCamelCase__ : Union[str, Any] =(TFXGLMModel, TFXGLMForCausalLM) if is_tf_available() else ()
UpperCamelCase__ : str =(TFXGLMForCausalLM,) if is_tf_available() else ()
UpperCamelCase__ : Optional[Any] =(
{"""feature-extraction""": TFXGLMModel, """text-generation""": TFXGLMForCausalLM} if is_tf_available() else {}
)
UpperCamelCase__ : Tuple =False
UpperCamelCase__ : Tuple =False
UpperCamelCase__ : Optional[Any] =False
def __lowercase ( self ):
"""simple docstring"""
__UpperCamelCase : Tuple =TFXGLMModelTester(self )
__UpperCamelCase : Dict =ConfigTester(self , config_class=lowerCamelCase__ , n_embd=37 )
def __lowercase ( self ):
"""simple docstring"""
self.config_tester.run_common_tests()
@slow
def __lowercase ( self ):
"""simple docstring"""
for model_name in TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__UpperCamelCase : Optional[Any] =TFXGLMModel.from_pretrained(lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
@unittest.skip(reason='Currently, model embeddings are going to undergo a major refactor.' )
def __lowercase ( self ):
"""simple docstring"""
super().test_resize_token_embeddings()
@require_tf
class __A ( unittest.TestCase ):
"""simple docstring"""
@slow
def __lowercase ( self , lowerCamelCase__=True ):
"""simple docstring"""
__UpperCamelCase : int =TFXGLMForCausalLM.from_pretrained('facebook/xglm-564M' )
__UpperCamelCase : List[str] =tf.convert_to_tensor([[2, 268, 9865]] , dtype=tf.intaa ) # The dog
# </s> The dog is a very friendly dog. He is very affectionate and loves to play with other
# fmt: off
__UpperCamelCase : str =[2, 268, 9865, 67, 11, 1988, 57252, 9865, 5, 984, 67, 1988, 213838, 1658, 53, 70446, 33, 6657, 278, 1581]
# fmt: on
__UpperCamelCase : Optional[Any] =model.generate(lowerCamelCase__ , do_sample=lowerCamelCase__ , num_beams=1 )
if verify_outputs:
self.assertListEqual(output_ids[0].numpy().tolist() , lowerCamelCase__ )
@slow
def __lowercase ( self ):
"""simple docstring"""
__UpperCamelCase : List[str] =XGLMTokenizer.from_pretrained('facebook/xglm-564M' )
__UpperCamelCase : Union[str, Any] =TFXGLMForCausalLM.from_pretrained('facebook/xglm-564M' )
tf.random.set_seed(0 )
__UpperCamelCase : str =tokenizer('Today is a nice day and' , return_tensors='tf' )
__UpperCamelCase : Union[str, Any] =tokenized.input_ids
# forces the generation to happen on CPU, to avoid GPU-related quirks (and assure same output regardless of the available devices)
with tf.device(':/CPU:0' ):
__UpperCamelCase : Any =model.generate(lowerCamelCase__ , do_sample=lowerCamelCase__ , seed=[7, 0] )
__UpperCamelCase : Tuple =tokenizer.decode(output_ids[0] , skip_special_tokens=lowerCamelCase__ )
__UpperCamelCase : List[Any] =(
'Today is a nice day and warm evening here over Southern Alberta!! Today when they closed schools due'
)
self.assertEqual(lowerCamelCase__ , lowerCamelCase__ )
@slow
def __lowercase ( self ):
"""simple docstring"""
__UpperCamelCase : Tuple =TFXGLMForCausalLM.from_pretrained('facebook/xglm-564M' )
__UpperCamelCase : Optional[Any] =XGLMTokenizer.from_pretrained('facebook/xglm-564M' )
__UpperCamelCase : Optional[Any] ='left'
# use different length sentences to test batching
__UpperCamelCase : Optional[int] =[
'This is an extremelly long sentence that only exists to test the ability of the model to cope with '
'left-padding, such as in batched generation. The output for the sequence below should be the same '
'regardless of whether left padding is applied or not. When',
'Hello, my dog is a little',
]
__UpperCamelCase : List[Any] =tokenizer(lowerCamelCase__ , return_tensors='tf' , padding=lowerCamelCase__ )
__UpperCamelCase : Union[str, Any] =inputs['input_ids']
__UpperCamelCase : Dict =model.generate(input_ids=lowerCamelCase__ , attention_mask=inputs['attention_mask'] , max_new_tokens=12 )
__UpperCamelCase : List[Any] =tokenizer(sentences[0] , return_tensors='tf' ).input_ids
__UpperCamelCase : Dict =model.generate(input_ids=lowerCamelCase__ , max_new_tokens=12 )
__UpperCamelCase : Any =tokenizer(sentences[1] , return_tensors='tf' ).input_ids
__UpperCamelCase : Optional[Any] =model.generate(input_ids=lowerCamelCase__ , max_new_tokens=12 )
__UpperCamelCase : Optional[int] =tokenizer.batch_decode(lowerCamelCase__ , skip_special_tokens=lowerCamelCase__ )
__UpperCamelCase : Union[str, Any] =tokenizer.decode(output_non_padded[0] , skip_special_tokens=lowerCamelCase__ )
__UpperCamelCase : int =tokenizer.decode(output_padded[0] , skip_special_tokens=lowerCamelCase__ )
__UpperCamelCase : Any =[
'This is an extremelly long sentence that only exists to test the ability of the model to cope with '
'left-padding, such as in batched generation. The output for the sequence below should be the same '
'regardless of whether left padding is applied or not. When left padding is applied, the sequence will be '
'a single',
'Hello, my dog is a little bit of a shy one, but he is very friendly',
]
self.assertListEqual(lowerCamelCase__ , lowerCamelCase__ )
self.assertListEqual(lowerCamelCase__ , [non_padded_sentence, padded_sentence] )
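
# Illustrative only: the batched-generation recipe the last test verifies, reduced to
# its essentials. Left padding is what keeps batched decoder-only generation consistent
# with the unbatched outputs.
if __name__ == "__main__":
    tokenizer = XGLMTokenizer.from_pretrained("facebook/xglm-564M")
    tokenizer.padding_side = "left"
    model = TFXGLMForCausalLM.from_pretrained("facebook/xglm-564M")
    inputs = tokenizer(["Hello, my dog is a little", "Today is"], return_tensors="tf", padding=True)
    outputs = model.generate(
        input_ids=inputs["input_ids"], attention_mask=inputs["attention_mask"], max_new_tokens=12
    )
    print(tokenizer.batch_decode(outputs, skip_special_tokens=True))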
| 71 |
A_ :Optional[int] = '''
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and
# uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
'''
A_ :Union[str, Any] = [{'''type''': '''code''', '''content''': INSTALL_CONTENT}]
A_ :Optional[Any] = {
'''{processor_class}''': '''FakeProcessorClass''',
'''{model_class}''': '''FakeModelClass''',
'''{object_class}''': '''FakeObjectClass''',
}
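
# Descriptive note (the consumer of these constants is an assumption, it is not shown
# here): values like these feed the doc-to-notebook conversion, where INSTALL_CONTENT
# becomes the first code cell and the placeholder map keeps templated class names intact.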
| 71 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_instructblip": [
        "INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "InstructBlipConfig",
        "InstructBlipQFormerConfig",
        "InstructBlipVisionConfig",
    ],
    "processing_instructblip": ["InstructBlipProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_instructblip"] = [
        "INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "InstructBlipQFormerModel",
        "InstructBlipPreTrainedModel",
        "InstructBlipForConditionalGeneration",
        "InstructBlipVisionModel",
    ]
if TYPE_CHECKING:
from .configuration_instructblip import (
INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
InstructBlipConfig,
InstructBlipQFormerConfig,
InstructBlipVisionConfig,
)
from .processing_instructblip import InstructBlipProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_instructblip import (
INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
InstructBlipForConditionalGeneration,
InstructBlipPreTrainedModel,
InstructBlipQFormerModel,
InstructBlipVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
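
# Descriptive note: with the `_LazyModule` indirection above, the torch-backed classes
# are imported only on first attribute access, e.g.
#
#   from transformers import InstructBlipProcessor                 # cheap
#   from transformers import InstructBlipForConditionalGeneration  # pulls in torch
#
# while the TYPE_CHECKING branch exists so static type checkers still see the real symbols.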
| 353 |
import logging
import os
from typing import Dict, List, Optional, Union
import torch
import torch.nn as nn
from accelerate.utils.imports import (
    is_4bit_bnb_available,
    is_8bit_bnb_available,
    is_bnb_available,
)
from ..big_modeling import dispatch_model, init_empty_weights
from .dataclasses import BnbQuantizationConfig
from .modeling import (
find_tied_parameters,
get_balanced_memory,
infer_auto_device_map,
load_checkpoint_in_model,
offload_weight,
set_module_tensor_to_device,
)
if is_bnb_available():
import bitsandbytes as bnb
from copy import deepcopy
__snake_case = logging.getLogger(__name__)
def _lowercase ( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = False , ) -> Tuple:
'''simple docstring'''
    load_in_8bit = bnb_quantization_config.load_in_8bit
    load_in_4bit = bnb_quantization_config.load_in_4bit

    if load_in_8bit and not is_8bit_bnb_available():
        raise ImportError(
            "You have a version of `bitsandbytes` that is not compatible with 8bit quantization,"
            " make sure you have the latest version of `bitsandbytes` installed."
        )
    if load_in_4bit and not is_4bit_bnb_available():
        raise ValueError(
            "You have a version of `bitsandbytes` that is not compatible with 4bit quantization,"
            "make sure you have the latest version of `bitsandbytes` installed."
        )
    modules_on_cpu = []
    # custom device map
    if isinstance(device_map, dict) and len(device_map.keys()) > 1:
        modules_on_cpu = [key for key, value in device_map.items() if value in ["disk", "cpu"]]

    # We keep some modules such as the lm_head in their original dtype for numerical stability reasons
    if bnb_quantization_config.skip_modules is None:
        bnb_quantization_config.skip_modules = get_keys_to_not_convert(model)

    # add cpu modules to skip modules only for 4-bit modules
    if load_in_4bit:
        bnb_quantization_config.skip_modules.extend(modules_on_cpu)
    modules_to_not_convert = bnb_quantization_config.skip_modules
# We add the modules we want to keep in full precision
    if bnb_quantization_config.keep_in_fpaa_modules is None:
        bnb_quantization_config.keep_in_fpaa_modules = []
    keep_in_fpaa_modules = bnb_quantization_config.keep_in_fpaa_modules
    modules_to_not_convert.extend(keep_in_fpaa_modules)

    # compatibility with peft
    model.is_loaded_in_4bit = load_in_4bit
    model.is_loaded_in_8bit = load_in_8bit

    model_device = get_parameter_device(model)
if model_device.type != "meta":
# quantization of an already loaded model
logger.warning(
'It is not recommended to quantize a loaded model. '
'The model should be instantiated under the `init_empty_weights` context manager.' )
SCREAMING_SNAKE_CASE__ = replace_with_bnb_layers(UpperCamelCase_ , UpperCamelCase_ , modules_to_not_convert=UpperCamelCase_ )
# convert param to the right dtype
SCREAMING_SNAKE_CASE__ = bnb_quantization_config.torch_dtype
for name, param in model.state_dict().items():
if any(module_to_keep_in_fpaa in name for module_to_keep_in_fpaa in keep_in_fpaa_modules ):
param.to(torch.floataa )
if param.dtype != torch.floataa:
SCREAMING_SNAKE_CASE__ = name.replace('.weight' , '' ).replace('.bias' , '' )
SCREAMING_SNAKE_CASE__ = getattr(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
if param is not None:
param.to(torch.floataa )
elif torch.is_floating_point(UpperCamelCase_ ):
param.to(UpperCamelCase_ )
if model_device.type == "cuda":
# move everything to cpu in the first place because we can't do quantization if the weights are already on cuda
model.cuda(torch.cuda.current_device() )
torch.cuda.empty_cache()
elif torch.cuda.is_available():
model.to(torch.cuda.current_device() )
else:
raise RuntimeError('No GPU found. A GPU is needed for quantization.' )
logger.info(
F'The model device type is {model_device.type}. However, cuda is needed for quantization.'
'We move the model to cuda.' )
return model
elif weights_location is None:
raise RuntimeError(
F'`weights_location` needs to be the folder path containing the weights of the model, but we found {weights_location} ' )
else:
with init_empty_weights():
SCREAMING_SNAKE_CASE__ = replace_with_bnb_layers(
UpperCamelCase_ , UpperCamelCase_ , modules_to_not_convert=UpperCamelCase_ )
SCREAMING_SNAKE_CASE__ = get_quantized_model_device_map(
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , max_memory=UpperCamelCase_ , no_split_module_classes=UpperCamelCase_ , )
if offload_state_dict is None and device_map is not None and "disk" in device_map.values():
SCREAMING_SNAKE_CASE__ = True
SCREAMING_SNAKE_CASE__ = any(x in list(device_map.values() ) for x in ['cpu', 'disk'] )
        load_checkpoint_in_model(
            model,
            weights_location,
            device_map,
            dtype=bnb_quantization_config.torch_dtype,
            offload_folder=offload_folder,
            offload_state_dict=offload_state_dict,
            keep_in_fpaa_modules=bnb_quantization_config.keep_in_fpaa_modules,
            offload_8bit_bnb=load_in_8bit and offload,
        )
return dispatch_model(UpperCamelCase_ , device_map=UpperCamelCase_ , offload_dir=UpperCamelCase_ )
def _lowercase ( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_=None , UpperCamelCase_=None , UpperCamelCase_=None ) -> Optional[Any]:
'''simple docstring'''
if device_map is None:
if torch.cuda.is_available():
SCREAMING_SNAKE_CASE__ = {'': torch.cuda.current_device()}
else:
raise RuntimeError('No GPU found. A GPU is needed for quantization.' )
logger.info('The device_map was not initialized.' 'Setting device_map to `{\'\':torch.cuda.current_device()}`.' )
if isinstance(UpperCamelCase_ , UpperCamelCase_ ):
if device_map not in ["auto", "balanced", "balanced_low_0", "sequential"]:
raise ValueError(
'If passing a string for `device_map`, please choose \'auto\', \'balanced\', \'balanced_low_0\' or '
'\'sequential\'.' )
SCREAMING_SNAKE_CASE__ = {}
special_dtypes.update(
{
name: bnb_quantization_config.torch_dtype
for name, _ in model.named_parameters()
if any(m in name for m in bnb_quantization_config.skip_modules )
} )
special_dtypes.update(
{
name: torch.floataa
for name, _ in model.named_parameters()
if any(m in name for m in bnb_quantization_config.keep_in_fpaa_modules )
} )
SCREAMING_SNAKE_CASE__ = {}
SCREAMING_SNAKE_CASE__ = special_dtypes
SCREAMING_SNAKE_CASE__ = no_split_module_classes
SCREAMING_SNAKE_CASE__ = bnb_quantization_config.target_dtype
# get max_memory for each device.
if device_map != "sequential":
SCREAMING_SNAKE_CASE__ = get_balanced_memory(
UpperCamelCase_ , low_zero=(device_map == 'balanced_low_0') , max_memory=UpperCamelCase_ , **UpperCamelCase_ , )
SCREAMING_SNAKE_CASE__ = max_memory
SCREAMING_SNAKE_CASE__ = infer_auto_device_map(UpperCamelCase_ , **UpperCamelCase_ )
if isinstance(UpperCamelCase_ , UpperCamelCase_ ):
# check if don't have any quantized module on the cpu
SCREAMING_SNAKE_CASE__ = bnb_quantization_config.skip_modules + bnb_quantization_config.keep_in_fpaa_modules
SCREAMING_SNAKE_CASE__ = {
key: device_map[key] for key in device_map.keys() if key not in modules_not_to_convert
}
for device in ["cpu", "disk"]:
if device in device_map_without_some_modules.values():
                if bnb_quantization_config.load_in_4bit:
raise ValueError(
'\n Some modules are dispatched on the CPU or the disk. Make sure you have enough GPU RAM to fit\n the quantized model. If you want to dispatch the model on the CPU or the disk while keeping\n these modules in `torch_dtype`, you need to pass a custom `device_map` to\n `load_and_quantize_model`. Check\n https://huggingface.co/docs/accelerate/main/en/usage_guides/quantization#offload-modules-to-cpu-and-disk\n for more details.\n ' )
else:
                    logger.info(
                        "Some modules are offloaded to the CPU or the disk. Note that these modules will be converted to 8-bit"
                    )
del device_map_without_some_modules
return device_map
def _lowercase ( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_=None , UpperCamelCase_=None ) -> Optional[Any]:
'''simple docstring'''
if modules_to_not_convert is None:
SCREAMING_SNAKE_CASE__ = []
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = _replace_with_bnb_layers(
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
if not has_been_replaced:
logger.warning(
'You are loading your model in 8bit or 4bit but no linear modules were found in your model.'
' this can happen for some architectures such as gpt2 that uses Conv1D instead of Linear layers.'
' Please double check your model architecture, or submit an issue on github if you think this is'
' a bug.' )
return model
def _lowercase ( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_=None , UpperCamelCase_=None , ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = False
for name, module in model.named_children():
if current_key_name is None:
SCREAMING_SNAKE_CASE__ = []
current_key_name.append(UpperCamelCase_ )
        if isinstance(module, nn.Linear) and name not in modules_to_not_convert:
            # Check if the current key is not in the `modules_to_not_convert`
            current_key_name_str = ".".join(current_key_name)
            proceed = True
            for key in modules_to_not_convert:
                if (
                    (key in current_key_name_str) and (key + "." in current_key_name_str)
                ) or key == current_key_name_str:
                    proceed = False
                    break
            if proceed:
                # Load bnb module with empty weight and replace ``nn.Linear`` module
                if bnb_quantization_config.load_in_8bit:
                    bnb_module = bnb.nn.Linear8bitLt(
                        module.in_features,
                        module.out_features,
                        module.bias is not None,
                        has_fp16_weights=False,
                        threshold=bnb_quantization_config.llm_int8_threshold,
                    )
                elif bnb_quantization_config.load_in_4bit:
                    bnb_module = bnb.nn.Linear4bit(
                        module.in_features,
                        module.out_features,
                        module.bias is not None,
                        bnb_quantization_config.bnb_4bit_compute_dtype,
                        compress_statistics=bnb_quantization_config.bnb_4bit_use_double_quant,
                        quant_type=bnb_quantization_config.bnb_4bit_quant_type,
                    )
                else:
                    raise ValueError("load_in_8bit and load_in_4bit can't be both False")
                bnb_module.weight.data = module.weight.data
                if module.bias is not None:
                    bnb_module.bias.data = module.bias.data
                bnb_module.requires_grad_(False)
                setattr(model, name, bnb_module)
                has_been_replaced = True
if len(list(module.children() ) ) > 0:
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = _replace_with_bnb_layers(
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
SCREAMING_SNAKE_CASE__ = has_been_replaced | _has_been_replaced
# Remove the last key for recursion
current_key_name.pop(-1 )
return model, has_been_replaced
def _lowercase ( UpperCamelCase_ ) -> Union[str, Any]:
'''simple docstring'''
with init_empty_weights():
SCREAMING_SNAKE_CASE__ = deepcopy(UpperCamelCase_ ) # this has 0 cost since it is done inside `init_empty_weights` context manager`
SCREAMING_SNAKE_CASE__ = find_tied_parameters(UpperCamelCase_ )
# For compatibility with Accelerate < 0.18
if isinstance(UpperCamelCase_ , UpperCamelCase_ ):
SCREAMING_SNAKE_CASE__ = sum(list(tied_params.values() ) , [] ) + list(tied_params.keys() )
else:
SCREAMING_SNAKE_CASE__ = sum(UpperCamelCase_ , [] )
SCREAMING_SNAKE_CASE__ = len(UpperCamelCase_ ) > 0
# Check if it is a base model
SCREAMING_SNAKE_CASE__ = False
if hasattr(UpperCamelCase_ , 'base_model_prefix' ):
SCREAMING_SNAKE_CASE__ = not hasattr(UpperCamelCase_ , model.base_model_prefix )
# Ignore this for base models (BertModel, GPT2Model, etc.)
if (not has_tied_params) and is_base_model:
return []
# otherwise they have an attached head
SCREAMING_SNAKE_CASE__ = list(model.named_children() )
SCREAMING_SNAKE_CASE__ = [list_modules[-1][0]]
# add last module together with tied weights
SCREAMING_SNAKE_CASE__ = set(UpperCamelCase_ ) - set(UpperCamelCase_ )
SCREAMING_SNAKE_CASE__ = list(set(UpperCamelCase_ ) ) + list(UpperCamelCase_ )
# remove ".weight" from the keys
SCREAMING_SNAKE_CASE__ = ['.weight', '.bias']
SCREAMING_SNAKE_CASE__ = []
for name in list_untouched:
for name_to_remove in names_to_remove:
if name_to_remove in name:
SCREAMING_SNAKE_CASE__ = name.replace(UpperCamelCase_ , '' )
filtered_module_names.append(UpperCamelCase_ )
return filtered_module_names
def _lowercase ( UpperCamelCase_ ) -> str:
'''simple docstring'''
for m in model.modules():
        if isinstance(m, bnb.nn.Linear4bit):
return True
return False
def _lowercase ( UpperCamelCase_ ) -> str:
'''simple docstring'''
return next(parameter.parameters() ).device
def _lowercase ( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) -> List[str]:
'''simple docstring'''
if fpaa_statistics is None:
set_module_tensor_to_device(UpperCamelCase_ , UpperCamelCase_ , 0 , dtype=UpperCamelCase_ , value=UpperCamelCase_ )
SCREAMING_SNAKE_CASE__ = param_name
SCREAMING_SNAKE_CASE__ = model
if "." in tensor_name:
SCREAMING_SNAKE_CASE__ = tensor_name.split('.' )
for split in splits[:-1]:
SCREAMING_SNAKE_CASE__ = getattr(UpperCamelCase_ , UpperCamelCase_ )
if new_module is None:
raise ValueError(F'{module} has no attribute {split}.' )
SCREAMING_SNAKE_CASE__ = new_module
SCREAMING_SNAKE_CASE__ = splits[-1]
# offload weights
SCREAMING_SNAKE_CASE__ = False
offload_weight(module._parameters[tensor_name] , UpperCamelCase_ , UpperCamelCase_ , index=UpperCamelCase_ )
if hasattr(module._parameters[tensor_name] , 'SCB' ):
offload_weight(
module._parameters[tensor_name].SCB , param_name.replace('weight' , 'SCB' ) , UpperCamelCase_ , index=UpperCamelCase_ , )
else:
offload_weight(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , index=UpperCamelCase_ )
offload_weight(UpperCamelCase_ , param_name.replace('weight' , 'SCB' ) , UpperCamelCase_ , index=UpperCamelCase_ )
set_module_tensor_to_device(UpperCamelCase_ , UpperCamelCase_ , 'meta' , dtype=UpperCamelCase_ , value=torch.empty(*param.size() ) )
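
# Illustrative only: how the quantization entry point above (exposed as
# `load_and_quantize_model` in upstream accelerate) is meant to be driven. The
# checkpoint path is a placeholder.
if __name__ == "__main__":
    from accelerate import init_empty_weights
    from accelerate.utils import BnbQuantizationConfig, load_and_quantize_model
    from transformers import AutoConfig, AutoModelForCausalLM

    config = AutoConfig.from_pretrained("sshleifer/tiny-gpt2")
    with init_empty_weights():
        empty_model = AutoModelForCausalLM.from_config(config)

    bnb_config = BnbQuantizationConfig(load_in_8bit=True, llm_int8_threshold=6.0)
    quantized = load_and_quantize_model(
        empty_model,
        bnb_quantization_config=bnb_config,
        weights_location="path/to/checkpoint",  # placeholder
        device_map="auto",
    )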
| 169 | 0 |
"""simple docstring"""
from collections.abc import Generator
def __lowerCAmelCase ():
__lowerCAmelCase , __lowerCAmelCase : List[Any] = 0, 1
while True:
__lowerCAmelCase , __lowerCAmelCase : Optional[int] = b, a + b
yield b
def __lowerCAmelCase (_UpperCamelCase = 1000 ):
__lowerCAmelCase : Optional[int] = 1
__lowerCAmelCase : List[str] = fibonacci_generator()
while len(str(next(_UpperCamelCase ) ) ) < n:
answer += 1
return answer + 1
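

# A small sanity check kept out of the interactive path: the first Fibonacci term with
# three digits is F(12) = 144, so solution(3) should be 12.
def test_solution_small() -> None:
    assert solution(3) == 12
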
if __name__ == "__main__":
    print(solution(int(str(input()).strip())))
| 86 |
"""simple docstring"""
import enum
import warnings
from .. import MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING
from ..utils import add_end_docstrings, is_tf_available
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
class A__ ( enum.Enum):
A_ : List[Any] = 0
A_ : Dict = 1
A_ : Union[str, Any] = 2
@add_end_docstrings(_lowerCamelCase)
class A__ ( _lowerCamelCase):
A_ : str = '\n In 1991, the remains of Russian Tsar Nicholas II and his family (except for Alexei and Maria) are discovered. The\n voice of Nicholas\'s young son, Tsarevich Alexei Nikolaevich, narrates the remainder of the story. 1883 Western\n Siberia, a young Grigori Rasputin is asked by his father and a group of men to perform magic. Rasputin has a vision\n and denounces one of the men as a horse thief. Although his father initially slaps him for making such an\n accusation, Rasputin watches as the man is chased outside and beaten. Twenty years later, Rasputin sees a vision of\n the Virgin Mary, prompting him to become a priest. Rasputin quickly becomes famous, with people, even a bishop,\n begging for his blessing. <eod> </s> <eos>\n '
def __init__( self , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ):
super().__init__(*_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
self.check_model_type(
TF_MODEL_FOR_CAUSAL_LM_MAPPING if self.framework == 'tf' else MODEL_FOR_CAUSAL_LM_MAPPING )
if "prefix" not in self._preprocess_params:
# This is very specific. The logic is quite complex and needs to be done
# as a "default".
# It also defines both some preprocess_kwargs and generate_kwargs
# which is why we cannot put them in their respective methods.
__lowerCAmelCase : Any = None
if self.model.config.prefix is not None:
__lowerCAmelCase : str = self.model.config.prefix
if prefix is None and self.model.__class__.__name__ in [
"XLNetLMHeadModel",
"TransfoXLLMHeadModel",
"TFXLNetLMHeadModel",
"TFTransfoXLLMHeadModel",
]:
# For XLNet and TransformerXL we add an article to the prompt to give more state to the model.
__lowerCAmelCase : Tuple = self.XL_PREFIX
if prefix is not None:
# Recalculate some generate_kwargs linked to prefix.
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase : Union[str, Any] = self._sanitize_parameters(prefix=_SCREAMING_SNAKE_CASE , **self._forward_params )
__lowerCAmelCase : List[str] = {**self._preprocess_params, **preprocess_params}
__lowerCAmelCase : List[str] = {**self._forward_params, **forward_params}
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , **_SCREAMING_SNAKE_CASE , ):
__lowerCAmelCase : Optional[int] = {}
if prefix is not None:
__lowerCAmelCase : Union[str, Any] = prefix
if prefix:
__lowerCAmelCase : Dict = self.tokenizer(
_SCREAMING_SNAKE_CASE , padding=_SCREAMING_SNAKE_CASE , add_special_tokens=_SCREAMING_SNAKE_CASE , return_tensors=self.framework )
__lowerCAmelCase : List[Any] = prefix_inputs['input_ids'].shape[-1]
if handle_long_generation is not None:
if handle_long_generation not in {"hole"}:
raise ValueError(
f"{handle_long_generation} is not a valid value for `handle_long_generation` parameter expected"
' [None, \'hole\']' )
__lowerCAmelCase : int = handle_long_generation
preprocess_params.update(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : int = generate_kwargs
__lowerCAmelCase : List[Any] = {}
if return_full_text is not None and return_type is None:
if return_text is not None:
raise ValueError('`return_text` is mutually exclusive with `return_full_text`' )
if return_tensors is not None:
raise ValueError('`return_full_text` is mutually exclusive with `return_tensors`' )
__lowerCAmelCase : Optional[Any] = ReturnType.FULL_TEXT if return_full_text else ReturnType.NEW_TEXT
if return_tensors is not None and return_type is None:
if return_text is not None:
raise ValueError('`return_text` is mutually exclusive with `return_tensors`' )
__lowerCAmelCase : List[Any] = ReturnType.TENSORS
if return_type is not None:
__lowerCAmelCase : Optional[Any] = return_type
if clean_up_tokenization_spaces is not None:
__lowerCAmelCase : Tuple = clean_up_tokenization_spaces
if stop_sequence is not None:
__lowerCAmelCase : Union[str, Any] = self.tokenizer.encode(_SCREAMING_SNAKE_CASE , add_special_tokens=_SCREAMING_SNAKE_CASE )
if len(_SCREAMING_SNAKE_CASE ) > 1:
warnings.warn(
'Stopping on a multiple token sequence is not yet supported on transformers. The first token of'
' the stop sequence will be used as the stop sequence string in the interim.' )
__lowerCAmelCase : Optional[Any] = stop_sequence_ids[0]
return preprocess_params, forward_params, postprocess_params
def __lowerCamelCase ( self , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ):
# Parse arguments
if self.model.__class__.__name__ in ["TransfoXLLMHeadModel"]:
kwargs.update({'add_space_before_punct_symbol': True} )
return super()._parse_and_tokenize(*_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
def __call__( self , _SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ):
return super().__call__(_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE="" , _SCREAMING_SNAKE_CASE=None , **_SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : Any = self.tokenizer(
prefix + prompt_text , padding=_SCREAMING_SNAKE_CASE , add_special_tokens=_SCREAMING_SNAKE_CASE , return_tensors=self.framework )
__lowerCAmelCase : Optional[Any] = prompt_text
if handle_long_generation == "hole":
__lowerCAmelCase : str = inputs['input_ids'].shape[-1]
if "max_new_tokens" in generate_kwargs:
__lowerCAmelCase : Union[str, Any] = generate_kwargs['max_new_tokens']
else:
__lowerCAmelCase : Any = generate_kwargs.get('max_length' , self.model.config.max_length ) - cur_len
if new_tokens < 0:
raise ValueError('We cannot infer how many new tokens are expected' )
if cur_len + new_tokens > self.tokenizer.model_max_length:
__lowerCAmelCase : Any = self.tokenizer.model_max_length - new_tokens
if keep_length <= 0:
raise ValueError(
'We cannot use `hole` to handle this generation the number of desired tokens exceeds the'
' models max length' )
__lowerCAmelCase : int = inputs['input_ids'][:, -keep_length:]
if "attention_mask" in inputs:
__lowerCAmelCase : List[Any] = inputs['attention_mask'][:, -keep_length:]
return inputs
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : str = model_inputs['input_ids']
__lowerCAmelCase : List[Any] = model_inputs.get('attention_mask' , _SCREAMING_SNAKE_CASE )
# Allow empty prompts
if input_ids.shape[1] == 0:
__lowerCAmelCase : Dict = None
__lowerCAmelCase : str = None
__lowerCAmelCase : Tuple = 1
else:
__lowerCAmelCase : Any = input_ids.shape[0]
__lowerCAmelCase : Union[str, Any] = model_inputs.pop('prompt_text' )
# If there is a prefix, we may need to adjust the generation length. Do so without permanently modifying
# generate_kwargs, as some of the parameterization may come from the initialization of the pipeline.
__lowerCAmelCase : Optional[int] = generate_kwargs.pop('prefix_length' , 0 )
if prefix_length > 0:
__lowerCAmelCase : Any = 'max_new_tokens' in generate_kwargs or (
'generation_config' in generate_kwargs
and generate_kwargs['generation_config'].max_new_tokens is not None
)
if not has_max_new_tokens:
__lowerCAmelCase : List[str] = generate_kwargs.get('max_length' ) or self.model.config.max_length
generate_kwargs["max_length"] += prefix_length
__lowerCAmelCase : Dict = 'min_new_tokens' in generate_kwargs or (
'generation_config' in generate_kwargs
and generate_kwargs['generation_config'].min_new_tokens is not None
)
if not has_min_new_tokens and "min_length" in generate_kwargs:
generate_kwargs["min_length"] += prefix_length
# BS x SL
__lowerCAmelCase : Optional[int] = self.model.generate(input_ids=_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : List[Any] = generated_sequence.shape[0]
if self.framework == "pt":
__lowerCAmelCase : Dict = generated_sequence.reshape(_SCREAMING_SNAKE_CASE , out_b // in_b , *generated_sequence.shape[1:] )
elif self.framework == "tf":
__lowerCAmelCase : Any = tf.reshape(_SCREAMING_SNAKE_CASE , (in_b, out_b // in_b, *generated_sequence.shape[1:]) )
return {"generated_sequence": generated_sequence, "input_ids": input_ids, "prompt_text": prompt_text}
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=ReturnType.FULL_TEXT , _SCREAMING_SNAKE_CASE=True ):
__lowerCAmelCase : Any = model_outputs['generated_sequence'][0]
__lowerCAmelCase : Tuple = model_outputs['input_ids']
__lowerCAmelCase : Any = model_outputs['prompt_text']
__lowerCAmelCase : int = generated_sequence.numpy().tolist()
__lowerCAmelCase : Union[str, Any] = []
for sequence in generated_sequence:
if return_type == ReturnType.TENSORS:
__lowerCAmelCase : int = {'generated_token_ids': sequence}
elif return_type in {ReturnType.NEW_TEXT, ReturnType.FULL_TEXT}:
# Decode text
__lowerCAmelCase : Any = self.tokenizer.decode(
_SCREAMING_SNAKE_CASE , skip_special_tokens=_SCREAMING_SNAKE_CASE , clean_up_tokenization_spaces=_SCREAMING_SNAKE_CASE , )
# Remove PADDING prompt of the sequence if XLNet or Transfo-XL model is used
if input_ids is None:
__lowerCAmelCase : Optional[Any] = 0
else:
__lowerCAmelCase : Any = len(
self.tokenizer.decode(
input_ids[0] , skip_special_tokens=_SCREAMING_SNAKE_CASE , clean_up_tokenization_spaces=_SCREAMING_SNAKE_CASE , ) )
if return_type == ReturnType.FULL_TEXT:
__lowerCAmelCase : Union[str, Any] = prompt_text + text[prompt_length:]
else:
__lowerCAmelCase : int = text[prompt_length:]
__lowerCAmelCase : Dict = {'generated_text': all_text}
records.append(_SCREAMING_SNAKE_CASE )
        return records
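
# Illustrative only: the public factory that wraps this class; the tiny model id keeps
# the sketch cheap to run.
if __name__ == "__main__":
    from transformers import pipeline

    generator = pipeline("text-generation", model="sshleifer/tiny-gpt2")
    print(generator("Hello, my dog is", max_new_tokens=8, return_full_text=False))
| 86 | 1 |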
from manim import *
class _snake_case ( lowercase__):
def A__ ( self : Optional[int] ):
lowercase__ = Rectangle(height=0.5, width=0.5 )
lowercase__ = Rectangle(height=0.25, width=0.25 )
lowercase__ = Rectangle(height=0.46, width=0.46 ).set_stroke(width=0 )
lowercase__ = [mem.copy() for i in range(6 )]
lowercase__ = [mem.copy() for i in range(6 )]
lowercase__ = VGroup(*__lowercase ).arrange(__lowercase, buff=0 )
lowercase__ = VGroup(*__lowercase ).arrange(__lowercase, buff=0 )
lowercase__ = VGroup(__lowercase, __lowercase ).arrange(__lowercase, buff=0 )
lowercase__ = Text("CPU", font_size=24 )
lowercase__ = Group(__lowercase, __lowercase ).arrange(__lowercase, buff=0.5, aligned_edge=__lowercase )
cpu.move_to([-2.5, -0.5, 0] )
self.add(__lowercase )
lowercase__ = [mem.copy() for i in range(4 )]
lowercase__ = VGroup(*__lowercase ).arrange(__lowercase, buff=0 )
lowercase__ = Text("GPU", font_size=24 )
lowercase__ = Group(__lowercase, __lowercase ).arrange(__lowercase, buff=0.5, aligned_edge=__lowercase )
gpu.move_to([-1, -1, 0] )
self.add(__lowercase )
lowercase__ = [mem.copy() for i in range(6 )]
lowercase__ = VGroup(*__lowercase ).arrange(__lowercase, buff=0 )
lowercase__ = Text("Model", font_size=24 )
lowercase__ = Group(__lowercase, __lowercase ).arrange(__lowercase, buff=0.5, aligned_edge=__lowercase )
model.move_to([3, -1.0, 0] )
self.add(__lowercase )
lowercase__ = []
lowercase__ = []
lowercase__ = []
for i, rect in enumerate(__lowercase ):
rect.set_stroke(__lowercase )
lowercase__ = Rectangle(height=0.46 / 4, width=0.46 / 3 ).set_stroke(width=0.0 ).set_fill(__lowercase, opacity=0.7 )
if i == 0:
cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ), buff=0.02, direction=__lowercase )
cpu_target.set_x(cpu_target.get_x() + 0.1 )
elif i == 3:
cpu_target.next_to(model_cpu_arr[0], direction=__lowercase, buff=0.0 )
else:
cpu_target.next_to(model_cpu_arr[i - 1], direction=__lowercase, buff=0.0 )
self.add(__lowercase )
model_cpu_arr.append(__lowercase )
self.add(*__lowercase, *__lowercase, *__lowercase )
lowercase__ = [mem.copy() for i in range(6 )]
lowercase__ = VGroup(*__lowercase ).arrange(__lowercase, buff=0 )
lowercase__ = Text("Loaded Checkpoint", font_size=24 )
lowercase__ = Group(__lowercase, __lowercase ).arrange(__lowercase, buff=0.5, aligned_edge=__lowercase )
checkpoint.move_to([3, 0.5, 0] )
self.add(__lowercase )
lowercase__ = []
lowercase__ = []
for i, rect in enumerate(__lowercase ):
lowercase__ = fill.copy().set_fill(__lowercase, opacity=0.7 )
target.move_to(__lowercase )
ckpt_arr.append(__lowercase )
lowercase__ = target.copy()
if i < 5:
cpu_target.move_to(cpu_left_col_base[i + 1] )
else:
cpu_target.move_to(cpu_right_col_base[i - 5] )
ckpt_cpu_arr.append(__lowercase )
self.add(*__lowercase, *__lowercase )
lowercase__ = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
lowercase__ = MarkupText(
F'''<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model''', font_size=18, )
key_text.move_to([-5, 2.4, 0] )
self.add(__lowercase, __lowercase )
lowercase__ = MarkupText(
F'''<span fgcolor=\'{BLUE}\'>●</span> Checkpoint''', font_size=18, )
blue_text.next_to(__lowercase, DOWN * 2.4, aligned_edge=key_text.get_left() )
self.add(__lowercase )
lowercase__ = MarkupText(
F'''Based on the passed in configuration, weights are stored in\na variety of np.memmaps on disk or to a particular device.''', font_size=24, )
step_a.move_to([2, 2, 0] )
lowercase__ = [meta_mem.copy() for i in range(6 )]
lowercase__ = [meta_mem.copy() for i in range(6 )]
lowercase__ = VGroup(*__lowercase ).arrange(__lowercase, buff=0 )
lowercase__ = VGroup(*__lowercase ).arrange(__lowercase, buff=0 )
lowercase__ = VGroup(__lowercase, __lowercase ).arrange(__lowercase, buff=0 )
lowercase__ = Text("Disk", font_size=24 )
lowercase__ = Group(__lowercase, __lowercase ).arrange(__lowercase, buff=0.5, aligned_edge=__lowercase )
disk.move_to([-4.0, -1.25, 0] )
self.play(Write(__lowercase, run_time=3 ), Write(__lowercase, run_time=1 ), Create(__lowercase, run_time=1 ) )
lowercase__ = []
for i, rect in enumerate(__lowercase ):
lowercase__ = rect.copy()
target.generate_target()
target.target.move_to(disk_left_col_base[i] ).scale(0.5 )
animations.append(MoveToTarget(__lowercase, run_time=1.5 ) )
self.play(*__lowercase )
self.play(FadeOut(__lowercase ) )
lowercase__ = MarkupText(F'''Then, the checkpoint is removed from memory\nthrough garbage collection.''', font_size=24 )
step_a.move_to([2, 2, 0] )
self.play(Write(__lowercase, run_time=3 ) )
self.play(
FadeOut(__lowercase, __lowercase, *__lowercase, *__lowercase ), )
self.wait()
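
# Descriptive note: a scene like the one above is rendered from the command line with
# manim, e.g. `manim -pql <this_file>.py <SceneClassName>`, where `-pql` previews at
# low quality. The file and class names here are placeholders.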
| 224 |
from __future__ import annotations
from typing import TypedDict
class BWTTransformDict(TypedDict):
    bwt_string: str
    idx_original_string: int
def all_rotations(s: str) -> list[str]:
    if not isinstance(s, str):
        raise TypeError("The parameter s type must be str.")

    return [s[i:] + s[:i] for i in range(len(s))]
def bwt_transform(s: str) -> BWTTransformDict:
    if not isinstance(s, str):
        raise TypeError("The parameter s type must be str.")
    if not s:
        raise ValueError("The parameter s must not be empty.")

    rotations = all_rotations(s)
    rotations.sort()  # sort the list of rotations in alphabetically order
    # make a string composed of the last char of each rotation
    response: BWTTransformDict = {
        "bwt_string": "".join([word[-1] for word in rotations]),
        "idx_original_string": rotations.index(s),
    }
    return response
def reverse_bwt(bwt_string: str, idx_original_string: int) -> str:
    if not isinstance(bwt_string, str):
        raise TypeError("The parameter bwt_string type must be str.")
    if not bwt_string:
        raise ValueError("The parameter bwt_string must not be empty.")
    try:
        idx_original_string = int(idx_original_string)
    except ValueError:
        raise TypeError(
            "The parameter idx_original_string type must be int or passive"
            " of cast to int."
        )
    if idx_original_string < 0:
        raise ValueError("The parameter idx_original_string must not be lower than 0.")
    if idx_original_string >= len(bwt_string):
        raise ValueError(
            "The parameter idx_original_string must be lower than" " len(bwt_string)."
        )

    ordered_rotations = [""] * len(bwt_string)
    for _ in range(len(bwt_string)):
        for i in range(len(bwt_string)):
            ordered_rotations[i] = bwt_string[i] + ordered_rotations[i]
        ordered_rotations.sort()
    return ordered_rotations[idx_original_string]
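

# A worked round trip for the classic example: "^BANANA" transforms to "BNN^AAA" with
# the original rotation at index 6, and reversing recovers the input.
def _bwt_round_trip_example() -> None:
    result = bwt_transform("^BANANA")
    assert result == {"bwt_string": "BNN^AAA", "idx_original_string": 6}
    assert reverse_bwt(result["bwt_string"], result["idx_original_string"]) == "^BANANA"
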
if __name__ == "__main__":
lowercase_ = """Provide a string that I will generate its BWT transform: """
lowercase_ = input(entry_msg).strip()
lowercase_ = bwt_transform(s)
print(
F'Burrows Wheeler transform for string \'{s}\' results '
F'in \'{result["bwt_string"]}\''
)
lowercase_ = reverse_bwt(result["""bwt_string"""], result["""idx_original_string"""])
print(
F'Reversing Burrows Wheeler transform for entry \'{result["bwt_string"]}\' '
F'we get original string \'{original_string}\''
)
| 224 | 1 |
'''simple docstring'''
from math import log2


def lowest_set_bit_index(a: int) -> int:
    """Return the zero-based index of the lowest set bit of `a` (0 when a == 0)."""
    if a < 0:
        raise ValueError("Input value must be a positive integer")
    elif isinstance(a, float):
        raise TypeError("Input value must be a 'int' type")
    return 0 if (a == 0) else int(log2(a & -a))
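

# Worked examples: 36 is 0b100100, and 36 & -36 isolates the lowest set bit
# (4 == 2**2), so the index is 2; a lone high bit maps to its own position.
def _lowest_set_bit_examples() -> None:
    assert lowest_set_bit_index(36) == 2
    assert lowest_set_bit_index(2**31) == 31
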
if __name__ == "__main__":
import doctest
doctest.testmod()
| 104 |
'''simple docstring'''
import os

SYMBOLS = {"I": 1, "V": 5, "X": 10, "L": 50, "C": 100, "D": 500, "M": 1000}


def parse_roman_numerals(numerals: str) -> int:
    total_value = 0
    index = 0
    while index < len(numerals) - 1:
        current_value = SYMBOLS[numerals[index]]
        next_value = SYMBOLS[numerals[index + 1]]
        if current_value < next_value:
            total_value -= current_value
        else:
            total_value += current_value
        index += 1
    total_value += SYMBOLS[numerals[index]]
    return total_value
def generate_roman_numerals(num: int) -> str:
    numerals = ""

    m_count = num // 1000
    numerals += m_count * "M"
    num %= 1000

    c_count = num // 100
    if c_count == 9:
        numerals += "CM"
        c_count -= 9
    elif c_count == 4:
        numerals += "CD"
        c_count -= 4
    if c_count >= 5:
        numerals += "D"
        c_count -= 5
    numerals += c_count * "C"
    num %= 100

    x_count = num // 10
    if x_count == 9:
        numerals += "XC"
        x_count -= 9
    elif x_count == 4:
        numerals += "XL"
        x_count -= 4
    if x_count >= 5:
        numerals += "L"
        x_count -= 5
    numerals += x_count * "X"
    num %= 10

    if num == 9:
        numerals += "IX"
        num -= 9
    elif num == 4:
        numerals += "IV"
        num -= 4
    if num >= 5:
        numerals += "V"
        num -= 5
    numerals += num * "I"
    return numerals
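

# A worked round trip: 1994 is the classic composite case, M CM XC IV.
def _roman_round_trip_example() -> None:
    assert generate_roman_numerals(1994) == "MCMXCIV"
    assert parse_roman_numerals("MCMXCIV") == 1994
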
def _A ( A__ = "/p089_roman.txt" ):
"""simple docstring"""
__lowercase = 0
with open(os.path.dirname(A__ ) + roman_numerals_filename ) as filea:
__lowercase = filea.readlines()
for line in lines:
__lowercase = line.strip()
__lowercase = parse_roman_numerals(A__ )
__lowercase = generate_roman_numerals(A__ )
savings += len(A__ ) - len(A__ )
return savings
if __name__ == "__main__":
print(f'{solution() = }')
| 104 | 1 |
"""simple docstring"""
import inspect
import unittest
from datasets import load_dataset
from packaging import version
from transformers import BeitConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_MAPPING,
BeitForImageClassification,
BeitForMaskedImageModeling,
BeitForSemanticSegmentation,
BeitModel,
)
from transformers.models.beit.modeling_beit import BEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
import PIL
from PIL import Image
from transformers import BeitImageProcessor
class lowercase_ :
'''simple docstring'''
def __init__( self : Any , _UpperCAmelCase : List[str] , _UpperCAmelCase : Optional[int]=100 , _UpperCAmelCase : Any=13 , _UpperCAmelCase : Optional[int]=30 , _UpperCAmelCase : Tuple=2 , _UpperCAmelCase : int=3 , _UpperCAmelCase : Tuple=True , _UpperCAmelCase : Optional[Any]=True , _UpperCAmelCase : List[Any]=32 , _UpperCAmelCase : Dict=4 , _UpperCAmelCase : Dict=4 , _UpperCAmelCase : Optional[int]=37 , _UpperCAmelCase : str="gelu" , _UpperCAmelCase : Dict=0.1 , _UpperCAmelCase : Union[str, Any]=0.1 , _UpperCAmelCase : Dict=10 , _UpperCAmelCase : List[str]=0.02 , _UpperCAmelCase : Optional[Any]=3 , _UpperCAmelCase : List[str]=None , _UpperCAmelCase : Dict=[0, 1, 2, 3] , ):
_A = parent
_A = 100
_A = batch_size
_A = image_size
_A = patch_size
_A = num_channels
_A = is_training
_A = use_labels
_A = hidden_size
_A = num_hidden_layers
_A = num_attention_heads
_A = intermediate_size
_A = hidden_act
_A = hidden_dropout_prob
_A = attention_probs_dropout_prob
_A = type_sequence_label_size
_A = initializer_range
_A = scope
_A = out_indices
_A = num_labels
# in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
_A = (image_size // patch_size) ** 2
_A = num_patches + 1
def lowerCAmelCase_ ( self : Dict ):
_A = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_A = None
_A = None
if self.use_labels:
_A = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_A = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
_A = self.get_config()
return config, pixel_values, labels, pixel_labels
def lowerCAmelCase_ ( self : Tuple ):
return BeitConfig(
vocab_size=self.vocab_size , image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=UpperCamelCase__ , initializer_range=self.initializer_range , out_indices=self.out_indices , )
def lowerCAmelCase_ ( self : Optional[Any] , _UpperCAmelCase : Tuple , _UpperCAmelCase : Tuple , _UpperCAmelCase : Tuple , _UpperCAmelCase : Union[str, Any] ):
_A = BeitModel(config=UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
_A = model(UpperCamelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowerCAmelCase_ ( self : Any , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : List[Any] , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : List[str] ):
_A = BeitForMaskedImageModeling(config=UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
_A = model(UpperCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length - 1, self.vocab_size) )
def lowerCAmelCase_ ( self : Any , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : List[Any] , _UpperCAmelCase : Tuple , _UpperCAmelCase : Dict ):
_A = self.type_sequence_label_size
_A = BeitForImageClassification(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
_A = model(UpperCamelCase__ , labels=UpperCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
_A = 1
_A = BeitForImageClassification(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
_A = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
_A = model(UpperCamelCase__ , labels=UpperCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def lowerCAmelCase_ ( self : str , _UpperCAmelCase : Dict , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Union[str, Any] ):
_A = self.num_labels
_A = BeitForSemanticSegmentation(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
_A = model(UpperCamelCase__ )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) )
_A = model(UpperCamelCase__ , labels=UpperCamelCase__ )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) )
def lowerCAmelCase_ ( self : str ):
_A = self.prepare_config_and_inputs()
_A , _A , _A , _A = config_and_inputs
_A = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class lowercase_ ( __a , __a , unittest.TestCase ):
'''simple docstring'''
UpperCAmelCase : int = (
(BeitModel, BeitForImageClassification, BeitForMaskedImageModeling, BeitForSemanticSegmentation)
if is_torch_available()
else ()
)
UpperCAmelCase : str = (
{
'''feature-extraction''': BeitModel,
'''image-classification''': BeitForImageClassification,
'''image-segmentation''': BeitForSemanticSegmentation,
}
if is_torch_available()
else {}
)
UpperCAmelCase : int = False
UpperCAmelCase : Tuple = False
UpperCAmelCase : str = False
def lowerCAmelCase_ ( self : Tuple ):
_A = BeitModelTester(self )
_A = ConfigTester(self , config_class=UpperCamelCase__ , has_text_modality=UpperCamelCase__ , hidden_size=37 )
def lowerCAmelCase_ ( self : int ):
self.config_tester.run_common_tests()
@unittest.skip(reason='BEiT does not use inputs_embeds' )
def lowerCAmelCase_ ( self : List[str] ):
pass
@require_torch_multi_gpu
@unittest.skip(reason='BEiT has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`' )
def lowerCAmelCase_ ( self : Optional[int] ):
pass
def lowerCAmelCase_ ( self : str ):
_A , _A = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_A = model_class(UpperCamelCase__ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
_A = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(UpperCamelCase__ , nn.Linear ) )
def lowerCAmelCase_ ( self : Dict ):
_A , _A = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_A = model_class(UpperCamelCase__ )
_A = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_A = [*signature.parameters.keys()]
_A = ['pixel_values']
self.assertListEqual(arg_names[:1] , UpperCamelCase__ )
def lowerCAmelCase_ ( self : Any ):
_A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCamelCase__ )
def lowerCAmelCase_ ( self : Union[str, Any] ):
_A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*UpperCamelCase__ )
def lowerCAmelCase_ ( self : List[str] ):
_A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*UpperCamelCase__ )
def lowerCAmelCase_ ( self : int ):
_A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*UpperCamelCase__ )
def lowerCAmelCase_ ( self : Optional[Any] ):
if not self.model_tester.is_training:
return
_A , _A = self.model_tester.prepare_config_and_inputs_for_common()
_A = True
for model_class in self.all_model_classes:
# we don't test BeitForMaskedImageModeling
if model_class in [*get_values(UpperCamelCase__ ), BeitForMaskedImageModeling]:
continue
_A = model_class(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.train()
_A = self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ , return_labels=UpperCamelCase__ )
_A = model(**UpperCamelCase__ ).loss
loss.backward()
def lowerCAmelCase_ ( self : List[str] ):
_A , _A = self.model_tester.prepare_config_and_inputs_for_common()
if not self.model_tester.is_training:
return
_A = False
_A = True
for model_class in self.all_model_classes:
# we don't test BeitForMaskedImageModeling
if (
model_class in [*get_values(UpperCamelCase__ ), BeitForMaskedImageModeling]
or not model_class.supports_gradient_checkpointing
):
continue
_A = model_class(UpperCamelCase__ )
model.gradient_checkpointing_enable()
model.to(UpperCamelCase__ )
model.train()
_A = self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ , return_labels=UpperCamelCase__ )
_A = model(**UpperCamelCase__ ).loss
loss.backward()
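    # Background note on the feature exercised above: gradient checkpointing
    # trades compute for memory by re-running a module's forward pass during
    # backward instead of caching its activations. A minimal torch sketch
    # (illustrative and independent of BEiT; `use_reentrant` exists in recent
    # torch releases):
    #
    #   import torch
    #   from torch.utils.checkpoint import checkpoint
    #
    #   layer = torch.nn.Linear(8, 8)
    #   x = torch.randn(2, 8, requires_grad=True)
    #   y = checkpoint(layer, x, use_reentrant=False)  # forward re-run in backward
    #   y.sum().backward()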
def lowerCAmelCase_ ( self : Union[str, Any] ):
_A , _A = self.model_tester.prepare_config_and_inputs_for_common()
_A = _config_zero_init(UpperCamelCase__ )
for model_class in self.all_model_classes:
_A = model_class(config=UpperCamelCase__ )
for name, param in model.named_parameters():
# we skip lambda parameters as these require special initial values
# determined by config.layer_scale_init_value
if "lambda" in name:
continue
if param.requires_grad:
self.assertIn(
((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=F'''Parameter {name} of model {model_class} seems not properly initialized''' , )
@slow
def lowerCAmelCase_ ( self : List[Any] ):
for model_name in BEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_A = BeitModel.from_pretrained(UpperCamelCase__ )
self.assertIsNotNone(UpperCamelCase__ )
def _snake_case ( ) -> Union[str, Any]:
'''simple docstring'''
_A = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class lowercase_ ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def lowerCAmelCase_ ( self : Union[str, Any] ):
return BeitImageProcessor.from_pretrained('microsoft/beit-base-patch16-224' ) if is_vision_available() else None
@slow
def lowerCAmelCase_ ( self : Optional[Any] ):
_A = BeitForMaskedImageModeling.from_pretrained('microsoft/beit-base-patch16-224-pt22k' ).to(UpperCamelCase__ )
_A = self.default_image_processor
_A = prepare_img()
_A = image_processor(images=UpperCamelCase__ , return_tensors='pt' ).pixel_values.to(UpperCamelCase__ )
# prepare bool_masked_pos
_A = torch.ones((1, 196) , dtype=torch.bool ).to(UpperCamelCase__ )
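        # 196 mask positions = (224 // 16) ** 2 patches for this 224x224,
        # patch-16 checkpoint.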
# forward pass
with torch.no_grad():
_A = model(pixel_values=UpperCamelCase__ , bool_masked_pos=UpperCamelCase__ )
_A = outputs.logits
# verify the logits
_A = torch.Size((1, 196, 8_192) )
self.assertEqual(logits.shape , UpperCamelCase__ )
_A = torch.tensor(
[[-3.2437, 0.5072, -13.9174], [-3.2456, 0.4948, -13.9401], [-3.2033, 0.5121, -13.8550]] ).to(UpperCamelCase__ )
self.assertTrue(torch.allclose(logits[bool_masked_pos][:3, :3] , UpperCamelCase__ , atol=1E-2 ) )
@slow
def lowerCAmelCase_ ( self : Any ):
_A = BeitForImageClassification.from_pretrained('microsoft/beit-base-patch16-224' ).to(UpperCamelCase__ )
_A = self.default_image_processor
_A = prepare_img()
_A = image_processor(images=UpperCamelCase__ , return_tensors='pt' ).to(UpperCamelCase__ )
# forward pass
with torch.no_grad():
_A = model(**UpperCamelCase__ )
_A = outputs.logits
# verify the logits
_A = torch.Size((1, 1_000) )
self.assertEqual(logits.shape , UpperCamelCase__ )
_A = torch.tensor([-1.2385, -1.0987, -1.0108] ).to(UpperCamelCase__ )
self.assertTrue(torch.allclose(logits[0, :3] , UpperCamelCase__ , atol=1E-4 ) )
_A = 281
self.assertEqual(logits.argmax(-1 ).item() , UpperCamelCase__ )
@slow
def lowerCAmelCase_ ( self : Union[str, Any] ):
_A = BeitForImageClassification.from_pretrained('microsoft/beit-large-patch16-224-pt22k-ft22k' ).to(
UpperCamelCase__ )
_A = self.default_image_processor
_A = prepare_img()
_A = image_processor(images=UpperCamelCase__ , return_tensors='pt' ).to(UpperCamelCase__ )
# forward pass
with torch.no_grad():
_A = model(**UpperCamelCase__ )
_A = outputs.logits
# verify the logits
_A = torch.Size((1, 21_841) )
self.assertEqual(logits.shape , UpperCamelCase__ )
_A = torch.tensor([1.6881, -0.2787, 0.5901] ).to(UpperCamelCase__ )
self.assertTrue(torch.allclose(logits[0, :3] , UpperCamelCase__ , atol=1E-4 ) )
_A = 2_396
self.assertEqual(logits.argmax(-1 ).item() , UpperCamelCase__ )
@slow
def lowerCAmelCase_ ( self : int ):
_A = BeitForSemanticSegmentation.from_pretrained('microsoft/beit-base-finetuned-ade-640-640' )
_A = model.to(UpperCamelCase__ )
_A = BeitImageProcessor(do_resize=UpperCamelCase__ , size=640 , do_center_crop=UpperCamelCase__ )
_A = load_dataset('hf-internal-testing/fixtures_ade20k' , split='test' )
_A = Image.open(ds[0]['file'] )
_A = image_processor(images=UpperCamelCase__ , return_tensors='pt' ).to(UpperCamelCase__ )
# forward pass
with torch.no_grad():
_A = model(**UpperCamelCase__ )
_A = outputs.logits
# verify the logits
_A = torch.Size((1, 150, 160, 160) )
self.assertEqual(logits.shape , UpperCamelCase__ )
_A = version.parse(PIL.__version__ ) < version.parse('9.0.0' )
if is_pillow_less_than_a:
_A = torch.tensor(
[
[[-4.9225, -2.3954, -3.0522], [-2.8822, -1.0046, -1.7561], [-2.9549, -1.3228, -2.1347]],
[[-5.8168, -3.4129, -4.0778], [-3.8651, -2.2214, -3.0277], [-3.8356, -2.4643, -3.3535]],
[[-0.0078, 3.9952, 4.0754], [2.9856, 4.6944, 5.0035], [3.2413, 4.7813, 4.9969]],
] , device=UpperCamelCase__ , )
else:
_A = torch.tensor(
[
[[-4.8960, -2.3688, -3.0355], [-2.8478, -0.9836, -1.7418], [-2.9449, -1.3332, -2.1456]],
[[-5.8081, -3.4124, -4.1006], [-3.8561, -2.2081, -3.0323], [-3.8365, -2.4601, -3.3669]],
[[-0.0309, 3.9868, 4.0540], [2.9640, 4.6877, 4.9976], [3.2081, 4.7690, 4.9942]],
] , device=UpperCamelCase__ , )
self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , UpperCamelCase__ , atol=1E-4 ) )
@slow
def lowerCAmelCase_ ( self : Dict ):
_A = BeitForSemanticSegmentation.from_pretrained('microsoft/beit-base-finetuned-ade-640-640' )
_A = model.to(UpperCamelCase__ )
_A = BeitImageProcessor(do_resize=UpperCamelCase__ , size=640 , do_center_crop=UpperCamelCase__ )
_A = load_dataset('hf-internal-testing/fixtures_ade20k' , split='test' )
_A = Image.open(ds[0]['file'] )
_A = image_processor(images=UpperCamelCase__ , return_tensors='pt' ).to(UpperCamelCase__ )
# forward pass
with torch.no_grad():
_A = model(**UpperCamelCase__ )
_A = outputs.logits.detach().cpu()
_A = image_processor.post_process_semantic_segmentation(outputs=UpperCamelCase__ , target_sizes=[(500, 300)] )
_A = torch.Size((500, 300) )
self.assertEqual(segmentation[0].shape , UpperCamelCase__ )
_A = image_processor.post_process_semantic_segmentation(outputs=UpperCamelCase__ )
_A = torch.Size((160, 160) )
self.assertEqual(segmentation[0].shape , UpperCamelCase__ )
| 369 |
"""simple docstring"""
import logging
from dataclasses import dataclass, field
from pathlib import Path
from typing import Optional, Union
from .generation.configuration_utils import GenerationConfig
from .training_args import TrainingArguments
from .utils import add_start_docstrings
a = logging.getLogger(__name__)
@dataclass
@add_start_docstrings(TrainingArguments.__doc__ )
class Seq2SeqTrainingArguments(TrainingArguments):
    '''simple docstring'''

    sortish_sampler: bool = field(default=False , metadata={'''help''': '''Whether to use SortishSampler or not.'''} )
    predict_with_generate: bool = field(
        default=False , metadata={'''help''': '''Whether to use generate to calculate generative metrics (ROUGE, BLEU).'''} )
    generation_max_length: Optional[int] = field(
        default=None , metadata={
            '''help''': (
                '''The `max_length` to use on each evaluation loop when `predict_with_generate=True`. Will default '''
                '''to the `max_length` value of the model configuration.'''
            )
        } , )
    generation_num_beams: Optional[int] = field(
        default=None , metadata={
            '''help''': (
                '''The `num_beams` to use on each evaluation loop when `predict_with_generate=True`. Will default '''
                '''to the `num_beams` value of the model configuration.'''
            )
        } , )
    generation_config: Optional[Union[str, Path, GenerationConfig]] = field(
        default=None , metadata={
            '''help''': '''Model id, file path or url pointing to a GenerationConfig json file, to use during prediction.'''
        } , )

    def to_dict(self):
        d = super().to_dict()
        for k, v in d.items():
            if isinstance(v , GenerationConfig ):
                d[k] = v.to_dict()
        return d
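# Minimal usage sketch (illustrative values; `output_dir` is the one required
# TrainingArguments field):
#
#   args = Seq2SeqTrainingArguments(output_dir="out", predict_with_generate=True)
#   assert args.to_dict()["predict_with_generate"] is True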
| 271 | 0 |
import csv
from collections import defaultdict
from dataclasses import dataclass, field
from typing import List, Optional
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.ticker import ScalarFormatter
from transformers import HfArgumentParser
def list_field(default=None , metadata=None ):
    return field(default_factory=lambda: default , metadata=metadata )


@dataclass
class PlotArguments:
    '''simple docstring'''

    csv_file: str = field(
        metadata={"help": "The csv file to plot."} , )
    plot_along_batch: bool = field(
        default=False , metadata={"help": "Whether to plot along batch size or sequence length. Defaults to sequence length."} , )
    is_time: bool = field(
        default=False , metadata={"help": "Whether the csv file has time results or memory results. Defaults to memory results."} , )
    no_log_scale: bool = field(
        default=False , metadata={"help": "Disable logarithmic scale when plotting"} , )
    is_train: bool = field(
        default=False , metadata={
            "help": "Whether the csv file has training results or inference results. Defaults to inference results."
        } , )
    figure_png_file: Optional[str] = field(
        default=None , metadata={"help": "Filename under which the plot will be saved. If unused no plot is saved."} , )
    short_model_names: Optional[List[str]] = list_field(
        default=None , metadata={"help": "List of model names that are used instead of the ones in the csv file."})
def can_convert_to_int(string):
    try:
        int(string)
        return True
    except ValueError:
        return False


def can_convert_to_float(string):
    try:
        float(string)
        return True
    except ValueError:
        return False
class Plot:
    '''simple docstring'''

    def __init__(self , args):
        self.args = args
        self.result_dict = defaultdict(lambda: {"bsz": [], "seq_len": [], "result": {}})

        with open(self.args.csv_file , newline="") as csv_file:
            reader = csv.DictReader(csv_file)
            for row in reader:
                model_name = row["model"]
                self.result_dict[model_name]["bsz"].append(int(row["batch_size"]))
                self.result_dict[model_name]["seq_len"].append(int(row["sequence_length"]))
                if can_convert_to_int(row["result"]):
                    # value is not None
                    self.result_dict[model_name]["result"][
                        (int(row["batch_size"]), int(row["sequence_length"]))
                    ] = int(row["result"])
                elif can_convert_to_float(row["result"]):
                    # value is not None
                    self.result_dict[model_name]["result"][
                        (int(row["batch_size"]), int(row["sequence_length"]))
                    ] = float(row["result"])
    def plot(self):
        fig, ax = plt.subplots()
        title_str = "Time usage" if self.args.is_time else "Memory usage"
        title_str = title_str + " for training" if self.args.is_train else title_str + " for inference"

        if not self.args.no_log_scale:
            # set logarithm scales
            ax.set_xscale("log")
            ax.set_yscale("log")

        for axis in [ax.xaxis, ax.yaxis]:
            axis.set_major_formatter(ScalarFormatter())

        for model_name_idx, model_name in enumerate(self.result_dict.keys()):
            batch_sizes = sorted(set(self.result_dict[model_name]["bsz"]))
            sequence_lengths = sorted(set(self.result_dict[model_name]["seq_len"]))
            results = self.result_dict[model_name]["result"]

            (x_axis_array, inner_loop_array) = (
                (batch_sizes, sequence_lengths) if self.args.plot_along_batch else (sequence_lengths, batch_sizes)
            )

            label_model_name = (
                model_name if self.args.short_model_names is None else self.args.short_model_names[model_name_idx]
            )

            for inner_loop_value in inner_loop_array:
                if self.args.plot_along_batch:
                    y_axis_array = np.asarray(
                        [results[(x, inner_loop_value)] for x in x_axis_array if (x, inner_loop_value) in results] , dtype=int , )
                else:
                    y_axis_array = np.asarray(
                        [results[(inner_loop_value, x)] for x in x_axis_array if (inner_loop_value, x) in results] , dtype=np.float32 , )

                (x_axis_label, inner_loop_label) = (
                    ("batch_size", "len") if self.args.plot_along_batch else ("in #tokens", "bsz")
                )

                x_axis_array = np.asarray(x_axis_array , int)[: len(y_axis_array)]
                plt.scatter(
                    x_axis_array , y_axis_array , label=F"{label_model_name} - {inner_loop_label}: {inner_loop_value}")
                plt.plot(x_axis_array , y_axis_array , "--")

                title_str += F" {label_model_name} vs."

        title_str = title_str[:-4]
        y_axis_label = "Time in s" if self.args.is_time else "Memory in MB"

        # plot
        plt.title(title_str)
        plt.xlabel(x_axis_label)
        plt.ylabel(y_axis_label)
        plt.legend()

        if self.args.figure_png_file is not None:
            plt.savefig(self.args.figure_png_file)
        else:
            plt.show()
def main():
    parser = HfArgumentParser(PlotArguments )
    plot_args = parser.parse_args_into_dataclasses()[0]
    plot = Plot(args=plot_args )
    plot.plot()
if __name__ == "__main__":
main()
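# Hypothetical invocation (script name is a placeholder; the flag names follow
# the dataclass fields above, and the csv is expected to carry the
# model/batch_size/sequence_length/result columns read in Plot.__init__):
#
#   python plot_csv_file.py --csv_file inference_time.csv \
#       --figure_png_file inference_time.png --is_time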
| 11 |
import unittest
import torch
from diffusers import VQModel
from diffusers.utils import floats_tensor, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
enable_full_determinism()
class VQModelTests(ModelTesterMixin , UNetTesterMixin , unittest.TestCase):
    '''simple docstring'''

    model_class = VQModel
    main_input_name = "sample"

    @property
    def dummy_input(self , sizes=(32, 32)):
        batch_size = 4
        num_channels = 3

        image = floats_tensor((batch_size, num_channels) + sizes).to(torch_device)

        return {"sample": image}

    @property
    def input_shape(self):
        return (3, 32, 32)

    @property
    def output_shape(self):
        return (3, 32, 32)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "block_out_channels": [32, 64],
            "in_channels": 3,
            "out_channels": 3,
            "down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"],
            "up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"],
            "latent_channels": 3,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_forward_signature(self):
        pass

    def test_training(self):
        pass

    def test_from_pretrained_hub(self):
        model, loading_info = VQModel.from_pretrained("fusing/vqgan-dummy" , output_loading_info=True)
        self.assertIsNotNone(model)
        self.assertEqual(len(loading_info["missing_keys"]) , 0)

        model.to(torch_device)
        image = model(**self.dummy_input)

        assert image is not None, "Make sure output is not None"

    def test_output_pretrained(self):
        model = VQModel.from_pretrained("fusing/vqgan-dummy")
        model.to(torch_device).eval()

        torch.manual_seed(0)
        if torch.cuda.is_available():
            torch.cuda.manual_seed_all(0)

        image = torch.randn(1 , model.config.in_channels , model.config.sample_size , model.config.sample_size)
        image = image.to(torch_device)
        with torch.no_grad():
            output = model(image).sample

        output_slice = output[0, -1, -3:, -3:].flatten().cpu()
        # fmt: off
        expected_output_slice = torch.tensor([-0.0153, -0.4044, -0.1880, -0.5161, -0.2418, -0.4072, -0.1612, -0.0633, -0.0143])
        # fmt: on
        self.assertTrue(torch.allclose(output_slice , expected_output_slice , atol=1e-3))
| 11 | 1 |
"""simple docstring"""
import dataclasses
import json
import warnings
from dataclasses import dataclass, field
from time import time
from typing import List
from ..utils import logging
a__ = logging.get_logger(__name__)
def list_field(default=None , metadata=None ):
    """simple docstring"""
    return field(default_factory=lambda: default , metadata=metadata )
@dataclass
class BenchmarkArguments:
    """simple docstring"""

    models: List[str] = list_field(
        default=[] , metadata={
            "help": (
                "Model checkpoints to be provided to the AutoModel classes. Leave blank to benchmark the base version"
                " of all available models"
            )
        } , )
    batch_sizes: List[int] = list_field(
        default=[8] , metadata={"help": "List of batch sizes for which memory and time performance will be evaluated"} )
    sequence_lengths: List[int] = list_field(
        default=[8, 32, 128, 512] , metadata={"help": "List of sequence lengths for which memory and time performance will be evaluated"} , )
    inference: bool = field(
        default=True , metadata={"help": "Whether to benchmark inference of model. Inference can be disabled via --no-inference."} , )
    cuda: bool = field(
        default=True , metadata={"help": "Whether to run on available cuda devices. Cuda can be disabled via --no-cuda."} , )
    tpu: bool = field(
        default=True , metadata={"help": "Whether to run on available tpu devices. TPU can be disabled via --no-tpu."} )
    fp16: bool = field(default=False , metadata={"help": "Use FP16 to accelerate inference."} )
    training: bool = field(default=False , metadata={"help": "Benchmark training of model"} )
    verbose: bool = field(default=False , metadata={"help": "Verbose memory tracing"} )
    speed: bool = field(
        default=True , metadata={"help": "Whether to perform speed measurements. Speed measurements can be disabled via --no-speed."} , )
    memory: bool = field(
        default=True , metadata={
            "help": "Whether to perform memory measurements. Memory measurements can be disabled via --no-memory"
        } , )
    trace_memory_line_by_line: bool = field(default=False , metadata={"help": "Trace memory line by line"} )
    save_to_csv: bool = field(default=False , metadata={"help": "Save result to a CSV file"} )
    log_print: bool = field(default=False , metadata={"help": "Save all print statements in a log file"} )
    env_print: bool = field(default=False , metadata={"help": "Whether to print environment information"} )
    multi_process: bool = field(
        default=True , metadata={
            "help": (
                "Whether to use multiprocessing for memory and speed measurement. It is highly recommended to use"
                " multiprocessing for accurate CPU and GPU memory measurements. This option should only be disabled"
                " for debugging / testing and on TPU."
            )
        } , )
    inference_time_csv_file: str = field(
        default=f"inference_time_{round(time() )}.csv" , metadata={"help": "CSV filename used if saving time results to csv."} , )
    inference_memory_csv_file: str = field(
        default=f"inference_memory_{round(time() )}.csv" , metadata={"help": "CSV filename used if saving memory results to csv."} , )
    train_time_csv_file: str = field(
        default=f"train_time_{round(time() )}.csv" , metadata={"help": "CSV filename used if saving time results to csv for training."} , )
    train_memory_csv_file: str = field(
        default=f"train_memory_{round(time() )}.csv" , metadata={"help": "CSV filename used if saving memory results to csv for training."} , )
    env_info_csv_file: str = field(
        default=f"env_info_{round(time() )}.csv" , metadata={"help": "CSV filename used if saving environment information."} , )
    log_filename: str = field(
        default=f"log_{round(time() )}.csv" , metadata={"help": "Log filename used if print statements are saved in log."} , )
    repeat: int = field(default=3 , metadata={"help": "Times an experiment will be run."} )
    only_pretrain_model: bool = field(
        default=False , metadata={
            "help": (
                "Instead of loading the model as defined in `config.architectures` if exists, just load the pretrain"
                " model weights."
            )
        } , )

    def __post_init__(self):
        warnings.warn(
            f"""The class {self.__class__} is deprecated. Hugging Face Benchmarking utils"""
            ''' are deprecated in general and it is advised to use external Benchmarking libraries '''
            ''' to benchmark Transformer models.''' , FutureWarning , )

    def to_json_string(self):
        return json.dumps(dataclasses.asdict(self ) , indent=2 )

    @property
    def model_names(self) -> List[str]:
        if len(self.models ) <= 0:
            raise ValueError(
                '''Please make sure you provide at least one model name / model identifier, *e.g.* `--models'''
                ''' bert-base-cased` or `args.models = [\'bert-base-cased\'].''' )
        return self.models

    @property
    def do_multi_processing(self) -> bool:
        if not self.multi_process:
            return False
        elif self.is_tpu:
            logger.info('''Multiprocessing is currently not possible on TPU.''' )
            return False
        else:
            return True
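# Minimal usage sketch (field names as restored above; HfArgumentParser is the
# usual entry point for these argument dataclasses):
#
#   from transformers import HfArgumentParser
#
#   benchmark_args = HfArgumentParser(BenchmarkArguments).parse_args_into_dataclasses()[0]
#   print(benchmark_args.to_json_string())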
| 353 |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from argparse import ArgumentParser
from accelerate.commands.config import get_config_parser
from accelerate.commands.env import env_command_parser
from accelerate.commands.launch import launch_command_parser
from accelerate.commands.test import test_command_parser
from accelerate.commands.tpu import tpu_command_parser
def __UpperCAmelCase ( ) -> Optional[Any]:
"""simple docstring"""
_a : int = ArgumentParser('''Accelerate CLI tool''' ,usage='''accelerate <command> [<args>]''' ,allow_abbrev=__a )
_a : Optional[int] = parser.add_subparsers(help='''accelerate command helpers''' )
# Register commands
get_config_parser(subparsers=__a )
env_command_parser(subparsers=__a )
launch_command_parser(subparsers=__a )
tpu_command_parser(subparsers=__a )
test_command_parser(subparsers=__a )
# Let's go
_a : Dict = parser.parse_args()
if not hasattr(__a ,'''func''' ):
parser.print_help()
exit(1 )
# Run
args.func(__a )
if __name__ == "__main__":
main()
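# The resulting CLI surface, using the subcommand parsers registered above
# (train.py is a placeholder script):
#
#   accelerate config                               # interactive setup
#   accelerate env                                  # print environment info
#   accelerate launch train.py --num_processes 2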
| 15 | 0 |
def greatest_common_divisor(a: int , b: int) -> int:
    return abs(b) if a == 0 else greatest_common_divisor(b % a , a)
def gcd_by_iterative(x: int , y: int) -> int:
    while y:  # --> when y=0 then loop will terminate and return x as final GCD.
        x, y = y, x % y
    return abs(x)
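# Both implementations agree, e.g.:
#   greatest_common_divisor(24, 40) == 8
#   gcd_by_iterative(24, 40) == 8
#   gcd_by_iterative(0, 0) == 0  (degenerate case: every integer divides 0)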
def main():
    try:
        nums = input("Enter two integers separated by comma (,): ").split(",")
        num_1 = int(nums[0])
        num_2 = int(nums[1])
        print(
            f'''greatest_common_divisor({num_1}, {num_2}) = '''
            f'''{greatest_common_divisor(num_1 , num_2)}''')
        print(f'''By iterative gcd({num_1}, {num_2}) = {gcd_by_iterative(num_1 , num_2)}''')
    except (IndexError, UnboundLocalError, ValueError):
        print("Wrong input")
if __name__ == "__main__":
main()
| 87 |
import unittest
from diffusers import FlaxAutoencoderKL
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax
from .test_modeling_common_flax import FlaxModelTesterMixin
if is_flax_available():
import jax
@require_flax
class FlaxAutoencoderKLTests(FlaxModelTesterMixin , unittest.TestCase ):
    model_class = FlaxAutoencoderKL

    @property
    def dummy_input(self):
        batch_size = 4
        num_channels = 3
        sizes = (32, 32)

        prng_key = jax.random.PRNGKey(0 )
        image = jax.random.uniform(prng_key , ((batch_size, num_channels) + sizes) )

        return {"sample": image, "prng_key": prng_key}

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "block_out_channels": [32, 64],
            "in_channels": 3,
            "out_channels": 3,
            "down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"],
            "up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"],
            "latent_channels": 4,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict
| 169 | 0 |
"""simple docstring"""
import unittest
from transformers.models.xlm_prophetnet.tokenization_xlm_prophetnet import SPIECE_UNDERLINE, XLMProphetNetTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
lowerCAmelCase__ = get_tests_dir('''fixtures/test_sentencepiece.model''')
@require_sentencepiece
class SCREAMING_SNAKE_CASE__ ( lowercase , unittest.TestCase ):
"""simple docstring"""
a : int =XLMProphetNetTokenizer
a : str =False
a : Any =True
def lowercase__ ( self ):
"""simple docstring"""
super().setUp()
# We have a SentencePiece fixture for testing
lowerCAmelCase : Optional[Any] = XLMProphetNetTokenizer(snake_case__ , keep_accents=snake_case__ )
tokenizer.save_pretrained(self.tmpdirname )
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : Optional[int] = "[PAD]"
lowerCAmelCase : List[Any] = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(snake_case__ ) , snake_case__ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(snake_case__ ) , snake_case__ )
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : Union[str, Any] = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , "[PAD]" )
self.assertEqual(vocab_keys[1] , "[CLS]" )
self.assertEqual(vocab_keys[-1] , "j" )
self.assertEqual(len(snake_case__ ) , 1_012 )
def lowercase__ ( self ):
"""simple docstring"""
self.assertEqual(self.get_tokenizer().vocab_size , 1_012 )
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : Union[str, Any] = XLMProphetNetTokenizer(snake_case__ , keep_accents=snake_case__ )
lowerCAmelCase : List[Any] = tokenizer.tokenize("This is a test" )
self.assertListEqual(snake_case__ , ["▁This", "▁is", "▁a", "▁t", "est"] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(snake_case__ ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
lowerCAmelCase : Union[str, Any] = tokenizer.tokenize("I was born in 92000, and this is falsé." )
self.assertListEqual(
snake_case__ , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
] , )
lowerCAmelCase : str = tokenizer.convert_tokens_to_ids(snake_case__ )
self.assertListEqual(
snake_case__ , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, -9, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, -9, 4]
] , )
lowerCAmelCase : List[Any] = tokenizer.convert_ids_to_tokens(snake_case__ )
self.assertListEqual(
snake_case__ , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"[UNK]",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"[UNK]",
".",
] , )
@cached_property
def lowercase__ ( self ):
"""simple docstring"""
return XLMProphetNetTokenizer.from_pretrained("microsoft/xprophetnet-large-wiki100-cased" )
@slow
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : List[Any] = "Hello World!"
lowerCAmelCase : int = [35_389, 6_672, 49, 2]
self.assertListEqual(snake_case__ , self.big_tokenizer.encode(snake_case__ ) )
@slow
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : List[str] = {"input_ids": [[11_073, 82_783, 18, 26, 82_783, 549, 51_540, 248, 17_209, 1_301, 217, 20, 215_186, 1_325, 147, 17_209, 1_301, 217, 20, 56_370, 53, 122_020, 20, 16_477, 27, 87_355, 4_548, 20, 4_728, 78_392, 17, 159_969, 18, 26, 24_491, 629, 15, 538, 22_704, 5_439, 15, 2_788, 24_491, 9_885, 15, 43_534, 605, 15, 814, 18_403, 33_200, 29, 15, 43_534, 24_458, 12_410, 111, 24_966, 83_669, 9_637, 144_068, 26, 850, 22_346, 27, 147, 24_966, 83_669, 83_490, 26, 39_113, 735, 27, 689, 656, 2_800, 1_339, 4_600, 53, 122_020, 115_785, 34, 816, 1_339, 46_887, 18, 147, 53_905, 1_951, 42_238, 41_170, 17_732, 834, 436, 15, 27_523, 98_733, 217, 147, 5_542, 4_981, 930, 17_347, 16, 2], [20_091, 629, 94, 82_786, 58, 490, 20, 1_528, 84, 53_905, 344, 80_592, 110_128, 18_822, 5_267, 1_306, 62, 152_537, 308, 7_997, 401, 124_427, 549, 35_442, 225, 109, 15_055, 25_748, 147, 7_119, 43_712, 34, 767, 135_366, 18, 16, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [592, 63_784, 119_466, 17, 147_808, 88_214, 18, 656, 81, 32, 3_296, 10_280, 16, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=snake_case__ , model_name="microsoft/xprophetnet-large-wiki100-cased" , revision="1acad1643ddd54a44df6a1b797ada8373685d90e" , )
| 133 |
"""simple docstring"""
import pytest
from datasets.parallel import ParallelBackendConfig, parallel_backend
from datasets.utils.py_utils import map_nested
from .utils import require_dill_gt_0_3_2, require_joblibspark, require_not_windows
def add_one(i):  # picklable for multiprocessing
    '''simple docstring'''
    return i + 1


@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
def test_parallel_backend_input_dataset():
    '''simple docstring'''
    with parallel_backend("spark" ):
        assert ParallelBackendConfig.backend_name == "spark"

    lst = [1, 2, 3]
    with pytest.raises(ValueError ):
        with parallel_backend("unsupported backend" ):
            map_nested(add_one , lst , num_proc=2 )

    with pytest.raises(ValueError ):
        with parallel_backend("unsupported backend" ):
            map_nested(add_one , lst , num_proc=-1 )


@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
@pytest.mark.parametrize("num_proc" , [2, -1] )
def test_parallel_backend_map_nested(num_proc):
    '''simple docstring'''
    s1 = [1, 2]
    s2 = {"a": 1, "b": 2}
    s3 = {"a": [1, 2], "b": [3, 4]}
    s4 = {"a": {"1": 1}, "b": 2}
    s5 = {"a": 1, "b": 2, "c": 3, "d": 4}
    expected_map_nested_s1 = [2, 3]
    expected_map_nested_s2 = {"a": 2, "b": 3}
    expected_map_nested_s3 = {"a": [2, 3], "b": [4, 5]}
    expected_map_nested_s4 = {"a": {"1": 2}, "b": 3}
    expected_map_nested_s5 = {"a": 2, "b": 3, "c": 4, "d": 5}

    with parallel_backend("spark" ):
        assert map_nested(add_one , s1 , num_proc=num_proc ) == expected_map_nested_s1
        assert map_nested(add_one , s2 , num_proc=num_proc ) == expected_map_nested_s2
        assert map_nested(add_one , s3 , num_proc=num_proc ) == expected_map_nested_s3
        assert map_nested(add_one , s4 , num_proc=num_proc ) == expected_map_nested_s4
        assert map_nested(add_one , s5 , num_proc=num_proc ) == expected_map_nested_s5
| 133 | 1 |
"""simple docstring"""
import os
import tempfile
import unittest
import numpy as np
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
from diffusers import FlaxDDIMScheduler, FlaxDiffusionPipeline, FlaxStableDiffusionPipeline
@require_flax
class UpperCamelCase__ ( unittest.TestCase ):
"""simple docstring"""
def SCREAMING_SNAKE_CASE__ ( self : str ):
with tempfile.TemporaryDirectory() as tmpdirname:
# pipeline has Flax weights
lowerCAmelCase_ : int = FlaxDiffusionPipeline.from_pretrained(
'hf-internal-testing/tiny-stable-diffusion-pipe' , safety_checker=SCREAMING_SNAKE_CASE_ , cache_dir=SCREAMING_SNAKE_CASE_ )
lowerCAmelCase_ : Optional[Any] = [t[-1] for t in os.walk(os.path.join(SCREAMING_SNAKE_CASE_ , os.listdir(SCREAMING_SNAKE_CASE_ )[0] , 'snapshots' ) )]
lowerCAmelCase_ : str = [item for sublist in all_root_files for item in sublist]
# None of the downloaded files should be a PyTorch file even if we have some here:
# https://huggingface.co/hf-internal-testing/tiny-stable-diffusion-pipe/blob/main/unet/diffusion_pytorch_model.bin
assert not any(f.endswith('.bin' ) for f in files )
@slow
@require_flax
class UpperCamelCase__ ( unittest.TestCase ):
"""simple docstring"""
def SCREAMING_SNAKE_CASE__ ( self : Tuple ):
lowerCAmelCase_ ,lowerCAmelCase_ : Optional[int] = FlaxStableDiffusionPipeline.from_pretrained(
'hf-internal-testing/tiny-stable-diffusion-pipe' , safety_checker=SCREAMING_SNAKE_CASE_ )
lowerCAmelCase_ : List[str] = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
lowerCAmelCase_ : Any = jax.random.PRNGKey(0 )
lowerCAmelCase_ : Tuple = 4
lowerCAmelCase_ : List[Any] = jax.device_count()
lowerCAmelCase_ : List[Any] = num_samples * [prompt]
lowerCAmelCase_ : Any = pipeline.prepare_inputs(SCREAMING_SNAKE_CASE_ )
# shard inputs and rng
lowerCAmelCase_ : Optional[Any] = replicate(SCREAMING_SNAKE_CASE_ )
lowerCAmelCase_ : Tuple = jax.random.split(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
lowerCAmelCase_ : Union[str, Any] = shard(SCREAMING_SNAKE_CASE_ )
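        # (replicate copies the pipeline params to every local device,
        # jax.random.split hands each device its own RNG stream, and shard
        # reshapes the batch to (num_devices, batch_per_device, ...) so the
        # pmapped pipeline call can consume it.)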
lowerCAmelCase_ : List[str] = pipeline(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , jit=SCREAMING_SNAKE_CASE_ ).images
assert images.shape == (num_samples, 1, 6_4, 6_4, 3)
if jax.device_count() == 8:
assert np.abs(np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 4.1_51_47_45 ) < 1E-3
assert np.abs(np.abs(SCREAMING_SNAKE_CASE_ , dtype=np.floataa ).sum() - 4_99_47.8_75 ) < 5E-1
lowerCAmelCase_ : str = pipeline.numpy_to_pil(np.asarray(images.reshape((num_samples,) + images.shape[-3:] ) ) )
assert len(SCREAMING_SNAKE_CASE_ ) == num_samples
def SCREAMING_SNAKE_CASE__ ( self : Dict ):
lowerCAmelCase_ ,lowerCAmelCase_ : Union[str, Any] = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4' , revision='flax' , safety_checker=SCREAMING_SNAKE_CASE_ )
lowerCAmelCase_ : List[Any] = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
lowerCAmelCase_ : List[Any] = jax.random.PRNGKey(0 )
lowerCAmelCase_ : Union[str, Any] = 5_0
lowerCAmelCase_ : str = jax.device_count()
lowerCAmelCase_ : Any = num_samples * [prompt]
lowerCAmelCase_ : Tuple = pipeline.prepare_inputs(SCREAMING_SNAKE_CASE_ )
# shard inputs and rng
lowerCAmelCase_ : List[str] = replicate(SCREAMING_SNAKE_CASE_ )
lowerCAmelCase_ : Optional[Any] = jax.random.split(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
lowerCAmelCase_ : str = shard(SCREAMING_SNAKE_CASE_ )
lowerCAmelCase_ : List[Any] = pipeline(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , jit=SCREAMING_SNAKE_CASE_ ).images
assert images.shape == (num_samples, 1, 5_1_2, 5_1_2, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.05_65_24_01) ) < 1E-3
assert np.abs((np.abs(SCREAMING_SNAKE_CASE_ , dtype=np.floataa ).sum() - 2_38_38_08.2) ) < 5E-1
def SCREAMING_SNAKE_CASE__ ( self : int ):
lowerCAmelCase_ ,lowerCAmelCase_ : Optional[int] = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4' , revision='bf16' , dtype=jnp.bfloataa , safety_checker=SCREAMING_SNAKE_CASE_ )
lowerCAmelCase_ : str = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
lowerCAmelCase_ : Optional[int] = jax.random.PRNGKey(0 )
lowerCAmelCase_ : List[Any] = 5_0
lowerCAmelCase_ : List[Any] = jax.device_count()
lowerCAmelCase_ : Optional[Any] = num_samples * [prompt]
lowerCAmelCase_ : Union[str, Any] = pipeline.prepare_inputs(SCREAMING_SNAKE_CASE_ )
# shard inputs and rng
lowerCAmelCase_ : List[Any] = replicate(SCREAMING_SNAKE_CASE_ )
lowerCAmelCase_ : List[Any] = jax.random.split(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
lowerCAmelCase_ : Dict = shard(SCREAMING_SNAKE_CASE_ )
lowerCAmelCase_ : List[Any] = pipeline(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , jit=SCREAMING_SNAKE_CASE_ ).images
assert images.shape == (num_samples, 1, 5_1_2, 5_1_2, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.04_00_39_06) ) < 1E-3
assert np.abs((np.abs(SCREAMING_SNAKE_CASE_ , dtype=np.floataa ).sum() - 2_37_35_16.75) ) < 5E-1
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ):
lowerCAmelCase_ ,lowerCAmelCase_ : str = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4' , revision='bf16' , dtype=jnp.bfloataa )
lowerCAmelCase_ : List[Any] = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
lowerCAmelCase_ : Optional[Any] = jax.random.PRNGKey(0 )
lowerCAmelCase_ : List[str] = 5_0
lowerCAmelCase_ : Tuple = jax.device_count()
lowerCAmelCase_ : List[str] = num_samples * [prompt]
lowerCAmelCase_ : Optional[Any] = pipeline.prepare_inputs(SCREAMING_SNAKE_CASE_ )
# shard inputs and rng
lowerCAmelCase_ : List[Any] = replicate(SCREAMING_SNAKE_CASE_ )
lowerCAmelCase_ : List[Any] = jax.random.split(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
lowerCAmelCase_ : List[Any] = shard(SCREAMING_SNAKE_CASE_ )
lowerCAmelCase_ : Dict = pipeline(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , jit=SCREAMING_SNAKE_CASE_ ).images
assert images.shape == (num_samples, 1, 5_1_2, 5_1_2, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.04_00_39_06) ) < 1E-3
assert np.abs((np.abs(SCREAMING_SNAKE_CASE_ , dtype=np.floataa ).sum() - 2_37_35_16.75) ) < 5E-1
def SCREAMING_SNAKE_CASE__ ( self : List[Any] ):
lowerCAmelCase_ : List[str] = FlaxDDIMScheduler(
beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule='scaled_linear' , set_alpha_to_one=SCREAMING_SNAKE_CASE_ , steps_offset=1 , )
lowerCAmelCase_ ,lowerCAmelCase_ : Dict = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4' , revision='bf16' , dtype=jnp.bfloataa , scheduler=SCREAMING_SNAKE_CASE_ , safety_checker=SCREAMING_SNAKE_CASE_ , )
lowerCAmelCase_ : Optional[Any] = scheduler.create_state()
lowerCAmelCase_ : List[Any] = scheduler_state
lowerCAmelCase_ : List[str] = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
lowerCAmelCase_ : Union[str, Any] = jax.random.PRNGKey(0 )
lowerCAmelCase_ : int = 5_0
lowerCAmelCase_ : Tuple = jax.device_count()
lowerCAmelCase_ : Any = num_samples * [prompt]
lowerCAmelCase_ : Dict = pipeline.prepare_inputs(SCREAMING_SNAKE_CASE_ )
# shard inputs and rng
lowerCAmelCase_ : str = replicate(SCREAMING_SNAKE_CASE_ )
lowerCAmelCase_ : Union[str, Any] = jax.random.split(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
lowerCAmelCase_ : str = shard(SCREAMING_SNAKE_CASE_ )
lowerCAmelCase_ : Union[str, Any] = pipeline(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , jit=SCREAMING_SNAKE_CASE_ ).images
assert images.shape == (num_samples, 1, 5_1_2, 5_1_2, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.0_45_04_39_45) ) < 1E-3
assert np.abs((np.abs(SCREAMING_SNAKE_CASE_ , dtype=np.floataa ).sum() - 2_34_76_93.5) ) < 5E-1
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ):
lowerCAmelCase_ : str = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
lowerCAmelCase_ : int = jax.device_count()
lowerCAmelCase_ : Optional[int] = num_samples * [prompt]
lowerCAmelCase_ : int = jax.random.split(jax.random.PRNGKey(0 ) , SCREAMING_SNAKE_CASE_ )
lowerCAmelCase_ ,lowerCAmelCase_ : Any = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4' , revision='bf16' , dtype=jnp.bfloataa , safety_checker=SCREAMING_SNAKE_CASE_ , )
lowerCAmelCase_ : List[str] = replicate(SCREAMING_SNAKE_CASE_ )
lowerCAmelCase_ : Dict = pipeline.prepare_inputs(SCREAMING_SNAKE_CASE_ )
lowerCAmelCase_ : Optional[int] = shard(SCREAMING_SNAKE_CASE_ )
lowerCAmelCase_ : int = pipeline(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , jit=SCREAMING_SNAKE_CASE_ ).images
assert images.shape == (num_samples, 1, 5_1_2, 5_1_2, 3)
lowerCAmelCase_ : Optional[Any] = images[2, 0, 2_5_6, 1_0:1_7, 1]
# With memory efficient attention
lowerCAmelCase_ ,lowerCAmelCase_ : Union[str, Any] = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4' , revision='bf16' , dtype=jnp.bfloataa , safety_checker=SCREAMING_SNAKE_CASE_ , use_memory_efficient_attention=SCREAMING_SNAKE_CASE_ , )
lowerCAmelCase_ : str = replicate(SCREAMING_SNAKE_CASE_ )
lowerCAmelCase_ : Tuple = pipeline.prepare_inputs(SCREAMING_SNAKE_CASE_ )
lowerCAmelCase_ : Optional[int] = shard(SCREAMING_SNAKE_CASE_ )
lowerCAmelCase_ : Optional[int] = pipeline(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , jit=SCREAMING_SNAKE_CASE_ ).images
assert images_eff.shape == (num_samples, 1, 5_1_2, 5_1_2, 3)
lowerCAmelCase_ : int = images[2, 0, 2_5_6, 1_0:1_7, 1]
# I checked the results visually and they are very similar. However, I saw that the max diff is `1` and the `sum`
# over the 8 images is exactly `256`, which is very suspicious. Testing a random slice for now.
assert abs(slice_eff - slice ).max() < 1E-2
| 224 |
"""simple docstring"""
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_multi_gpu
from accelerate.utils import patch_environment
class UpperCamelCase__ ( unittest.TestCase ):
"""simple docstring"""
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ):
lowerCAmelCase_ : Union[str, Any] = inspect.getfile(accelerate.test_utils )
lowerCAmelCase_ : Dict = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ['scripts', 'test_script.py'] )
lowerCAmelCase_ : int = os.path.sep.join(
mod_file.split(os.path.sep )[:-1] + ['scripts', 'test_distributed_data_loop.py'] )
lowerCAmelCase_ : Union[str, Any] = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ['scripts', 'test_ops.py'] )
@require_multi_gpu
def SCREAMING_SNAKE_CASE__ ( self : Any ):
print(F"Found {torch.cuda.device_count()} devices." )
lowerCAmelCase_ : Optional[int] = ['torchrun', F"--nproc_per_node={torch.cuda.device_count()}", self.test_file_path]
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(SCREAMING_SNAKE_CASE_ , env=os.environ.copy() )
@require_multi_gpu
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ):
print(F"Found {torch.cuda.device_count()} devices." )
lowerCAmelCase_ : int = ['torchrun', F"--nproc_per_node={torch.cuda.device_count()}", self.operation_file_path]
print(F"Command: {cmd}" )
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(SCREAMING_SNAKE_CASE_ , env=os.environ.copy() )
@require_multi_gpu
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ):
lowerCAmelCase_ : List[Any] = ['torchrun', F"--nproc_per_node={torch.cuda.device_count()}", inspect.getfile(self.__class__ )]
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(SCREAMING_SNAKE_CASE_ , env=os.environ.copy() )
@require_multi_gpu
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ):
print(F"Found {torch.cuda.device_count()} devices, using 2 devices only" )
lowerCAmelCase_ : Any = ['torchrun', F"--nproc_per_node={torch.cuda.device_count()}", self.data_loop_file_path]
with patch_environment(omp_num_threads=1 , cuda_visible_devices='0,1' ):
execute_subprocess_async(SCREAMING_SNAKE_CASE_ , env=os.environ.copy() )
if __name__ == "__main__":
    accelerator = Accelerator()
    shape = (accelerator.state.process_index + 2, 10)
    tensor = torch.randint(0, 10, shape).to(accelerator.device)

    error_msg = ""

    tensor1 = accelerator.pad_across_processes(tensor)
    if tensor1.shape[0] != accelerator.state.num_processes + 1:
        error_msg += f"Found shape {tensor1.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
    if not torch.equal(tensor1[: accelerator.state.process_index + 2], tensor):
        error_msg += "Tensors have different values."
    if not torch.all(tensor1[accelerator.state.process_index + 2 :] == 0):
        error_msg += "Padding was not done with the right value (0)."

    tensor2 = accelerator.pad_across_processes(tensor, pad_first=True)
    if tensor2.shape[0] != accelerator.state.num_processes + 1:
        error_msg += f"Found shape {tensor2.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
    index = accelerator.state.num_processes - accelerator.state.process_index - 1
    if not torch.equal(tensor2[index:], tensor):
        error_msg += "Tensors have different values."
    if not torch.all(tensor2[:index] == 0):
        error_msg += "Padding was not done with the right value (0)."
| 224 | 1 |
import copy
import tempfile
import unittest
from transformers import MaMaaaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from transformers.utils import cached_property
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaMaaaForConditionalGeneration, MaMaaaModel, MaMaaaTokenizer
from transformers.models.mam_aaa.modeling_mam_aaa import MaMaaaDecoder, MaMaaaEncoder
def _UpperCamelCase (a__ :Optional[int] , a__ :List[str] , a__ :Dict , a__ :str=None , a__ :Any=None , a__ :List[Any]=None , a__ :List[Any]=None , a__ :List[Any]=None , ):
"""simple docstring"""
if attention_mask is None:
UpperCamelCase__ = input_ids.ne(config.pad_token_id )
if decoder_attention_mask is None:
UpperCamelCase__ = decoder_input_ids.ne(config.pad_token_id )
if head_mask is None:
UpperCamelCase__ = torch.ones(config.encoder_layers , config.encoder_attention_heads , device=a__ )
if decoder_head_mask is None:
UpperCamelCase__ = torch.ones(config.decoder_layers , config.decoder_attention_heads , device=a__ )
if cross_attn_head_mask is None:
UpperCamelCase__ = torch.ones(config.decoder_layers , config.decoder_attention_heads , device=a__ )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
class __SCREAMING_SNAKE_CASE :
def __init__( self , __lowerCAmelCase , __lowerCAmelCase=13 , __lowerCAmelCase=7 , __lowerCAmelCase=True , __lowerCAmelCase=False , __lowerCAmelCase=99 , __lowerCAmelCase=16 , __lowerCAmelCase=2 , __lowerCAmelCase=4 , __lowerCAmelCase=4 , __lowerCAmelCase="relu" , __lowerCAmelCase=0.1 , __lowerCAmelCase=0.1 , __lowerCAmelCase=0.0 , __lowerCAmelCase=0.0 , __lowerCAmelCase=20 , __lowerCAmelCase=2 , __lowerCAmelCase=1 , __lowerCAmelCase=0 , ):
UpperCamelCase__ = parent
UpperCamelCase__ = batch_size
UpperCamelCase__ = seq_length
UpperCamelCase__ = is_training
UpperCamelCase__ = use_labels
UpperCamelCase__ = vocab_size
UpperCamelCase__ = hidden_size
UpperCamelCase__ = num_hidden_layers
UpperCamelCase__ = num_attention_heads
UpperCamelCase__ = intermediate_size
UpperCamelCase__ = hidden_act
UpperCamelCase__ = hidden_dropout_prob
UpperCamelCase__ = attention_probs_dropout_prob
UpperCamelCase__ = encoder_layerdrop
UpperCamelCase__ = decoder_layerdrop
UpperCamelCase__ = max_position_embeddings
UpperCamelCase__ = eos_token_id
UpperCamelCase__ = pad_token_id
UpperCamelCase__ = bos_token_id
def _lowerCamelCase ( self ):
UpperCamelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCamelCase__ = self.eos_token_id # Eos Token
UpperCamelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
# we need to clamp the input ids here to avoid having pad token in between
# this is because for M2M100 the position_ids are prepared such that
# all pad tokens have pos id = 2 and rest are between 2..seq_length
# and the seq_length here is seq_length - num_pad_tokens
# but when using past, there is no way of knowing if the past input ids had
# pad tokens in them, which results in incorrect seq_lenth and which in turn results in
# position_ids being off by num_pad_tokens in past input
UpperCamelCase__ = input_ids.clamp(self.pad_token_id + 1 )
UpperCamelCase__ = decoder_input_ids.clamp(self.pad_token_id + 1 )
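        # clamp(pad_token_id + 1) lifts every sampled id to at least
        # pad_token_id + 1, so no pad token can appear inside the sequences
        # (see the note above).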
UpperCamelCase__ = self.get_config()
UpperCamelCase__ = prepare_mam_aaa_inputs_dict(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
return config, inputs_dict
def _lowerCamelCase ( self ):
return MaMaaaConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , encoder_layerdrop=self.encoder_layerdrop , decoder_layerdrop=self.decoder_layerdrop , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , )
def _lowerCamelCase ( self ):
UpperCamelCase__ , UpperCamelCase__ = self.prepare_config_and_inputs()
return config, inputs_dict
def _lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase ):
UpperCamelCase__ = MaMaaaModel(config=__lowerCAmelCase ).get_decoder().to(__lowerCAmelCase ).eval()
UpperCamelCase__ = inputs_dict["""input_ids"""]
UpperCamelCase__ = inputs_dict["""attention_mask"""]
UpperCamelCase__ = inputs_dict["""head_mask"""]
# first forward pass
UpperCamelCase__ = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase , head_mask=__lowerCAmelCase , use_cache=__lowerCAmelCase )
UpperCamelCase__ , UpperCamelCase__ = outputs.to_tuple()
        # create hypothetical multiple next tokens and extend them to next_input_ids
UpperCamelCase__ = ids_tensor((self.batch_size, 3) , config.vocab_size )
UpperCamelCase__ = ids_tensor((self.batch_size, 3) , 2 )
        # append next_tokens to input_ids and next_attn_mask to attention_mask
UpperCamelCase__ = torch.cat([input_ids, next_tokens] , dim=-1 )
UpperCamelCase__ = torch.cat([attention_mask, next_attn_mask] , dim=-1 )
UpperCamelCase__ = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase )["""last_hidden_state"""]
UpperCamelCase__ = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase , past_key_values=__lowerCAmelCase )[
"""last_hidden_state"""
]
# select random slice
UpperCamelCase__ = ids_tensor((1,) , output_from_past.shape[-1] ).item()
UpperCamelCase__ = output_from_no_past[:, -3:, random_slice_idx].detach()
UpperCamelCase__ = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(__lowerCAmelCase , __lowerCAmelCase , atol=1E-2 ) )
def _lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase ):
UpperCamelCase__ = MaMaaaModel(config=__lowerCAmelCase ).to(__lowerCAmelCase ).eval()
UpperCamelCase__ = model(**__lowerCAmelCase )
UpperCamelCase__ = outputs.encoder_last_hidden_state
UpperCamelCase__ = outputs.last_hidden_state
with tempfile.TemporaryDirectory() as tmpdirname:
UpperCamelCase__ = model.get_encoder()
encoder.save_pretrained(__lowerCAmelCase )
UpperCamelCase__ = MaMaaaEncoder.from_pretrained(__lowerCAmelCase ).to(__lowerCAmelCase )
UpperCamelCase__ = encoder(inputs_dict["""input_ids"""] , attention_mask=inputs_dict["""attention_mask"""] )[
0
]
self.parent.assertTrue((encoder_last_hidden_state_a - encoder_last_hidden_state).abs().max().item() < 1E-3 )
with tempfile.TemporaryDirectory() as tmpdirname:
UpperCamelCase__ = model.get_decoder()
decoder.save_pretrained(__lowerCAmelCase )
UpperCamelCase__ = MaMaaaDecoder.from_pretrained(__lowerCAmelCase ).to(__lowerCAmelCase )
UpperCamelCase__ = decoder(
input_ids=inputs_dict["""decoder_input_ids"""] , attention_mask=inputs_dict["""decoder_attention_mask"""] , encoder_hidden_states=__lowerCAmelCase , encoder_attention_mask=inputs_dict["""attention_mask"""] , )[0]
self.parent.assertTrue((last_hidden_state_a - last_hidden_state).abs().max().item() < 1E-3 )
@require_torch
class __SCREAMING_SNAKE_CASE ( _a , _a , _a , unittest.TestCase ):
snake_case : int = (
(
MaMaaaModel,
MaMaaaForConditionalGeneration,
)
if is_torch_available()
else ()
)
snake_case : Any = (MaMaaaForConditionalGeneration,) if is_torch_available() else ()
snake_case : str = (
{
"""conversational""": MaMaaaForConditionalGeneration,
"""feature-extraction""": MaMaaaModel,
"""summarization""": MaMaaaForConditionalGeneration,
"""text2text-generation""": MaMaaaForConditionalGeneration,
"""translation""": MaMaaaForConditionalGeneration,
}
if is_torch_available()
else {}
)
snake_case : Dict = True
snake_case : Any = True
snake_case : Dict = False
snake_case : List[Any] = False
def _lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
if pipeline_test_casse_name == "TranslationPipelineTests":
# Get `ValueError: Translation requires a `src_lang` and a `tgt_lang` for this model`.
# `M2M100Config` was never used in pipeline tests: cannot create a simple tokenizer.
return True
return False
def _lowerCamelCase ( self ):
UpperCamelCase__ = MaMaaaModelTester(self )
UpperCamelCase__ = ConfigTester(self , config_class=__lowerCAmelCase )
def _lowerCamelCase ( self ):
self.config_tester.run_common_tests()
def _lowerCamelCase ( self ):
UpperCamelCase__ , UpperCamelCase__ = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
UpperCamelCase__ = model_class(__lowerCAmelCase )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(__lowerCAmelCase )
UpperCamelCase__ , UpperCamelCase__ = model_class.from_pretrained(__lowerCAmelCase , output_loading_info=__lowerCAmelCase )
self.assertEqual(info["""missing_keys"""] , [] )
def _lowerCamelCase ( self ):
UpperCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_past_large_inputs(*__lowerCAmelCase )
def _lowerCamelCase ( self ):
UpperCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_encoder_decoder_model_standalone(*__lowerCAmelCase )
def _lowerCamelCase ( self ):
UpperCamelCase__ , UpperCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in (MaMaaaModel, MaMaaaForConditionalGeneration):
UpperCamelCase__ = model_class(__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
UpperCamelCase__ = copy.deepcopy(self._prepare_for_class(__lowerCAmelCase , __lowerCAmelCase ) )
if not self.is_encoder_decoder:
UpperCamelCase__ = inputs["""input_ids"""]
del inputs["input_ids"]
else:
UpperCamelCase__ = inputs["""input_ids"""]
UpperCamelCase__ = inputs.get("""decoder_input_ids""" , __lowerCAmelCase )
del inputs["input_ids"]
inputs.pop("""decoder_input_ids""" , __lowerCAmelCase )
UpperCamelCase__ = model.get_input_embeddings()
if not self.is_encoder_decoder:
UpperCamelCase__ = wte(__lowerCAmelCase )
else:
UpperCamelCase__ = wte(__lowerCAmelCase )
UpperCamelCase__ = wte(__lowerCAmelCase )
with torch.no_grad():
model(**__lowerCAmelCase )[0]
def _lowerCamelCase ( self ):
UpperCamelCase__ , UpperCamelCase__ = self.model_tester.prepare_config_and_inputs()
UpperCamelCase__ = input_dict["""input_ids"""]
UpperCamelCase__ = input_ids.ne(1 ).to(__lowerCAmelCase )
UpperCamelCase__ = MaMaaaForConditionalGeneration(__lowerCAmelCase ).eval().to(__lowerCAmelCase )
if torch_device == "cuda":
model.half()
model.generate(__lowerCAmelCase , attention_mask=__lowerCAmelCase )
model.generate(num_beams=4 , do_sample=__lowerCAmelCase , early_stopping=__lowerCAmelCase , num_return_sequences=3 )
def _UpperCamelCase (a__ :Optional[int] ):
"""simple docstring"""
return torch.tensor(a__ , dtype=torch.long , device=a__ )
UpperCamelCase__ = 1e-4
@require_torch
@require_sentencepiece
@require_tokenizers
@slow
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
@cached_property
def _lowerCamelCase ( self ):
return MaMaaaTokenizer.from_pretrained("""facebook/m2m100_418M""" )
def _lowerCamelCase ( self ):
UpperCamelCase__ = MaMaaaModel.from_pretrained("""facebook/m2m100_418M""" ).to(__lowerCAmelCase )
UpperCamelCase__ = _long_tensor([[128028, 98, 12, 30527, 2732, 159, 7755, 61904, 39144, 38, 2]] )
UpperCamelCase__ = _long_tensor([[2, 128028, 98, 12, 30527, 2732, 159, 7755, 61904, 39144, 38]] )
UpperCamelCase__ = prepare_mam_aaa_inputs_dict(model.config , __lowerCAmelCase , __lowerCAmelCase )
with torch.no_grad():
UpperCamelCase__ = model(**__lowerCAmelCase )[0]
UpperCamelCase__ = torch.Size((1, 11, 1024) )
self.assertEqual(output.shape , __lowerCAmelCase )
# change to expected output here
UpperCamelCase__ = torch.tensor(
[[-0.7780, -0.1676, 0.1038], [-6.7556, -1.3992, 0.0567], [-7.5383, -0.5920, -0.2779]] , device=__lowerCAmelCase )
self.assertTrue(torch.allclose(output[:, :3, :3] , __lowerCAmelCase , atol=__lowerCAmelCase ) )
def _lowerCamelCase ( self ):
UpperCamelCase__ = MaMaaaForConditionalGeneration.from_pretrained("""facebook/m2m100_418M""" ).to(__lowerCAmelCase )
# change to intended input
UpperCamelCase__ = _long_tensor([[128028, 98, 12, 30527, 2732, 159, 7755, 61904, 39144, 38, 2]] )
UpperCamelCase__ = _long_tensor([[2, 128028, 98, 12, 30527, 2732, 159, 7755, 61904, 39144, 38]] )
UpperCamelCase__ = prepare_mam_aaa_inputs_dict(model.config , __lowerCAmelCase , __lowerCAmelCase )
with torch.no_grad():
UpperCamelCase__ = model(**__lowerCAmelCase )[0]
UpperCamelCase__ = torch.Size((1, 11, model.config.vocab_size) )
self.assertEqual(output.shape , __lowerCAmelCase )
# change to expected output here
UpperCamelCase__ = torch.tensor(
[[-1.0448, -1.0411, 3.7992], [-3.2191, -3.2386, -1.3451], [-3.6210, -3.5993, 0.4925]] , device=__lowerCAmelCase )
self.assertTrue(torch.allclose(output[:, :3, :3] , __lowerCAmelCase , atol=__lowerCAmelCase ) )
def _lowerCamelCase ( self ):
UpperCamelCase__ = MaMaaaForConditionalGeneration.from_pretrained("""facebook/m2m100_418M""" ).to(__lowerCAmelCase )
UpperCamelCase__ = MaMaaaTokenizer.from_pretrained("""facebook/m2m100_418M""" , src_lang="""fr""" , tgt_lang="""en""" )
UpperCamelCase__ = [
"""L'affaire NSA souligne l'absence totale de débat sur le renseignement""",
"""Selon moi, il y a deux niveaux de réponse de la part du gouvernement français.""",
"""Lorsque François Hollande téléphone à Barack Obama ou quand le ministre des affaires étrangères Laurent"""
""" Fabius convoque l'ambassadeur des Etats-Unis, ils réagissent à une vraie découverte, qui est celle de"""
""" l'ampleur de la surveillance américaine sur l'ensemble des communications en France.""",
]
# The below article tests that we don't add any hypotheses outside of the top n_beams
UpperCamelCase__ = tokenizer(__lowerCAmelCase , padding=__lowerCAmelCase , return_tensors="""pt""" )
UpperCamelCase__ = model.generate(
input_ids=dct["""input_ids"""].to(__lowerCAmelCase ) , attention_mask=dct["""attention_mask"""].to(__lowerCAmelCase ) , num_beams=5 , forced_bos_token_id=tokenizer.get_lang_id("""en""" ) , )
UpperCamelCase__ = [
"""The NSA case highlights the total absence of intelligence debate""",
"""I think there are two levels of response from the French government.""",
"""When François Hollande calls Barack Obama or when Foreign Minister Laurent Fabius calls the U.S."""
""" Ambassador, they respond to a real discovery, which is that of the scale of U.S. surveillance on all"""
""" communications in France.""",
]
UpperCamelCase__ = tokenizer.batch_decode(
hypotheses_batch.tolist() , clean_up_tokenization_spaces=__lowerCAmelCase , skip_special_tokens=__lowerCAmelCase )
assert generated == expected_en
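# Hedged usage sketch (kept as a comment so the test module stays import-safe):
# minimal French-to-English translation with the same checkpoint and generate()
# arguments the integration test above exercises.
#
#   from transformers import M2M100ForConditionalGeneration, M2M100Tokenizer
#
#   tokenizer = M2M100Tokenizer.from_pretrained(
#       "facebook/m2m100_418M", src_lang="fr", tgt_lang="en"
#   )
#   model = M2M100ForConditionalGeneration.from_pretrained("facebook/m2m100_418M")
#   batch = tokenizer("La vie est belle.", return_tensors="pt")
#   generated = model.generate(
#       **batch, num_beams=5, forced_bos_token_id=tokenizer.get_lang_id("en")
#   )
#   print(tokenizer.batch_decode(generated, skip_special_tokens=True))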
| 87 |
import argparse
import glob
import logging
import os
from argparse import Namespace
from importlib import import_module
import numpy as np
import torch
from lightning_base import BaseTransformer, add_generic_args, generic_train
from seqeval.metrics import accuracy_score, fa_score, precision_score, recall_score
from torch.nn import CrossEntropyLoss
from torch.utils.data import DataLoader, TensorDataset
from utils_ner import TokenClassificationTask
UpperCamelCase__ = logging.getLogger(__name__)
class __SCREAMING_SNAKE_CASE ( _a ):
snake_case : str = """token-classification"""
def __init__( self , __lowerCAmelCase ):
if type(__lowerCAmelCase ) == dict:
UpperCamelCase__ = Namespace(**__lowerCAmelCase )
UpperCamelCase__ = import_module("""tasks""" )
try:
UpperCamelCase__ = getattr(__lowerCAmelCase , hparams.task_type )
UpperCamelCase__ = token_classification_task_clazz()
except AttributeError:
raise ValueError(
f"""Task {hparams.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. """
f"""Available tasks classes are: {TokenClassificationTask.__subclasses__()}""" )
UpperCamelCase__ = self.token_classification_task.get_labels(hparams.labels )
UpperCamelCase__ = CrossEntropyLoss().ignore_index
super().__init__(__lowerCAmelCase , len(self.labels ) , self.mode )
def _lowerCamelCase ( self , **__lowerCAmelCase ):
return self.model(**__lowerCAmelCase )
def _lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase ):
UpperCamelCase__ = {"""input_ids""": batch[0], """attention_mask""": batch[1], """labels""": batch[3]}
if self.config.model_type != "distilbert":
UpperCamelCase__ = (
batch[2] if self.config.model_type in ["""bert""", """xlnet"""] else None
            ) # XLM and RoBERTa don't use token_type_ids
UpperCamelCase__ = self(**__lowerCAmelCase )
UpperCamelCase__ = outputs[0]
# tensorboard_logs = {"loss": loss, "rate": self.lr_scheduler.get_last_lr()[-1]}
return {"loss": loss}
def _lowerCamelCase ( self ):
UpperCamelCase__ = self.hparams
for mode in ["train", "dev", "test"]:
UpperCamelCase__ = self._feature_file(__lowerCAmelCase )
if os.path.exists(__lowerCAmelCase ) and not args.overwrite_cache:
logger.info("""Loading features from cached file %s""" , __lowerCAmelCase )
UpperCamelCase__ = torch.load(__lowerCAmelCase )
else:
logger.info("""Creating features from dataset file at %s""" , args.data_dir )
UpperCamelCase__ = self.token_classification_task.read_examples_from_file(args.data_dir , __lowerCAmelCase )
UpperCamelCase__ = self.token_classification_task.convert_examples_to_features(
__lowerCAmelCase , self.labels , args.max_seq_length , self.tokenizer , cls_token_at_end=bool(self.config.model_type in ["""xlnet"""] ) , cls_token=self.tokenizer.cls_token , cls_token_segment_id=2 if self.config.model_type in ["""xlnet"""] else 0 , sep_token=self.tokenizer.sep_token , sep_token_extra=__lowerCAmelCase , pad_on_left=bool(self.config.model_type in ["""xlnet"""] ) , pad_token=self.tokenizer.pad_token_id , pad_token_segment_id=self.tokenizer.pad_token_type_id , pad_token_label_id=self.pad_token_label_id , )
logger.info("""Saving features into cached file %s""" , __lowerCAmelCase )
torch.save(__lowerCAmelCase , __lowerCAmelCase )
def _lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = False ):
UpperCamelCase__ = self._feature_file(__lowerCAmelCase )
logger.info("""Loading features from cached file %s""" , __lowerCAmelCase )
UpperCamelCase__ = torch.load(__lowerCAmelCase )
UpperCamelCase__ = torch.tensor([f.input_ids for f in features] , dtype=torch.long )
UpperCamelCase__ = torch.tensor([f.attention_mask for f in features] , dtype=torch.long )
if features[0].token_type_ids is not None:
UpperCamelCase__ = torch.tensor([f.token_type_ids for f in features] , dtype=torch.long )
else:
UpperCamelCase__ = torch.tensor([0 for f in features] , dtype=torch.long )
        # HACK (we will stop using this soon)
UpperCamelCase__ = torch.tensor([f.label_ids for f in features] , dtype=torch.long )
return DataLoader(
TensorDataset(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) , batch_size=__lowerCAmelCase )
def _lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase ):
"""Compute validation""" ""
UpperCamelCase__ = {"""input_ids""": batch[0], """attention_mask""": batch[1], """labels""": batch[3]}
if self.config.model_type != "distilbert":
UpperCamelCase__ = (
batch[2] if self.config.model_type in ["""bert""", """xlnet"""] else None
            ) # XLM and RoBERTa don't use token_type_ids
UpperCamelCase__ = self(**__lowerCAmelCase )
UpperCamelCase__ , UpperCamelCase__ = outputs[:2]
UpperCamelCase__ = logits.detach().cpu().numpy()
UpperCamelCase__ = inputs["""labels"""].detach().cpu().numpy()
return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids}
def _lowerCamelCase ( self , __lowerCAmelCase ):
UpperCamelCase__ = torch.stack([x["""val_loss"""] for x in outputs] ).mean()
UpperCamelCase__ = np.concatenate([x["""pred"""] for x in outputs] , axis=0 )
UpperCamelCase__ = np.argmax(__lowerCAmelCase , axis=2 )
UpperCamelCase__ = np.concatenate([x["""target"""] for x in outputs] , axis=0 )
UpperCamelCase__ = dict(enumerate(self.labels ) )
UpperCamelCase__ = [[] for _ in range(out_label_ids.shape[0] )]
UpperCamelCase__ = [[] for _ in range(out_label_ids.shape[0] )]
for i in range(out_label_ids.shape[0] ):
for j in range(out_label_ids.shape[1] ):
if out_label_ids[i, j] != self.pad_token_label_id:
out_label_list[i].append(label_map[out_label_ids[i][j]] )
preds_list[i].append(label_map[preds[i][j]] )
UpperCamelCase__ = {
"""val_loss""": val_loss_mean,
"""accuracy_score""": accuracy_score(__lowerCAmelCase , __lowerCAmelCase ),
"""precision""": precision_score(__lowerCAmelCase , __lowerCAmelCase ),
"""recall""": recall_score(__lowerCAmelCase , __lowerCAmelCase ),
"""f1""": fa_score(__lowerCAmelCase , __lowerCAmelCase ),
}
UpperCamelCase__ = dict(results.items() )
UpperCamelCase__ = results
return ret, preds_list, out_label_list
def _lowerCamelCase ( self , __lowerCAmelCase ):
# when stable
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = self._eval_end(__lowerCAmelCase )
UpperCamelCase__ = ret["""log"""]
return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
def _lowerCamelCase ( self , __lowerCAmelCase ):
# updating to test_epoch_end instead of deprecated test_end
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = self._eval_end(__lowerCAmelCase )
# Converting to the dict required by pl
# https://github.com/PyTorchLightning/pytorch-lightning/blob/master/\
# pytorch_lightning/trainer/logging.py#L139
UpperCamelCase__ = ret["""log"""]
# `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss`
return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
@staticmethod
def _lowerCamelCase ( __lowerCAmelCase , __lowerCAmelCase ):
# Add NER specific options
BaseTransformer.add_model_specific_args(__lowerCAmelCase , __lowerCAmelCase )
parser.add_argument(
"""--task_type""" , default="""NER""" , type=__lowerCAmelCase , help="""Task type to fine tune in training (e.g. NER, POS, etc)""" )
parser.add_argument(
"""--max_seq_length""" , default=128 , type=__lowerCAmelCase , help=(
"""The maximum total input sequence length after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
) , )
parser.add_argument(
"""--labels""" , default="""""" , type=__lowerCAmelCase , help="""Path to a file containing all labels. If not specified, CoNLL-2003 labels are used.""" , )
parser.add_argument(
"""--gpus""" , default=0 , type=__lowerCAmelCase , help="""The number of GPUs allocated for this, it is by default 0 meaning none""" , )
parser.add_argument(
"""--overwrite_cache""" , action="""store_true""" , help="""Overwrite the cached training and evaluation sets""" )
return parser
if __name__ == "__main__":
UpperCamelCase__ = argparse.ArgumentParser()
add_generic_args(parser, os.getcwd())
UpperCamelCase__ = NERTransformer.add_model_specific_args(parser, os.getcwd())
UpperCamelCase__ = parser.parse_args()
UpperCamelCase__ = NERTransformer(args)
UpperCamelCase__ = generic_train(model, args)
if args.do_predict:
# See https://github.com/huggingface/transformers/issues/3159
# pl use this default format to create a checkpoint:
# https://github.com/PyTorchLightning/pytorch-lightning/blob/master\
# /pytorch_lightning/callbacks/model_checkpoint.py#L322
UpperCamelCase__ = sorted(glob.glob(os.path.join(args.output_dir, "checkpoint-epoch=*.ckpt"), recursive=True))
UpperCamelCase__ = model.load_from_checkpoint(checkpoints[-1])
trainer.test(model)
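# Hedged invocation sketch. --task_type, --max_seq_length, --labels, --gpus and
# --overwrite_cache come from add_model_specific_args above; the remaining flags
# are assumed to be provided by add_generic_args / BaseTransformer and may be
# named differently in your version of lightning_base:
#
#   python run_ner.py \
#       --data_dir ./conll2003 \
#       --labels ./labels.txt \
#       --task_type NER \
#       --max_seq_length 128 \
#       --model_name_or_path bert-base-cased \
#       --output_dir ./ner-out \
#       --do_train --do_predict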
| 87 | 1 |
"""simple docstring"""
import json
import logging
import os
import socket
import git
import numpy as np
import torch
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - PID: %(process)d - %(message)s""",
datefmt="""%m/%d/%Y %H:%M:%S""",
level=logging.INFO,
)
snake_case_ = logging.getLogger(__name__)
def _lowerCAmelCase ( lowercase_ ):
UpperCAmelCase = git.Repo(search_parent_directories=lowercase_ )
UpperCAmelCase = {
'repo_id': str(lowercase_ ),
'repo_sha': str(repo.head.object.hexsha ),
'repo_branch': str(repo.active_branch ),
}
with open(os.path.join(lowercase_ , 'git_log.json' ) , 'w' ) as f:
json.dump(lowercase_ , lowercase_ , indent=4 )
def _lowerCAmelCase ( lowercase_ ):
if params.n_gpu <= 0:
UpperCAmelCase = 0
UpperCAmelCase = -1
UpperCAmelCase = True
UpperCAmelCase = False
return
assert torch.cuda.is_available()
logger.info('Initializing GPUs' )
if params.n_gpu > 1:
assert params.local_rank != -1
UpperCAmelCase = int(os.environ['WORLD_SIZE'] )
UpperCAmelCase = int(os.environ['N_GPU_NODE'] )
UpperCAmelCase = int(os.environ['RANK'] )
# number of nodes / node ID
UpperCAmelCase = params.world_size // params.n_gpu_per_node
UpperCAmelCase = params.global_rank // params.n_gpu_per_node
UpperCAmelCase = True
assert params.n_nodes == int(os.environ['N_NODES'] )
assert params.node_id == int(os.environ['NODE_RANK'] )
# local job (single GPU)
else:
assert params.local_rank == -1
UpperCAmelCase = 1
UpperCAmelCase = 0
UpperCAmelCase = 0
UpperCAmelCase = 0
UpperCAmelCase = 1
UpperCAmelCase = 1
UpperCAmelCase = False
# sanity checks
assert params.n_nodes >= 1
assert 0 <= params.node_id < params.n_nodes
assert 0 <= params.local_rank <= params.global_rank < params.world_size
assert params.world_size == params.n_nodes * params.n_gpu_per_node
# define whether this is the master process / if we are in multi-node distributed mode
UpperCAmelCase = params.node_id == 0 and params.local_rank == 0
UpperCAmelCase = params.n_nodes > 1
# summary
UpperCAmelCase = F"""--- Global rank: {params.global_rank} - """
logger.info(PREFIX + 'Number of nodes: %i' % params.n_nodes )
logger.info(PREFIX + 'Node ID : %i' % params.node_id )
logger.info(PREFIX + 'Local rank : %i' % params.local_rank )
logger.info(PREFIX + 'World size : %i' % params.world_size )
logger.info(PREFIX + 'GPUs per node : %i' % params.n_gpu_per_node )
logger.info(PREFIX + 'Master : %s' % str(params.is_master ) )
logger.info(PREFIX + 'Multi-node : %s' % str(params.multi_node ) )
logger.info(PREFIX + 'Multi-GPU : %s' % str(params.multi_gpu ) )
logger.info(PREFIX + 'Hostname : %s' % socket.gethostname() )
# set GPU device
torch.cuda.set_device(params.local_rank )
# initialize multi-GPU
if params.multi_gpu:
logger.info('Initializing PyTorch distributed' )
torch.distributed.init_process_group(
init_method='env://' , backend='nccl' , )
def _lowerCAmelCase ( lowercase_ ):
np.random.seed(args.seed )
torch.manual_seed(args.seed )
if args.n_gpu > 0:
torch.cuda.manual_seed_all(args.seed )
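# Hedged launch sketch: the GPU-initialisation helper above reads WORLD_SIZE,
# N_GPU_NODE, RANK, N_NODES and NODE_RANK from the environment in the
# multi-GPU branch, so a two-node launcher would export something like this
# (script name and extra flags are placeholders):
#
#   N_NODES=2 NODE_RANK=0 N_GPU_NODE=8 WORLD_SIZE=16 RANK=0 \
#       python train.py --n_gpu 8 --local_rank 0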
| 78 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_videomae import VideoMAEImageProcessor
__lowerCAmelCase = logging.get_logger(__name__)
class UpperCAmelCase__ ( lowercase__ ):
"""simple docstring"""
def __init__( self : Tuple ,*_a : List[str] ,**_a : Any ):
'''simple docstring'''
warnings.warn(
'The class VideoMAEFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'
' Please use VideoMAEImageProcessor instead.' ,_a ,)
super().__init__(*_a ,**_a )
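# Hedged migration sketch: the deprecated class above only forwards to
# VideoMAEImageProcessor, so downstream code should switch to it directly
# (checkpoint name is illustrative):
#
#   from transformers import VideoMAEImageProcessor
#
#   image_processor = VideoMAEImageProcessor.from_pretrained("MCG-NJU/videomae-base")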
| 271 | 0 |
'''simple docstring'''
def _lowerCamelCase ( lowerCamelCase_ : int = 1000 ):
"""simple docstring"""
return sum(e for e in range(3 , lowerCamelCase_ ) if e % 3 == 0 or e % 5 == 0 )
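# Illustrative cross-check (not part of the original solution): the same total
# via inclusion-exclusion over arithmetic series; for n = 1000 both approaches
# give 233168, the known answer to Project Euler problem 1.
def _series_sum(k: int, n: int) -> int:
    """Sum of the positive multiples of k strictly below n."""
    m = (n - 1) // k
    return k * m * (m + 1) // 2


def _closed_form(n: int = 1000) -> int:
    return _series_sum(3, n) + _series_sum(5, n) - _series_sum(15, n)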
if __name__ == "__main__":
print(f'''{solution() = }''')
| 274 |
'''simple docstring'''
import os
import time
import numpy as np
import onnxruntime as ort
snake_case__ : Optional[int] = '''1'''
snake_case__ : str = '''0'''
snake_case__ : List[str] = '''1'''
snake_case__ : List[str] = ort.SessionOptions()
snake_case__ : str = ort.GraphOptimizationLevel.ORT_DISABLE_ALL
print('''Create inference session...''')
snake_case__ : Dict = ['''TensorrtExecutionProvider''', '''CUDAExecutionProvider''']
snake_case__ : Dict = ort.InferenceSession('''model.onnx''', sess_options=sess_opt, providers=execution_provider)
snake_case__ : str = ort.RunOptions()
snake_case__ : List[Any] = 128
snake_case__ : Union[str, Any] = 1
snake_case__ : Tuple = np.ones((batch, sequence), dtype=np.intaa)
snake_case__ : Tuple = np.ones((batch, sequence), dtype=np.intaa)
snake_case__ : Union[str, Any] = np.ones((batch, sequence), dtype=np.intaa)
print('''Warm up phase...''')
sess.run(
None,
{
sess.get_inputs()[0].name: input_ids,
sess.get_inputs()[1].name: attention_mask,
sess.get_inputs()[2].name: token_type_ids,
},
run_options=run_opt,
)
print('''Start inference...''')
snake_case__ : Union[str, Any] = time.time()
snake_case__ : str = 2000
snake_case__ : Tuple = {}
for iter in range(max_iters):
snake_case__ : str = sess.run(
None,
{
sess.get_inputs()[0].name: input_ids,
sess.get_inputs()[1].name: attention_mask,
sess.get_inputs()[2].name: token_type_ids,
},
run_options=run_opt,
)
print('''Average Inference Time = {:.3f} ms'''.format((time.time() - start_time) * 1000 / max_iters))
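# Hedged companion note: one way to produce the "model.onnx" consumed above is
# the transformers ONNX export CLI, which emits a graph with the same
# input_ids / attention_mask / token_type_ids inputs fed in this script
# (model name is illustrative):
#
#   python -m transformers.onnx --model=bert-base-uncased onnx_out/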
| 274 | 1 |
'''simple docstring'''
from __future__ import annotations
import math
from collections.abc import Callable
def __magic_name__( lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase = 1_0_0, ):
__lowerCAmelCase = x_start
__lowerCAmelCase = fnc(a_)
__lowerCAmelCase = 0.0
for _ in range(a_):
        # Approximates the curve as a sequence of straight-line segments and sums their lengths
__lowerCAmelCase = (x_end - x_start) / steps + xa
__lowerCAmelCase = fnc(a_)
length += math.hypot(xa - xa, fxa - fxa)
# Increment step
__lowerCAmelCase = xa
__lowerCAmelCase = fxa
return length
if __name__ == "__main__":
def __magic_name__( lowerCamelCase):
return math.sin(1_0 * x)
print("""f(x) = sin(10 * x)""")
print("""The length of the curve from x = -10 to x = 10 is:""")
_UpperCAmelCase : Tuple = 1_0
while i <= 1_0_0_0_0_0:
print(f"""With {i} steps: {line_length(f, -1_0, 1_0, i)}""")
i *= 1_0
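# Hedged sanity check (kept as a comment): for f(x) = x the curve is already a
# straight line, so the piecewise-linear estimate is exact at any step count
# and should equal sqrt(2) over [0, 1]. `line_length` is the intended name of
# the obfuscated function above, as the __main__ block suggests.
#
#   assert abs(line_length(lambda x: x, 0.0, 1.0, 100) - math.sqrt(2)) < 1e-12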
| 174 |
import argparse
import os
import torch
from transformers import FlavaConfig, FlavaForPreTraining
from transformers.models.flava.convert_dalle_to_flava_codebook import convert_dalle_checkpoint
def UpperCAmelCase ( a_ ) -> List[str]:
"""simple docstring"""
return sum(param.float().sum() if "encoder.embeddings" not in key else 0 for key, param in state_dict.items() )
def UpperCAmelCase ( a_ , a_ ) -> Tuple:
"""simple docstring"""
__A = {}
for key, value in state_dict.items():
if "text_encoder.embeddings" in key or "image_encoder.embeddings" in key:
continue
__A = key.replace("heads.cmd.mim_head.cls.predictions" , "mmm_image_head" )
__A = key.replace("heads.cmd.mlm_head.cls.predictions" , "mmm_text_head" )
__A = key.replace("heads.cmd.itm_head.cls" , "itm_head" )
__A = key.replace("heads.cmd.itm_head.pooler" , "itm_head.pooler" )
__A = key.replace("heads.cmd.clip_head.logit_scale" , "flava.logit_scale" )
__A = key.replace("heads.fairseq_mlm.cls.predictions" , "mlm_head" )
__A = key.replace("heads.imagenet.mim_head.cls.predictions" , "mim_head" )
__A = key.replace("mm_text_projection" , "flava.text_to_mm_projection" )
__A = key.replace("mm_image_projection" , "flava.image_to_mm_projection" )
__A = key.replace("image_encoder.module" , "flava.image_model" )
__A = key.replace("text_encoder.module" , "flava.text_model" )
__A = key.replace("mm_encoder.module.encoder.cls_token" , "flava.multimodal_model.cls_token" )
__A = key.replace("mm_encoder.module" , "flava.multimodal_model" )
__A = key.replace("text_projection" , "flava.text_projection" )
__A = key.replace("image_projection" , "flava.image_projection" )
__A = value.float()
for key, value in codebook_state_dict.items():
__A = value
return upgrade
@torch.no_grad()
def UpperCAmelCase ( a_ , a_ , a_ , a_=None ) -> Tuple:
"""simple docstring"""
if config_path is not None:
__A = FlavaConfig.from_pretrained(a_ )
else:
__A = FlavaConfig()
__A = FlavaForPreTraining(a_ ).eval()
__A = convert_dalle_checkpoint(a_ , a_ , save_checkpoint=a_ )
if os.path.exists(a_ ):
__A = torch.load(a_ , map_location="cpu" )
else:
__A = torch.hub.load_state_dict_from_url(a_ , map_location="cpu" )
__A = upgrade_state_dict(a_ , a_ )
hf_model.load_state_dict(a_ )
__A = hf_model.state_dict()
__A = count_parameters(a_ )
__A = count_parameters(a_ ) + count_parameters(a_ )
assert torch.allclose(a_ , a_ , atol=1E-3 )
hf_model.save_pretrained(a_ )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE :Any = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to flava checkpoint')
parser.add_argument('--codebook_path', default=None, type=str, help='Path to flava codebook checkpoint')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
SCREAMING_SNAKE_CASE :Optional[int] = parser.parse_args()
convert_flava_checkpoint(args.checkpoint_path, args.codebook_path, args.pytorch_dump_folder_path, args.config_path)
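# Hedged invocation sketch built from the argparse flags above (all paths are
# placeholders; the script file name is whatever this module is saved as):
#
#   python convert_flava_checkpoint.py \
#       --checkpoint_path ./flava.pt \
#       --codebook_path ./flava_codebook.pt \
#       --pytorch_dump_folder_path ./hf_flava \
#       --config_path ./flava_config.json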
| 15 | 0 |
"""simple docstring"""
import io
import json
import fsspec
import pytest
from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.json import JsonDatasetReader, JsonDatasetWriter
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def _UpperCamelCase (a__ :Optional[int] , a__ :List[str] ):
"""simple docstring"""
assert isinstance(a__ , a__ )
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("""keep_in_memory""" , [False, True] )
def _UpperCamelCase (a__ :Any , a__ :str , a__ :Union[str, Any] ):
"""simple docstring"""
UpperCamelCase__ = tmp_path / """cache"""
UpperCamelCase__ = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
UpperCamelCase__ = JsonDatasetReader(a__ , cache_dir=a__ , keep_in_memory=a__ ).read()
_check_json_dataset(a__ , a__ )
@pytest.mark.parametrize(
"""features""" , [
None,
{"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""},
{"""col_1""": """string""", """col_2""": """string""", """col_3""": """string"""},
{"""col_1""": """int32""", """col_2""": """int32""", """col_3""": """int32"""},
{"""col_1""": """float32""", """col_2""": """float32""", """col_3""": """float32"""},
] , )
def _UpperCamelCase (a__ :str , a__ :List[str] , a__ :Union[str, Any] ):
"""simple docstring"""
UpperCamelCase__ = tmp_path / """cache"""
UpperCamelCase__ = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
UpperCamelCase__ = features.copy() if features else default_expected_features
UpperCamelCase__ = (
Features({feature: Value(a__ ) for feature, dtype in features.items()} ) if features is not None else None
)
UpperCamelCase__ = JsonDatasetReader(a__ , features=a__ , cache_dir=a__ ).read()
_check_json_dataset(a__ , a__ )
@pytest.mark.parametrize(
"""features""" , [
None,
{"""col_3""": """float64""", """col_1""": """string""", """col_2""": """int64"""},
] , )
def _UpperCamelCase (a__ :List[Any] , a__ :List[Any] , a__ :Tuple ):
"""simple docstring"""
UpperCamelCase__ = tmp_path / """cache"""
UpperCamelCase__ = {"""col_3""": """float64""", """col_1""": """string""", """col_2""": """int64"""}
UpperCamelCase__ = features.copy() if features else default_expected_features
UpperCamelCase__ = (
Features({feature: Value(a__ ) for feature, dtype in features.items()} ) if features is not None else None
)
UpperCamelCase__ = JsonDatasetReader(a__ , features=a__ , cache_dir=a__ ).read()
assert isinstance(a__ , a__ )
assert dataset.num_rows == 2
assert dataset.num_columns == 3
assert dataset.column_names == ["col_3", "col_1", "col_2"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
def _UpperCamelCase (a__ :Optional[int] , a__ :Optional[Any] ):
"""simple docstring"""
UpperCamelCase__ = {"""col_2""": """int64""", """col_3""": """float64""", """col_1""": """string"""}
UpperCamelCase__ = features.copy()
UpperCamelCase__ = (
Features({feature: Value(a__ ) for feature, dtype in features.items()} ) if features is not None else None
)
UpperCamelCase__ = tmp_path / """cache"""
UpperCamelCase__ = JsonDatasetReader(a__ , features=a__ , cache_dir=a__ ).read()
assert isinstance(a__ , a__ )
assert dataset.num_rows == 2
assert dataset.num_columns == 3
assert dataset.column_names == ["col_2", "col_3", "col_1"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("""split""" , [None, NamedSplit("""train""" ), """train""", """test"""] )
def _UpperCamelCase (a__ :List[Any] , a__ :Optional[int] , a__ :Union[str, Any] ):
"""simple docstring"""
UpperCamelCase__ = tmp_path / """cache"""
UpperCamelCase__ = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
UpperCamelCase__ = JsonDatasetReader(a__ , cache_dir=a__ , split=a__ ).read()
_check_json_dataset(a__ , a__ )
assert dataset.split == split if split else "train"
@pytest.mark.parametrize("""path_type""" , [str, list] )
def _UpperCamelCase (a__ :List[str] , a__ :List[Any] , a__ :str ):
"""simple docstring"""
if issubclass(a__ , a__ ):
UpperCamelCase__ = jsonl_path
elif issubclass(a__ , a__ ):
UpperCamelCase__ = [jsonl_path]
UpperCamelCase__ = tmp_path / """cache"""
UpperCamelCase__ = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
UpperCamelCase__ = JsonDatasetReader(a__ , cache_dir=a__ ).read()
_check_json_dataset(a__ , a__ )
def _UpperCamelCase (a__ :Any , a__ :Optional[Any] , a__ :str=("train",) ):
"""simple docstring"""
assert isinstance(a__ , a__ )
for split in splits:
UpperCamelCase__ = dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("""keep_in_memory""" , [False, True] )
def _UpperCamelCase (a__ :Union[str, Any] , a__ :Tuple , a__ :Union[str, Any] ):
"""simple docstring"""
UpperCamelCase__ = tmp_path / """cache"""
UpperCamelCase__ = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
UpperCamelCase__ = JsonDatasetReader({"""train""": jsonl_path} , cache_dir=a__ , keep_in_memory=a__ ).read()
_check_json_datasetdict(a__ , a__ )
@pytest.mark.parametrize(
"""features""" , [
None,
{"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""},
{"""col_1""": """string""", """col_2""": """string""", """col_3""": """string"""},
{"""col_1""": """int32""", """col_2""": """int32""", """col_3""": """int32"""},
{"""col_1""": """float32""", """col_2""": """float32""", """col_3""": """float32"""},
] , )
def _UpperCamelCase (a__ :int , a__ :Union[str, Any] , a__ :str ):
"""simple docstring"""
UpperCamelCase__ = tmp_path / """cache"""
UpperCamelCase__ = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
UpperCamelCase__ = features.copy() if features else default_expected_features
UpperCamelCase__ = (
Features({feature: Value(a__ ) for feature, dtype in features.items()} ) if features is not None else None
)
UpperCamelCase__ = JsonDatasetReader({"""train""": jsonl_path} , features=a__ , cache_dir=a__ ).read()
_check_json_datasetdict(a__ , a__ )
@pytest.mark.parametrize("""split""" , [None, NamedSplit("""train""" ), """train""", """test"""] )
def _UpperCamelCase (a__ :Optional[Any] , a__ :Optional[int] , a__ :int ):
"""simple docstring"""
if split:
UpperCamelCase__ = {split: jsonl_path}
else:
UpperCamelCase__ = """train"""
UpperCamelCase__ = {"""train""": jsonl_path, """test""": jsonl_path}
UpperCamelCase__ = tmp_path / """cache"""
UpperCamelCase__ = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
UpperCamelCase__ = JsonDatasetReader(a__ , cache_dir=a__ ).read()
_check_json_datasetdict(a__ , a__ , splits=list(path.keys() ) )
assert all(dataset[split].split == split for split in path.keys() )
def _UpperCamelCase (a__ :Any ):
"""simple docstring"""
return json.load(a__ )
def _UpperCamelCase (a__ :Tuple ):
"""simple docstring"""
return [json.loads(a__ ) for line in buffer]
class __SCREAMING_SNAKE_CASE :
@pytest.mark.parametrize("""lines, load_json_function""" , [(True, load_json_lines), (False, load_json)] )
def _lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
with io.BytesIO() as buffer:
JsonDatasetWriter(__lowerCAmelCase , __lowerCAmelCase , lines=__lowerCAmelCase ).write()
buffer.seek(0 )
UpperCamelCase__ = load_json_function(__lowerCAmelCase )
assert isinstance(__lowerCAmelCase , __lowerCAmelCase )
assert isinstance(exported_content[0] , __lowerCAmelCase )
assert len(__lowerCAmelCase ) == 10
@pytest.mark.parametrize(
"""orient, container, keys, len_at""" , [
("""records""", list, {"""tokens""", """labels""", """answers""", """id"""}, None),
("""split""", dict, {"""columns""", """data"""}, """data"""),
("""index""", dict, set("""0123456789""" ), None),
("""columns""", dict, {"""tokens""", """labels""", """answers""", """id"""}, """tokens"""),
("""values""", list, None, None),
("""table""", dict, {"""schema""", """data"""}, """data"""),
] , )
def _lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
with io.BytesIO() as buffer:
JsonDatasetWriter(__lowerCAmelCase , __lowerCAmelCase , lines=__lowerCAmelCase , orient=__lowerCAmelCase ).write()
buffer.seek(0 )
UpperCamelCase__ = load_json(__lowerCAmelCase )
assert isinstance(__lowerCAmelCase , __lowerCAmelCase )
if keys:
if container is dict:
assert exported_content.keys() == keys
else:
assert exported_content[0].keys() == keys
else:
assert not hasattr(__lowerCAmelCase , """keys""" ) and not hasattr(exported_content[0] , """keys""" )
if len_at:
assert len(exported_content[len_at] ) == 10
else:
assert len(__lowerCAmelCase ) == 10
@pytest.mark.parametrize("""lines, load_json_function""" , [(True, load_json_lines), (False, load_json)] )
def _lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
with io.BytesIO() as buffer:
JsonDatasetWriter(__lowerCAmelCase , __lowerCAmelCase , lines=__lowerCAmelCase , num_proc=2 ).write()
buffer.seek(0 )
UpperCamelCase__ = load_json_function(__lowerCAmelCase )
assert isinstance(__lowerCAmelCase , __lowerCAmelCase )
assert isinstance(exported_content[0] , __lowerCAmelCase )
assert len(__lowerCAmelCase ) == 10
@pytest.mark.parametrize(
"""orient, container, keys, len_at""" , [
("""records""", list, {"""tokens""", """labels""", """answers""", """id"""}, None),
("""split""", dict, {"""columns""", """data"""}, """data"""),
("""index""", dict, set("""0123456789""" ), None),
("""columns""", dict, {"""tokens""", """labels""", """answers""", """id"""}, """tokens"""),
("""values""", list, None, None),
("""table""", dict, {"""schema""", """data"""}, """data"""),
] , )
def _lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
with io.BytesIO() as buffer:
JsonDatasetWriter(__lowerCAmelCase , __lowerCAmelCase , lines=__lowerCAmelCase , orient=__lowerCAmelCase , num_proc=2 ).write()
buffer.seek(0 )
UpperCamelCase__ = load_json(__lowerCAmelCase )
assert isinstance(__lowerCAmelCase , __lowerCAmelCase )
if keys:
if container is dict:
assert exported_content.keys() == keys
else:
assert exported_content[0].keys() == keys
else:
assert not hasattr(__lowerCAmelCase , """keys""" ) and not hasattr(exported_content[0] , """keys""" )
if len_at:
assert len(exported_content[len_at] ) == 10
else:
assert len(__lowerCAmelCase ) == 10
def _lowerCamelCase ( self , __lowerCAmelCase ):
with pytest.raises(__lowerCAmelCase ):
with io.BytesIO() as buffer:
JsonDatasetWriter(__lowerCAmelCase , __lowerCAmelCase , num_proc=0 )
@pytest.mark.parametrize("""compression, extension""" , [("""gzip""", """gz"""), ("""bz2""", """bz2"""), ("""xz""", """xz""")] )
def _lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
UpperCamelCase__ = tmp_path_factory.mktemp("""data""" ) / f"""test.json.{extension}"""
UpperCamelCase__ = str(shared_datadir / f"""test_file.json.{extension}""" )
JsonDatasetWriter(__lowerCAmelCase , __lowerCAmelCase , compression=__lowerCAmelCase ).write()
with fsspec.open(__lowerCAmelCase , """rb""" , compression="""infer""" ) as f:
UpperCamelCase__ = f.read()
with fsspec.open(__lowerCAmelCase , """rb""" , compression="""infer""" ) as f:
UpperCamelCase__ = f.read()
assert exported_content == original_content
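# Hedged round-trip sketch mirroring the APIs the tests above exercise; note
# that datasets.io.json is internal API, so this documents tested behaviour
# rather than a stable public recipe:
#
#   from datasets import Dataset
#   from datasets.io.json import JsonDatasetReader, JsonDatasetWriter
#
#   ds = Dataset.from_dict({"col_1": ["a", "b"], "col_2": [1, 2]})
#   JsonDatasetWriter(ds, "out.jsonl", lines=True).write()
#   round_tripped = JsonDatasetReader("out.jsonl").read()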
| 356 |
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import is_speech_available, is_vision_available
from transformers.testing_utils import require_torch
if is_vision_available():
from transformers import TvltImageProcessor
if is_speech_available():
from transformers import TvltFeatureExtractor
from transformers import TvltProcessor
@require_torch
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def _lowerCamelCase ( self ):
UpperCamelCase__ = """ZinengTang/tvlt-base"""
UpperCamelCase__ = tempfile.mkdtemp()
def _lowerCamelCase ( self , **__lowerCAmelCase ):
return TvltImageProcessor.from_pretrained(self.checkpoint , **__lowerCAmelCase )
def _lowerCamelCase ( self , **__lowerCAmelCase ):
return TvltFeatureExtractor.from_pretrained(self.checkpoint , **__lowerCAmelCase )
def _lowerCamelCase ( self ):
shutil.rmtree(self.tmpdirname )
def _lowerCamelCase ( self ):
UpperCamelCase__ = self.get_image_processor()
UpperCamelCase__ = self.get_feature_extractor()
UpperCamelCase__ = TvltProcessor(image_processor=__lowerCAmelCase , feature_extractor=__lowerCAmelCase )
processor.save_pretrained(self.tmpdirname )
UpperCamelCase__ = TvltProcessor.from_pretrained(self.tmpdirname )
self.assertIsInstance(processor.feature_extractor , __lowerCAmelCase )
self.assertIsInstance(processor.image_processor , __lowerCAmelCase )
def _lowerCamelCase ( self ):
UpperCamelCase__ = self.get_image_processor()
UpperCamelCase__ = self.get_feature_extractor()
UpperCamelCase__ = TvltProcessor(image_processor=__lowerCAmelCase , feature_extractor=__lowerCAmelCase )
UpperCamelCase__ = np.ones([12000] )
UpperCamelCase__ = feature_extractor(__lowerCAmelCase , return_tensors="""np""" )
UpperCamelCase__ = processor(audio=__lowerCAmelCase , return_tensors="""np""" )
for key in audio_dict.keys():
self.assertAlmostEqual(audio_dict[key].sum() , input_processor[key].sum() , delta=1E-2 )
def _lowerCamelCase ( self ):
UpperCamelCase__ = self.get_image_processor()
UpperCamelCase__ = self.get_feature_extractor()
UpperCamelCase__ = TvltProcessor(image_processor=__lowerCAmelCase , feature_extractor=__lowerCAmelCase )
UpperCamelCase__ = np.ones([3, 224, 224] )
UpperCamelCase__ = image_processor(__lowerCAmelCase , return_tensors="""np""" )
UpperCamelCase__ = processor(images=__lowerCAmelCase , return_tensors="""np""" )
for key in image_dict.keys():
self.assertAlmostEqual(image_dict[key].sum() , input_processor[key].sum() , delta=1E-2 )
def _lowerCamelCase ( self ):
UpperCamelCase__ = self.get_image_processor()
UpperCamelCase__ = self.get_feature_extractor()
UpperCamelCase__ = TvltProcessor(image_processor=__lowerCAmelCase , feature_extractor=__lowerCAmelCase )
UpperCamelCase__ = np.ones([12000] )
UpperCamelCase__ = np.ones([3, 224, 224] )
UpperCamelCase__ = processor(audio=__lowerCAmelCase , images=__lowerCAmelCase )
self.assertListEqual(list(inputs.keys() ) , ["""audio_values""", """audio_mask""", """pixel_values""", """pixel_mask"""] )
# test if it raises when no input is passed
with pytest.raises(__lowerCAmelCase ):
processor()
def _lowerCamelCase ( self ):
UpperCamelCase__ = self.get_image_processor()
UpperCamelCase__ = self.get_feature_extractor()
UpperCamelCase__ = TvltProcessor(image_processor=__lowerCAmelCase , feature_extractor=__lowerCAmelCase )
self.assertListEqual(
processor.model_input_names , image_processor.model_input_names + feature_extractor.model_input_names , msg="""`processor` and `image_processor`+`feature_extractor` model input names do not match""" , )
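# Hedged usage sketch mirroring the tests above (same checkpoint and shapes):
#
#   import numpy as np
#   from transformers import TvltFeatureExtractor, TvltImageProcessor, TvltProcessor
#
#   processor = TvltProcessor(
#       image_processor=TvltImageProcessor.from_pretrained("ZinengTang/tvlt-base"),
#       feature_extractor=TvltFeatureExtractor.from_pretrained("ZinengTang/tvlt-base"),
#   )
#   inputs = processor(audio=np.ones(12_000), images=np.ones((3, 224, 224)))
#   # inputs has keys: audio_values, audio_mask, pixel_values, pixel_mask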
| 87 | 0 |
import builtins
import sys
from ...utils.imports import _is_package_available
from . import cursor, input
from .helpers import Direction, clear_line, forceWrite, linebreak, move_cursor, reset_cursor, writeColor
from .keymap import KEYMAP
lowercase_ : Union[str, Any] = False
try:
lowercase_ : Dict = _is_package_available('google.colab')
except ModuleNotFoundError:
pass
@input.register
class __lowerCAmelCase :
def __init__( self : int , snake_case__ : str = None , snake_case__ : list = [] ):
"""simple docstring"""
_UpperCAmelCase = 0
_UpperCAmelCase = choices
_UpperCAmelCase = prompt
if sys.platform == "win32":
_UpperCAmelCase = "*"
else:
_UpperCAmelCase = "➔ "
def UpperCamelCase ( self : Tuple , snake_case__ : int , snake_case__ : str = "" ):
"""simple docstring"""
if sys.platform != "win32":
writeColor(self.choices[index] , 32 , snake_case__ )
else:
forceWrite(self.choices[index] , snake_case__ )
def UpperCamelCase ( self : str , snake_case__ : int ):
"""simple docstring"""
if index == self.position:
forceWrite(F""" {self.arrow_char} """ )
self.write_choice(snake_case__ )
else:
forceWrite(F""" {self.choices[index]}""" )
reset_cursor()
def UpperCamelCase ( self : int , snake_case__ : Direction , snake_case__ : int = 1 ):
"""simple docstring"""
_UpperCAmelCase = self.position
if direction == Direction.DOWN:
if self.position + 1 >= len(self.choices ):
return
self.position += num_spaces
else:
if self.position - 1 < 0:
return
self.position -= num_spaces
clear_line()
self.print_choice(snake_case__ )
move_cursor(snake_case__ , direction.name )
self.print_choice(self.position )
@input.mark(KEYMAP["up"] )
def UpperCamelCase ( self : List[str] ):
"""simple docstring"""
self.move_direction(Direction.UP )
@input.mark(KEYMAP["down"] )
def UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
self.move_direction(Direction.DOWN )
@input.mark(KEYMAP["newline"] )
def UpperCamelCase ( self : Optional[Any] ):
"""simple docstring"""
move_cursor(len(self.choices ) - self.position , "DOWN" )
return self.position
@input.mark(KEYMAP["interrupt"] )
def UpperCamelCase ( self : str ):
"""simple docstring"""
move_cursor(len(self.choices ) - self.position , "DOWN" )
raise KeyboardInterrupt
    @input.mark_multiple(*[KEYMAP[str(number )] for number in range(10 )] )
def UpperCamelCase ( self : Optional[Any] ):
"""simple docstring"""
_UpperCAmelCase = int(chr(self.current_selection ) )
_UpperCAmelCase = index - self.position
if index == self.position:
return
if index < len(self.choices ):
if self.position > index:
self.move_direction(Direction.UP , -movement )
elif self.position < index:
self.move_direction(Direction.DOWN , snake_case__ )
else:
return
else:
return
def UpperCamelCase ( self : Any , snake_case__ : int = 0 ):
"""simple docstring"""
if self.prompt:
linebreak()
forceWrite(self.prompt , "\n" )
if in_colab:
forceWrite("Please input a choice index (starting from 0), and press enter" , "\n" )
else:
forceWrite("Please select a choice using the arrow or number keys, and selecting with enter" , "\n" )
_UpperCAmelCase = default_choice
for i in range(len(self.choices ) ):
self.print_choice(snake_case__ )
forceWrite("\n" )
move_cursor(len(self.choices ) - self.position , "UP" )
with cursor.hide():
while True:
if in_colab:
try:
_UpperCAmelCase = int(builtins.input() )
except ValueError:
_UpperCAmelCase = default_choice
else:
_UpperCAmelCase = self.handle_input()
if choice is not None:
reset_cursor()
for _ in range(len(self.choices ) + 1 ):
move_cursor(1 , "UP" )
clear_line()
self.write_choice(snake_case__ , "\n" )
return choice
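# Hedged usage sketch: the class above is constructed with a prompt and a list
# of choices, and its final method renders the menu and returns the selected
# index. `BulletMenu` and `run` are assumed names for the obfuscated class and
# method:
#
#   menu = BulletMenu("Which framework?", ["pytorch", "tensorflow", "jax"])
#   selected_index = menu.run(default_choice=0)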
| 133 |
from typing import Any
class __lowerCAmelCase :
def __init__( self : List[Any] , snake_case__ : Any ):
"""simple docstring"""
_UpperCAmelCase = data
_UpperCAmelCase = None
class __lowerCAmelCase :
def __init__( self : Optional[Any] ):
"""simple docstring"""
_UpperCAmelCase = None
def UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
_UpperCAmelCase = self.head
while temp is not None:
print(temp.data , end=" " )
_UpperCAmelCase = temp.next
print()
def UpperCamelCase ( self : Any , snake_case__ : Any ):
"""simple docstring"""
_UpperCAmelCase = Node(snake_case__ )
_UpperCAmelCase = self.head
_UpperCAmelCase = new_node
def UpperCamelCase ( self : List[str] , snake_case__ : int , snake_case__ : Optional[Any] ):
"""simple docstring"""
if node_data_a == node_data_a:
return
else:
_UpperCAmelCase = self.head
while node_a is not None and node_a.data != node_data_a:
_UpperCAmelCase = node_a.next
_UpperCAmelCase = self.head
while node_a is not None and node_a.data != node_data_a:
_UpperCAmelCase = node_a.next
if node_a is None or node_a is None:
return
_UpperCAmelCase , _UpperCAmelCase = node_a.data, node_a.data
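# Reference sketch of the swap-by-value logic above with distinct parameter
# names ("LinkedList" is the intended name of the list class, as the __main__
# block below suggests; the annotation is a string so nothing is resolved at
# import time):
def swap_node_data(linked_list: "LinkedList", data_1: Any, data_2: Any) -> None:
    if data_1 == data_2:
        return
    node_1 = linked_list.head
    while node_1 is not None and node_1.data != data_1:
        node_1 = node_1.next
    node_2 = linked_list.head
    while node_2 is not None and node_2.data != data_2:
        node_2 = node_2.next
    if node_1 is None or node_2 is None:
        return  # at least one value was not found; leave the list unchanged
    node_1.data, node_2.data = node_2.data, node_1.data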
if __name__ == "__main__":
lowercase_ : Union[str, Any] = LinkedList()
for i in range(5, 0, -1):
ll.push(i)
ll.print_list()
ll.swap_nodes(1, 4)
print('After swapping')
ll.print_list()
| 133 | 1 |
'''simple docstring'''
from collections import OrderedDict
from ...utils import logging
from .auto_factory import _BaseAutoModelClass, _LazyAutoMapping, auto_class_update
from .configuration_auto import CONFIG_MAPPING_NAMES
SCREAMING_SNAKE_CASE_: Any =logging.get_logger(__name__)
SCREAMING_SNAKE_CASE_: List[Any] =OrderedDict(
[
# Base model mapping
('albert', 'FlaxAlbertModel'),
('bart', 'FlaxBartModel'),
('beit', 'FlaxBeitModel'),
('bert', 'FlaxBertModel'),
('big_bird', 'FlaxBigBirdModel'),
('blenderbot', 'FlaxBlenderbotModel'),
('blenderbot-small', 'FlaxBlenderbotSmallModel'),
('clip', 'FlaxCLIPModel'),
('distilbert', 'FlaxDistilBertModel'),
('electra', 'FlaxElectraModel'),
('gpt-sw3', 'FlaxGPT2Model'),
('gpt2', 'FlaxGPT2Model'),
('gpt_neo', 'FlaxGPTNeoModel'),
('gptj', 'FlaxGPTJModel'),
('longt5', 'FlaxLongT5Model'),
('marian', 'FlaxMarianModel'),
('mbart', 'FlaxMBartModel'),
('mt5', 'FlaxMT5Model'),
('opt', 'FlaxOPTModel'),
('pegasus', 'FlaxPegasusModel'),
('regnet', 'FlaxRegNetModel'),
('resnet', 'FlaxResNetModel'),
('roberta', 'FlaxRobertaModel'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormModel'),
('roformer', 'FlaxRoFormerModel'),
('t5', 'FlaxT5Model'),
('vision-text-dual-encoder', 'FlaxVisionTextDualEncoderModel'),
('vit', 'FlaxViTModel'),
('wav2vec2', 'FlaxWav2Vec2Model'),
('whisper', 'FlaxWhisperModel'),
('xglm', 'FlaxXGLMModel'),
('xlm-roberta', 'FlaxXLMRobertaModel'),
]
)
SCREAMING_SNAKE_CASE_: List[Any] =OrderedDict(
[
# Model for pre-training mapping
('albert', 'FlaxAlbertForPreTraining'),
('bart', 'FlaxBartForConditionalGeneration'),
('bert', 'FlaxBertForPreTraining'),
('big_bird', 'FlaxBigBirdForPreTraining'),
('electra', 'FlaxElectraForPreTraining'),
('longt5', 'FlaxLongT5ForConditionalGeneration'),
('mbart', 'FlaxMBartForConditionalGeneration'),
('mt5', 'FlaxMT5ForConditionalGeneration'),
('roberta', 'FlaxRobertaForMaskedLM'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForMaskedLM'),
('roformer', 'FlaxRoFormerForMaskedLM'),
('t5', 'FlaxT5ForConditionalGeneration'),
('wav2vec2', 'FlaxWav2Vec2ForPreTraining'),
('whisper', 'FlaxWhisperForConditionalGeneration'),
('xlm-roberta', 'FlaxXLMRobertaForMaskedLM'),
]
)
SCREAMING_SNAKE_CASE_: Any =OrderedDict(
[
# Model for Masked LM mapping
('albert', 'FlaxAlbertForMaskedLM'),
('bart', 'FlaxBartForConditionalGeneration'),
('bert', 'FlaxBertForMaskedLM'),
('big_bird', 'FlaxBigBirdForMaskedLM'),
('distilbert', 'FlaxDistilBertForMaskedLM'),
('electra', 'FlaxElectraForMaskedLM'),
('mbart', 'FlaxMBartForConditionalGeneration'),
('roberta', 'FlaxRobertaForMaskedLM'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForMaskedLM'),
('roformer', 'FlaxRoFormerForMaskedLM'),
('xlm-roberta', 'FlaxXLMRobertaForMaskedLM'),
]
)
SCREAMING_SNAKE_CASE_: int =OrderedDict(
[
# Model for Seq2Seq Causal LM mapping
('bart', 'FlaxBartForConditionalGeneration'),
('blenderbot', 'FlaxBlenderbotForConditionalGeneration'),
('blenderbot-small', 'FlaxBlenderbotSmallForConditionalGeneration'),
('encoder-decoder', 'FlaxEncoderDecoderModel'),
('longt5', 'FlaxLongT5ForConditionalGeneration'),
('marian', 'FlaxMarianMTModel'),
('mbart', 'FlaxMBartForConditionalGeneration'),
('mt5', 'FlaxMT5ForConditionalGeneration'),
('pegasus', 'FlaxPegasusForConditionalGeneration'),
('t5', 'FlaxT5ForConditionalGeneration'),
]
)
SCREAMING_SNAKE_CASE_: str =OrderedDict(
[
# Model for Image-classsification
('beit', 'FlaxBeitForImageClassification'),
('regnet', 'FlaxRegNetForImageClassification'),
('resnet', 'FlaxResNetForImageClassification'),
('vit', 'FlaxViTForImageClassification'),
]
)
SCREAMING_SNAKE_CASE_: str =OrderedDict(
[
('vision-encoder-decoder', 'FlaxVisionEncoderDecoderModel'),
]
)
SCREAMING_SNAKE_CASE_: List[Any] =OrderedDict(
[
# Model for Causal LM mapping
('bart', 'FlaxBartForCausalLM'),
('bert', 'FlaxBertForCausalLM'),
('big_bird', 'FlaxBigBirdForCausalLM'),
('electra', 'FlaxElectraForCausalLM'),
('gpt-sw3', 'FlaxGPT2LMHeadModel'),
('gpt2', 'FlaxGPT2LMHeadModel'),
('gpt_neo', 'FlaxGPTNeoForCausalLM'),
('gptj', 'FlaxGPTJForCausalLM'),
('opt', 'FlaxOPTForCausalLM'),
('roberta', 'FlaxRobertaForCausalLM'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForCausalLM'),
('xglm', 'FlaxXGLMForCausalLM'),
('xlm-roberta', 'FlaxXLMRobertaForCausalLM'),
]
)
SCREAMING_SNAKE_CASE_: Optional[int] =OrderedDict(
[
# Model for Sequence Classification mapping
('albert', 'FlaxAlbertForSequenceClassification'),
('bart', 'FlaxBartForSequenceClassification'),
('bert', 'FlaxBertForSequenceClassification'),
('big_bird', 'FlaxBigBirdForSequenceClassification'),
('distilbert', 'FlaxDistilBertForSequenceClassification'),
('electra', 'FlaxElectraForSequenceClassification'),
('mbart', 'FlaxMBartForSequenceClassification'),
('roberta', 'FlaxRobertaForSequenceClassification'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForSequenceClassification'),
('roformer', 'FlaxRoFormerForSequenceClassification'),
('xlm-roberta', 'FlaxXLMRobertaForSequenceClassification'),
]
)
SCREAMING_SNAKE_CASE_: Any =OrderedDict(
[
# Model for Question Answering mapping
('albert', 'FlaxAlbertForQuestionAnswering'),
('bart', 'FlaxBartForQuestionAnswering'),
('bert', 'FlaxBertForQuestionAnswering'),
('big_bird', 'FlaxBigBirdForQuestionAnswering'),
('distilbert', 'FlaxDistilBertForQuestionAnswering'),
('electra', 'FlaxElectraForQuestionAnswering'),
('mbart', 'FlaxMBartForQuestionAnswering'),
('roberta', 'FlaxRobertaForQuestionAnswering'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForQuestionAnswering'),
('roformer', 'FlaxRoFormerForQuestionAnswering'),
('xlm-roberta', 'FlaxXLMRobertaForQuestionAnswering'),
]
)
SCREAMING_SNAKE_CASE_: Any =OrderedDict(
[
# Model for Token Classification mapping
('albert', 'FlaxAlbertForTokenClassification'),
('bert', 'FlaxBertForTokenClassification'),
('big_bird', 'FlaxBigBirdForTokenClassification'),
('distilbert', 'FlaxDistilBertForTokenClassification'),
('electra', 'FlaxElectraForTokenClassification'),
('roberta', 'FlaxRobertaForTokenClassification'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForTokenClassification'),
('roformer', 'FlaxRoFormerForTokenClassification'),
('xlm-roberta', 'FlaxXLMRobertaForTokenClassification'),
]
)
SCREAMING_SNAKE_CASE_: Optional[Any] =OrderedDict(
[
# Model for Multiple Choice mapping
('albert', 'FlaxAlbertForMultipleChoice'),
('bert', 'FlaxBertForMultipleChoice'),
('big_bird', 'FlaxBigBirdForMultipleChoice'),
('distilbert', 'FlaxDistilBertForMultipleChoice'),
('electra', 'FlaxElectraForMultipleChoice'),
('roberta', 'FlaxRobertaForMultipleChoice'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForMultipleChoice'),
('roformer', 'FlaxRoFormerForMultipleChoice'),
('xlm-roberta', 'FlaxXLMRobertaForMultipleChoice'),
]
)
SCREAMING_SNAKE_CASE_: int =OrderedDict(
[
('bert', 'FlaxBertForNextSentencePrediction'),
]
)
SCREAMING_SNAKE_CASE_: List[Any] =OrderedDict(
[
('speech-encoder-decoder', 'FlaxSpeechEncoderDecoderModel'),
('whisper', 'FlaxWhisperForConditionalGeneration'),
]
)
SCREAMING_SNAKE_CASE_: Optional[int] =OrderedDict(
[
('whisper', 'FlaxWhisperForAudioClassification'),
]
)
FLAX_MODEL_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_MAPPING_NAMES)
FLAX_MODEL_FOR_PRETRAINING_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES)
FLAX_MODEL_FOR_MASKED_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES)
FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES
)
FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES)
FLAX_MODEL_FOR_CAUSAL_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES)
FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES
)
FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES
)
FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES
)
FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES
)
FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES
)
class FlaxAutoModel(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_MAPPING


FlaxAutoModel = auto_class_update(FlaxAutoModel)


class FlaxAutoModelForPreTraining(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_PRETRAINING_MAPPING


FlaxAutoModelForPreTraining = auto_class_update(FlaxAutoModelForPreTraining, head_doc="pretraining")


class FlaxAutoModelForCausalLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_CAUSAL_LM_MAPPING


FlaxAutoModelForCausalLM = auto_class_update(FlaxAutoModelForCausalLM, head_doc="causal language modeling")


class FlaxAutoModelForMaskedLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_MASKED_LM_MAPPING


FlaxAutoModelForMaskedLM = auto_class_update(FlaxAutoModelForMaskedLM, head_doc="masked language modeling")


class FlaxAutoModelForSeq2SeqLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING


FlaxAutoModelForSeq2SeqLM = auto_class_update(
    FlaxAutoModelForSeq2SeqLM, head_doc="sequence-to-sequence language modeling", checkpoint_for_example="t5-base"
)


class FlaxAutoModelForSequenceClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING


FlaxAutoModelForSequenceClassification = auto_class_update(
    FlaxAutoModelForSequenceClassification, head_doc="sequence classification"
)


class FlaxAutoModelForQuestionAnswering(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING


FlaxAutoModelForQuestionAnswering = auto_class_update(FlaxAutoModelForQuestionAnswering, head_doc="question answering")


class FlaxAutoModelForTokenClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING


FlaxAutoModelForTokenClassification = auto_class_update(
    FlaxAutoModelForTokenClassification, head_doc="token classification"
)


class FlaxAutoModelForMultipleChoice(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING


FlaxAutoModelForMultipleChoice = auto_class_update(FlaxAutoModelForMultipleChoice, head_doc="multiple choice")


class FlaxAutoModelForNextSentencePrediction(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING


FlaxAutoModelForNextSentencePrediction = auto_class_update(
    FlaxAutoModelForNextSentencePrediction, head_doc="next sentence prediction"
)


class FlaxAutoModelForImageClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING


FlaxAutoModelForImageClassification = auto_class_update(
    FlaxAutoModelForImageClassification, head_doc="image classification"
)


class FlaxAutoModelForVision2Seq(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING


FlaxAutoModelForVision2Seq = auto_class_update(FlaxAutoModelForVision2Seq, head_doc="vision-to-text modeling")


class FlaxAutoModelForSpeechSeq2Seq(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING


FlaxAutoModelForSpeechSeq2Seq = auto_class_update(
    FlaxAutoModelForSpeechSeq2Seq, head_doc="sequence-to-sequence speech-to-text modeling"
)
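# Hedged usage sketch (added for illustration; not part of the original module).
# Each FlaxAutoModel* class above resolves the concrete architecture from a
# checkpoint's config type. The checkpoint name below is only an example:
#
#   from transformers import FlaxAutoModelForSequenceClassification
#
#   model = FlaxAutoModelForSequenceClassification.from_pretrained("bert-base-uncased")
#   # -> instantiates FlaxBertForSequenceClassification via the mapping above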
'''simple docstring'''
import argparse
import pathlib
import fairseq
import torch
from fairseq.models.roberta import RobertaModel as FairseqRobertaModel
from fairseq.modules import TransformerSentenceEncoderLayer
from packaging import version
from transformers import XLMRobertaConfig, XLMRobertaXLForMaskedLM, XLMRobertaXLForSequenceClassification
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertSelfAttention,
BertSelfOutput,
)
from transformers.models.roberta.modeling_roberta import RobertaAttention
from transformers.utils import logging
if version.parse(fairseq.__version__) < version.parse('1.0.0a'):
raise Exception('requires fairseq >= 1.0.0a')
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

SAMPLE_TEXT = "Hello world! cécé herlolip"


def convert_xlm_roberta_xl_checkpoint_to_pytorch(
    roberta_checkpoint_path: str, pytorch_dump_folder_path: str, classification_head: bool
):
    """
    Copy/paste/tweak the fairseq RoBERTa weights into our BERT-style structure.
    """
    roberta = FairseqRobertaModel.from_pretrained(roberta_checkpoint_path)
    roberta.eval()  # disable dropout
    roberta_sent_encoder = roberta.model.encoder.sentence_encoder
    config = XLMRobertaConfig(
        vocab_size=roberta_sent_encoder.embed_tokens.num_embeddings,
        hidden_size=roberta.cfg.model.encoder_embed_dim,
        num_hidden_layers=roberta.cfg.model.encoder_layers,
        num_attention_heads=roberta.cfg.model.encoder_attention_heads,
        intermediate_size=roberta.cfg.model.encoder_ffn_embed_dim,
        max_position_embeddings=514,
        type_vocab_size=1,
        layer_norm_eps=1e-5,  # PyTorch default used in fairseq
    )
    if classification_head:
        config.num_labels = roberta.model.classification_heads["mnli"].out_proj.weight.shape[0]

    print("Our RoBERTa config:", config)

    model = XLMRobertaXLForSequenceClassification(config) if classification_head else XLMRobertaXLForMaskedLM(config)
    model.eval()

    # Now let's copy all the weights.
    # Embeddings
    model.roberta.embeddings.word_embeddings.weight = roberta_sent_encoder.embed_tokens.weight
    model.roberta.embeddings.position_embeddings.weight = roberta_sent_encoder.embed_positions.weight
    model.roberta.embeddings.token_type_embeddings.weight.data = torch.zeros_like(
        model.roberta.embeddings.token_type_embeddings.weight
    )  # just zero them out b/c RoBERTa doesn't use them.

    model.roberta.encoder.LayerNorm.weight = roberta_sent_encoder.layer_norm.weight
    model.roberta.encoder.LayerNorm.bias = roberta_sent_encoder.layer_norm.bias

    for i in range(config.num_hidden_layers):
        # Encoder: start of layer
        layer: BertLayer = model.roberta.encoder.layer[i]
        roberta_layer: TransformerSentenceEncoderLayer = roberta_sent_encoder.layers[i]

        attention: RobertaAttention = layer.attention
        attention.self_attn_layer_norm.weight = roberta_layer.self_attn_layer_norm.weight
        attention.self_attn_layer_norm.bias = roberta_layer.self_attn_layer_norm.bias

        # self attention
        self_attn: BertSelfAttention = layer.attention.self
        assert (
            roberta_layer.self_attn.k_proj.weight.data.shape
            == roberta_layer.self_attn.q_proj.weight.data.shape
            == roberta_layer.self_attn.v_proj.weight.data.shape
            == torch.Size((config.hidden_size, config.hidden_size))
        )

        self_attn.query.weight.data = roberta_layer.self_attn.q_proj.weight
        self_attn.query.bias.data = roberta_layer.self_attn.q_proj.bias
        self_attn.key.weight.data = roberta_layer.self_attn.k_proj.weight
        self_attn.key.bias.data = roberta_layer.self_attn.k_proj.bias
        self_attn.value.weight.data = roberta_layer.self_attn.v_proj.weight
        self_attn.value.bias.data = roberta_layer.self_attn.v_proj.bias

        # self-attention output
        self_output: BertSelfOutput = layer.attention.output
        assert self_output.dense.weight.shape == roberta_layer.self_attn.out_proj.weight.shape
        self_output.dense.weight = roberta_layer.self_attn.out_proj.weight
        self_output.dense.bias = roberta_layer.self_attn.out_proj.bias

        # this one is final layer norm
        layer.LayerNorm.weight = roberta_layer.final_layer_norm.weight
        layer.LayerNorm.bias = roberta_layer.final_layer_norm.bias

        # intermediate (fairseq's fc1 feeds the intermediate dense layer)
        intermediate: BertIntermediate = layer.intermediate
        assert intermediate.dense.weight.shape == roberta_layer.fc1.weight.shape
        intermediate.dense.weight = roberta_layer.fc1.weight
        intermediate.dense.bias = roberta_layer.fc1.bias

        # output (fairseq's fc2 feeds the output dense layer)
        bert_output: BertOutput = layer.output
        assert bert_output.dense.weight.shape == roberta_layer.fc2.weight.shape
        bert_output.dense.weight = roberta_layer.fc2.weight
        bert_output.dense.bias = roberta_layer.fc2.bias
        # end of layer

    if classification_head:
        model.classifier.dense.weight = roberta.model.classification_heads["mnli"].dense.weight
        model.classifier.dense.bias = roberta.model.classification_heads["mnli"].dense.bias
        model.classifier.out_proj.weight = roberta.model.classification_heads["mnli"].out_proj.weight
        model.classifier.out_proj.bias = roberta.model.classification_heads["mnli"].out_proj.bias
    else:
        # LM Head
        model.lm_head.dense.weight = roberta.model.encoder.lm_head.dense.weight
        model.lm_head.dense.bias = roberta.model.encoder.lm_head.dense.bias
        model.lm_head.layer_norm.weight = roberta.model.encoder.lm_head.layer_norm.weight
        model.lm_head.layer_norm.bias = roberta.model.encoder.lm_head.layer_norm.bias
        model.lm_head.decoder.weight = roberta.model.encoder.lm_head.weight
        model.lm_head.decoder.bias = roberta.model.encoder.lm_head.bias

    # Let's check that we get the same results.
    input_ids: torch.Tensor = roberta.encode(SAMPLE_TEXT).unsqueeze(0)  # batch of size 1

    our_output = model(input_ids)[0]
    if classification_head:
        their_output = roberta.model.classification_heads["mnli"](roberta.extract_features(input_ids))
    else:
        their_output = roberta.model(input_ids)[0]
    print(our_output.shape, their_output.shape)
    max_absolute_diff = torch.max(torch.abs(our_output - their_output)).item()
    print(f"max_absolute_diff = {max_absolute_diff}")  # ~ 1e-7
    success = torch.allclose(our_output, their_output, atol=1e-3)
    print("Do both models output the same tensors?", "🔥" if success else "💩")
    if not success:
        raise Exception("Something went wRoNg")

    pathlib.Path(pytorch_dump_folder_path).mkdir(parents=True, exist_ok=True)
    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--roberta_checkpoint_path", default=None, type=str, required=True, help="Path the official PyTorch dump."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    parser.add_argument(
        "--classification_head", action="store_true", help="Whether to convert a final classification head."
    )
    args = parser.parse_args()
    convert_xlm_roberta_xl_checkpoint_to_pytorch(
        args.roberta_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
    )
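# Hedged CLI sketch (added): a typical invocation of the converter above; the
# checkpoint directory is a placeholder for a fairseq XLM-R XL/XXL dump.
#
#   python convert_xlm_roberta_xl_original_pytorch_checkpoint_to_pytorch.py \
#       --roberta_checkpoint_path ./xlmr.xl \
#       --pytorch_dump_folder_path ./xlm-roberta-xl \
#       --classification_head   # only if the checkpoint carries an "mnli" head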
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {"configuration_yolos": ["YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP", "YolosConfig", "YolosOnnxConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_yolos"] = ["YolosFeatureExtractor"]
    _import_structure["image_processing_yolos"] = ["YolosImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_yolos"] = [
        "YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST",
        "YolosForObjectDetection",
        "YolosModel",
        "YolosPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_yolos import YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP, YolosConfig, YolosOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_yolos import YolosFeatureExtractor
from .image_processing_yolos import YolosImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_yolos import (
YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST,
YolosForObjectDetection,
YolosModel,
YolosPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
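# Hedged note (added): _LazyModule replaces this module object in sys.modules,
# so importing the package stays cheap and the heavy torch/vision imports only
# happen on first attribute access, e.g.:
#
#   from transformers.models import yolos    # nothing heavy imported yet
#   detector_cls = yolos.YolosForObjectDetection  # modeling_yolos loads here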
from __future__ import annotations
import sys
from collections import deque
from typing import Generic, TypeVar
T = TypeVar("T")


class LRUCache(Generic[T]):
    dq_store: deque[T]  # Cache store of keys
    key_reference: set[T]  # References of the keys in cache
    _MAX_CAPACITY: int = 10  # Maximum capacity of cache

    def __init__(self, n: int) -> None:
        self.dq_store = deque()
        self.key_reference = set()
        if not n:
            LRUCache._MAX_CAPACITY = sys.maxsize
        elif n < 0:
            raise ValueError("n should be an integer greater than 0.")
        else:
            LRUCache._MAX_CAPACITY = n

    def refer(self, x: T) -> None:
        if x not in self.key_reference:
            if len(self.dq_store) == LRUCache._MAX_CAPACITY:
                # Evict the least recently used key from the back of the deque.
                last_element = self.dq_store.pop()
                self.key_reference.remove(last_element)
        else:
            self.dq_store.remove(x)

        self.dq_store.appendleft(x)
        self.key_reference.add(x)

    def display(self) -> None:
        for k in self.dq_store:
            print(k)

    def __repr__(self) -> str:
        return f"LRUCache({self._MAX_CAPACITY}) => {list(self.dq_store)}"


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    lru_cache = LRUCache(4)
    lru_cache.refer("A")
    lru_cache.refer(2)
    lru_cache.refer(3)
    lru_cache.refer("A")
    lru_cache.refer(4)
    lru_cache.refer(5)
    lru_cache.display()

    print(lru_cache)
    assert str(lru_cache) == "LRUCache(4) => [5, 4, 'A', 3]"
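    # Step-by-step trace of the calls above (added for clarity):
    #   refer("A") -> ["A"]
    #   refer(2)   -> [2, "A"]
    #   refer(3)   -> [3, 2, "A"]
    #   refer("A") -> ["A", 3, 2]      # already cached: moved to the front
    #   refer(4)   -> [4, "A", 3, 2]   # cache now at capacity 4
    #   refer(5)   -> [5, 4, "A", 3]   # least recently used key (2) evicted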
'''simple docstring'''
import torch
def main():
    if torch.cuda.is_available():
        num_gpus = torch.cuda.device_count()
    else:
        num_gpus = 0
    print(f"Successfully ran on {num_gpus} GPUs")


if __name__ == "__main__":
    main()
'''simple docstring'''
import itertools
import json
import linecache
import os
import pickle
import re
import socket
import string
from collections import Counter
from logging import getLogger
from pathlib import Path
from typing import Callable, Dict, Iterable, List
import git
import torch
from torch.utils.data import Dataset
from transformers import BartTokenizer, RagTokenizer, T5Tokenizer
def _lowercase ( __A ,__A ,__A ,__A ,__A=True ,__A="pt" ):
'''simple docstring'''
__UpperCamelCase = {"""add_prefix_space""": True} if isinstance(__A ,__A ) and not line.startswith(""" """ ) else {}
__UpperCamelCase = padding_side
return tokenizer(
[line] ,max_length=__A ,padding="""max_length""" if pad_to_max_length else None ,truncation=__A ,return_tensors=__A ,add_special_tokens=__A ,**__A ,)
def _lowercase ( __A ,__A ,__A=None ,):
'''simple docstring'''
__UpperCamelCase = input_ids.ne(__A ).any(dim=0 )
if attention_mask is None:
return input_ids[:, keep_column_mask]
else:
return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])
class Seq2SeqDataset(Dataset):
    def __init__(
        self,
        tokenizer,
        data_dir,
        max_source_length,
        max_target_length,
        type_path="train",
        n_obs=None,
        src_lang=None,
        tgt_lang=None,
        prefix="",
    ):
        super().__init__()
        self.src_file = Path(data_dir).joinpath(type_path + ".source")
        self.tgt_file = Path(data_dir).joinpath(type_path + ".target")
        self.src_lens = self.get_char_lens(self.src_file)
        self.max_source_length = max_source_length
        self.max_target_length = max_target_length
        assert min(self.src_lens) > 0, f"found empty line in {self.src_file}"
        self.tokenizer = tokenizer
        self.prefix = prefix
        if n_obs is not None:
            self.src_lens = self.src_lens[:n_obs]
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang

    def __len__(self):
        return len(self.src_lens)

    def __getitem__(self, index) -> Dict[str, torch.Tensor]:
        index = index + 1  # linecache starts at 1
        source_line = self.prefix + linecache.getline(str(self.src_file), index).rstrip("\n")
        tgt_line = linecache.getline(str(self.tgt_file), index).rstrip("\n")
        assert source_line, f"empty source line for index {index}"
        assert tgt_line, f"empty tgt line for index {index}"

        # Need to add eos token manually for T5
        if isinstance(self.tokenizer, T5Tokenizer):
            source_line += self.tokenizer.eos_token
            tgt_line += self.tokenizer.eos_token

        # Pad source and target to the right
        source_tokenizer = (
            self.tokenizer.question_encoder if isinstance(self.tokenizer, RagTokenizer) else self.tokenizer
        )
        target_tokenizer = self.tokenizer.generator if isinstance(self.tokenizer, RagTokenizer) else self.tokenizer

        source_inputs = encode_line(source_tokenizer, source_line, self.max_source_length, "right")
        target_inputs = encode_line(target_tokenizer, tgt_line, self.max_target_length, "right")

        source_ids = source_inputs["input_ids"].squeeze()
        target_ids = target_inputs["input_ids"].squeeze()
        src_mask = source_inputs["attention_mask"].squeeze()
        return {
            "input_ids": source_ids,
            "attention_mask": src_mask,
            "decoder_input_ids": target_ids,
        }

    @staticmethod
    def get_char_lens(data_file):
        return [len(x) for x in Path(data_file).open().readlines()]

    def collate_fn(self, batch) -> Dict[str, torch.Tensor]:
        input_ids = torch.stack([x["input_ids"] for x in batch])
        masks = torch.stack([x["attention_mask"] for x in batch])
        target_ids = torch.stack([x["decoder_input_ids"] for x in batch])
        tgt_pad_token_id = (
            self.tokenizer.generator.pad_token_id
            if isinstance(self.tokenizer, RagTokenizer)
            else self.tokenizer.pad_token_id
        )
        src_pad_token_id = (
            self.tokenizer.question_encoder.pad_token_id
            if isinstance(self.tokenizer, RagTokenizer)
            else self.tokenizer.pad_token_id
        )
        y = trim_batch(target_ids, tgt_pad_token_id)
        source_ids, source_mask = trim_batch(input_ids, src_pad_token_id, attention_mask=masks)
        batch = {
            "input_ids": source_ids,
            "attention_mask": source_mask,
            "decoder_input_ids": y,
        }
        return batch
logger = getLogger(__name__)
def flatten_list(summary_ids: List[List]):
    return list(itertools.chain.from_iterable(summary_ids))


def save_git_info(folder_path: str) -> None:
    """Save git information to output_dir/git_log.json."""
    repo_infos = get_git_info()
    save_json(repo_infos, os.path.join(folder_path, "git_log.json"))


def save_json(content, path, indent=4, **json_dump_kwargs):
    with open(path, "w") as f:
        json.dump(content, f, indent=indent, **json_dump_kwargs)


def load_json(path):
    with open(path) as f:
        return json.load(f)


def get_git_info():
    repo = git.Repo(search_parent_directories=True)
    repo_infos = {
        "repo_id": str(repo),
        "repo_sha": str(repo.head.object.hexsha),
        "repo_branch": str(repo.active_branch),
        "hostname": str(socket.gethostname()),
    }
    return repo_infos


def lmap(f: Callable, x: Iterable) -> List:
    """list(map(f, x))"""
    return list(map(f, x))


def pickle_save(obj, path):
    """pickle.dump(obj, path)"""
    with open(path, "wb") as f:
        return pickle.dump(obj, f)
def normalize_answer(s):
    """Lower text and remove punctuation, articles and extra whitespace."""

    def remove_articles(text):
        return re.sub(r"\b(a|an|the)\b", " ", text)

    def white_space_fix(text):
        return " ".join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s))))


def f1_score(prediction, ground_truth):
    prediction_tokens = normalize_answer(prediction).split()
    ground_truth_tokens = normalize_answer(ground_truth).split()
    common = Counter(prediction_tokens) & Counter(ground_truth_tokens)
    num_same = sum(common.values())
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(prediction_tokens)
    recall = 1.0 * num_same / len(ground_truth_tokens)
    f1 = (2 * precision * recall) / (precision + recall)
    return f1


def exact_match_score(prediction, ground_truth):
    return normalize_answer(prediction) == normalize_answer(ground_truth)


def calculate_exact_match(output_lns: List[str], reference_lns: List[str]) -> Dict:
    assert len(output_lns) == len(reference_lns)
    em = 0
    for hypo, pred in zip(output_lns, reference_lns):
        em += exact_match_score(hypo, pred)
    if len(output_lns) > 0:
        em /= len(output_lns)
    return {"em": em}


def is_rag_model(model_prefix):
    return model_prefix.startswith("rag")


def set_extra_model_params(extra_params, hparams, config):
    equivalent_param = {p: p for p in extra_params}
    # T5 models don't have `dropout` param, they have `dropout_rate` instead
    equivalent_param["dropout"] = "dropout_rate"
    for p in extra_params:
        if getattr(hparams, p, None):
            if not hasattr(config, p) and not hasattr(config, equivalent_param[p]):
                logger.info("config doesn't have a `{}` attribute".format(p))
                delattr(hparams, p)
                continue
            set_p = p if hasattr(config, p) else equivalent_param[p]
            setattr(config, set_p, getattr(hparams, p))
            delattr(hparams, p)
    return hparams, config
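# Worked example for the QA metrics above (added for clarity):
#   calculate_exact_match(["New York City", "the cat"], ["New York City", "a dog"])
# normalizes both sides (lowercasing, stripping punctuation and articles), so
# the first pair matches and the second does not, giving {"em": 0.5}; and
# f1_score("new york city", "york city area") shares 2 of 3 tokens on each
# side, so precision = recall = 2/3 and F1 = 2/3.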
import math
def sieve(n: int) -> list[int]:
    """Segmented sieve of Eratosthenes: return all primes <= n."""
    in_prime = []
    start = 2
    end = int(math.sqrt(n))  # Size of every segment
    temp = [True] * (end + 1)
    prime = []

    # Ordinary sieve on the first segment [2, sqrt(n)].
    while start <= end:
        if temp[start] is True:
            in_prime.append(start)
            for i in range(start * start, end + 1, start):
                temp[i] = False
        start += 1
    prime += in_prime

    low = end + 1
    high = min(2 * end, n)

    # Sieve the remaining range segment by segment, reusing the small primes.
    while low <= n:
        temp = [True] * (high - low + 1)
        for each in in_prime:
            t = math.floor(low / each) * each
            if t < low:
                t += each
            for j in range(t, high + 1, each):
                temp[j - low] = False

        for j in range(len(temp)):
            if temp[j] is True:
                prime.append(j + low)

        low = high + 1
        high = min(high + end, n)

    return prime
print(sieve(1_0**6))
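if __name__ == "__main__":
    # Added sanity check (hedged): the segmented sieve should agree with plain
    # trial division on a small range.
    def _is_prime(k: int) -> bool:
        return k >= 2 and all(k % d != 0 for d in range(2, int(math.sqrt(k)) + 1))

    assert sieve(100) == [k for k in range(2, 101) if _is_prime(k)]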
import dataclasses
import json
import warnings
from dataclasses import dataclass, field
from time import time
from typing import List
from ..utils import logging
A : Dict = logging.get_logger(__name__)
def __lowerCamelCase ( __a :int=None , __a :Optional[Any]=None ) -> int:
"""simple docstring"""
return field(default_factory=lambda: default , metadata=__a )
@dataclass
class A :
'''simple docstring'''
__lowerCamelCase : List[str] = list_field(
default=[] , metadata={
'''help''': (
'''Model checkpoints to be provided to the AutoModel classes. Leave blank to benchmark the base version'''
''' of all available models'''
)
} , )
__lowerCamelCase : List[int] = list_field(
default=[8] , metadata={'''help''': '''List of batch sizes for which memory and time performance will be evaluated'''} )
__lowerCamelCase : List[int] = list_field(
default=[8, 32, 128, 512] , metadata={'''help''': '''List of sequence lengths for which memory and time performance will be evaluated'''} , )
__lowerCamelCase : bool = field(
default=SCREAMING_SNAKE_CASE , metadata={'''help''': '''Whether to benchmark inference of model. Inference can be disabled via --no-inference.'''} , )
__lowerCamelCase : bool = field(
default=SCREAMING_SNAKE_CASE , metadata={'''help''': '''Whether to run on available cuda devices. Cuda can be disabled via --no-cuda.'''} , )
__lowerCamelCase : bool = field(
default=SCREAMING_SNAKE_CASE , metadata={'''help''': '''Whether to run on available tpu devices. TPU can be disabled via --no-tpu.'''} )
__lowerCamelCase : bool = field(default=SCREAMING_SNAKE_CASE , metadata={'''help''': '''Use FP16 to accelerate inference.'''} )
__lowerCamelCase : bool = field(default=SCREAMING_SNAKE_CASE , metadata={'''help''': '''Benchmark training of model'''} )
__lowerCamelCase : bool = field(default=SCREAMING_SNAKE_CASE , metadata={'''help''': '''Verbose memory tracing'''} )
__lowerCamelCase : bool = field(
default=SCREAMING_SNAKE_CASE , metadata={'''help''': '''Whether to perform speed measurements. Speed measurements can be disabled via --no-speed.'''} , )
__lowerCamelCase : bool = field(
default=SCREAMING_SNAKE_CASE , metadata={
'''help''': '''Whether to perform memory measurements. Memory measurements can be disabled via --no-memory'''
} , )
__lowerCamelCase : bool = field(default=SCREAMING_SNAKE_CASE , metadata={'''help''': '''Trace memory line by line'''} )
__lowerCamelCase : bool = field(default=SCREAMING_SNAKE_CASE , metadata={'''help''': '''Save result to a CSV file'''} )
__lowerCamelCase : bool = field(default=SCREAMING_SNAKE_CASE , metadata={'''help''': '''Save all print statements in a log file'''} )
__lowerCamelCase : bool = field(default=SCREAMING_SNAKE_CASE , metadata={'''help''': '''Whether to print environment information'''} )
__lowerCamelCase : bool = field(
default=SCREAMING_SNAKE_CASE , metadata={
'''help''': (
'''Whether to use multiprocessing for memory and speed measurement. It is highly recommended to use'''
''' multiprocessing for accurate CPU and GPU memory measurements. This option should only be disabled'''
''' for debugging / testing and on TPU.'''
)
} , )
__lowerCamelCase : str = field(
default=F'''inference_time_{round(time() )}.csv''' , metadata={'''help''': '''CSV filename used if saving time results to csv.'''} , )
__lowerCamelCase : str = field(
default=F'''inference_memory_{round(time() )}.csv''' , metadata={'''help''': '''CSV filename used if saving memory results to csv.'''} , )
__lowerCamelCase : str = field(
default=F'''train_time_{round(time() )}.csv''' , metadata={'''help''': '''CSV filename used if saving time results to csv for training.'''} , )
__lowerCamelCase : str = field(
default=F'''train_memory_{round(time() )}.csv''' , metadata={'''help''': '''CSV filename used if saving memory results to csv for training.'''} , )
__lowerCamelCase : str = field(
default=F'''env_info_{round(time() )}.csv''' , metadata={'''help''': '''CSV filename used if saving environment information.'''} , )
__lowerCamelCase : str = field(
default=F'''log_{round(time() )}.csv''' , metadata={'''help''': '''Log filename used if print statements are saved in log.'''} , )
__lowerCamelCase : int = field(default=3 , metadata={'''help''': '''Times an experiment will be run.'''} )
__lowerCamelCase : bool = field(
default=SCREAMING_SNAKE_CASE , metadata={
'''help''': (
'''Instead of loading the model as defined in `config.architectures` if exists, just load the pretrain'''
''' model weights.'''
)
} , )
def a_ ( self : Dict ) -> Union[str, Any]:
"""simple docstring"""
warnings.warn(
f'The class {self.__class__} is deprecated. Hugging Face Benchmarking utils'
""" are deprecated in general and it is advised to use external Benchmarking libraries """
""" to benchmark Transformer models.""" , __lowerCAmelCase , )
def a_ ( self : Union[str, Any] ) -> List[str]:
"""simple docstring"""
return json.dumps(dataclasses.asdict(self ) , indent=2 )
@property
def a_ ( self : Tuple ) -> List[str]:
"""simple docstring"""
if len(self.models ) <= 0:
raise ValueError(
"""Please make sure you provide at least one model name / model identifier, *e.g.* `--models"""
""" bert-base-cased` or `args.models = ['bert-base-cased'].""" )
return self.models
@property
def a_ ( self : Union[str, Any] ) -> Optional[Any]:
"""simple docstring"""
if not self.multi_process:
return False
elif self.is_tpu:
logger.info("""Multiprocessing is currently not possible on TPU.""" )
return False
else:
return True
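# Hedged usage sketch (added): these arguments are normally filled from the
# command line through a framework-specific subclass. The class and function
# names below exist in transformers, but this snippet is illustrative only.
#
#   from transformers import HfArgumentParser, PyTorchBenchmark, PyTorchBenchmarkArguments
#
#   parser = HfArgumentParser(PyTorchBenchmarkArguments)
#   benchmark_args = parser.parse_args_into_dataclasses()[0]
#   results = PyTorchBenchmark(args=benchmark_args).run()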
def longest_distance(graph):
    indegree = [0] * len(graph)
    queue = []
    long_dist = [1] * len(graph)  # longest path (in vertices) ending at each node

    for values in graph.values():
        for i in values:
            indegree[i] += 1

    for i in range(len(graph)):
        if indegree[i] == 0:
            queue.append(i)

    while queue:
        vertex = queue.pop(0)
        for x in graph[vertex]:
            indegree[x] -= 1

            if long_dist[vertex] + 1 > long_dist[x]:
                long_dist[x] = long_dist[vertex] + 1

            if indegree[x] == 0:
                queue.append(x)

    print(max(long_dist))
# Adjacency list of Graph
graph = {0: [2, 3, 4], 1: [2, 7], 2: [5], 3: [5, 7], 4: [7], 5: [6], 6: [7], 7: []}
longest_distance(graph)
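# Worked example (added): in the DAG above, a longest chain is
# 0 -> 2 -> 5 -> 6 -> 7, which visits 5 vertices, so the script prints 5.
# long_dist[v] counts the vertices on the longest path ending at v.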
from __future__ import annotations

RADIX = 10


def radix_sort(list_of_ints: list[int]) -> list[int]:
    """
    >>> radix_sort([170, 45, 75, 90, 802, 24, 2, 66])
    [2, 24, 45, 66, 75, 90, 170, 802]
    """
    placement = 1
    max_digit = max(list_of_ints)
    while placement <= max_digit:
        # declare and initialize empty buckets
        buckets: list[list] = [[] for _ in range(RADIX)]
        # split list_of_ints between the buckets
        for i in list_of_ints:
            tmp = int((i / placement) % RADIX)
            buckets[tmp].append(i)
        # put each buckets' contents into list_of_ints
        a = 0
        for b in range(RADIX):
            for i in buckets[b]:
                list_of_ints[a] = i
                a += 1
        # move to next digit
        placement *= RADIX
    return list_of_ints
if __name__ == "__main__":
import doctest
doctest.testmod()
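# Added usage note (hedged): this implementation assumes non-negative integers,
# since digits are extracted with `int((i / placement) % RADIX)`; it sorts the
# list in place and also returns it.
if __name__ == "__main__":
    assert radix_sort([170, 45, 75, 90, 802, 24, 2, 66]) == [2, 24, 45, 66, 75, 90, 170, 802]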
import fire
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoTokenizer
from utils import Seq2SeqDataset, pickle_save
def save_len_file(
    tokenizer_name, data_dir, max_source_length=1024, max_target_length=1024, consider_target=False, **kwargs
):
    """Save max(src_len, tgt_len) for each example to allow dynamic batching."""
    tok = AutoTokenizer.from_pretrained(tokenizer_name)
    train_ds = Seq2SeqDataset(tok, data_dir, max_source_length, max_target_length, type_path="train", **kwargs)
    pad = tok.pad_token_id

    def get_lens(ds):
        dl = tqdm(
            DataLoader(ds, batch_size=512, num_workers=8, shuffle=False, collate_fn=ds.collate_fn),
            desc=str(ds.len_file),
        )
        max_lens = []
        for batch in dl:
            src_lens = batch["input_ids"].ne(pad).sum(1).tolist()
            tgt_lens = batch["labels"].ne(pad).sum(1).tolist()
            if consider_target:
                for src, tgt in zip(src_lens, tgt_lens):
                    max_lens.append(max(src, tgt))
            else:
                max_lens.extend(src_lens)
        return max_lens

    train_lens = get_lens(train_ds)
    val_ds = Seq2SeqDataset(tok, data_dir, max_source_length, max_target_length, type_path="val", **kwargs)
    val_lens = get_lens(val_ds)
    pickle_save(train_lens, train_ds.len_file)
    pickle_save(val_lens, val_ds.len_file)
if __name__ == "__main__":
fire.Fire(save_len_file)
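# Hedged CLI sketch (added): fire turns the function's parameters into flags.
# The tokenizer name and data directory below are placeholders.
#
#   python save_len_file.py --tokenizer_name facebook/bart-large \
#       --data_dir ./xsum --max_source_length 1024 --max_target_length 56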
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
UpperCamelCase = logging.get_logger(__name__)
if is_vision_available():
import PIL
class snake_case_ ( __A ):
__A : str = ["pixel_values"]
def __init__( self : int , lowercase_ : bool = True , lowercase_ : Dict[str, int] = None , lowercase_ : PILImageResampling = PILImageResampling.BICUBIC , lowercase_ : bool = True , lowercase_ : Dict[str, int] = None , lowercase_ : bool = True , lowercase_ : Union[int, float] = 1 / 2_55 , lowercase_ : bool = True , lowercase_ : Optional[Union[float, List[float]]] = None , lowercase_ : Optional[Union[float, List[float]]] = None , lowercase_ : bool = True , **lowercase_ : Union[str, Any] , ) -> None:
super().__init__(**lowercase_ )
lowercase__ : Tuple = size if size is not None else {"shortest_edge": 2_24}
lowercase__ : Tuple = get_size_dict(lowercase_ , default_to_square=lowercase_ )
lowercase__ : List[str] = crop_size if crop_size is not None else {"height": 2_24, "width": 2_24}
lowercase__ : Tuple = get_size_dict(lowercase_ , default_to_square=lowercase_ , param_name="crop_size" )
lowercase__ : Dict = do_resize
lowercase__ : List[Any] = size
lowercase__ : int = resample
lowercase__ : Union[str, Any] = do_center_crop
lowercase__ : Optional[int] = crop_size
lowercase__ : List[str] = do_rescale
lowercase__ : int = rescale_factor
lowercase__ : List[Any] = do_normalize
lowercase__ : Union[str, Any] = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
lowercase__ : str = image_std if image_std is not None else OPENAI_CLIP_STD
lowercase__ : Dict = do_convert_rgb
def __UpperCamelCase ( self : Optional[Any] , lowercase_ : np.ndarray , lowercase_ : Dict[str, int] , lowercase_ : PILImageResampling = PILImageResampling.BICUBIC , lowercase_ : Optional[Union[str, ChannelDimension]] = None , **lowercase_ : Union[str, Any] , ) -> np.ndarray:
lowercase__ : str = get_size_dict(lowercase_ , default_to_square=lowercase_ )
if "shortest_edge" not in size:
raise ValueError(F'''The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}''' )
lowercase__ : Dict = get_resize_output_image_size(lowercase_ , size=size["shortest_edge"] , default_to_square=lowercase_ )
return resize(lowercase_ , size=lowercase_ , resample=lowercase_ , data_format=lowercase_ , **lowercase_ )
def __UpperCamelCase ( self : int , lowercase_ : np.ndarray , lowercase_ : Dict[str, int] , lowercase_ : Optional[Union[str, ChannelDimension]] = None , **lowercase_ : int , ) -> np.ndarray:
lowercase__ : Optional[Any] = get_size_dict(lowercase_ )
if "height" not in size or "width" not in size:
raise ValueError(F'''The `size` parameter must contain the keys (height, width). Got {size.keys()}''' )
return center_crop(lowercase_ , size=(size["height"], size["width"]) , data_format=lowercase_ , **lowercase_ )
def __UpperCamelCase ( self : Optional[Any] , lowercase_ : np.ndarray , lowercase_ : Union[int, float] , lowercase_ : Optional[Union[str, ChannelDimension]] = None , **lowercase_ : Optional[Any] , ) -> Any:
return rescale(lowercase_ , scale=lowercase_ , data_format=lowercase_ , **lowercase_ )
def __UpperCamelCase ( self : str , lowercase_ : np.ndarray , lowercase_ : Union[float, List[float]] , lowercase_ : Union[float, List[float]] , lowercase_ : Optional[Union[str, ChannelDimension]] = None , **lowercase_ : str , ) -> np.ndarray:
return normalize(lowercase_ , mean=lowercase_ , std=lowercase_ , data_format=lowercase_ , **lowercase_ )
def __UpperCamelCase ( self : Optional[Any] , lowercase_ : ImageInput , lowercase_ : bool = None , lowercase_ : Dict[str, int] = None , lowercase_ : PILImageResampling = None , lowercase_ : bool = None , lowercase_ : int = None , lowercase_ : bool = None , lowercase_ : float = None , lowercase_ : bool = None , lowercase_ : Optional[Union[float, List[float]]] = None , lowercase_ : Optional[Union[float, List[float]]] = None , lowercase_ : bool = None , lowercase_ : Optional[Union[str, TensorType]] = None , lowercase_ : Optional[ChannelDimension] = ChannelDimension.FIRST , **lowercase_ : Union[str, Any] , ) -> PIL.Image.Image:
lowercase__ : int = do_resize if do_resize is not None else self.do_resize
lowercase__ : Dict = size if size is not None else self.size
lowercase__ : List[Any] = get_size_dict(lowercase_ , param_name="size" , default_to_square=lowercase_ )
lowercase__ : Dict = resample if resample is not None else self.resample
lowercase__ : int = do_center_crop if do_center_crop is not None else self.do_center_crop
lowercase__ : Dict = crop_size if crop_size is not None else self.crop_size
lowercase__ : List[str] = get_size_dict(lowercase_ , param_name="crop_size" , default_to_square=lowercase_ )
lowercase__ : int = do_rescale if do_rescale is not None else self.do_rescale
lowercase__ : Dict = rescale_factor if rescale_factor is not None else self.rescale_factor
lowercase__ : Union[str, Any] = do_normalize if do_normalize is not None else self.do_normalize
lowercase__ : int = image_mean if image_mean is not None else self.image_mean
lowercase__ : List[str] = image_std if image_std is not None else self.image_std
lowercase__ : Union[str, Any] = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
lowercase__ : Union[str, Any] = make_list_of_images(lowercase_ )
if not valid_images(lowercase_ ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
if do_resize and size is None:
raise ValueError("Size must be specified if do_resize is True." )
if do_center_crop and crop_size is None:
raise ValueError("Crop size must be specified if do_center_crop is True." )
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True." )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("Image mean and std must be specified if do_normalize is True." )
# PIL RGBA images are converted to RGB
if do_convert_rgb:
lowercase__ : Dict = [convert_to_rgb(lowercase_ ) for image in images]
# All transformations expect numpy arrays.
lowercase__ : Optional[Any] = [to_numpy_array(lowercase_ ) for image in images]
if do_resize:
lowercase__ : List[Any] = [self.resize(image=lowercase_ , size=lowercase_ , resample=lowercase_ ) for image in images]
if do_center_crop:
lowercase__ : int = [self.center_crop(image=lowercase_ , size=lowercase_ ) for image in images]
if do_rescale:
lowercase__ : str = [self.rescale(image=lowercase_ , scale=lowercase_ ) for image in images]
if do_normalize:
lowercase__ : Optional[int] = [self.normalize(image=lowercase_ , mean=lowercase_ , std=lowercase_ ) for image in images]
lowercase__ : Optional[Any] = [to_channel_dimension_format(lowercase_ , lowercase_ ) for image in images]
lowercase__ : List[str] = {"pixel_values": images}
return BatchFeature(data=lowercase_ , tensor_type=lowercase_ )
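# Hedged usage sketch (added): the processor resizes the shortest edge to 224,
# center-crops to 224x224, rescales to [0, 1] and normalizes with the CLIP
# mean/std. The checkpoint name is illustrative:
#
#   from transformers import CLIPImageProcessor
#   from PIL import Image
#
#   processor = CLIPImageProcessor.from_pretrained("openai/clip-vit-base-patch32")
#   inputs = processor(images=Image.open("cat.png"), return_tensors="pt")
#   print(inputs["pixel_values"].shape)  # torch.Size([1, 3, 224, 224])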
'''simple docstring'''
import fire
from utils import calculate_rouge, save_json
def calculate_rouge_path(pred_path, tgt_path, save_path=None, **kwargs):
    """Kwargs will be passed to calculate_rouge."""
    output_lns = [x.strip() for x in open(pred_path).readlines()]
    reference_lns = [x.strip() for x in open(tgt_path).readlines()][: len(output_lns)]
    metrics = calculate_rouge(output_lns, reference_lns, **kwargs)
    if save_path is not None:
        save_json(metrics, save_path, indent=None)
    return metrics  # these print nicely
if __name__ == "__main__":
fire.Fire(calculate_rouge_path)
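# Hedged CLI sketch (added): the two positional arguments are files with one
# prediction/reference per line; the file names below are placeholders.
#
#   python rouge_cli.py predictions.txt references.txt --save_path rouge.json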
'''simple docstring'''
from tempfile import TemporaryDirectory
from unittest import TestCase
from unittest.mock import MagicMock, patch
from transformers import AutoModel, TFAutoModel
from transformers.onnx import FeaturesManager
from transformers.testing_utils import SMALL_MODEL_IDENTIFIER, require_tf, require_torch
@require_torch
@require_tf
class snake_case__ ( SCREAMING_SNAKE_CASE_ ):
def A_ ( self : List[Any] ) -> int:
'''simple docstring'''
__snake_case : Optional[int] = SMALL_MODEL_IDENTIFIER
__snake_case : str = 'pt'
__snake_case : Union[str, Any] = 'tf'
def A_ ( self : Dict , __a : Tuple ) -> Dict:
'''simple docstring'''
__snake_case : Optional[int] = AutoModel.from_pretrained(self.test_model )
model_pt.save_pretrained(__a )
def A_ ( self : Any , __a : Optional[Any] ) -> Dict:
'''simple docstring'''
__snake_case : Union[str, Any] = TFAutoModel.from_pretrained(self.test_model , from_pt=__a )
model_tf.save_pretrained(__a )
def A_ ( self : Any ) -> Tuple:
'''simple docstring'''
__snake_case : Tuple = 'mock_framework'
# Framework provided - return whatever the user provides
__snake_case : int = FeaturesManager.determine_framework(self.test_model , __a )
self.assertEqual(__a , __a )
# Local checkpoint and framework provided - return provided framework
# PyTorch checkpoint
with TemporaryDirectory() as local_pt_ckpt:
self._setup_pt_ckpt(__a )
__snake_case : List[Any] = FeaturesManager.determine_framework(__a , __a )
self.assertEqual(__a , __a )
# TensorFlow checkpoint
with TemporaryDirectory() as local_tf_ckpt:
self._setup_tf_ckpt(__a )
__snake_case : Tuple = FeaturesManager.determine_framework(__a , __a )
self.assertEqual(__a , __a )
def A_ ( self : Union[str, Any] ) -> Any:
'''simple docstring'''
# PyTorch checkpoint
with TemporaryDirectory() as local_pt_ckpt:
self._setup_pt_ckpt(__a )
__snake_case : Tuple = FeaturesManager.determine_framework(__a )
self.assertEqual(__a , self.framework_pt )
# TensorFlow checkpoint
with TemporaryDirectory() as local_tf_ckpt:
self._setup_tf_ckpt(__a )
__snake_case : Union[str, Any] = FeaturesManager.determine_framework(__a )
self.assertEqual(__a , self.framework_tf )
# Invalid local checkpoint
with TemporaryDirectory() as local_invalid_ckpt:
with self.assertRaises(__a ):
__snake_case : Optional[int] = FeaturesManager.determine_framework(__a )
def A_ ( self : Any ) -> List[Any]:
'''simple docstring'''
__snake_case : Union[str, Any] = MagicMock(return_value=__a )
with patch('transformers.onnx.features.is_tf_available' , __a ):
__snake_case : int = FeaturesManager.determine_framework(self.test_model )
self.assertEqual(__a , self.framework_pt )
# PyTorch not in environment -> use TensorFlow
__snake_case : Tuple = MagicMock(return_value=__a )
with patch('transformers.onnx.features.is_torch_available' , __a ):
__snake_case : Dict = FeaturesManager.determine_framework(self.test_model )
self.assertEqual(__a , self.framework_tf )
# Both in environment -> use PyTorch
__snake_case : Optional[Any] = MagicMock(return_value=__a )
__snake_case : Tuple = MagicMock(return_value=__a )
with patch('transformers.onnx.features.is_tf_available' , __a ), patch(
'transformers.onnx.features.is_torch_available' , __a ):
__snake_case : Dict = FeaturesManager.determine_framework(self.test_model )
self.assertEqual(__a , self.framework_pt )
# Both not in environment -> raise error
__snake_case : str = MagicMock(return_value=__a )
__snake_case : List[Any] = MagicMock(return_value=__a )
with patch('transformers.onnx.features.is_tf_available' , __a ), patch(
'transformers.onnx.features.is_torch_available' , __a ):
with self.assertRaises(__a ):
__snake_case : Tuple = FeaturesManager.determine_framework(self.test_model )
import argparse

import torch

from transformers import (
    Wav2Vec2Config,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2ForAudioFrameClassification,
    Wav2Vec2ForSequenceClassification,
    Wav2Vec2ForXVector,
    logging,
)


logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def convert_classification(base_model_name, hf_config, downstream_dict):
    model = Wav2Vec2ForSequenceClassification.from_pretrained(base_model_name, config=hf_config)
    model.projector.weight.data = downstream_dict["projector.weight"]
    model.projector.bias.data = downstream_dict["projector.bias"]
    model.classifier.weight.data = downstream_dict["model.post_net.linear.weight"]
    model.classifier.bias.data = downstream_dict["model.post_net.linear.bias"]
    return model


def convert_diarization(base_model_name, hf_config, downstream_dict):
    model = Wav2Vec2ForAudioFrameClassification.from_pretrained(base_model_name, config=hf_config)
    model.classifier.weight.data = downstream_dict["model.linear.weight"]
    model.classifier.bias.data = downstream_dict["model.linear.bias"]
    return model


def convert_xvector(base_model_name, hf_config, downstream_dict):
    model = Wav2Vec2ForXVector.from_pretrained(base_model_name, config=hf_config)
    model.projector.weight.data = downstream_dict["connector.weight"]
    model.projector.bias.data = downstream_dict["connector.bias"]
    for i, kernel_size in enumerate(hf_config.tdnn_kernel):
        model.tdnn[i].kernel.weight.data = downstream_dict[
            f"model.framelevel_feature_extractor.module.{i}.kernel.weight"
        ]
        model.tdnn[i].kernel.bias.data = downstream_dict[f"model.framelevel_feature_extractor.module.{i}.kernel.bias"]

    model.feature_extractor.weight.data = downstream_dict["model.utterancelevel_feature_extractor.linear1.weight"]
    model.feature_extractor.bias.data = downstream_dict["model.utterancelevel_feature_extractor.linear1.bias"]
    model.classifier.weight.data = downstream_dict["model.utterancelevel_feature_extractor.linear2.weight"]
    model.classifier.bias.data = downstream_dict["model.utterancelevel_feature_extractor.linear2.bias"]
    model.objective.weight.data = downstream_dict["objective.W"]
    return model


@torch.no_grad()
def convert_s3prl_checkpoint(base_model_name, config_path, checkpoint_path, model_dump_path):
    """
    Copy/paste/tweak model's weights to transformers design.
    """
    checkpoint = torch.load(checkpoint_path, map_location="cpu")

    downstream_dict = checkpoint["Downstream"]

    hf_config = Wav2Vec2Config.from_pretrained(config_path)
    hf_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(
        base_model_name, return_attention_mask=True, do_normalize=False
    )

    arch = hf_config.architectures[0]
    if arch.endswith("ForSequenceClassification"):
        hf_model = convert_classification(base_model_name, hf_config, downstream_dict)
    elif arch.endswith("ForAudioFrameClassification"):
        hf_model = convert_diarization(base_model_name, hf_config, downstream_dict)
    elif arch.endswith("ForXVector"):
        hf_model = convert_xvector(base_model_name, hf_config, downstream_dict)
    else:
        raise NotImplementedError(f"S3PRL weights conversion is not supported for {arch}")

    if hf_config.use_weighted_layer_sum:
        hf_model.layer_weights.data = checkpoint["Featurizer"]["weights"]

    hf_feature_extractor.save_pretrained(model_dump_path)
    hf_model.save_pretrained(model_dump_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--base_model_name", default=None, type=str, help="Name of the huggingface pretrained base model."
    )
    parser.add_argument("--config_path", default=None, type=str, help="Path to the huggingface classifier config.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to the s3prl checkpoint.")
    parser.add_argument("--model_dump_path", default=None, type=str, help="Path to the final converted model.")
    args = parser.parse_args()
    convert_s3prl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
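# Hedged CLI sketch (added): all paths and the base model name below are
# placeholders for a real S3PRL downstream checkpoint.
#
#   python convert_wav2vec2_s3prl_checkpoint.py \
#       --base_model_name facebook/wav2vec2-base \
#       --config_path ./config.json \
#       --checkpoint_path ./s3prl_checkpoint.ckpt \
#       --model_dump_path ./converted_model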
"""simple docstring"""
import webbrowser
from sys import argv
from urllib.parse import parse_qs, quote

import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent

if __name__ == "__main__":
    query = "%20".join(argv[1:]) if len(argv) > 1 else quote(str(input("Search: ")))

    print("Googling.....")

    url = f"https://www.google.com/search?q={query}&num=100"

    res = requests.get(
        url,
        headers={"User-Agent": str(UserAgent().random)},
    )

    try:
        link = (
            BeautifulSoup(res.text, "html.parser")
            .find("div", attrs={"class": "yuRUbf"})
            .find("a")
            .get("href")
        )

    except AttributeError:
        link = parse_qs(
            BeautifulSoup(res.text, "html.parser")
            .find("div", attrs={"class": "kCrYT"})
            .find("a")
            .get("href")
        )["url"][0]

    webbrowser.open(link)
"""simple docstring"""
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from PIL import Image
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
UpperCamelCase_ =logging.get_logger(__name__) # pylint: disable=invalid-name
UpperCamelCase_ ="""
Examples:
```py
>>> from diffusers import KandinskyV22Img2ImgPipeline, KandinskyV22PriorPipeline
>>> from diffusers.utils import load_image
>>> import torch
>>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
... \"kandinsky-community/kandinsky-2-2-prior\", torch_dtype=torch.float16
... )
>>> pipe_prior.to(\"cuda\")
>>> prompt = \"A red cartoon frog, 4k\"
>>> image_emb, zero_image_emb = pipe_prior(prompt, return_dict=False)
>>> pipe = KandinskyV22Img2ImgPipeline.from_pretrained(
... \"kandinsky-community/kandinsky-2-2-decoder\", torch_dtype=torch.float16
... )
>>> pipe.to(\"cuda\")
>>> init_image = load_image(
... \"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main\"
... \"/kandinsky/frog.png\"
... )
>>> image = pipe(
... image=init_image,
... image_embeds=image_emb,
... negative_image_embeds=zero_image_emb,
... height=768,
... width=768,
... num_inference_steps=100,
... strength=0.2,
... ).images
>>> image[0].save(\"red_frog.png\")
```
"""
def a_ ( _lowercase , _lowercase , _lowercase=8 ):
_UpperCamelCase : Dict = height // scale_factor**2
if height % scale_factor**2 != 0:
new_height += 1
_UpperCamelCase : List[Any] = width // scale_factor**2
if width % scale_factor**2 != 0:
new_width += 1
return new_height * scale_factor, new_width * scale_factor
def a_ ( _lowercase , _lowercase=512 , _lowercase=512 ):
_UpperCamelCase : List[str] = pil_image.resize((w, h) , resample=Image.BICUBIC , reducing_gap=1 )
_UpperCamelCase : Tuple = np.array(pil_image.convert('''RGB''' ) )
_UpperCamelCase : Dict = arr.astype(np.floataa ) / 127.5 - 1
_UpperCamelCase : Optional[int] = np.transpose(_lowercase , [2, 0, 1] )
_UpperCamelCase : List[Any] = torch.from_numpy(_lowercase ).unsqueeze(0 )
return image
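# Worked example for the helper above (added): the movq autoencoder downsamples
# by scale_factor (8), so the UNet needs latent sizes on an 8-aligned grid.
# downscale_height_and_width(512, 512) computes 512 // 8**2 = 8 per side and
# returns (8 * 8, 8 * 8) = (64, 64); a non-multiple such as 500 is rounded up
# to the same (64, 64) latent grid.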
class _a ( _lowerCAmelCase ):
def __init__( self : int, lowerCAmelCase__ : UNetaDConditionModel, lowerCAmelCase__ : DDPMScheduler, lowerCAmelCase__ : VQModel, ) -> Union[str, Any]:
'''simple docstring'''
super().__init__()
self.register_modules(
unet=lowerCAmelCase__, scheduler=lowerCAmelCase__, movq=lowerCAmelCase__, )
_UpperCamelCase : List[str] = 2 ** (len(self.movq.config.block_out_channels ) - 1)
def snake_case ( self : str, lowerCAmelCase__ : List[Any], lowerCAmelCase__ : List[str], lowerCAmelCase__ : Union[str, Any] ) -> Optional[Any]:
'''simple docstring'''
_UpperCamelCase : List[str] = min(int(num_inference_steps * strength ), lowerCAmelCase__ )
_UpperCamelCase : Optional[Any] = max(num_inference_steps - init_timestep, 0 )
_UpperCamelCase : int = self.scheduler.timesteps[t_start:]
return timesteps, num_inference_steps - t_start
def snake_case ( self : Union[str, Any], lowerCAmelCase__ : Dict, lowerCAmelCase__ : Optional[int], lowerCAmelCase__ : Tuple, lowerCAmelCase__ : Any, lowerCAmelCase__ : int, lowerCAmelCase__ : Any, lowerCAmelCase__ : Tuple=None ) -> Optional[Any]:
'''simple docstring'''
if not isinstance(lowerCAmelCase__, (torch.Tensor, PIL.Image.Image, list) ):
raise ValueError(
f"""`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(lowerCAmelCase__ )}""" )
_UpperCamelCase : int = image.to(device=lowerCAmelCase__, dtype=lowerCAmelCase__ )
_UpperCamelCase : str = batch_size * num_images_per_prompt
if image.shape[1] == 4:
_UpperCamelCase : Any = image
else:
if isinstance(lowerCAmelCase__, lowerCAmelCase__ ) and len(lowerCAmelCase__ ) != batch_size:
raise ValueError(
f"""You have passed a list of generators of length {len(lowerCAmelCase__ )}, but requested an effective batch"""
f""" size of {batch_size}. Make sure the batch size matches the length of the generators.""" )
elif isinstance(lowerCAmelCase__, lowerCAmelCase__ ):
_UpperCamelCase : Optional[Any] = [
self.movq.encode(image[i : i + 1] ).latent_dist.sample(generator[i] ) for i in range(lowerCAmelCase__ )
]
_UpperCamelCase : List[str] = torch.cat(lowerCAmelCase__, dim=0 )
else:
_UpperCamelCase : Dict = self.movq.encode(lowerCAmelCase__ ).latent_dist.sample(lowerCAmelCase__ )
_UpperCamelCase : List[str] = self.movq.config.scaling_factor * init_latents
_UpperCamelCase : Any = torch.cat([init_latents], dim=0 )
_UpperCamelCase : int = init_latents.shape
_UpperCamelCase : Any = randn_tensor(lowerCAmelCase__, generator=lowerCAmelCase__, device=lowerCAmelCase__, dtype=lowerCAmelCase__ )
# get latents
_UpperCamelCase : Any = self.scheduler.add_noise(lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__ )
_UpperCamelCase : int = init_latents
return latents
def snake_case ( self : Dict, lowerCAmelCase__ : str=0 ) -> Dict:
'''simple docstring'''
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError('''Please install accelerate via `pip install accelerate`''' )
_UpperCamelCase : int = torch.device(f"""cuda:{gpu_id}""" )
_UpperCamelCase : Tuple = [
self.unet,
self.movq,
]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(lowerCAmelCase__, lowerCAmelCase__ )
def snake_case ( self : List[str], lowerCAmelCase__ : Tuple=0 ) -> Union[str, Any]:
'''simple docstring'''
if is_accelerate_available() and is_accelerate_version('''>=''', '''0.17.0.dev0''' ):
from accelerate import cpu_offload_with_hook
else:
raise ImportError('''`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.''' )
_UpperCamelCase : int = torch.device(f"""cuda:{gpu_id}""" )
if self.device.type != "cpu":
self.to('''cpu''', silence_dtype_warnings=lowerCAmelCase__ )
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
_UpperCamelCase : int = None
for cpu_offloaded_model in [self.unet, self.movq]:
_UpperCamelCase , _UpperCamelCase : List[Any] = cpu_offload_with_hook(lowerCAmelCase__, lowerCAmelCase__, prev_module_hook=lowerCAmelCase__ )
# We'll offload the last model manually.
_UpperCamelCase : Dict = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def snake_case ( self : Optional[Any] ) -> Tuple:
'''simple docstring'''
if not hasattr(self.unet, '''_hf_hook''' ):
return self.device
for module in self.unet.modules():
if (
hasattr(lowerCAmelCase__, '''_hf_hook''' )
and hasattr(module._hf_hook, '''execution_device''' )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
@replace_example_docstring(lowerCAmelCase__ )
    def __call__(
        self,
        image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]],
        image: Union[torch.FloatTensor, PIL.Image.Image, List[torch.FloatTensor], List[PIL.Image.Image]],
        negative_image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]],
        height: int = 512,
        width: int = 512,
        num_inference_steps: int = 100,
        guidance_scale: float = 4.0,
        strength: float = 0.3,
        num_images_per_prompt: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ):
        device = self._execution_device
        do_classifier_free_guidance = guidance_scale > 1.0

        if isinstance(image_embeds, list):
            image_embeds = torch.cat(image_embeds, dim=0)
        batch_size = image_embeds.shape[0]
        if isinstance(negative_image_embeds, list):
            negative_image_embeds = torch.cat(negative_image_embeds, dim=0)

        if do_classifier_free_guidance:
            image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            negative_image_embeds = negative_image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            image_embeds = torch.cat([negative_image_embeds, image_embeds], dim=0).to(dtype=self.unet.dtype, device=device)

        if not isinstance(image, list):
            image = [image]
        if not all(isinstance(i, (PIL.Image.Image, torch.Tensor)) for i in image):
            raise ValueError(
                f"Input is in incorrect format: {[type(i) for i in image]}. Currently, we only support PIL image and pytorch tensor"
            )

        image = torch.cat([prepare_image(i, width, height) for i in image], dim=0)
        image = image.to(dtype=image_embeds.dtype, device=device)

        latents = self.movq.encode(image)["latents"]
        latents = latents.repeat_interleave(num_images_per_prompt, dim=0)
        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, strength, device)
        latent_timestep = timesteps[:1].repeat(batch_size * num_images_per_prompt)
        height, width = downscale_height_and_width(height, width, self.movq_scale_factor)
        latents = self.prepare_latents(
            latents, latent_timestep, batch_size, num_images_per_prompt, image_embeds.dtype, device, generator
        )
        for i, t in enumerate(self.progress_bar(timesteps)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents

            added_cond_kwargs = {"image_embeds": image_embeds}
            noise_pred = self.unet(
                sample=latent_model_input,
                timestep=t,
                encoder_hidden_states=None,
                added_cond_kwargs=added_cond_kwargs,
                return_dict=False,
            )[0]

            if do_classifier_free_guidance:
                noise_pred, variance_pred = noise_pred.split(latents.shape[1], dim=1)
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                _, variance_pred_text = variance_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                noise_pred = torch.cat([noise_pred, variance_pred_text], dim=1)

            if not (
                hasattr(self.scheduler.config, "variance_type")
                and self.scheduler.config.variance_type in ["learned", "learned_range"]
            ):
                noise_pred, _ = noise_pred.split(latents.shape[1], dim=1)

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents, generator=generator)[0]

        # post-processing
        image = self.movq.decode(latents, force_not_quantize=True)["sample"]

        if output_type not in ["pt", "np", "pil"]:
            raise ValueError(f"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}")

        if output_type in ["np", "pil"]:
            image = image * 0.5 + 0.5
            image = image.clamp(0, 1)
            image = image.cpu().permute(0, 2, 3, 1).float().numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
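

# --- Illustrative sketch, not part of the pipeline above ---
# The guidance step inside the denoising loop mixes the unconditional and the
# conditional noise estimates. A minimal standalone demonstration of that
# arithmetic on dummy tensors (the helper name `apply_cfg` is hypothetical):
def apply_cfg(noise_pred_uncond, noise_pred_text, guidance_scale):
    # move the estimate away from the unconditional prediction, towards the
    # conditional one, scaled by `guidance_scale`
    return noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)


if __name__ == "__main__":
    uncond = torch.zeros(1, 4, 8, 8)
    cond = torch.ones(1, 4, 8, 8)
    assert torch.allclose(apply_cfg(uncond, cond, 4.0), 4.0 * cond)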
| 128 |
"""simple docstring"""
import argparse
import json
from typing import List
from ltp import LTP
from transformers.models.bert.tokenization_bert import BertTokenizer
def _is_chinese_char(cp):
    # This defines a "chinese character" as anything in the CJK Unicode block:
    #   https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
    #
    # Note that the CJK Unicode block is NOT all Japanese and Korean characters,
    # despite its name. The modern Korean Hangul alphabet is a different block,
    # as is Japanese Hiragana and Katakana. Those alphabets are used to write
    # space-separated words, so they are not treated specially and are handled
    # like all of the other languages.
if (
(cp >= 0X4E00 and cp <= 0X9FFF)
or (cp >= 0X3400 and cp <= 0X4DBF) #
or (cp >= 0X20000 and cp <= 0X2A6DF) #
or (cp >= 0X2A700 and cp <= 0X2B73F) #
or (cp >= 0X2B740 and cp <= 0X2B81F) #
or (cp >= 0X2B820 and cp <= 0X2CEAF) #
or (cp >= 0XF900 and cp <= 0XFAFF)
or (cp >= 0X2F800 and cp <= 0X2FA1F) #
): #
return True
return False
def is_chinese(word: str):
    # word like '180' or '身高' or '神'
    for char in word:
        cp = ord(char)
        if not _is_chinese_char(cp):
            return 0
    return 1
def get_chinese_word(tokens: List[str]):
    word_set = set()

    for token in tokens:
        chinese_word = len(token) > 1 and is_chinese(token)
        if chinese_word:
            word_set.add(token)
    word_list = list(word_set)
    return word_list
def add_sub_symbol(bert_tokens: List[str], chinese_word_set: set):
    if not chinese_word_set:
        return bert_tokens
    max_word_len = max([len(w) for w in chinese_word_set])

    bert_word = bert_tokens
    start, end = 0, len(bert_word)
    while start < end:
        single_word = True
        if is_chinese(bert_word[start]):
            l = min(end - start, max_word_len)
            for i in range(l, 1, -1):
                whole_word = "".join(bert_word[start : start + i])
                if whole_word in chinese_word_set:
                    for j in range(start + 1, start + i):
                        bert_word[j] = "##" + bert_word[j]
                    start = start + i
                    single_word = False
                    break
        if single_word:
            start += 1
    return bert_word
def prepare_ref(lines: List[str], ltp_tokenizer: LTP, bert_tokenizer: BertTokenizer):
    ltp_res = []
    for i in range(0, len(lines), 100):
        res = ltp_tokenizer.pipeline(lines[i : i + 100], tasks=["cws"]).cws
        res = [get_chinese_word(r) for r in res]
        ltp_res.extend(res)
    assert len(ltp_res) == len(lines)

    bert_res = []
    for i in range(0, len(lines), 100):
        res = bert_tokenizer(lines[i : i + 100], add_special_tokens=True, truncation=True, max_length=512)
        bert_res.extend(res["input_ids"])
    assert len(bert_res) == len(lines)

    ref_ids = []
    for input_ids, chinese_word in zip(bert_res, ltp_res):
        input_tokens = []
        for id in input_ids:
            token = bert_tokenizer._convert_id_to_token(id)
            input_tokens.append(token)
        input_tokens = add_sub_symbol(input_tokens, chinese_word)
        ref_id = []
        # We only save positions of Chinese subwords that start with ##,
        # which means they are part of a whole word.
        for i, token in enumerate(input_tokens):
            if token[:2] == "##":
                clean_token = token[2:]
                # save chinese tokens' pos
                if len(clean_token) == 1 and _is_chinese_char(ord(clean_token)):
                    ref_id.append(i)
        ref_ids.append(ref_id)
    assert len(ref_ids) == len(bert_res)
    return ref_ids
def main(args):
    # For Chinese (Ro)Bert, the best result is from: RoBERTa-wwm-ext (https://github.com/ymcui/Chinese-BERT-wwm)
    # If we want to fine-tune these models, we have to use the same tokenizer: LTP (https://github.com/HIT-SCIR/ltp)
    with open(args.file_name, "r", encoding="utf-8") as f:
        data = f.readlines()
    data = [line.strip() for line in data if len(line) > 0 and not line.isspace()]  # avoid delimiter like '\u2029'
    ltp_tokenizer = LTP(args.ltp)  # faster on a GPU device
    bert_tokenizer = BertTokenizer.from_pretrained(args.bert)

    ref_ids = prepare_ref(data, ltp_tokenizer, bert_tokenizer)

    with open(args.save_path, "w", encoding="utf-8") as f:
        data = [json.dumps(ref) + "\n" for ref in ref_ids]
        f.writelines(data)
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="prepare_chinese_ref")
parser.add_argument(
"""--file_name""",
required=False,
type=str,
default="""./resources/chinese-demo.txt""",
help="""file need process, same as training data in lm""",
)
parser.add_argument(
"""--ltp""",
required=False,
type=str,
default="""./resources/ltp""",
help="""resources for LTP tokenizer, usually a path""",
)
parser.add_argument(
"""--bert""",
required=False,
type=str,
default="""./resources/robert""",
help="""resources for Bert tokenizer""",
)
parser.add_argument(
"""--save_path""",
required=False,
type=str,
default="""./resources/ref.txt""",
help="""path to save res""",
)
    args = parser.parse_args()
main(args)
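
# --- Illustrative behaviour of `add_sub_symbol` (worked out by hand) ---
# Non-initial characters of an LTP-segmented Chinese word get the WordPiece
# "##" prefix so that whole-word masking can treat the word as one unit:
#
#     >>> add_sub_symbol(["身", "高", "180"], {"身高"})
#     ['身', '##高', '180']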
| 128 | 1 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "hustvl/yolos-small": "https://huggingface.co/hustvl/yolos-small/resolve/main/config.json",
    # See all YOLOS models at https://huggingface.co/models?filter=yolos
}
class YolosConfig(PretrainedConfig):
    model_type = "yolos"

    def __init__(
        self,
        hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072,
        hidden_act="gelu", hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0,
        initializer_range=0.02, layer_norm_eps=1e-12, image_size=[512, 864], patch_size=16,
        num_channels=3, qkv_bias=True, num_detection_tokens=100, use_mid_position_embeddings=True,
        auxiliary_loss=False, class_cost=1, bbox_cost=5, giou_cost=2,
        bbox_loss_coefficient=5, giou_loss_coefficient=2, eos_coefficient=0.1, **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.num_detection_tokens = num_detection_tokens
        self.use_mid_position_embeddings = use_mid_position_embeddings
        self.auxiliary_loss = auxiliary_loss
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
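
# --- Illustrative usage (a minimal sketch, not part of the module) ---
#
#     config = YolosConfig(num_detection_tokens=50)   # override one default
#     assert config.num_detection_tokens == 50
#     assert config.model_type == "yolos"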
class YolosOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    @property
    def default_onnx_opset(self) -> int:
return 12 | 243 |
"""simple docstring"""
import itertools
import json
import linecache
import os
import pickle
import re
import socket
import string
from collections import Counter
from logging import getLogger
from pathlib import Path
from typing import Callable, Dict, Iterable, List
import git
import torch
from torch.utils.data import Dataset
from transformers import BartTokenizer, RagTokenizer, T5Tokenizer
def encode_line(tokenizer, line, max_length, padding_side, pad_to_max_length=True, return_tensors="pt"):
    """Tokenize a single line, padding on the given side."""
    extra_kw = {"add_prefix_space": True} if isinstance(tokenizer, BartTokenizer) and not line.startswith(" ") else {}
    tokenizer.padding_side = padding_side
    return tokenizer(
        [line],
        max_length=max_length,
        padding="max_length" if pad_to_max_length else None,
        truncation=True,
        return_tensors=return_tensors,
        add_special_tokens=True,
        **extra_kw,
    )
def trim_batch(input_ids, pad_token_id, attention_mask=None):
    """Remove columns that are populated exclusively by pad_token_id."""
    keep_column_mask = input_ids.ne(pad_token_id).any(dim=0)
    if attention_mask is None:
        return input_ids[:, keep_column_mask]
    else:
        return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])
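
# --- Illustrative behaviour of `trim_batch` (hypothetical tensors, worked out by hand) ---
#
#     >>> import torch
#     >>> ids = torch.tensor([[5, 6, 0], [7, 0, 0]])
#     >>> trim_batch(ids, pad_token_id=0)
#     tensor([[5, 6],
#             [7, 0]])
#
# Column 2 is padding in every row, so it is dropped; column 1 survives because
# the first row still uses it.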
class Seq2SeqDataset(Dataset):
    def __init__(
        self, tokenizer, data_dir, max_source_length, max_target_length,
        type_path="train", n_obs=None, src_lang=None, tgt_lang=None, prefix="",
    ):
        super().__init__()
        self.src_file = Path(data_dir).joinpath(type_path + ".source")
        self.tgt_file = Path(data_dir).joinpath(type_path + ".target")
        self.src_lens = self.get_char_lens(self.src_file)
        self.max_source_length = max_source_length
        self.max_target_length = max_target_length
        assert min(self.src_lens) > 0, f"found empty line in {self.src_file}"
        self.tokenizer = tokenizer
        self.prefix = prefix
        if n_obs is not None:
            self.src_lens = self.src_lens[:n_obs]
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
    def __len__(self):
        return len(self.src_lens)

    def __getitem__(self, index) -> Dict[str, torch.Tensor]:
        index = index + 1  # linecache starts at 1
        source_line = self.prefix + linecache.getline(str(self.src_file), index).rstrip("\n")
        tgt_line = linecache.getline(str(self.tgt_file), index).rstrip("\n")
        assert source_line, f"empty source line for index {index}"
        assert tgt_line, f"empty tgt line for index {index}"

        # Need to add eos token manually for T5
        if isinstance(self.tokenizer, T5Tokenizer):
            source_line += self.tokenizer.eos_token
            tgt_line += self.tokenizer.eos_token

        # Pad source and target to the right
        source_tokenizer = (
            self.tokenizer.question_encoder if isinstance(self.tokenizer, RagTokenizer) else self.tokenizer
        )
        target_tokenizer = self.tokenizer.generator if isinstance(self.tokenizer, RagTokenizer) else self.tokenizer

        source_inputs = encode_line(source_tokenizer, source_line, self.max_source_length, "right")
        target_inputs = encode_line(target_tokenizer, tgt_line, self.max_target_length, "right")

        source_ids = source_inputs["input_ids"].squeeze()
        target_ids = target_inputs["input_ids"].squeeze()
        src_mask = source_inputs["attention_mask"].squeeze()
        return {
            "input_ids": source_ids,
            "attention_mask": src_mask,
            "decoder_input_ids": target_ids,
        }
    @staticmethod
    def get_char_lens(data_file):
        return [len(x) for x in Path(data_file).open().readlines()]

    def collate_fn(self, batch) -> Dict[str, torch.Tensor]:
        input_ids = torch.stack([x["input_ids"] for x in batch])
        masks = torch.stack([x["attention_mask"] for x in batch])
        target_ids = torch.stack([x["decoder_input_ids"] for x in batch])
        tgt_pad_token_id = (
            self.tokenizer.generator.pad_token_id
            if isinstance(self.tokenizer, RagTokenizer)
            else self.tokenizer.pad_token_id
        )
        src_pad_token_id = (
            self.tokenizer.question_encoder.pad_token_id
            if isinstance(self.tokenizer, RagTokenizer)
            else self.tokenizer.pad_token_id
        )
        y = trim_batch(target_ids, tgt_pad_token_id)
        source_ids, source_mask = trim_batch(input_ids, src_pad_token_id, attention_mask=masks)
        batch = {
            "input_ids": source_ids,
            "attention_mask": source_mask,
            "decoder_input_ids": y,
        }
        return batch
logger = getLogger(__name__)


def flatten_list(summary_ids: List[List]):
    return list(itertools.chain.from_iterable(summary_ids))


def save_git_info(folder_path: str) -> None:
    """Save git information to folder_path/git_log.json."""
    repo_infos = get_git_info()
    save_json(repo_infos, os.path.join(folder_path, "git_log.json"))


def save_json(content, path, indent=4, **json_dump_kwargs):
    with open(path, "w") as f:
        json.dump(content, f, indent=indent, **json_dump_kwargs)


def load_json(path):
    with open(path) as f:
        return json.load(f)


def get_git_info():
    repo = git.Repo(search_parent_directories=True)
    repo_infos = {
        "repo_id": str(repo),
        "repo_sha": str(repo.head.object.hexsha),
        "repo_branch": str(repo.active_branch),
        "hostname": str(socket.gethostname()),
    }
    return repo_infos


def lmap(f: Callable, x: Iterable) -> List:
    """list(map(f, x))"""
    return list(map(f, x))


def pickle_save(obj, path):
    """pickle.dump(obj, path)"""
    with open(path, "wb") as f:
        return pickle.dump(obj, f)
def normalize_answer(s):
    """Lower text and remove punctuation, articles and extra whitespace."""

    def remove_articles(text):
        return re.sub(r"\b(a|an|the)\b", " ", text)

    def white_space_fix(text):
        return " ".join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s))))
def f1_score(prediction, ground_truth):
    prediction_tokens = normalize_answer(prediction).split()
    ground_truth_tokens = normalize_answer(ground_truth).split()
    common = Counter(prediction_tokens) & Counter(ground_truth_tokens)
    num_same = sum(common.values())
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(prediction_tokens)
    recall = 1.0 * num_same / len(ground_truth_tokens)
    f1 = (2 * precision * recall) / (precision + recall)
    return f1


def exact_match_score(prediction, ground_truth):
    return normalize_answer(prediction) == normalize_answer(ground_truth)


def calculate_exact_match(output_lns: List[str], reference_lns: List[str]) -> Dict:
    assert len(output_lns) == len(reference_lns)
    em = 0
    for hypo, pred in zip(output_lns, reference_lns):
        em += exact_match_score(hypo, pred)
    if len(output_lns) > 0:
        em /= len(output_lns)
    return {"em": em}
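
# --- Illustrative behaviour of the metric helpers above (worked out by hand) ---
#
#     >>> f1_score("The Cat sat.", "the cat sat")      # both normalize to "cat sat"
#     1.0
#     >>> exact_match_score("The Cat sat.", "the cat sat")
#     True
#     >>> calculate_exact_match(["a b"], ["a c"])      # "b" != "c" after normalization
#     {'em': 0.0}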
def is_rag_model(model_prefix):
    return model_prefix.startswith("rag")


def set_extra_model_params(extra_params, hparams, config):
    equivalent_param = {p: p for p in extra_params}
    # T5 models don't have `dropout` param, they have `dropout_rate` instead
    equivalent_param["dropout"] = "dropout_rate"
    for p in extra_params:
        if getattr(hparams, p, None):
            if not hasattr(config, p) and not hasattr(config, equivalent_param[p]):
                logger.info("config doesn't have a `{}` attribute".format(p))
                delattr(hparams, p)
                continue
            set_p = p if hasattr(config, p) else equivalent_param[p]
            setattr(config, set_p, getattr(hparams, p))
            delattr(hparams, p)
return hparams, config | 243 | 1 |
"""simple docstring"""
from __future__ import annotations
import unittest
import numpy as np
from transformers import BlipTextConfig
from transformers.testing_utils import require_tf, slow
from transformers.utils import is_tf_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
if is_tf_available():
import tensorflow as tf
from transformers import TFBlipTextModel
from transformers.models.blip.modeling_tf_blip import TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST
class BlipTextModelTester:
    def __init__(
        self, parent, batch_size=12, seq_length=7, is_training=True, use_input_mask=True,
        use_labels=True, vocab_size=99, hidden_size=32, projection_dim=32, num_hidden_layers=2,
        num_attention_heads=4, intermediate_size=37, dropout=0.1, attention_dropout=0.1,
        max_position_embeddings=512, initializer_range=0.02, bos_token_id=0, scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.projection_dim = projection_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.scope = scope
        self.bos_token_id = bos_token_id
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        if input_mask is not None:
            input_mask = input_mask.numpy()
            batch_size, seq_length = input_mask.shape
            rnd_start_indices = np.random.randint(1, seq_length - 1, size=(batch_size,))
            for batch_idx, start_index in enumerate(rnd_start_indices):
                input_mask[batch_idx, :start_index] = 1
                input_mask[batch_idx, start_index:] = 0

        config = self.get_config()
        return config, input_ids, tf.convert_to_tensor(input_mask)
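
    # Note: the random-mask construction above gives each example in the batch a
    # different effective length -- positions before a random start index are
    # attended to (1) and the remaining positions are masked out (0).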
    def get_config(self):
        return BlipTextConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            projection_dim=self.projection_dim,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            dropout=self.dropout,
            attention_dropout=self.attention_dropout,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
            bos_token_id=self.bos_token_id,
        )

    def create_and_check_model(self, config, input_ids, input_mask):
        model = TFBlipTextModel(config=config)
        result = model(input_ids, attention_mask=input_mask, training=False)
        result = model(input_ids, training=False)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, input_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class BlipTextModelTest(TFModelTesterMixin, unittest.TestCase):
    all_model_classes = (TFBlipTextModel,) if is_tf_available() else ()
    # attribute names below follow the common TF test conventions
    fx_compatible = False
    test_pruning = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = BlipTextModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BlipTextConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_training(self):
        pass

    def test_training_gradient_checkpointing(self):
        pass

    @unittest.skip(reason="Blip does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="BlipTextModel has no base class and is not available in MODEL_MAPPING")
    def test_save_load_fast_init_from_base(self):
        pass

    @unittest.skip(reason="BlipTextModel has no base class and is not available in MODEL_MAPPING")
    def test_save_load_fast_init_to_base(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFBlipTextModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_pt_tf_model_equivalence(self, allow_missing_keys=True):
        super().test_pt_tf_model_equivalence(allow_missing_keys=allow_missing_keys) | 239 |
"""simple docstring"""
def alternative_string_arrange(first_str: str, second_str: str) -> str:
    first_str_length: int = len(first_str)
    second_str_length: int = len(second_str)
    # use the longer of the two lengths as the loop bound
    abs_length: int = (
        first_str_length if first_str_length > second_str_length else second_str_length
    )
    output_list: list = []
    for char_count in range(abs_length):
        if char_count < first_str_length:
            output_list.append(first_str[char_count])
        if char_count < second_str_length:
            output_list.append(second_str[char_count])
    return "".join(output_list)
if __name__ == "__main__":
print(alternative_string_arrange("AB", "XYZ"), end=" ") | 239 | 0 |
def longest_distance(graph):
    indegree = [0] * len(graph)
    queue = []
    long_dist = [1] * len(graph)

    for values in graph.values():
        for i in values:
            indegree[i] += 1

    for i in range(len(indegree)):
        if indegree[i] == 0:
            queue.append(i)

    while queue:
        vertex = queue.pop(0)
        for x in graph[vertex]:
            indegree[x] -= 1

            if long_dist[vertex] + 1 > long_dist[x]:
                long_dist[x] = long_dist[vertex] + 1

            if indegree[x] == 0:
                queue.append(x)

    print(max(long_dist))


# Adjacency list of Graph
graph = {0: [2, 3, 4], 1: [2, 7], 2: [5], 3: [5, 7], 4: [7], 5: [6], 6: [7], 7: []}
longest_distance(graph)
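
# With the adjacency list above, the longest path in the DAG visits five
# vertices (e.g. 0 -> 2 -> 5 -> 6 -> 7), so the call prints 5.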
| 14 |
import os
import sys
import unittest
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))
import get_test_info # noqa: E402
from get_test_info import ( # noqa: E402
get_model_to_test_mapping,
get_model_to_tester_mapping,
get_test_to_tester_mapping,
)
BERT_TEST_FILE = os.path.join("tests", "models", "bert", "test_modeling_bert.py")
BLIP_TEST_FILE = os.path.join("tests", "models", "blip", "test_modeling_blip.py")
class GetTestInfoTester(unittest.TestCase):
    def test_get_test_to_tester_mapping(self):
        bert_test_tester_mapping = get_test_to_tester_mapping(BERT_TEST_FILE)
        blip_test_tester_mapping = get_test_to_tester_mapping(BLIP_TEST_FILE)

        expected_bert_mapping = {"BertModelTest": "BertModelTester"}
        expected_blip_mapping = {
'''BlipModelTest''': '''BlipModelTester''',
'''BlipTextImageModelTest''': '''BlipTextImageModelsModelTester''',
'''BlipTextModelTest''': '''BlipTextModelTester''',
'''BlipTextRetrievalModelTest''': '''BlipTextRetrievalModelTester''',
'''BlipVQAModelTest''': '''BlipVQAModelTester''',
'''BlipVisionModelTest''': '''BlipVisionModelTester''',
}
        self.assertEqual(get_test_info.to_json(bert_test_tester_mapping), expected_bert_mapping)
        self.assertEqual(get_test_info.to_json(blip_test_tester_mapping), expected_blip_mapping)

    def test_get_model_to_test_mapping(self):
        bert_model_test_mapping = get_model_to_test_mapping(BERT_TEST_FILE)
        blip_model_test_mapping = get_model_to_test_mapping(BLIP_TEST_FILE)
        expected_bert_mapping = {
'''BertForMaskedLM''': ['''BertModelTest'''],
'''BertForMultipleChoice''': ['''BertModelTest'''],
'''BertForNextSentencePrediction''': ['''BertModelTest'''],
'''BertForPreTraining''': ['''BertModelTest'''],
'''BertForQuestionAnswering''': ['''BertModelTest'''],
'''BertForSequenceClassification''': ['''BertModelTest'''],
'''BertForTokenClassification''': ['''BertModelTest'''],
'''BertLMHeadModel''': ['''BertModelTest'''],
'''BertModel''': ['''BertModelTest'''],
}
        expected_blip_mapping = {
'''BlipForConditionalGeneration''': ['''BlipTextImageModelTest'''],
'''BlipForImageTextRetrieval''': ['''BlipTextRetrievalModelTest'''],
'''BlipForQuestionAnswering''': ['''BlipVQAModelTest'''],
'''BlipModel''': ['''BlipModelTest'''],
'''BlipTextModel''': ['''BlipTextModelTest'''],
'''BlipVisionModel''': ['''BlipVisionModelTest'''],
}
        self.assertEqual(get_test_info.to_json(bert_model_test_mapping), expected_bert_mapping)
        self.assertEqual(get_test_info.to_json(blip_model_test_mapping), expected_blip_mapping)

    def test_get_model_to_tester_mapping(self):
        bert_model_tester_mapping = get_model_to_tester_mapping(BERT_TEST_FILE)
        blip_model_tester_mapping = get_model_to_tester_mapping(BLIP_TEST_FILE)
        expected_bert_mapping = {
'''BertForMaskedLM''': ['''BertModelTester'''],
'''BertForMultipleChoice''': ['''BertModelTester'''],
'''BertForNextSentencePrediction''': ['''BertModelTester'''],
'''BertForPreTraining''': ['''BertModelTester'''],
'''BertForQuestionAnswering''': ['''BertModelTester'''],
'''BertForSequenceClassification''': ['''BertModelTester'''],
'''BertForTokenClassification''': ['''BertModelTester'''],
'''BertLMHeadModel''': ['''BertModelTester'''],
'''BertModel''': ['''BertModelTester'''],
}
        expected_blip_mapping = {
'''BlipForConditionalGeneration''': ['''BlipTextImageModelsModelTester'''],
'''BlipForImageTextRetrieval''': ['''BlipTextRetrievalModelTester'''],
'''BlipForQuestionAnswering''': ['''BlipVQAModelTester'''],
'''BlipModel''': ['''BlipModelTester'''],
'''BlipTextModel''': ['''BlipTextModelTester'''],
'''BlipVisionModel''': ['''BlipVisionModelTester'''],
}
        self.assertEqual(get_test_info.to_json(bert_model_tester_mapping), expected_bert_mapping)
        self.assertEqual(get_test_info.to_json(blip_model_tester_mapping), expected_blip_mapping)
| 14 | 1 |
from torch import nn
def get_activation(act_fn: str) -> nn.Module:
    """Map an activation-function name from a config to an `nn.Module` instance."""
    if act_fn in ["swish", "silu"]:
        return nn.SiLU()
    elif act_fn == "mish":
        return nn.Mish()
    elif act_fn == "gelu":
        return nn.GELU()
    else:
        raise ValueError(f"Unsupported activation function: {act_fn}")
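
# --- Illustrative usage ---
#
#     get_activation("silu")   # -> nn.SiLU()
#     get_activation("mish")   # -> nn.Mish()
#     get_activation("tanh")   # raises ValueError: Unsupported activation function: tanh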
| 28 |
import warnings
from ...utils import logging
from .image_processing_glpn import GLPNImageProcessor
__UpperCAmelCase = logging.get_logger(__name__)
class GLPNFeatureExtractor(GLPNImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class GLPNFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use GLPNImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 28 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
    "configuration_pix2struct": [
        "PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "Pix2StructConfig",
        "Pix2StructTextConfig",
        "Pix2StructVisionConfig",
    ],
    "processing_pix2struct": ["Pix2StructProcessor"],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["image_processing_pix2struct"] = ["Pix2StructImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_pix2struct"] = [
        "PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Pix2StructPreTrainedModel",
        "Pix2StructForConditionalGeneration",
        "Pix2StructVisionModel",
        "Pix2StructTextModel",
    ]
if TYPE_CHECKING:
    from .configuration_pix2struct import (
        PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Pix2StructConfig,
        Pix2StructTextConfig,
        Pix2StructVisionConfig,
    )
    from .processing_pix2struct import Pix2StructProcessor

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .image_processing_pix2struct import Pix2StructImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_pix2struct import (
            PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Pix2StructForConditionalGeneration,
            Pix2StructPreTrainedModel,
            Pix2StructTextModel,
            Pix2StructVisionModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
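
# --- Design note: a stripped-down sketch of the lazy-module idea (hypothetical,
# not the actual `_LazyModule` implementation) ---
#
#     import importlib, types
#
#     class LazyModule(types.ModuleType):
#         def __init__(self, name, import_structure):
#             super().__init__(name)
#             self._class_to_module = {
#                 cls: mod for mod, classes in import_structure.items() for cls in classes
#             }
#
#         def __getattr__(self, name):
#             # import the owning submodule only on first attribute access
#             module = importlib.import_module("." + self._class_to_module[name], self.__name__)
#             return getattr(module, name)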
| 348 |
from tempfile import TemporaryDirectory
from unittest import TestCase
from unittest.mock import MagicMock, patch
from transformers import AutoModel, TFAutoModel
from transformers.onnx import FeaturesManager
from transformers.testing_utils import SMALL_MODEL_IDENTIFIER, require_tf, require_torch
@require_torch
@require_tf
class DetermineFrameworkTest(TestCase):
    def setUp(self):
        self.test_model = SMALL_MODEL_IDENTIFIER
        self.framework_pt = "pt"
        self.framework_tf = "tf"

    def _setup_pt_ckpt(self, save_dir):
        model_pt = AutoModel.from_pretrained(self.test_model)
        model_pt.save_pretrained(save_dir)

    def _setup_tf_ckpt(self, save_dir):
        model_tf = TFAutoModel.from_pretrained(self.test_model, from_pt=True)
        model_tf.save_pretrained(save_dir)
    def test_framework_provided(self):
        mock_framework = "mock_framework"

        # Framework provided - return whatever the user provides
        framework = FeaturesManager.determine_framework(self.test_model, mock_framework)
        self.assertEqual(framework, mock_framework)

        # Local checkpoint and framework provided - return provided framework
        # PyTorch checkpoint
        with TemporaryDirectory() as local_pt_ckpt:
            self._setup_pt_ckpt(local_pt_ckpt)
            framework = FeaturesManager.determine_framework(local_pt_ckpt, mock_framework)
            self.assertEqual(framework, mock_framework)

        # TensorFlow checkpoint
        with TemporaryDirectory() as local_tf_ckpt:
            self._setup_tf_ckpt(local_tf_ckpt)
            framework = FeaturesManager.determine_framework(local_tf_ckpt, mock_framework)
            self.assertEqual(framework, mock_framework)
    def test_checkpoint_lookup(self):
        # PyTorch checkpoint
        with TemporaryDirectory() as local_pt_ckpt:
            self._setup_pt_ckpt(local_pt_ckpt)
            framework = FeaturesManager.determine_framework(local_pt_ckpt)
            self.assertEqual(framework, self.framework_pt)

        # TensorFlow checkpoint
        with TemporaryDirectory() as local_tf_ckpt:
            self._setup_tf_ckpt(local_tf_ckpt)
            framework = FeaturesManager.determine_framework(local_tf_ckpt)
            self.assertEqual(framework, self.framework_tf)

        # Invalid local checkpoint: the expected exception type was lost in the
        # source; FileNotFoundError is assumed here.
        with TemporaryDirectory() as local_invalid_ckpt:
            with self.assertRaises(FileNotFoundError):
                FeaturesManager.determine_framework(local_invalid_ckpt)
    def test_framework_from_environment(self):
        # TensorFlow not in environment -> use PyTorch
        mock_tf_available = MagicMock(return_value=False)
        with patch("transformers.onnx.features.is_tf_available", mock_tf_available):
            framework = FeaturesManager.determine_framework(self.test_model)
            self.assertEqual(framework, self.framework_pt)

        # PyTorch not in environment -> use TensorFlow
        mock_torch_available = MagicMock(return_value=False)
        with patch("transformers.onnx.features.is_torch_available", mock_torch_available):
            framework = FeaturesManager.determine_framework(self.test_model)
            self.assertEqual(framework, self.framework_tf)

        # Both in environment -> use PyTorch
        mock_tf_available = MagicMock(return_value=True)
        mock_torch_available = MagicMock(return_value=True)
        with patch("transformers.onnx.features.is_tf_available", mock_tf_available), patch(
            "transformers.onnx.features.is_torch_available", mock_torch_available
        ):
            framework = FeaturesManager.determine_framework(self.test_model)
            self.assertEqual(framework, self.framework_pt)

        # Both not in environment -> raise error (EnvironmentError assumed; the
        # expected exception type was lost in the source)
        mock_tf_available = MagicMock(return_value=False)
        mock_torch_available = MagicMock(return_value=False)
        with patch("transformers.onnx.features.is_tf_available", mock_tf_available), patch(
            "transformers.onnx.features.is_torch_available", mock_torch_available
        ):
            with self.assertRaises(EnvironmentError):
                FeaturesManager.determine_framework(self.test_model)
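
# Design note (behaviour exercised above): FeaturesManager.determine_framework
# resolves the export framework in priority order -- an explicit user choice
# first, then the framework of a local checkpoint, then whichever of
# PyTorch/TensorFlow is importable, with PyTorch preferred when both are.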
| 0 | 0 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ChineseCLIPImageProcessor
class ChineseCLIPImageProcessingTester(unittest.TestCase):
    def __init__(
        self, parent, batch_size=7, num_channels=3, image_size=18, min_resolution=30,
        max_resolution=400, do_resize=True, size=None, do_center_crop=True, crop_size=None,
        do_normalize=True, image_mean=[0.48145466, 0.4578275, 0.40821073],
        image_std=[0.26862954, 0.26130258, 0.27577711], do_convert_rgb=True,
    ):
        size = size if size is not None else {"height": 224, "width": 224}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_convert_rgb = do_convert_rgb

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_center_crop": self.do_center_crop,
            "crop_size": self.crop_size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_convert_rgb": self.do_convert_rgb,
        }

    def prepare_inputs(self, equal_resolution=False, numpify=False, torchify=False):
        assert not (numpify and torchify), "You cannot specify both numpy and PyTorch tensors at the same time"

        if equal_resolution:
            image_inputs = []
            for i in range(self.batch_size):
                image_inputs.append(
                    np.random.randint(
                        255, size=(self.num_channels, self.max_resolution, self.max_resolution), dtype=np.uint8
                    )
                )
        else:
            image_inputs = []
            for i in range(self.batch_size):
                width, height = np.random.choice(np.arange(self.min_resolution, self.max_resolution), 2)
                image_inputs.append(np.random.randint(255, size=(self.num_channels, width, height), dtype=np.uint8))

        if not numpify and not torchify:
            # PIL expects the channel dimension as last dimension
            image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]

        if torchify:
            image_inputs = [torch.from_numpy(x) for x in image_inputs]

        return image_inputs
@require_torch
@require_vision
class ChineseCLIPImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = ChineseCLIPImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = ChineseCLIPImageProcessingTester(self, do_center_crop=True)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "center_crop"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_convert_rgb"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 224, "width": 224})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})

    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
@require_torch
@require_vision
class ChineseCLIPImageProcessingTestFourChannels(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = ChineseCLIPImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = ChineseCLIPImageProcessingTester(self, num_channels=4, do_center_crop=True)
        self.expected_encoded_image_num_channels = 3

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "center_crop"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_convert_rgb"))

    def test_batch_feature(self):
        pass

    def test_call_pil_four_channels(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.expected_encoded_image_num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.expected_encoded_image_num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
| 352 |
INSTALL_CONTENT = "\n# Transformers 설치 방법\n! pip install transformers datasets\n# 마지막 릴리스 대신 소스에서 설치하려면, 위 명령을 주석으로 바꾸고 아래 명령을 해제하세요.\n# ! pip install git+https://github.com/huggingface/transformers.git\n"
notebook_first_cells = [{"type": "code", "content": INSTALL_CONTENT}]
black_avoid_patterns = {
"{processor_class}": "FakeProcessorClass",
"{model_class}": "FakeModelClass",
"{object_class}": "FakeObjectClass",
}
| 138 | 0 |
def present_value(discount_rate: float, cash_flows: list) -> float:
    if discount_rate < 0:
        raise ValueError("Discount rate cannot be negative")
    if not cash_flows:
        raise ValueError("Cash flows list cannot be empty")
    present_value = sum(
        cash_flow / ((1 + discount_rate) ** i) for i, cash_flow in enumerate(cash_flows)
    )
    return round(present_value, ndigits=2)
if __name__ == "__main__":
import doctest
doctest.testmod()
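
if __name__ == "__main__":
    # quick sanity checks, values worked out by hand:
    # 100 now plus 100 discounted one period at 10% -> 100 + 90.909... -> 190.91
    assert present_value(0.1, [100.0, 100.0]) == 190.91
    assert present_value(0.0, [10.0, 20.0, 30.0]) == 60.0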
| 128 |
import argparse
from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection
from diffusers import UnCLIPImageVariationPipeline, UnCLIPPipeline
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("""--dump_path""", default=None, type=str, required=True, help="""Path to the output model.""")
parser.add_argument(
"""--txt2img_unclip""",
default="""kakaobrain/karlo-v1-alpha""",
type=str,
required=False,
help="""The pretrained txt2img unclip.""",
)
    args = parser.parse_args()

    txt2img = UnCLIPPipeline.from_pretrained(args.txt2img_unclip)

    feature_extractor = CLIPImageProcessor()
    image_encoder = CLIPVisionModelWithProjection.from_pretrained("openai/clip-vit-large-patch14")

    img2img = UnCLIPImageVariationPipeline(
        decoder=txt2img.decoder,
        text_encoder=txt2img.text_encoder,
        tokenizer=txt2img.tokenizer,
        text_proj=txt2img.text_proj,
        feature_extractor=feature_extractor,
        image_encoder=image_encoder,
        super_res_first=txt2img.super_res_first,
        super_res_last=txt2img.super_res_last,
        decoder_scheduler=txt2img.decoder_scheduler,
        super_res_scheduler=txt2img.super_res_scheduler,
    )

    img2img.save_pretrained(args.dump_path)
| 128 | 1 |
def combination_util(arr, n, r, index, data, i):
    if index == r:
        for j in range(r):
            print(data[j], end=" ")
        print(" ")
        return
    # When no more elements are there to put in data[]
    if i >= n:
        return
    # current is included, put next at next location
    data[index] = arr[i]
    combination_util(arr, n, r, index + 1, data, i + 1)
    # current is excluded, replace it with
    # next (Note that i+1 is passed, but
    # index is not changed)
    combination_util(arr, n, r, index, data, i + 1)


# The main function that prints all combinations
# of size r in arr[] of size n. This function
# mainly uses combinationUtil()
def print_combination(arr, n, r):
    # A temporary array to store all combination one by one
    data = [0] * r
    # Print all combination using temporary array 'data[]'
    combination_util(arr, n, r, 0, data, 0)
if __name__ == "__main__":
# Driver code to check the function above
    arr = [10, 20, 30, 40, 50]
print_combination(arr, len(arr), 3)
# This code is contributed by Ambuj sahu
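
# The driver above prints every 3-element combination of [10, 20, 30, 40, 50]:
# C(5, 3) = 10 lines, from "10 20 30" to "30 40 50".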
| 122 |
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DeformableDetrImageProcessor
class DeformableDetrImageProcessingTester(unittest.TestCase):
    def __init__(
        self, parent, batch_size=7, num_channels=3, min_resolution=30, max_resolution=400,
        do_resize=True, size=None, do_normalize=True, image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5], do_rescale=True, rescale_factor=1 / 255, do_pad=True,
    ):
        # by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
        size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_rescale": self.do_rescale,
            "rescale_factor": self.rescale_factor,
            "do_pad": self.do_pad,
        }

    def get_expected_values(self, image_inputs, batched=False):
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]

        return expected_height, expected_width
@require_torch
@require_vision
class DeformableDetrImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = DeformableDetrImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = DeformableDetrImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "do_rescale"))
        self.assertTrue(hasattr(image_processing, "do_pad"))
        self.assertTrue(hasattr(image_processing, "size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18, "longest_edge": 1333})
        self.assertEqual(image_processor.do_pad, True)

        image_processor = self.image_processing_class.from_dict(
            self.image_processor_dict, size=42, max_size=84, pad_and_return_pixel_mask=False
        )
        self.assertEqual(image_processor.size, {"shortest_edge": 42, "longest_edge": 84})
        self.assertEqual(image_processor.do_pad, False)

    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    @slow
    def test_call_pytorch_with_coco_detection_annotations(self):
        # prepare image and target
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"image_id": 39769, "annotations": target}

        # encode them
        image_processing = DeformableDetrImageProcessor()
        encoding = image_processing(images=image, annotations=target, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)

        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([75, 75, 63, 65, 17, 17])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
@slow
def UpperCamelCase__ ( self ):
"""simple docstring"""
# prepare image, target and masks_path
lowerCAmelCase__ = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
with open('./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt' , 'r' ) as f:
lowerCAmelCase__ = json.loads(f.read() )
lowerCAmelCase__ = {'file_name': '000000039769.png', 'image_id': 3_97_69, 'segments_info': target}
lowerCAmelCase__ = pathlib.Path('./tests/fixtures/tests_samples/COCO/coco_panoptic' )
# encode them
lowerCAmelCase__ = DeformableDetrImageProcessor(format='coco_panoptic' )
lowerCAmelCase__ = image_processing(images=_UpperCamelCase , annotations=_UpperCamelCase , masks_path=_UpperCamelCase , return_tensors='pt' )
# verify pixel values
lowerCAmelCase__ = torch.Size([1, 3, 8_00, 10_66] )
self.assertEqual(encoding['pixel_values'].shape , _UpperCamelCase )
lowerCAmelCase__ = torch.tensor([0.27_96, 0.31_38, 0.34_81] )
self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] , _UpperCamelCase , atol=1E-4 ) )
# verify area
lowerCAmelCase__ = torch.tensor([14_79_79.68_75, 16_55_27.04_69, 48_46_38.59_38, 1_12_92.93_75, 58_79.65_62, 76_34.11_47] )
self.assertTrue(torch.allclose(encoding['labels'][0]['area'] , _UpperCamelCase ) )
# verify boxes
lowerCAmelCase__ = torch.Size([6, 4] )
self.assertEqual(encoding['labels'][0]['boxes'].shape , _UpperCamelCase )
lowerCAmelCase__ = torch.tensor([0.26_25, 0.54_37, 0.46_88, 0.86_25] )
self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] , _UpperCamelCase , atol=1E-3 ) )
# verify image_id
lowerCAmelCase__ = torch.tensor([3_97_69] )
self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] , _UpperCamelCase ) )
# verify is_crowd
lowerCAmelCase__ = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] , _UpperCamelCase ) )
# verify class_labels
lowerCAmelCase__ = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] , _UpperCamelCase ) )
# verify masks
lowerCAmelCase__ = 82_28_73
self.assertEqual(encoding['labels'][0]['masks'].sum().item() , _UpperCamelCase )
# verify orig_size
lowerCAmelCase__ = torch.tensor([4_80, 6_40] )
self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] , _UpperCamelCase ) )
# verify size
lowerCAmelCase__ = torch.tensor([8_00, 10_66] )
self.assertTrue(torch.allclose(encoding['labels'][0]['size'] , _UpperCamelCase ) )
| 122 | 1 |
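The hard-coded slices checked above are just the deterministic output of resize-plus-normalize preprocessing. A minimal sketch of that normalization step, assuming the usual ImageNet statistics (the exact constants a given processor uses should be read from its config, not from this snippet):

import numpy as np

# Hypothetical illustration: DETR-style processors scale pixels to [0, 1]
# and then normalize per channel with ImageNet mean/std.
IMAGENET_MEAN = np.array([0.485, 0.456, 0.406])
IMAGENET_STD = np.array([0.229, 0.224, 0.225])

def normalize(pixels_uint8: np.ndarray) -> np.ndarray:
    # pixels_uint8: (H, W, 3) array with values in [0, 255]
    scaled = pixels_uint8.astype(np.float32) / 255.0
    return (scaled - IMAGENET_MEAN) / IMAGENET_STD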
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"bert_for_seq_generation": (
"https://huggingface.co/google/bert_for_seq_generation_L-24_bbc_encoder/resolve/main/spiece.model"
),
}
}
_a = {"bert_for_seq_generation": 512}
class BertGenerationTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    prefix_tokens: List[int] = []
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        unk_token="<unk>",
        pad_token="<pad>",
        sep_token="<::::>",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ):
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        # Add extra_ids to the special token list
        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            sep_token=sep_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

    @property
    def vocab_size(self):
        return self.sp_model.get_piece_size()

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def _tokenize(self, text):
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        return self.sp_model.piece_to_id(token)

    def _convert_id_to_token(self, index):
        token = self.sp_model.IdToPiece(index)
        return token

    def convert_tokens_to_string(self, tokens):
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)
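For orientation, a minimal round trip through the raw SentencePiece API that this tokenizer wraps; the model path is a placeholder for any trained spiece.model file:

import sentencepiece as spm

sp = spm.SentencePieceProcessor()
sp.Load("spiece.model")  # placeholder path to a trained model

pieces = sp.encode("Hello world", out_type=str)   # e.g. ['▁He', 'llo', '▁world']
ids = [sp.piece_to_id(p) for p in pieces]         # integer ids per piece
print(sp.decode(pieces))                          # reconstructs "Hello world"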
| 209 | '''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_albert import AlbertTokenizer
else:
    AlbertTokenizer = None

logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"albert-base-v1": "https://huggingface.co/albert-base-v1/resolve/main/spiece.model",
"albert-large-v1": "https://huggingface.co/albert-large-v1/resolve/main/spiece.model",
"albert-xlarge-v1": "https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model",
"albert-xxlarge-v1": "https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model",
"albert-base-v2": "https://huggingface.co/albert-base-v2/resolve/main/spiece.model",
"albert-large-v2": "https://huggingface.co/albert-large-v2/resolve/main/spiece.model",
"albert-xlarge-v2": "https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model",
"albert-xxlarge-v2": "https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model",
},
"tokenizer_file": {
"albert-base-v1": "https://huggingface.co/albert-base-v1/resolve/main/tokenizer.json",
"albert-large-v1": "https://huggingface.co/albert-large-v1/resolve/main/tokenizer.json",
"albert-xlarge-v1": "https://huggingface.co/albert-xlarge-v1/resolve/main/tokenizer.json",
"albert-xxlarge-v1": "https://huggingface.co/albert-xxlarge-v1/resolve/main/tokenizer.json",
"albert-base-v2": "https://huggingface.co/albert-base-v2/resolve/main/tokenizer.json",
"albert-large-v2": "https://huggingface.co/albert-large-v2/resolve/main/tokenizer.json",
"albert-xlarge-v2": "https://huggingface.co/albert-xlarge-v2/resolve/main/tokenizer.json",
"albert-xxlarge-v2": "https://huggingface.co/albert-xxlarge-v2/resolve/main/tokenizer.json",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"albert-base-v1": 512,
"albert-large-v1": 512,
"albert-xlarge-v1": 512,
"albert-xxlarge-v1": 512,
"albert-base-v2": 512,
"albert-large-v2": 512,
"albert-xlarge-v2": 512,
"albert-xxlarge-v2": 512,
}
_lowercase : Tuple = "▁"
class AlbertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = AlbertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        remove_space=True,
        keep_accents=False,
        bos_token="[CLS]",
        eos_token="[SEP]",
        unk_token="<unk>",
        sep_token="[SEP]",
        pad_token="<pad>",
        cls_token="[CLS]",
        mask_token="[MASK]",
        **kwargs,
    ):
        # Mask token behaves like a normal word, i.e. includes the space before it and
        # is included in the raw text; there should be a match in a non-normalized sentence.
        mask_token = (
            AddedToken(mask_token, lstrip=True, rstrip=False, normalized=False)
            if isinstance(mask_token, str)
            else mask_token
        )
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            **kwargs,
        )
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None):
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
| 239 | 0 |
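A standalone sketch of the segment-id layout that create_token_type_ids_from_sequences builds for sentence pairs; the special-token ids here are placeholders, not the model's real vocabulary:

def token_type_ids_sketch(a: list, b: list = None) -> list:
    cls, sep = [101], [102]  # hypothetical ids for [CLS] / [SEP]
    if b is None:
        return [0] * len(cls + a + sep)
    # first segment (with CLS and its SEP) is 0, second segment (with its SEP) is 1
    return [0] * len(cls + a + sep) + [1] * len(b + sep)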
import os
import sys
SRC_DIR = os.path.join(os.path.dirname(__file__), "src")
sys.path.append(SRC_DIR)
from transformers import (
AutoConfig,
AutoModel,
AutoModelForCausalLM,
AutoModelForMaskedLM,
AutoModelForQuestionAnswering,
AutoModelForSequenceClassification,
AutoTokenizer,
add_start_docstrings,
)
dependencies = [
'torch',
'numpy',
'tokenizers',
'filelock',
'requests',
'tqdm',
'regex',
'sentencepiece',
'sacremoses',
'importlib_metadata',
'huggingface_hub',
]
@add_start_docstrings(AutoConfig.__doc__)
def config(*args, **kwargs):
    return AutoConfig.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoTokenizer.__doc__)
def tokenizer(*args, **kwargs):
    return AutoTokenizer.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModel.__doc__)
def model(*args, **kwargs):
    return AutoModel.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForCausalLM.__doc__)
def modelForCausalLM(*args, **kwargs):
    return AutoModelForCausalLM.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForMaskedLM.__doc__)
def modelForMaskedLM(*args, **kwargs):
    return AutoModelForMaskedLM.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForSequenceClassification.__doc__)
def modelForSequenceClassification(*args, **kwargs):
    return AutoModelForSequenceClassification.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForQuestionAnswering.__doc__)
def modelForQuestionAnswering(*args, **kwargs):
    return AutoModelForQuestionAnswering.from_pretrained(*args, **kwargs)
| 367 |
from .glue import GlueDataset, GlueDataTrainingArguments
from .language_modeling import (
LineByLineTextDataset,
LineByLineWithRefDataset,
LineByLineWithSOPTextDataset,
TextDataset,
TextDatasetForNextSentencePrediction,
)
from .squad import SquadDataset, SquadDataTrainingArguments
| 122 | 0 |
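The hub entry points above all borrow the docstring of the class they wrap. A minimal sketch of that docstring-forwarding idea (a simplified stand-in, not the library's actual decorator):

def add_start_docstrings_sketch(*docstr):
    # Prepend shared documentation to whatever the function already documents.
    def wrapper(fn):
        fn.__doc__ = "".join(docstr) + (fn.__doc__ or "")
        return fn
    return wrapper

@add_start_docstrings_sketch("Shared usage notes for every loader.\n")
def load_model(*args, **kwargs):
    """Model-specific notes."""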
'''simple docstring'''
from timeit import timeit
def get_set_bits_count_using_brian_kernighans_algorithm(number: int) -> int:
    """Counts set bits by repeatedly clearing the lowest set bit."""
    if number < 0:
        raise ValueError("the value of input must not be negative")
    result = 0
    while number:
        number &= number - 1
        result += 1
    return result


def get_set_bits_count_using_modulo_operator(number: int) -> int:
    """Counts set bits by inspecting the least significant bit."""
    if number < 0:
        raise ValueError("the value of input must not be negative")
    result = 0
    while number:
        if number % 2 == 1:
            result += 1
        number >>= 1
    return result


def benchmark() -> None:
    """Times both implementations on a few sample inputs."""

    def do_benchmark(number: int) -> None:
        setup = "import __main__ as z"
        print(f"Benchmark when {number = }:")
        print(f"{get_set_bits_count_using_modulo_operator(number) = }")
        timing = timeit(f"z.get_set_bits_count_using_modulo_operator({number})", setup=setup)
        print(f"timeit() runs in {timing} seconds")
        print(f"{get_set_bits_count_using_brian_kernighans_algorithm(number) = }")
        timing = timeit(
            f"z.get_set_bits_count_using_brian_kernighans_algorithm({number})", setup=setup
        )
        print(f"timeit() runs in {timing} seconds")

    for number in (25, 37, 58, 0):
        do_benchmark(number)
        print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 28 |
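A quick cross-check of the two popcounts against Python's own binary representation, assuming the function names fixed above:

for n in (0, 1, 25, 37, 58, 2**31 - 1):
    assert get_set_bits_count_using_brian_kernighans_algorithm(n) == bin(n).count("1")
    assert get_set_bits_count_using_modulo_operator(n) == bin(n).count("1")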
'''simple docstring'''
import math
class Graph:
    def __init__(self, n=0):  # a graph with Node 0,1,...,N-1
        self.n = n
        self.w = [
            [math.inf for j in range(0, n)] for i in range(0, n)
        ]  # adjacency matrix for weight
        self.dp = [
            [math.inf for j in range(0, n)] for i in range(0, n)
        ]  # dp[i][j] stores minimum distance from i to j

    def add_edge(self, u, v, w):
        self.dp[u][v] = w

    def floyd_warshall(self):
        for k in range(0, self.n):
            for i in range(0, self.n):
                for j in range(0, self.n):
                    self.dp[i][j] = min(self.dp[i][j], self.dp[i][k] + self.dp[k][j])

    def show_min(self, u, v):
        return self.dp[u][v]
if __name__ == "__main__":
_lowerCamelCase : List[str] = Graph(5)
graph.add_edge(0, 2, 9)
graph.add_edge(0, 4, 10)
graph.add_edge(1, 3, 5)
graph.add_edge(2, 3, 7)
graph.add_edge(3, 0, 10)
graph.add_edge(3, 1, 2)
graph.add_edge(3, 2, 1)
graph.add_edge(3, 4, 6)
graph.add_edge(4, 1, 3)
graph.add_edge(4, 2, 4)
graph.add_edge(4, 3, 9)
graph.floyd_warshall()
graph.show_min(1, 4)
graph.show_min(0, 3)
| 28 | 1 |
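The class above returns only shortest distances. A sketch of the standard extension that also records a successor matrix so the paths themselves can be rebuilt:

import math

def floyd_warshall_with_paths(dist):
    # dist: n x n matrix of edge weights, math.inf where no edge exists.
    n = len(dist)
    nxt = [[j if dist[i][j] != math.inf else None for j in range(n)] for i in range(n)]
    for k in range(n):
        for i in range(n):
            for j in range(n):
                if dist[i][k] + dist[k][j] < dist[i][j]:
                    dist[i][j] = dist[i][k] + dist[k][j]
                    nxt[i][j] = nxt[i][k]
    return dist, nxt

def rebuild_path(nxt, u, v):
    # Walk the successor matrix from u to v.
    if nxt[u][v] is None:
        return []
    path = [u]
    while u != v:
        u = nxt[u][v]
        path.append(u)
    return path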
from random import shuffle
import tensorflow as tf
from numpy import array
def TFKMeansCluster(vectors, noofclusters):
    """K-means over `vectors` with `noofclusters` clusters, using the
    TensorFlow 1.x graph/session API (removed in TensorFlow 2.x)."""
    noofclusters = int(noofclusters)
    assert noofclusters < len(vectors)
    # Find out the dimensionality
    dim = len(vectors[0])
    # Will help select random centroids from among the available vectors
    vector_indices = list(range(len(vectors)))
    shuffle(vector_indices)
    # GRAPH OF COMPUTATION
    # We initialize a new graph and set it as the default during each run
    # of this algorithm. This ensures that as this function is called
    # multiple times, the default graph doesn't keep getting crowded with
    # unused ops and Variables from previous function calls.
    graph = tf.Graph()
    with graph.as_default():
        # SESSION OF COMPUTATION
        sess = tf.Session()
        ##CONSTRUCTING THE ELEMENTS OF COMPUTATION
        ##First lets ensure we have a Variable vector for each centroid,
        ##initialized to one of the vectors from the available data points
        centroids = [
            tf.Variable(vectors[vector_indices[i]]) for i in range(noofclusters)
        ]
        ##These nodes will assign the centroid Variables the appropriate
        ##values
        centroid_value = tf.placeholder("float64", [dim])
        cent_assigns = []
        for centroid in centroids:
            cent_assigns.append(tf.assign(centroid, centroid_value))
        ##Variables for cluster assignments of individual vectors(initialized
        ##to 0 at first)
        assignments = [tf.Variable(0) for i in range(len(vectors))]
        ##These nodes will assign an assignment Variable the appropriate
        ##value
        assignment_value = tf.placeholder("int32")
        cluster_assigns = []
        for assignment in assignments:
            cluster_assigns.append(tf.assign(assignment, assignment_value))
        ##Now lets construct the node that will compute the mean
        # The placeholder for the input
        mean_input = tf.placeholder("float", [None, dim])
        # The Node/op takes the input and computes a mean along the 0th
        # dimension, i.e. the list of input vectors
        mean_op = tf.reduce_mean(mean_input, 0)
        ##Node for computing Euclidean distances
        # Placeholders for input
        v1 = tf.placeholder("float", [dim])
        v2 = tf.placeholder("float", [dim])
        euclid_dist = tf.sqrt(tf.reduce_sum(tf.pow(tf.subtract(v1, v2), 2)))
        ##This node will figure out which cluster to assign a vector to,
        ##based on Euclidean distances of the vector from the centroids.
        # Placeholder for input
        centroid_distances = tf.placeholder("float", [noofclusters])
        cluster_assignment = tf.argmin(centroid_distances, 0)
        ##INITIALIZING STATE VARIABLES
        ##This will help initialization of all Variables defined with respect
        ##to the graph. The Variable-initializer should be defined after
        ##all the Variables have been constructed, so that each of them
        ##will be included in the initialization.
        init_op = tf.global_variables_initializer()
        # Initialize all variables
        sess.run(init_op)
        ##CLUSTERING ITERATIONS
        # Now perform the Expectation-Maximization steps of K-Means clustering
        # iterations. To keep things simple, we will only do a set number of
        # iterations, instead of using a Stopping Criterion.
        noofiterations = 100
        for _ in range(noofiterations):
            ##EXPECTATION STEP
            ##Based on the centroid locations till last iteration, compute
            ##the _expected_ centroid assignments.
            # Iterate over each vector
            for vector_n in range(len(vectors)):
                vect = vectors[vector_n]
                # Compute Euclidean distance between this vector and each
                # centroid. Remember that this list cannot be named
                #'centroid_distances', since that is the input to the
                # cluster assignment node.
                distances = [
                    sess.run(euclid_dist, feed_dict={v1: vect, v2: sess.run(centroid)})
                    for centroid in centroids
                ]
                # Now use the cluster assignment node, with the distances
                # as the input
                assignment = sess.run(
                    cluster_assignment, feed_dict={centroid_distances: distances})
                # Now assign the value to the appropriate state variable
                sess.run(
                    cluster_assigns[vector_n], feed_dict={assignment_value: assignment})
            ##MAXIMIZATION STEP
            # Based on the expected state computed from the Expectation Step,
            # compute the locations of the centroids so as to maximize the
            # overall objective of minimizing within-cluster Sum-of-Squares
            for cluster_n in range(noofclusters):
                # Collect all the vectors assigned to this cluster
                assigned_vects = [
                    vectors[i]
                    for i in range(len(vectors))
                    if sess.run(assignments[i]) == cluster_n
                ]
                # Compute new centroid location
                new_location = sess.run(
                    mean_op, feed_dict={mean_input: array(assigned_vects)})
                # Assign value to appropriate variable
                sess.run(
                    cent_assigns[cluster_n], feed_dict={centroid_value: new_location})
        # Return centroids and assignments
        centroids = sess.run(centroids)
        assignments = sess.run(assignments)
        return centroids, assignments
| 351 |
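The TF1 session machinery above is deprecated; the same expectation-maximization loop fits in a few lines of NumPy. A minimal sketch (no empty-cluster handling beyond a guard):

import numpy as np

def kmeans_np(vectors, k, iters=100, seed=0):
    rng = np.random.default_rng(seed)
    x = np.asarray(vectors, dtype=float)
    centroids = x[rng.choice(len(x), size=k, replace=False)]
    for _ in range(iters):
        # E-step: assign each point to its nearest centroid
        d = np.linalg.norm(x[:, None, :] - centroids[None, :, :], axis=-1)
        labels = d.argmin(axis=1)
        # M-step: move each centroid to the mean of its assigned points
        for c in range(k):
            if (labels == c).any():
                centroids[c] = x[labels == c].mean(axis=0)
    return centroids, labels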
def solution(limit: int = 28123) -> int:
    """Sum of all positive integers that cannot be written as the sum of two abundant numbers."""
    sum_divs = [1] * (limit + 1)
    for i in range(2, int(limit**0.5) + 1):
        sum_divs[i * i] += i
        for k in range(i + 1, limit // i + 1):
            sum_divs[k * i] += k + i
    abundants = set()
    res = 0
    for n in range(1, limit + 1):
        if sum_divs[n] > n:
            abundants.add(n)
        if not any((n - a in abundants) for a in abundants):
            res += n
    return res
if __name__ == "__main__":
print(solution())
| 177 | 0 |
import pprint
import requests
API_ENDPOINT_URL = "https://zenquotes.io/api"


def quote_of_the_day() -> list:
    return requests.get(API_ENDPOINT_URL + "/today").json()


def random_quotes() -> list:
    return requests.get(API_ENDPOINT_URL + "/random").json()


if __name__ == "__main__":
    response = random_quotes()
    pprint.pprint(response)
| 65 |
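A slightly more defensive variant of those helpers, bounding the wait and surfacing HTTP errors instead of silently JSON-decoding an error page:

import requests

def get_random_quote(timeout: float = 5.0) -> list:
    response = requests.get("https://zenquotes.io/api/random", timeout=timeout)
    response.raise_for_status()  # raise on 4xx/5xx instead of returning garbage
    return response.json()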
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/vit-mae-base": "https://huggingface.co/facebook/vit-mae-base/resolve/main/config.json",
    # See all ViT MAE models at https://huggingface.co/models?filter=vit-mae
}


class ViTMAEConfig(PretrainedConfig):
    model_type = "vit_mae"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=224,
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        decoder_num_attention_heads=16,
        decoder_hidden_size=512,
        decoder_num_hidden_layers=8,
        decoder_intermediate_size=2048,
        mask_ratio=0.75,
        norm_pix_loss=False,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.decoder_num_attention_heads = decoder_num_attention_heads
        self.decoder_hidden_size = decoder_hidden_size
        self.decoder_num_hidden_layers = decoder_num_hidden_layers
        self.decoder_intermediate_size = decoder_intermediate_size
        self.mask_ratio = mask_ratio
        self.norm_pix_loss = norm_pix_loss
| 138 | 0 |
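Back-of-the-envelope arithmetic on what mask_ratio implies, assuming the defaults above: a 224x224 image cut into 16x16 patches gives 196 patches, of which 75% are hidden from the encoder.

image_size, patch_size, mask_ratio = 224, 16, 0.75
num_patches = (image_size // patch_size) ** 2  # 14 * 14 = 196 patches
num_masked = int(mask_ratio * num_patches)     # 147 patches masked out
print(num_patches, num_masked)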
def prime_sieve_eratosthenes(num: int) -> list[int]:
    """Returns all primes up to and including num using the sieve of Eratosthenes."""
    if num <= 0:
        raise ValueError("Input must be a positive integer")
    primes = [True] * (num + 1)
    p = 2
    while p * p <= num:
        if primes[p]:
            for i in range(p * p, num + 1, p):
                primes[i] = False
        p += 1
    return [prime for prime in range(2, num + 1) if primes[prime]]


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    user_num = int(input("Enter a positive integer: ").strip())
    print(prime_sieve_eratosthenes(user_num))
| 365 |
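Example run of the sieve above, without the input prompt:

print(prime_sieve_eratosthenes(30))
# [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]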
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from transformers import TvltFeatureExtractor, is_datasets_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_torch_available():
import torch
if is_datasets_available():
from datasets import load_dataset
global_rng = random.Random()


def floats_list(shape, scale=1.0, rng=None, name=None):
    """Creates a random float32 tensor as a nested Python list."""
    if rng is None:
        rng = global_rng
    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)
    return values
class TvltFeatureExtractionTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        min_seq_length=400,
        max_seq_length=2000,
        spectrogram_length=2048,
        feature_size=128,
        num_audio_channels=1,
        hop_length=512,
        chunk_length=30,
        sampling_rate=44100,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.spectrogram_length = spectrogram_length
        self.feature_size = feature_size
        self.num_audio_channels = num_audio_channels
        self.hop_length = hop_length
        self.chunk_length = chunk_length
        self.sampling_rate = sampling_rate

    def prepare_feat_extract_dict(self):
        return {
            "spectrogram_length": self.spectrogram_length,
            "feature_size": self.feature_size,
            "num_audio_channels": self.num_audio_channels,
            "hop_length": self.hop_length,
            "chunk_length": self.chunk_length,
            "sampling_rate": self.sampling_rate,
        }

    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = [floats_list((self.max_seq_length, self.feature_size)) for _ in range(self.batch_size)]
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                floats_list((x, self.feature_size))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]
        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]
        return speech_inputs
return speech_inputs
@require_torch
@require_torchaudio
class lowercase_ ( __lowercase , unittest.TestCase ):
UpperCamelCase_ : Optional[int] = TvltFeatureExtractor
def UpperCamelCase_ ( self : Dict ) -> List[str]:
_snake_case = TvltFeatureExtractionTester(self )
def UpperCamelCase_ ( self : int ) -> Optional[int]:
_snake_case = self.feature_extraction_class(**self.feat_extract_dict )
self.assertTrue(hasattr(A__ , '''spectrogram_length''' ) )
self.assertTrue(hasattr(A__ , '''feature_size''' ) )
self.assertTrue(hasattr(A__ , '''num_audio_channels''' ) )
self.assertTrue(hasattr(A__ , '''hop_length''' ) )
self.assertTrue(hasattr(A__ , '''chunk_length''' ) )
self.assertTrue(hasattr(A__ , '''sampling_rate''' ) )
def UpperCamelCase_ ( self : Any ) -> Union[str, Any]:
_snake_case = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
_snake_case = feat_extract_first.save_pretrained(A__ )[0]
check_json_file_has_correct_format(A__ )
_snake_case = self.feature_extraction_class.from_pretrained(A__ )
_snake_case = feat_extract_first.to_dict()
_snake_case = feat_extract_second.to_dict()
_snake_case = dict_first.pop('''mel_filters''' )
_snake_case = dict_second.pop('''mel_filters''' )
self.assertTrue(np.allclose(A__ , A__ ) )
self.assertEqual(A__ , A__ )
def UpperCamelCase_ ( self : int ) -> Union[str, Any]:
_snake_case = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
_snake_case = os.path.join(A__ , '''feat_extract.json''' )
feat_extract_first.to_json_file(A__ )
_snake_case = self.feature_extraction_class.from_json_file(A__ )
_snake_case = feat_extract_first.to_dict()
_snake_case = feat_extract_second.to_dict()
_snake_case = dict_first.pop('''mel_filters''' )
_snake_case = dict_second.pop('''mel_filters''' )
self.assertTrue(np.allclose(A__ , A__ ) )
self.assertEqual(A__ , A__ )
def UpperCamelCase_ ( self : Union[str, Any] ) -> Any:
# Initialize feature_extractor
_snake_case = self.feature_extraction_class(**self.feat_extract_dict )
# create three inputs of length 800, 1000, and 1200
_snake_case = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
_snake_case = [np.asarray(A__ ) for speech_input in speech_inputs]
# Test not batched input
_snake_case = feature_extractor(np_speech_inputs[0] , return_tensors='''np''' , sampling_rate=44100 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test batched
_snake_case = feature_extractor(A__ , return_tensors='''np''' , sampling_rate=44100 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test audio masking
_snake_case = feature_extractor(
A__ , return_tensors='''np''' , sampling_rate=44100 , mask_audio=A__ ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test 2-D numpy arrays are batched.
_snake_case = [floats_list((1, x) )[0] for x in (800, 800, 800)]
_snake_case = np.asarray(A__ )
_snake_case = feature_extractor(A__ , return_tensors='''np''' , sampling_rate=44100 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
    def _load_datasamples(self, num_samples):
        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"]
        return [x["array"] for x in speech_samples]

    def test_integration(self):
        input_speech = self._load_datasamples(1)
        feature_extractor = TvltFeatureExtractor()
        audio_values = feature_extractor(input_speech, return_tensors="pt").audio_values
        self.assertEqual(audio_values.shape, (1, 1, 192, 128))
        expected_slice = torch.tensor([[-0.3032, -0.2708], [-0.4434, -0.4007]])
        self.assertTrue(torch.allclose(audio_values[0, 0, :2, :2], expected_slice, atol=1e-4))
| 278 | 0 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
_A = logging.get_logger(__name__)
_A = '''▁'''
_A = {'''vocab_file''': '''sentencepiece.bpe.model''', '''monolingual_vocab_file''': '''dict.txt'''}
_A = {
'''vocab_file''': {
'''vinai/bartpho-syllable''': '''https://huggingface.co/vinai/bartpho-syllable/resolve/main/sentencepiece.bpe.model''',
},
'''monolingual_vocab_file''': {
'''vinai/bartpho-syllable''': '''https://huggingface.co/vinai/bartpho-syllable/resolve/main/dict.txt''',
},
}
_A = {'''vinai/bartpho-syllable''': 1_024}
class lowercase_ ( __SCREAMING_SNAKE_CASE ):
A__ : List[Any] = VOCAB_FILES_NAMES
A__ : Optional[int] = PRETRAINED_VOCAB_FILES_MAP
A__ : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A__ : Union[str, Any] = ["""input_ids""", """attention_mask"""]
def __init__( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase="<s>" , __UpperCamelCase="</s>" , __UpperCamelCase="</s>" , __UpperCamelCase="<s>" , __UpperCamelCase="<unk>" , __UpperCamelCase="<pad>" , __UpperCamelCase="<mask>" , __UpperCamelCase = None , **__UpperCamelCase , ):
"""simple docstring"""
UpperCamelCase_ = AddedToken(__UpperCamelCase , lstrip=__UpperCamelCase , rstrip=__UpperCamelCase ) if isinstance(__UpperCamelCase , __UpperCamelCase ) else mask_token
UpperCamelCase_ = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=__UpperCamelCase , eos_token=__UpperCamelCase , unk_token=__UpperCamelCase , sep_token=__UpperCamelCase , cls_token=__UpperCamelCase , pad_token=__UpperCamelCase , mask_token=__UpperCamelCase , sp_model_kwargs=self.sp_model_kwargs , **__UpperCamelCase , )
UpperCamelCase_ = vocab_file
UpperCamelCase_ = monolingual_vocab_file
UpperCamelCase_ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(__UpperCamelCase ) )
# Load the reduced vocab
# Keep order of special tokens for backward compatibility
UpperCamelCase_ = {}
UpperCamelCase_ = 0
for token in [bos_token, pad_token, eos_token, unk_token, sep_token, cls_token]:
if str(__UpperCamelCase ) not in self.fairseq_tokens_to_ids:
UpperCamelCase_ = cnt
cnt += 1
with open(__UpperCamelCase , """r""" , encoding="""utf-8""" ) as f:
for line in f.readlines():
UpperCamelCase_ = line.strip().split()[0]
UpperCamelCase_ = len(self.fairseq_tokens_to_ids )
if str(__UpperCamelCase ) not in self.fairseq_tokens_to_ids:
UpperCamelCase_ = len(self.fairseq_tokens_to_ids )
UpperCamelCase_ = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def __getstate__( self ):
"""simple docstring"""
UpperCamelCase_ = self.__dict__.copy()
UpperCamelCase_ = None
UpperCamelCase_ = self.sp_model.serialized_model_proto()
return state
def __setstate__( self , __UpperCamelCase ):
"""simple docstring"""
UpperCamelCase_ = d
# for backward compatibility
if not hasattr(self , """sp_model_kwargs""" ):
UpperCamelCase_ = {}
UpperCamelCase_ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
def lowerCamelCase_ ( self , __UpperCamelCase , __UpperCamelCase = None ):
"""simple docstring"""
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
UpperCamelCase_ = [self.cls_token_id]
UpperCamelCase_ = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def lowerCamelCase_ ( self , __UpperCamelCase , __UpperCamelCase = None , __UpperCamelCase = False ):
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__UpperCamelCase , token_ids_a=__UpperCamelCase , already_has_special_tokens=__UpperCamelCase )
if token_ids_a is None:
return [1] + ([0] * len(__UpperCamelCase )) + [1]
return [1] + ([0] * len(__UpperCamelCase )) + [1, 1] + ([0] * len(__UpperCamelCase )) + [1]
def lowerCamelCase_ ( self , __UpperCamelCase , __UpperCamelCase = None ):
"""simple docstring"""
UpperCamelCase_ = [self.sep_token_id]
UpperCamelCase_ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def lowerCamelCase_ ( self ):
"""simple docstring"""
return len(self.fairseq_ids_to_tokens )
def lowerCamelCase_ ( self ):
"""simple docstring"""
UpperCamelCase_ = {self.convert_ids_to_tokens(__UpperCamelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def lowerCamelCase_ ( self , __UpperCamelCase ):
"""simple docstring"""
return self.sp_model.encode(__UpperCamelCase , out_type=__UpperCamelCase )
def lowerCamelCase_ ( self , __UpperCamelCase ):
"""simple docstring"""
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
else:
return self.unk_token_id
def lowerCamelCase_ ( self , __UpperCamelCase ):
"""simple docstring"""
return self.fairseq_ids_to_tokens[index]
def lowerCamelCase_ ( self , __UpperCamelCase ):
"""simple docstring"""
UpperCamelCase_ = """""".join(__UpperCamelCase ).replace(__UpperCamelCase , """ """ ).strip()
return out_string
def lowerCamelCase_ ( self , __UpperCamelCase , __UpperCamelCase = None ):
"""simple docstring"""
if not os.path.isdir(__UpperCamelCase ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
UpperCamelCase_ = os.path.join(
__UpperCamelCase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
UpperCamelCase_ = os.path.join(
__UpperCamelCase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""monolingual_vocab_file"""] , )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__UpperCamelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , __UpperCamelCase )
elif not os.path.isfile(self.vocab_file ):
with open(__UpperCamelCase , """wb""" ) as fi:
UpperCamelCase_ = self.sp_model.serialized_model_proto()
fi.write(__UpperCamelCase )
if os.path.abspath(self.monolingual_vocab_file ) != os.path.abspath(
__UpperCamelCase ) and os.path.isfile(self.monolingual_vocab_file ):
copyfile(self.monolingual_vocab_file , __UpperCamelCase )
elif not os.path.isfile(self.monolingual_vocab_file ):
with open(__UpperCamelCase , """w""" , encoding="""utf-8""" ) as fp:
for token in self.fairseq_tokens_to_ids:
if token not in self.all_special_tokens:
fp.write(f'''{str(__UpperCamelCase )} \n''' )
return out_vocab_file, out_monolingual_vocab_file
| 122 |
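A standalone sketch of the reduced-vocabulary trick the tokenizer above relies on: keep only the tokens listed in a fairseq-style dict.txt (one "token count" pair per line), after reserving the first slots for special tokens. The file path and special tokens here are placeholders:

fairseq_tokens_to_ids = {}
for tok in ["<s>", "<pad>", "</s>", "<unk>"]:
    if tok not in fairseq_tokens_to_ids:
        fairseq_tokens_to_ids[tok] = len(fairseq_tokens_to_ids)
with open("dict.txt", encoding="utf-8") as f:  # placeholder path
    for line in f:
        token = line.strip().split()[0]
        if token not in fairseq_tokens_to_ids:
            fairseq_tokens_to_ids[token] = len(fairseq_tokens_to_ids)
fairseq_ids_to_tokens = {i: t for t, i in fairseq_tokens_to_ids.items()}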
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, Encoder, VectorQuantizer
@dataclass
class VQEncoderOutput(BaseOutput):
    latents: torch.FloatTensor


class VQModel(ModelMixin, ConfigMixin):
    @register_to_config
    def __init__(
        self,
        in_channels=3,
        out_channels=3,
        down_block_types=("DownEncoderBlock2D",),
        up_block_types=("UpDecoderBlock2D",),
        block_out_channels=(64,),
        layers_per_block=1,
        act_fn="silu",
        latent_channels=3,
        sample_size=32,
        num_vq_embeddings=256,
        norm_num_groups=32,
        vq_embed_dim=None,
        scaling_factor=0.18215,
        norm_type="group",
    ):
        super().__init__()

        # pass init params to Encoder
        self.encoder = Encoder(
            in_channels=in_channels,
            out_channels=latent_channels,
            down_block_types=down_block_types,
            block_out_channels=block_out_channels,
            layers_per_block=layers_per_block,
            act_fn=act_fn,
            norm_num_groups=norm_num_groups,
            double_z=False,
        )

        vq_embed_dim = vq_embed_dim if vq_embed_dim is not None else latent_channels
        self.quant_conv = nn.Conv2d(latent_channels, vq_embed_dim, 1)
        self.quantize = VectorQuantizer(num_vq_embeddings, vq_embed_dim, beta=0.25, remap=None, sane_index_shape=False)
        self.post_quant_conv = nn.Conv2d(vq_embed_dim, latent_channels, 1)

        # pass init params to Decoder
        self.decoder = Decoder(
            in_channels=latent_channels,
            out_channels=out_channels,
            up_block_types=up_block_types,
            block_out_channels=block_out_channels,
            layers_per_block=layers_per_block,
            act_fn=act_fn,
            norm_num_groups=norm_num_groups,
            norm_type=norm_type,
        )

    @apply_forward_hook
    def encode(self, x, return_dict=True):
        h = self.encoder(x)
        h = self.quant_conv(h)
        if not return_dict:
            return (h,)
        return VQEncoderOutput(latents=h)

    @apply_forward_hook
    def decode(self, h, force_not_quantize=False, return_dict=True):
        # also go through the quantization layer unless explicitly disabled
        if not force_not_quantize:
            quant, _, _ = self.quantize(h)
        else:
            quant = h
        quant = self.post_quant_conv(quant)
        dec = self.decoder(quant, quant if self.config.norm_type == "spatial" else None)
        if not return_dict:
            return (dec,)
        return DecoderOutput(sample=dec)

    def forward(self, sample, return_dict=True):
        x = sample
        h = self.encode(x).latents
        dec = self.decode(h).sample
        if not return_dict:
            return (dec,)
        return DecoderOutput(sample=dec)
| 122 | 1 |
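The quantizer used above pairs a nearest-neighbour codebook lookup with a commitment loss and a straight-through gradient. A sketch of just the lookup core:

import torch

def nearest_codebook_entry(z: torch.Tensor, codebook: torch.Tensor) -> torch.Tensor:
    # z: (n, d) latents, codebook: (k, d) embedding vectors.
    distances = torch.cdist(z, codebook)  # (n, k) pairwise L2 distances
    indices = distances.argmin(dim=1)     # index of the closest code per latent
    return codebook[indices]              # quantized latents, same shape as z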
"""simple docstring"""
import argparse
import os
import re
import tensorflow as tf
import torch
from transformers import BertConfig, BertModel
from transformers.utils import logging
logging.set_verbosity_info()
UpperCAmelCase =logging.get_logger(__name__)
def _A ( _a : Tuple , _a : Any , _a : int ):
"""simple docstring"""
A = os.path.abspath(_a )
logger.info(f'Converting TensorFlow checkpoint from {tf_path}' )
# Load weights from TF model
A = tf.train.list_variables(_a )
A = []
A = []
A = []
for full_name, shape in init_vars:
# logger.info(f"Loading TF weight {name} with shape {shape}")
A = full_name.split("""/""" )
if full_name == "_CHECKPOINTABLE_OBJECT_GRAPH" or name[0] in ["global_step", "save_counter"]:
logger.info(f'Skipping non-model layer {full_name}' )
continue
if "optimizer" in full_name:
logger.info(f'Skipping optimization layer {full_name}' )
continue
if name[0] == "model":
# ignore initial 'model'
A = name[1:]
# figure out how many levels deep the name is
A = 0
for _name in name:
if _name.startswith("""layer_with_weights""" ):
depth += 1
else:
break
layer_depth.append(_a )
# read data
A = tf.train.load_variable(_a , _a )
names.append("""/""".join(_a ) )
arrays.append(_a )
logger.info(f'Read a total of {len(_a ):,} layers' )
# Sanity check
if len(set(_a ) ) != 1:
raise ValueError(f'Found layer names with different depths (layer depth {list(set(_a ) )})' )
A = list(set(_a ) )[0]
if layer_depth != 1:
raise ValueError(
"""The model contains more than just the embedding/encoder layers. This script does not handle MLM/NSP"""
""" heads.""" )
# convert layers
logger.info("""Converting weights...""" )
for full_name, array in zip(_a , _a ):
A = full_name.split("""/""" )
A = model
A = []
for i, m_name in enumerate(_a ):
if m_name == ".ATTRIBUTES":
# variable names end with .ATTRIBUTES/VARIABLE_VALUE
break
if m_name.startswith("""layer_with_weights""" ):
A = int(m_name.split("""-""" )[-1] )
if layer_num <= 2:
# embedding layers
# layer_num 0: word_embeddings
# layer_num 1: position_embeddings
# layer_num 2: token_type_embeddings
continue
elif layer_num == 3:
# embedding LayerNorm
trace.extend(["""embeddings""", """LayerNorm"""] )
A = getattr(_a , """embeddings""" )
A = getattr(_a , """LayerNorm""" )
elif layer_num > 3 and layer_num < config.num_hidden_layers + 4:
# encoder layers
trace.extend(["""encoder""", """layer""", str(layer_num - 4 )] )
A = getattr(_a , """encoder""" )
A = getattr(_a , """layer""" )
A = pointer[layer_num - 4]
elif layer_num == config.num_hidden_layers + 4:
# pooler layer
trace.extend(["""pooler""", """dense"""] )
A = getattr(_a , """pooler""" )
A = getattr(_a , """dense""" )
elif m_name == "embeddings":
trace.append("""embeddings""" )
A = getattr(_a , """embeddings""" )
if layer_num == 0:
trace.append("""word_embeddings""" )
A = getattr(_a , """word_embeddings""" )
elif layer_num == 1:
trace.append("""position_embeddings""" )
A = getattr(_a , """position_embeddings""" )
elif layer_num == 2:
trace.append("""token_type_embeddings""" )
A = getattr(_a , """token_type_embeddings""" )
else:
raise ValueError(f'Unknown embedding layer with name {full_name}' )
trace.append("""weight""" )
A = getattr(_a , """weight""" )
elif m_name == "_attention_layer":
# self-attention layer
trace.extend(["""attention""", """self"""] )
A = getattr(_a , """attention""" )
A = getattr(_a , """self""" )
elif m_name == "_attention_layer_norm":
# output attention norm
trace.extend(["""attention""", """output""", """LayerNorm"""] )
A = getattr(_a , """attention""" )
A = getattr(_a , """output""" )
A = getattr(_a , """LayerNorm""" )
elif m_name == "_attention_output_dense":
# output attention dense
trace.extend(["""attention""", """output""", """dense"""] )
A = getattr(_a , """attention""" )
A = getattr(_a , """output""" )
A = getattr(_a , """dense""" )
elif m_name == "_output_dense":
# output dense
trace.extend(["""output""", """dense"""] )
A = getattr(_a , """output""" )
A = getattr(_a , """dense""" )
elif m_name == "_output_layer_norm":
# output dense
trace.extend(["""output""", """LayerNorm"""] )
A = getattr(_a , """output""" )
A = getattr(_a , """LayerNorm""" )
elif m_name == "_key_dense":
# attention key
trace.append("""key""" )
A = getattr(_a , """key""" )
elif m_name == "_query_dense":
# attention query
trace.append("""query""" )
A = getattr(_a , """query""" )
elif m_name == "_value_dense":
# attention value
trace.append("""value""" )
A = getattr(_a , """value""" )
elif m_name == "_intermediate_dense":
# attention intermediate dense
trace.extend(["""intermediate""", """dense"""] )
A = getattr(_a , """intermediate""" )
A = getattr(_a , """dense""" )
elif m_name == "_output_layer_norm":
# output layer norm
trace.append("""output""" )
A = getattr(_a , """output""" )
# weights & biases
elif m_name in ["bias", "beta"]:
trace.append("""bias""" )
A = getattr(_a , """bias""" )
elif m_name in ["kernel", "gamma"]:
trace.append("""weight""" )
A = getattr(_a , """weight""" )
else:
logger.warning(f'Ignored {m_name}' )
# for certain layers reshape is necessary
A = """.""".join(_a )
if re.match(r"""(\S+)\.attention\.self\.(key|value|query)\.(bias|weight)""" , _a ) or re.match(
r"""(\S+)\.attention\.output\.dense\.weight""" , _a ):
A = array.reshape(pointer.data.shape )
if "kernel" in full_name:
A = array.transpose()
if pointer.shape == array.shape:
A = torch.from_numpy(_a )
else:
raise ValueError(
f'Shape mismatch in layer {full_name}: Model expects shape {pointer.shape} but layer contains shape:'
f' {array.shape}' )
logger.info(f'Successfully set variable {full_name} to PyTorch layer {trace}' )
return model
def _A ( _a : Any , _a : int , _a : str ):
"""simple docstring"""
logger.info(f'Loading model based on config from {config_path}...' )
A = BertConfig.from_json_file(_a )
A = BertModel(_a )
# Load weights from checkpoint
logger.info(f'Loading weights from checkpoint {tf_checkpoint_path}...' )
load_tfa_weights_in_bert(_a , _a , _a )
# Save pytorch-model
logger.info(f'Saving PyTorch model to {pytorch_dump_path}...' )
torch.save(model.state_dict() , _a )
if __name__ == "__main__":
UpperCAmelCase =argparse.ArgumentParser()
parser.add_argument(
"--tf_checkpoint_path", type=str, required=True, help="Path to the TensorFlow 2.x checkpoint path."
)
parser.add_argument(
"--bert_config_file",
type=str,
required=True,
help="The config json file corresponding to the BERT model. This specifies the model architecture.",
)
parser.add_argument(
"--pytorch_dump_path",
type=str,
required=True,
help="Path to the output PyTorch model (must include filename).",
)
UpperCAmelCase =parser.parse_args()
convert_tfa_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
| 77 |
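A tiny illustration of why the conversion script above transposes "kernel" weights: TensorFlow dense kernels are stored (in_features, out_features), while torch.nn.Linear stores (out_features, in_features).

import numpy as np

tf_kernel = np.zeros((768, 3072))  # TF layout: (in, out)
pt_weight = tf_kernel.transpose()  # (3072, 768), the PyTorch layout
print(pt_weight.shape)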
"""simple docstring"""
from __future__ import annotations
import math
from collections.abc import Callable
def line_length(
    fnc: Callable[[int | float], int | float],
    x_start: int | float,
    x_end: int | float,
    steps: int = 100,
) -> float:
    """Approximates the arc length of fnc between x_start and x_end."""
    x1 = x_start
    fx1 = fnc(x_start)
    length = 0.0
    for _ in range(steps):
        # Approximates curve as a sequence of linear lines and sums their length
        x2 = (x_end - x_start) / steps + x1
        fx2 = fnc(x2)
        length += math.hypot(x2 - x1, fx2 - fx1)
        # Increment step
        x1 = x2
        fx1 = fx2
    return length


if __name__ == "__main__":

    def f(x):
        return math.sin(10 * x)
print("f(x) = sin(10 * x)")
print("The length of the curve from x = -10 to x = 10 is:")
UpperCAmelCase =10
while i <= 100_000:
print(f"""With {i} steps: {line_length(f, -10, 10, i)}""")
i *= 10
| 77 | 1 |
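A quick sanity check of line_length as defined above: a straight line y = x over [0, 1] has length sqrt(2), and the piecewise-linear approximation is exact for a line.

import math

approx = line_length(lambda x: x, 0, 1, 100)
assert math.isclose(approx, math.sqrt(2), rel_tol=1e-9)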
def euclidean_distance_sqr(point1, point2):
    return (point1[0] - point2[0]) ** 2 + (point1[1] - point2[1]) ** 2


def column_based_sort(array, column=0):
    return sorted(array, key=lambda point: point[column])


def dis_between_closest_pair(points, points_counts, min_dis=float("inf")):
    # brute force over all pairs
    for i in range(points_counts - 1):
        for j in range(i + 1, points_counts):
            current_dis = euclidean_distance_sqr(points[i], points[j])
            if current_dis < min_dis:
                min_dis = current_dis
    return min_dis


def dis_between_closest_in_strip(points, points_counts, min_dis=float("inf")):
    # within the strip, only up to six neighbours need checking per point
    for i in range(min(6, points_counts - 1), points_counts):
        for j in range(max(0, i - 6), i):
            current_dis = euclidean_distance_sqr(points[i], points[j])
            if current_dis < min_dis:
                min_dis = current_dis
    return min_dis


def closest_pair_of_points_sqr(points_sorted_on_x, points_sorted_on_y, points_counts):
    # base case
    if points_counts <= 3:
        return dis_between_closest_pair(points_sorted_on_x, points_counts)

    # recursion
    mid = points_counts // 2
    closest_in_left = closest_pair_of_points_sqr(
        points_sorted_on_x, points_sorted_on_y[:mid], mid
    )
    closest_in_right = closest_pair_of_points_sqr(
        points_sorted_on_x, points_sorted_on_y[mid:], points_counts - mid
    )
    closest_pair_dis = min(closest_in_left, closest_in_right)

    cross_strip = []
    for point in points_sorted_on_x:
        if abs(point[0] - points_sorted_on_x[mid][0]) < closest_pair_dis:
            cross_strip.append(point)
    closest_in_strip = dis_between_closest_in_strip(
        cross_strip, len(cross_strip), closest_pair_dis
    )
    return min(closest_pair_dis, closest_in_strip)


def closest_pair_of_points(points, points_counts):
    points_sorted_on_x = column_based_sort(points, column=0)
    points_sorted_on_y = column_based_sort(points, column=1)
    return (
        closest_pair_of_points_sqr(
            points_sorted_on_x, points_sorted_on_y, points_counts
        )
    ) ** 0.5
if __name__ == "__main__":
_snake_case = [(2, 3), (12, 30), (40, 50), (5, 1), (12, 10), (3, 4)]
print("Distance:", closest_pair_of_points(points, len(points)))
| 36 |
from __future__ import annotations
from itertools import permutations
from random import randint
from timeit import repeat
def make_dataset() -> tuple[list[int], int]:
    arr = [randint(-1000, 1000) for i in range(10)]
    r = randint(-5000, 5000)
    return (arr, r)


dataset = make_dataset()


def triplet_sum1(arr: list[int], target: int) -> tuple[int, ...]:
    for triplet in permutations(arr, 3):
        if sum(triplet) == target:
            return tuple(sorted(triplet))
    return (0, 0, 0)


def triplet_sum2(arr: list[int], target: int) -> tuple[int, int, int]:
    arr.sort()
    n = len(arr)
    for i in range(n - 1):
        left, right = i + 1, n - 1
        while left < right:
            if arr[i] + arr[left] + arr[right] == target:
                return (arr[i], arr[left], arr[right])
            elif arr[i] + arr[left] + arr[right] < target:
                left += 1
            elif arr[i] + arr[left] + arr[right] > target:
                right -= 1
    return (0, 0, 0)


def solution_times() -> tuple[float, float]:
    setup_code = """
from __main__ import dataset, triplet_sum1, triplet_sum2
"""
    test_case_1 = """
triplet_sum1(*dataset)
"""
    test_case_2 = """
triplet_sum2(*dataset)
"""
    times1 = repeat(setup=setup_code, stmt=test_case_1, repeat=5, number=10000)
    times2 = repeat(setup=setup_code, stmt=test_case_2, repeat=5, number=10000)
    return (min(times1), min(times2))
if __name__ == "__main__":
from doctest import testmod
testmod()
    times = solution_times()
print(F'''The time for naive implementation is {times[0]}.''')
print(F'''The time for optimized implementation is {times[1]}.''')
| 122 | 0 |
'''simple docstring'''
from math import cos, sin, sqrt, tau
from audio_filters.iir_filter import IIRFilter
def make_lowpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Create a second-order low-pass IIR filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = (1 - _cos) / 2
    b1 = 1 - _cos

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b0])
    return filt


def make_highpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Create a second-order high-pass IIR filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = (1 + _cos) / 2
    b1 = -1 - _cos

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b0])
    return filt


def make_bandpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Create a second-order band-pass IIR filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = _sin / 2
    b1 = 0
    b2 = -b0

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt


def make_allpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Create a second-order all-pass IIR filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = 1 - alpha
    b1 = -2 * _cos
    b2 = 1 + alpha

    filt = IIRFilter(2)
    filt.set_coefficients([b2, b1, b0], [b0, b1, b2])
    return filt


def make_peak(frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Create a second-order peak (resonance) IIR filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)

    b0 = 1 + alpha * big_a
    b1 = -2 * _cos
    b2 = 1 - alpha * big_a
    a0 = 1 + alpha / big_a
    a1 = -2 * _cos
    a2 = 1 - alpha / big_a

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt


def make_lowshelf(frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Create a second-order low-shelf IIR filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)
    pmc = (big_a + 1) - (big_a - 1) * _cos
    ppmc = (big_a + 1) + (big_a - 1) * _cos
    mpc = (big_a - 1) - (big_a + 1) * _cos
    pmpc = (big_a - 1) + (big_a + 1) * _cos
    aa2 = 2 * sqrt(big_a) * alpha

    b0 = big_a * (pmc + aa2)
    b1 = 2 * big_a * mpc
    b2 = big_a * (pmc - aa2)
    a0 = ppmc + aa2
    a1 = -2 * pmpc
    a2 = ppmc - aa2

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt


def make_highshelf(frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Create a second-order high-shelf IIR filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)
    pmc = (big_a + 1) - (big_a - 1) * _cos
    ppmc = (big_a + 1) + (big_a - 1) * _cos
    mpc = (big_a - 1) - (big_a + 1) * _cos
    pmpc = (big_a - 1) + (big_a + 1) * _cos
    aa2 = 2 * sqrt(big_a) * alpha

    b0 = big_a * (ppmc + aa2)
    b1 = -2 * big_a * pmpc
    b2 = big_a * (ppmc - aa2)
    a0 = pmc + aa2
    a1 = 2 * mpc
    a2 = pmc - aa2

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt
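

# Minimal usage sketch. It assumes (as in the accompanying iir_filter module)
# that IIRFilter exposes a per-sample process() method; adjust if your
# IIRFilter API differs.
if __name__ == "__main__":
    lowpass = make_lowpass(1000, 48000)
    for sample in (0.0, 1.0, 0.5, -0.5):
        print(lowpass.process(sample))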
| 364 |
'''simple docstring'''
import os
def largest_product(grid):
    """Find the greatest product of four adjacent numbers in the grid
    (up/down, left/right, or diagonally)."""
    n_columns = len(grid[0])
    n_rows = len(grid)
    largest = 0
    lr_diag_product = 0
    rl_diag_product = 0

    # Check vertically, horizontally, diagonally at the same time (only works
    # for nxn grid)
    for i in range(n_columns):
        for j in range(n_rows - 3):
            vert_product = grid[j][i] * grid[j + 1][i] * grid[j + 2][i] * grid[j + 3][i]
            horz_product = grid[i][j] * grid[i][j + 1] * grid[i][j + 2] * grid[i][j + 3]

            # Left-to-right diagonal (\) product
            if i < n_columns - 3:
                lr_diag_product = (
                    grid[i][j]
                    * grid[i + 1][j + 1]
                    * grid[i + 2][j + 2]
                    * grid[i + 3][j + 3]
                )

            # Right-to-left diagonal(/) product
            if i > 2:
                rl_diag_product = (
                    grid[i][j]
                    * grid[i - 1][j + 1]
                    * grid[i - 2][j + 2]
                    * grid[i - 3][j + 3]
                )

            max_product = max(vert_product, horz_product, lr_diag_product, rl_diag_product)
            if max_product > largest:
                largest = max_product

    return largest


def solution():
    """Read the grid from grid.txt next to this file and return the largest product."""
    grid = []
    with open(os.path.dirname(__file__) + "/grid.txt") as file:
        for line in file:
            grid.append(line.strip("\n").split(" "))

    grid = [[int(i) for i in grid[j]] for j in range(len(grid))]

    return largest_product(grid)
if __name__ == "__main__":
print(solution())
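    # Extra sanity check on a small, hypothetical 4x4 grid (not the Euler
    # grid): the best line of four is the bottom row, 13 * 14 * 15 * 16.
    assert largest_product([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12], [13, 14, 15, 16]]) == 43680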
| 61 | 0 |
import random
import unittest
import torch
from diffusers import IFInpaintingSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class IFInpaintingSuperResolutionPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    pipeline_class = IFInpaintingSuperResolutionPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"width", "height"}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS.union({"original_image"})
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}

    def get_dummy_components(self):
        return self._get_superresolution_dummy_components()

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        image = floats_tensor((1, 3, 16, 16), rng=random.Random(seed)).to(device)
        original_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        mask_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)

        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "original_image": original_image,
            "mask_image": mask_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }

        return inputs

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)

    def test_save_load_optional_components(self):
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA")
    def test_save_load_float16(self):
        # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
        super().test_save_load_float16(expected_max_diff=1e-1)

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)

    def test_save_load_local(self):
        self._test_save_load_local()

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=1e-2)
| 231 |
"""simple docstring"""
import json
from typing import List, Optional, Tuple

from tokenizers import normalizers

from ....tokenization_utils_fast import PreTrainedTokenizerFast
from ....utils import logging
from .tokenization_retribert import RetriBertTokenizer

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "yjernite/retribert-base-uncased": (
            "https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/vocab.txt"
        ),
    },
    "tokenizer_file": {
        "yjernite/retribert-base-uncased": (
            "https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/tokenizer.json"
        ),
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "yjernite/retribert-base-uncased": 512,
}

PRETRAINED_INIT_CONFIGURATION = {
    "yjernite/retribert-base-uncased": {"do_lower_case": True},
}


class RetriBertTokenizerFast(PreTrainedTokenizerFast):
    """Fast RetriBert tokenizer, backed by the HuggingFace tokenizers library."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = RetriBertTokenizer
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]

        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
| 177 | 0 |
'''simple docstring'''
from pickle import UnpicklingError
import jax
import jax.numpy as jnp
import numpy as np
from flax.serialization import from_bytes
from flax.traverse_util import flatten_dict
from ..utils import logging
logger = logging.get_logger(__name__)


def load_flax_checkpoint_in_pytorch_model(pt_model, model_file):
    try:
        with open(model_file, "rb") as flax_state_f:
            flax_state = from_bytes(None, flax_state_f.read())
    except UnpicklingError as e:
        try:
            with open(model_file) as f:
                if f.read().startswith("version"):
                    raise OSError(
                        "You seem to have cloned a repository without having git-lfs installed. Please"
                        " install git-lfs and run `git lfs install` followed by `git lfs pull` in the"
                        " folder you cloned."
                    )
                else:
                    raise ValueError from e
        except (UnicodeDecodeError, ValueError):
            raise EnvironmentError(f"Unable to convert {model_file} to Flax deserializable object. ")

    return load_flax_weights_in_pytorch_model(pt_model, flax_state)


def load_flax_weights_in_pytorch_model(pt_model, flax_state):
    """Load Flax checkpoint weights into a PyTorch model."""
    try:
        import torch  # noqa: F401
    except ImportError:
        logger.error(
            "Loading Flax weights in PyTorch requires both PyTorch and Flax to be installed. Please see"
            " https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation"
            " instructions."
        )
        raise

    # check if we have bf16 weights
    is_type_bf16 = flatten_dict(jax.tree_util.tree_map(lambda x: x.dtype == jnp.bfloat16, flax_state)).values()
    if any(is_type_bf16):
        # convert all weights to fp32 if they are bf16 since torch.from_numpy can-not handle bf16
        # and bf16 is not fully supported in PT yet.
        logger.warning(
            "Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` "
            "before loading those in PyTorch model."
        )
        flax_state = jax.tree_util.tree_map(
            lambda params: params.astype(np.float32) if params.dtype == jnp.bfloat16 else params, flax_state
        )

    pt_model.base_model_prefix = ""

    flax_state_dict = flatten_dict(flax_state, sep=".")
    pt_model_dict = pt_model.state_dict()

    # keep track of unexpected & missing keys
    unexpected_keys = []
    missing_keys = set(pt_model_dict.keys())

    for flax_key_tuple, flax_tensor in flax_state_dict.items():
        flax_key_tuple_array = flax_key_tuple.split(".")

        if flax_key_tuple_array[-1] == "kernel" and flax_tensor.ndim == 4:
            flax_key_tuple_array = flax_key_tuple_array[:-1] + ["weight"]
            flax_tensor = jnp.transpose(flax_tensor, (3, 2, 0, 1))
        elif flax_key_tuple_array[-1] == "kernel":
            flax_key_tuple_array = flax_key_tuple_array[:-1] + ["weight"]
            flax_tensor = flax_tensor.T
        elif flax_key_tuple_array[-1] == "scale":
            flax_key_tuple_array = flax_key_tuple_array[:-1] + ["weight"]

        if "time_embedding" not in flax_key_tuple_array:
            for i, flax_key_tuple_string in enumerate(flax_key_tuple_array):
                flax_key_tuple_array[i] = (
                    flax_key_tuple_string.replace("_0", ".0")
                    .replace("_1", ".1")
                    .replace("_2", ".2")
                    .replace("_3", ".3")
                    .replace("_4", ".4")
                    .replace("_5", ".5")
                    .replace("_6", ".6")
                    .replace("_7", ".7")
                    .replace("_8", ".8")
                    .replace("_9", ".9")
                )

        flax_key = ".".join(flax_key_tuple_array)

        if flax_key in pt_model_dict:
            if flax_tensor.shape != pt_model_dict[flax_key].shape:
                raise ValueError(
                    f"Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected "
                    f"to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}."
                )
            else:
                # add weight to pytorch dict
                flax_tensor = np.asarray(flax_tensor) if not isinstance(flax_tensor, np.ndarray) else flax_tensor
                pt_model_dict[flax_key] = torch.from_numpy(flax_tensor)
                # remove from missing keys
                missing_keys.remove(flax_key)
        else:
            # weight is not expected by PyTorch model
            unexpected_keys.append(flax_key)

    pt_model.load_state_dict(pt_model_dict)

    # re-transform missing_keys to list
    missing_keys = list(missing_keys)

    if len(unexpected_keys) > 0:
        logger.warning(
            "Some weights of the Flax model were not used when initializing the PyTorch model"
            f" {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing"
            f" {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture"
            " (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This"
            f" IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect"
            " to be exactly identical (e.g. initializing a BertForSequenceClassification model from a"
            " FlaxBertForSequenceClassification model)."
        )
    if len(missing_keys) > 0:
        logger.warning(
            f"Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly"
            f" initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to"
            " use it for predictions and inference."
        )

    return pt_model
| 37 |
'''simple docstring'''
from __future__ import annotations
def mean(nums: list) -> float:
    if not nums:
        raise ValueError("List is empty")
    return sum(nums) / len(nums)
if __name__ == "__main__":
import doctest
doctest.testmod()
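    # Tiny hand-checkable example: the mean of 1..4 is 10 / 4 = 2.5.
    assert mean([1, 2, 3, 4]) == 2.5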
| 37 | 1 |
"""simple docstring"""
from dataclasses import dataclass, field
from typing import Tuple
from ..utils import cached_property, is_tf_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments
if is_tf_available():
import tensorflow as tf
logger = logging.get_logger(__name__)


@dataclass
class TensorFlowBenchmarkArguments(BenchmarkArguments):
    deprecated_args = [
        "no_inference",
        "no_cuda",
        "no_tpu",
        "no_speed",
        "no_memory",
        "no_env_print",
        "no_multi_process",
    ]

    def __init__(self, **kwargs):
        """
        This __init__ is there for legacy code. When removing deprecated args completely, the class can simply be
        deleted.
        """
        for deprecated_arg in self.deprecated_args:
            if deprecated_arg in kwargs:
                positive_arg = deprecated_arg[3:]
                kwargs[positive_arg] = not kwargs.pop(deprecated_arg)
                logger.warning(
                    f"{deprecated_arg} is depreciated. Please use --no-{positive_arg} or"
                    f" {positive_arg}={kwargs[positive_arg]}"
                )

        self.tpu_name = kwargs.pop("tpu_name", self.tpu_name)
        self.device_idx = kwargs.pop("device_idx", self.device_idx)
        self.eager_mode = kwargs.pop("eager_mode", self.eager_mode)
        self.use_xla = kwargs.pop("use_xla", self.use_xla)
        super().__init__(**kwargs)

    tpu_name: str = field(
        default=None,
        metadata={"help": "Name of TPU"},
    )
    device_idx: int = field(
        default=0,
        metadata={"help": "CPU / GPU device index. Defaults to 0."},
    )
    eager_mode: bool = field(default=False, metadata={"help": "Benchmark models in eager model."})
    use_xla: bool = field(
        default=False,
        metadata={
            "help": "Benchmark models using XLA JIT compilation. Note that `eager_model` has to be set to `False`."
        },
    )

    @cached_property
    def _setup_tpu(self):
        requires_backends(self, ["tf"])
        tpu = None
        if self.tpu:
            try:
                if self.tpu_name:
                    tpu = tf.distribute.cluster_resolver.TPUClusterResolver(self.tpu_name)
                else:
                    tpu = tf.distribute.cluster_resolver.TPUClusterResolver()
            except ValueError:
                tpu = None
        return tpu

    @cached_property
    def _setup_strategy(self):
        requires_backends(self, ["tf"])
        if self.is_tpu:
            tf.config.experimental_connect_to_cluster(self._setup_tpu)
            tf.tpu.experimental.initialize_tpu_system(self._setup_tpu)

            strategy = tf.distribute.TPUStrategy(self._setup_tpu)
        else:
            # currently no multi gpu is allowed
            if self.is_gpu:
                # TODO: Currently only single GPU is supported
                tf.config.set_visible_devices(self.gpu_list[self.device_idx], "GPU")
                strategy = tf.distribute.OneDeviceStrategy(device=f"/gpu:{self.device_idx}")
            else:
                tf.config.set_visible_devices([], "GPU")  # disable GPU
                strategy = tf.distribute.OneDeviceStrategy(device=f"/cpu:{self.device_idx}")

        return strategy

    @property
    def is_tpu(self) -> bool:
        requires_backends(self, ["tf"])
        return self._setup_tpu is not None

    @property
    def strategy(self):
        requires_backends(self, ["tf"])
        return self._setup_strategy

    @property
    def gpu_list(self):
        requires_backends(self, ["tf"])
        return tf.config.list_physical_devices("GPU")

    @property
    def n_gpu(self) -> int:
        requires_backends(self, ["tf"])
        if self.cuda:
            return len(self.gpu_list)
        return 0

    @property
    def is_gpu(self) -> bool:
        return self.n_gpu > 0
| 96 |
import argparse
import re
from typing import Dict
import torch
from datasets import Audio, Dataset, load_dataset, load_metric
from transformers import AutoFeatureExtractor, pipeline
def log_results(result, args):
    """Compute WER/CER metrics and write them (plus optional per-example logs) to disk."""

    log_outputs = args.log_outputs
    dataset_id = "_".join(args.dataset.split("/") + [args.config, args.split])

    # load metric
    wer = load_metric("wer")
    cer = load_metric("cer")

    # compute metrics
    wer_result = wer.compute(references=result["target"], predictions=result["prediction"])
    cer_result = cer.compute(references=result["target"], predictions=result["prediction"])

    # print & log results
    result_str = f"WER: {wer_result}\nCER: {cer_result}"
    print(result_str)

    with open(f"{dataset_id}_eval_results.txt", "w") as f:
        f.write(result_str)

    # log all results in text file. Possibly interesting for analysis
    if log_outputs is not None:
        pred_file = f"log_{dataset_id}_predictions.txt"
        target_file = f"log_{dataset_id}_targets.txt"

        with open(pred_file, "w") as p, open(target_file, "w") as t:
            # mapping function to write output
            def write_to_file(batch, i):
                p.write(f"{i}" + "\n")
                p.write(batch["prediction"] + "\n")
                t.write(f"{i}" + "\n")
                t.write(batch["target"] + "\n")

            result.map(write_to_file, with_indices=True)


def normalize_text(text):
    """Normalize a target transcription before scoring."""

    chars_to_ignore_regex = '[,?.!\-\;\:"“%‘”�—’…–]'  # noqa: W605 IMPORTANT: this should correspond to the chars that were ignored during training

    text = re.sub(chars_to_ignore_regex, "", text.lower())

    # In addition, we can normalize the target text, e.g. removing new lines characters etc...
    # note that order is important here!
    token_sequences_to_ignore = ["\n\n", "\n", "   ", "  "]

    for t in token_sequences_to_ignore:
        text = " ".join(text.split(t))

    return text


def main(args):
    # load dataset
    dataset = load_dataset(args.dataset, args.config, split=args.split, use_auth_token=True)

    # for testing: only process the first two examples as a test
    # dataset = dataset.select(range(10))

    # load processor
    feature_extractor = AutoFeatureExtractor.from_pretrained(args.model_id)
    sampling_rate = feature_extractor.sampling_rate

    # resample audio
    dataset = dataset.cast_column("audio", Audio(sampling_rate=sampling_rate))

    # load eval pipeline
    if args.device is None:
        args.device = 0 if torch.cuda.is_available() else -1
    asr = pipeline("automatic-speech-recognition", model=args.model_id, device=args.device)

    # map function to decode audio
    def map_to_pred(batch):
        prediction = asr(
            batch["audio"]["array"], chunk_length_s=args.chunk_length_s, stride_length_s=args.stride_length_s
        )

        batch["prediction"] = prediction["text"]
        batch["target"] = normalize_text(batch["sentence"])
        return batch

    # run inference on all examples
    result = dataset.map(map_to_pred, remove_columns=dataset.column_names)

    # compute and log_results
    # do not change function below
    log_results(result, args)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument(
        "--model_id", type=str, required=True, help="Model identifier. Should be loadable with 🤗 Transformers"
    )
    parser.add_argument(
        "--dataset",
        type=str,
        required=True,
        help="Dataset name to evaluate the `model_id`. Should be loadable with 🤗 Datasets",
    )
    parser.add_argument(
        "--config", type=str, required=True, help="Config of the dataset. *E.g.* `'en'` for Common Voice"
    )
    parser.add_argument("--split", type=str, required=True, help="Split of the dataset. *E.g.* `'test'`")
    parser.add_argument(
        "--chunk_length_s", type=float, default=None, help="Chunk length in seconds. Defaults to 5 seconds."
    )
    parser.add_argument(
        "--stride_length_s", type=float, default=None, help="Stride of the audio chunks. Defaults to 1 second."
    )
    parser.add_argument(
        "--log_outputs", action="store_true", help="If defined, write outputs to log file for analysis."
    )
    parser.add_argument(
        "--device",
        type=int,
        default=None,
        help="The device to run the pipeline on. -1 for CPU (default), 0 for the first GPU and so on.",
    )
    args = parser.parse_args()

    main(args)
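
# Example invocation (the model id below is a placeholder, not a verified
# endpoint; any CTC/seq2seq ASR checkpoint loadable by the pipeline works):
#   python eval.py --model_id <your-finetuned-model> \
#       --dataset mozilla-foundation/common_voice_8_0 --config de --split test --log_outputs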
| 278 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {"configuration_unispeech": ["UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP", "UniSpeechConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_unispeech"] = [
        "UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST",
        "UniSpeechForCTC",
        "UniSpeechForPreTraining",
        "UniSpeechForSequenceClassification",
        "UniSpeechModel",
        "UniSpeechPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_unispeech import UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP, UniSpeechConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_unispeech import (
            UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST,
            UniSpeechForCTC,
            UniSpeechForPreTraining,
            UniSpeechForSequenceClassification,
            UniSpeechModel,
            UniSpeechPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 101 |
'''simple docstring'''
import os
import time
import warnings
from dataclasses import dataclass, field
from enum import Enum
from typing import List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import logging
from ..processors.glue import glue_convert_examples_to_features, glue_output_modes, glue_processors
from ..processors.utils import InputFeatures
logger = logging.get_logger(__name__)


@dataclass
class GlueDataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    task_name: str = field(metadata={"help": "The name of the task to train on: " + ", ".join(glue_processors.keys())})
    data_dir: str = field(
        metadata={"help": "The input data dir. Should contain the .tsv files (or other data files) for the task."}
    )
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )

    def __post_init__(self):
        self.task_name = self.task_name.lower()


class Split(Enum):
    train = "train"
    dev = "dev"
    test = "test"


class GlueDataset(Dataset):
    args: GlueDataTrainingArguments
    output_mode: str
    features: List[InputFeatures]

    def __init__(
        self,
        args: GlueDataTrainingArguments,
        tokenizer: PreTrainedTokenizerBase,
        limit_length: Optional[int] = None,
        mode: Union[str, Split] = Split.train,
        cache_dir: Optional[str] = None,
    ):
        warnings.warn(
            "This dataset will be removed from the library soon, preprocessing should be handled with the 🤗 Datasets "
            "library. You can have a look at this example script for pointers: "
            "https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py",
            FutureWarning,
        )
        self.args = args
        self.processor = glue_processors[args.task_name]()
        self.output_mode = glue_output_modes[args.task_name]
        if isinstance(mode, str):
            try:
                mode = Split[mode]
            except KeyError:
                raise KeyError("mode is not a valid split name")
        # Load data features from cache or dataset file
        cached_features_file = os.path.join(
            cache_dir if cache_dir is not None else args.data_dir,
            f"cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{args.task_name}",
        )
        label_list = self.processor.get_labels()
        if args.task_name in ["mnli", "mnli-mm"] and tokenizer.__class__.__name__ in (
            "RobertaTokenizer",
            "RobertaTokenizerFast",
            "XLMRobertaTokenizer",
            "BartTokenizer",
            "BartTokenizerFast",
        ):
            # HACK(label indices are swapped in RoBERTa pretrained model)
            label_list[1], label_list[2] = label_list[2], label_list[1]
        self.label_list = label_list

        # Make sure only the first process in distributed training processes the dataset,
        # and the others will use the cache.
        lock_path = cached_features_file + ".lock"
        with FileLock(lock_path):
            if os.path.exists(cached_features_file) and not args.overwrite_cache:
                start = time.time()
                self.features = torch.load(cached_features_file)
                logger.info(
                    f"Loading features from cached file {cached_features_file} [took %.3f s]", time.time() - start
                )
            else:
                logger.info(f"Creating features from dataset file at {args.data_dir}")

                if mode == Split.dev:
                    examples = self.processor.get_dev_examples(args.data_dir)
                elif mode == Split.test:
                    examples = self.processor.get_test_examples(args.data_dir)
                else:
                    examples = self.processor.get_train_examples(args.data_dir)
                if limit_length is not None:
                    examples = examples[:limit_length]
                self.features = glue_convert_examples_to_features(
                    examples,
                    tokenizer,
                    max_length=args.max_seq_length,
                    label_list=label_list,
                    output_mode=self.output_mode,
                )
                start = time.time()
                torch.save(self.features, cached_features_file)
                # ^ This seems to take a lot of time so I want to investigate why and how we can improve.
                logger.info(
                    f"Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]"
                )

    def __len__(self):
        return len(self.features)

    def __getitem__(self, i) -> InputFeatures:
        return self.features[i]

    def get_labels(self):
        return self.label_list
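

# Minimal usage sketch (the checkpoint and data paths below are placeholders;
# a local GLUE data directory in the legacy .tsv layout is assumed):
#
#   from transformers import AutoTokenizer
#
#   tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
#   data_args = GlueDataTrainingArguments(task_name="mrpc", data_dir="./glue_data/MRPC")
#   train_dataset = GlueDataset(data_args, tokenizer=tokenizer, mode=Split.train)
#   print(len(train_dataset), train_dataset.get_labels())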
| 101 | 1 |
"""simple docstring"""
def topological_sort(graph: dict) -> None:
    """Kahn's algorithm: print a topological ordering of the vertices,
    or report that the graph contains a cycle."""
    indegree = [0] * len(graph)
    queue = []
    topo = []
    cnt = 0

    for values in graph.values():
        for i in values:
            indegree[i] += 1

    for i in range(len(indegree)):
        if indegree[i] == 0:
            queue.append(i)

    while queue:
        vertex = queue.pop(0)
        cnt += 1
        topo.append(vertex)
        for x in graph[vertex]:
            indegree[x] -= 1
            if indegree[x] == 0:
                queue.append(x)

    if cnt != len(graph):
        print("Cycle exists")
    else:
        print(topo)


# Adjacency List of Graph
graph = {0: [1, 2], 1: [3], 2: [3], 3: [4, 5], 4: [], 5: []}
topological_sort(graph)
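
# A graph with a cycle, to exercise the failure branch (illustrative data):
# 0 -> 1 -> 2 -> 0, so no vertex ever reaches indegree 0 and "Cycle exists"
# is printed instead of an ordering.
cyclic_graph = {0: [1], 1: [2], 2: [0]}
topological_sort(cyclic_graph)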
| 77 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging

logger = logging.get_logger(__name__)

MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "alibaba-damo/mgp-str-base": "https://huggingface.co/alibaba-damo/mgp-str-base/resolve/main/config.json",
}


class MgpstrConfig(PretrainedConfig):
    model_type = "mgp-str"

    def __init__(
        self,
        image_size=[32, 128],
        patch_size=4,
        num_channels=3,
        max_token_length=27,
        num_character_labels=38,
        num_bpe_labels=50257,
        num_wordpiece_labels=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        mlp_ratio=4.0,
        qkv_bias=True,
        distilled=False,
        layer_norm_eps=1e-5,
        drop_rate=0.0,
        attn_drop_rate=0.0,
        drop_path_rate=0.0,
        output_a3_attentions=False,
        initializer_range=0.02,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.max_token_length = max_token_length
        self.num_character_labels = num_character_labels
        self.num_bpe_labels = num_bpe_labels
        self.num_wordpiece_labels = num_wordpiece_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.mlp_ratio = mlp_ratio
        self.distilled = distilled
        self.layer_norm_eps = layer_norm_eps
        self.drop_rate = drop_rate
        self.qkv_bias = qkv_bias
        self.attn_drop_rate = attn_drop_rate
        self.drop_path_rate = drop_path_rate
        self.output_a3_attentions = output_a3_attentions
        self.initializer_range = initializer_range
| 77 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_table_transformer": [
        "TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "TableTransformerConfig",
        "TableTransformerOnnxConfig",
    ]
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_table_transformer"] = [
        "TABLE_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TableTransformerForObjectDetection",
        "TableTransformerModel",
        "TableTransformerPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_table_transformer import (
        TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        TableTransformerConfig,
        TableTransformerOnnxConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_table_transformer import (
            TABLE_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TableTransformerForObjectDetection,
            TableTransformerModel,
            TableTransformerPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 358 |
'''simple docstring'''
# DISCLAIMER: This code is strongly influenced by https://github.com/pesser/pytorch_diffusion
# and https://github.com/hojonathanho/diffusion
import math
from dataclasses import dataclass
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from diffusers.configuration_utils import ConfigMixin, register_to_config
from diffusers.schedulers.scheduling_utils import SchedulerMixin
from diffusers.utils import BaseOutput, deprecate
@dataclass
# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->DDIM
class DDIMSchedulerOutput(BaseOutput):
    prev_sample: torch.FloatTensor
    pred_original_sample: Optional[torch.FloatTensor] = None


def betas_for_alpha_bar(
    num_diffusion_timesteps,
    max_beta=0.999,
    alpha_transform_type="cosine",
):
    if alpha_transform_type == "cosine":

        def alpha_bar_fn(t):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2

    elif alpha_transform_type == "exp":

        def alpha_bar_fn(t):
            return math.exp(t * -12.0)

    else:
        raise ValueError(f"Unsupported alpha_tranform_type: {alpha_transform_type}")

    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta))
    return torch.tensor(betas, dtype=torch.float32)


class DDIMInverseScheduler(SchedulerMixin, ConfigMixin):
    order = 1

    @register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 1000,
        beta_start: float = 0.0001,
        beta_end: float = 0.02,
        beta_schedule: str = "linear",
        trained_betas: Optional[Union[np.ndarray, List[float]]] = None,
        clip_sample: bool = True,
        set_alpha_to_zero: bool = True,
        steps_offset: int = 0,
        prediction_type: str = "epsilon",
        clip_sample_range: float = 1.0,
        **kwargs,
    ):
        if kwargs.get("set_alpha_to_one", None) is not None:
            deprecation_message = (
                "The `set_alpha_to_one` argument is deprecated. Please use `set_alpha_to_zero` instead."
            )
            deprecate("set_alpha_to_one", "1.0.0", deprecation_message, standard_warn=False)
            set_alpha_to_zero = kwargs["set_alpha_to_one"]
        if trained_betas is not None:
            self.betas = torch.tensor(trained_betas, dtype=torch.float32)
        elif beta_schedule == "linear":
            self.betas = torch.linspace(beta_start, beta_end, num_train_timesteps, dtype=torch.float32)
        elif beta_schedule == "scaled_linear":
            # this schedule is very specific to the latent diffusion model.
            self.betas = (
                torch.linspace(beta_start**0.5, beta_end**0.5, num_train_timesteps, dtype=torch.float32) ** 2
            )
        elif beta_schedule == "squaredcos_cap_v2":
            # Glide cosine schedule
            self.betas = betas_for_alpha_bar(num_train_timesteps)
        else:
            raise NotImplementedError(f"{beta_schedule} does is not implemented for {self.__class__}")

        self.alphas = 1.0 - self.betas
        self.alphas_cumprod = torch.cumprod(self.alphas, dim=0)

        # At every step in inverted ddim, we are looking into the next alphas_cumprod
        # For the final step, there is no next alphas_cumprod, and the index is out of bounds
        # `set_alpha_to_zero` decides whether we set this parameter simply to zero
        # in this case, self.step() just output the predicted noise
        # or whether we use the final alpha of the "non-previous" one.
        self.final_alpha_cumprod = torch.tensor(0.0) if set_alpha_to_zero else self.alphas_cumprod[-1]

        # standard deviation of the initial noise distribution
        self.init_noise_sigma = 1.0

        # setable values
        self.num_inference_steps = None
        self.timesteps = torch.from_numpy(np.arange(0, num_train_timesteps).copy().astype(np.int64))

    def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = None) -> torch.FloatTensor:
        return sample

    def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None):
        if num_inference_steps > self.config.num_train_timesteps:
            raise ValueError(
                f"`num_inference_steps`: {num_inference_steps} cannot be larger than `self.config.train_timesteps`:"
                f" {self.config.num_train_timesteps} as the unet model trained with this scheduler can only handle"
                f" maximal {self.config.num_train_timesteps} timesteps."
            )

        self.num_inference_steps = num_inference_steps
        step_ratio = self.config.num_train_timesteps // self.num_inference_steps
        # creates integer timesteps by multiplying by ratio
        # casting to int to avoid issues when num_inference_step is power of 3
        timesteps = (np.arange(0, num_inference_steps) * step_ratio).round().copy().astype(np.int64)
        self.timesteps = torch.from_numpy(timesteps).to(device)
        self.timesteps += self.config.steps_offset

    def step(
        self,
        model_output: torch.FloatTensor,
        timestep: int,
        sample: torch.FloatTensor,
        eta: float = 0.0,
        use_clipped_model_output: bool = False,
        variance_noise: Optional[torch.FloatTensor] = None,
        return_dict: bool = True,
    ):
        # 1. get previous step value (=t+1)
        prev_timestep = timestep + self.config.num_train_timesteps // self.num_inference_steps

        # 2. compute alphas, betas
        # change original implementation to exactly match noise levels for analogous forward process
        alpha_prod_t = self.alphas_cumprod[timestep]
        alpha_prod_t_prev = (
            self.alphas_cumprod[prev_timestep]
            if prev_timestep < self.config.num_train_timesteps
            else self.final_alpha_cumprod
        )

        beta_prod_t = 1 - alpha_prod_t

        # 3. compute predicted original sample from predicted noise also called
        # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
        if self.config.prediction_type == "epsilon":
            pred_original_sample = (sample - beta_prod_t**0.5 * model_output) / alpha_prod_t**0.5
            pred_epsilon = model_output
        elif self.config.prediction_type == "sample":
            pred_original_sample = model_output
            pred_epsilon = (sample - alpha_prod_t**0.5 * pred_original_sample) / beta_prod_t**0.5
        elif self.config.prediction_type == "v_prediction":
            pred_original_sample = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
            pred_epsilon = (alpha_prod_t**0.5) * model_output + (beta_prod_t**0.5) * sample
        else:
            raise ValueError(
                f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample`, or"
                " `v_prediction`"
            )

        # 4. Clip or threshold "predicted x_0"
        if self.config.clip_sample:
            pred_original_sample = pred_original_sample.clamp(
                -self.config.clip_sample_range, self.config.clip_sample_range
            )

        # 5. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
        pred_sample_direction = (1 - alpha_prod_t_prev) ** 0.5 * pred_epsilon

        # 6. compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
        prev_sample = alpha_prod_t_prev**0.5 * pred_original_sample + pred_sample_direction

        if not return_dict:
            return (prev_sample, pred_original_sample)
        return DDIMSchedulerOutput(prev_sample=prev_sample, pred_original_sample=pred_original_sample)

    def __len__(self):
        return self.config.num_train_timesteps
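

# Minimal usage sketch: drives the inverse scheduler the way a pipeline would,
# but with a dummy zero "model output" standing in for a real UNet call.
if __name__ == "__main__":
    scheduler = DDIMInverseScheduler(num_train_timesteps=1000)
    scheduler.set_timesteps(50)

    sample = torch.randn(1, 3, 8, 8)
    for t in scheduler.timesteps:
        noise_pred = torch.zeros_like(sample)  # placeholder for a model call
        sample = scheduler.step(noise_pred, int(t), sample).prev_sample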
| 227 | 0 |
import argparse
from pathlib import Path
import torch
from packaging import version
from torch.onnx import export
from diffusers import AutoencoderKL
is_torch_less_than_1_11 = version.parse(version.parse(torch.__version__).base_version) < version.parse("1.11")


def onnx_export(
    model,
    model_args: tuple,
    output_path: Path,
    ordered_input_names,
    output_names,
    dynamic_axes,
    opset,
    use_external_data_format=False,
):
    output_path.parent.mkdir(parents=True, exist_ok=True)
    # PyTorch deprecated the `enable_onnx_checker` and `use_external_data_format` arguments in v1.11,
    # so we check the torch version for backwards compatibility
    if is_torch_less_than_1_11:
        export(
            model,
            model_args,
            f=output_path.as_posix(),
            input_names=ordered_input_names,
            output_names=output_names,
            dynamic_axes=dynamic_axes,
            do_constant_folding=True,
            use_external_data_format=use_external_data_format,
            enable_onnx_checker=True,
            opset_version=opset,
        )
    else:
        export(
            model,
            model_args,
            f=output_path.as_posix(),
            input_names=ordered_input_names,
            output_names=output_names,
            dynamic_axes=dynamic_axes,
            do_constant_folding=True,
            opset_version=opset,
        )


@torch.no_grad()
def convert_models(model_path: str, output_path: str, opset: int, fp16: bool = False):
    dtype = torch.float16 if fp16 else torch.float32
    if fp16 and torch.cuda.is_available():
        device = "cuda"
    elif fp16 and not torch.cuda.is_available():
        raise ValueError("`float16` model export is only supported on GPUs with CUDA")
    else:
        device = "cpu"
    output_path = Path(output_path)

    # VAE DECODER
    vae_decoder = AutoencoderKL.from_pretrained(model_path + "/vae")
    vae_latent_channels = vae_decoder.config.latent_channels
    # forward only through the decoder part
    vae_decoder.forward = vae_decoder.decode

    onnx_export(
        vae_decoder,
        model_args=(
            torch.randn(1, vae_latent_channels, 25, 25).to(device=device, dtype=dtype),
            False,
        ),
        output_path=output_path / "vae_decoder" / "model.onnx",
        ordered_input_names=["latent_sample", "return_dict"],
        output_names=["sample"],
        dynamic_axes={
            "latent_sample": {0: "batch", 1: "channels", 2: "height", 3: "width"},
        },
        opset=opset,
    )
    del vae_decoder


if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument(
        "--model_path",
        type=str,
        required=True,
        help="Path to the `diffusers` checkpoint to convert (either a local directory or on the Hub).",
    )
    parser.add_argument("--output_path", type=str, required=True, help="Path to the output model.")
    parser.add_argument(
        "--opset",
        default=14,
        type=int,
        help="The version of the ONNX operator set to use.",
    )
    parser.add_argument("--fp16", action="store_true", default=False, help="Export the models in `float16` mode")

    args = parser.parse_args()
    print(args.output_path)
    convert_models(args.model_path, args.output_path, args.opset, args.fp16)
    print("SD: Done: ONNX")
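
# Example invocation (paths are placeholders; the script name is whatever this
# file is saved as):
#   python convert_vae_decoder_to_onnx.py --model_path ./stable-diffusion-v1-5 \
#       --output_path ./sd_onnx --opset 14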
| 24 |
"""simple docstring"""
from collections import namedtuple
from_to = namedtuple("from_to", "from_ to")

METRIC_CONVERSION = {
    "cubicmeter": from_to(1, 1),
    "litre": from_to(0.001, 1000),
    "kilolitre": from_to(1, 1),
    "gallon": from_to(0.00454, 264.172),
    "cubicyard": from_to(0.76455, 1.30795),
    "cubicfoot": from_to(0.028, 35.3147),
    "cup": from_to(0.000236588, 4226.75),
}


def volume_conversion(value: float, from_type: str, to_type: str) -> float:
    if from_type not in METRIC_CONVERSION:
        raise ValueError(
            f"Invalid 'from_type' value: {from_type!r} Supported values are:\n" + ", ".join(METRIC_CONVERSION)
        )
    if to_type not in METRIC_CONVERSION:
        raise ValueError(
            f"Invalid 'to_type' value: {to_type!r}. Supported values are:\n" + ", ".join(METRIC_CONVERSION)
        )
    return value * METRIC_CONVERSION[from_type].from_ * METRIC_CONVERSION[to_type].to
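

# Illustrative conversions, readable straight off the table above:
#   volume_conversion(4, "cubicmeter", "litre")   -> 4 * 1 * 1000  = 4000
#   volume_conversion(1, "gallon", "cubicmeter")  -> 1 * 0.00454 * 1 = 0.00454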
if __name__ == "__main__":
import doctest
doctest.testmod()
| 61 | 0 |
'''simple docstring'''
import math
def is_prime(number: int) -> bool:
    """Simple 6k +/- 1 primality test."""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def solution(ratio: float = 0.1) -> int:
    """Return the side length of the square spiral at which the ratio of primes
    along both diagonals first falls below `ratio` (Project Euler problem 58)."""
    j = 3
    primes = 3

    while primes / (2 * j - 1) >= ratio:
        for i in range(j * j + j + 1, (j + 2) * (j + 2), j + 1):
            primes += is_prime(i)
        j += 2
    return j
if __name__ == "__main__":
import doctest
doctest.testmod()
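    # Hand-verified check: with ratio 0.5 the loop stops at side length 11,
    # since only 10 of the 21 diagonal numbers up to that layer are prime.
    assert solution(0.5) == 11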
| 220 |
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.auto import AutoModelForSeq2SeqLM, AutoTokenizer
from .base import PipelineTool


LANGUAGE_CODES = {
'''Acehnese Arabic''': '''ace_Arab''',
'''Acehnese Latin''': '''ace_Latn''',
'''Mesopotamian Arabic''': '''acm_Arab''',
'''Ta\'izzi-Adeni Arabic''': '''acq_Arab''',
'''Tunisian Arabic''': '''aeb_Arab''',
'''Afrikaans''': '''afr_Latn''',
'''South Levantine Arabic''': '''ajp_Arab''',
'''Akan''': '''aka_Latn''',
'''Amharic''': '''amh_Ethi''',
'''North Levantine Arabic''': '''apc_Arab''',
'''Modern Standard Arabic''': '''arb_Arab''',
'''Modern Standard Arabic Romanized''': '''arb_Latn''',
'''Najdi Arabic''': '''ars_Arab''',
'''Moroccan Arabic''': '''ary_Arab''',
'''Egyptian Arabic''': '''arz_Arab''',
'''Assamese''': '''asm_Beng''',
'''Asturian''': '''ast_Latn''',
'''Awadhi''': '''awa_Deva''',
'''Central Aymara''': '''ayr_Latn''',
'''South Azerbaijani''': '''azb_Arab''',
'''North Azerbaijani''': '''azj_Latn''',
'''Bashkir''': '''bak_Cyrl''',
'''Bambara''': '''bam_Latn''',
'''Balinese''': '''ban_Latn''',
'''Belarusian''': '''bel_Cyrl''',
'''Bemba''': '''bem_Latn''',
'''Bengali''': '''ben_Beng''',
'''Bhojpuri''': '''bho_Deva''',
'''Banjar Arabic''': '''bjn_Arab''',
'''Banjar Latin''': '''bjn_Latn''',
'''Standard Tibetan''': '''bod_Tibt''',
'''Bosnian''': '''bos_Latn''',
'''Buginese''': '''bug_Latn''',
'''Bulgarian''': '''bul_Cyrl''',
'''Catalan''': '''cat_Latn''',
'''Cebuano''': '''ceb_Latn''',
'''Czech''': '''ces_Latn''',
'''Chokwe''': '''cjk_Latn''',
'''Central Kurdish''': '''ckb_Arab''',
'''Crimean Tatar''': '''crh_Latn''',
'''Welsh''': '''cym_Latn''',
'''Danish''': '''dan_Latn''',
'''German''': '''deu_Latn''',
'''Southwestern Dinka''': '''dik_Latn''',
'''Dyula''': '''dyu_Latn''',
'''Dzongkha''': '''dzo_Tibt''',
'''Greek''': '''ell_Grek''',
'''English''': '''eng_Latn''',
'''Esperanto''': '''epo_Latn''',
'''Estonian''': '''est_Latn''',
'''Basque''': '''eus_Latn''',
'''Ewe''': '''ewe_Latn''',
'''Faroese''': '''fao_Latn''',
'''Fijian''': '''fij_Latn''',
'''Finnish''': '''fin_Latn''',
'''Fon''': '''fon_Latn''',
'''French''': '''fra_Latn''',
'''Friulian''': '''fur_Latn''',
'''Nigerian Fulfulde''': '''fuv_Latn''',
'''Scottish Gaelic''': '''gla_Latn''',
'''Irish''': '''gle_Latn''',
'''Galician''': '''glg_Latn''',
'''Guarani''': '''grn_Latn''',
'''Gujarati''': '''guj_Gujr''',
'''Haitian Creole''': '''hat_Latn''',
'''Hausa''': '''hau_Latn''',
'''Hebrew''': '''heb_Hebr''',
'''Hindi''': '''hin_Deva''',
'''Chhattisgarhi''': '''hne_Deva''',
'''Croatian''': '''hrv_Latn''',
'''Hungarian''': '''hun_Latn''',
'''Armenian''': '''hye_Armn''',
'''Igbo''': '''ibo_Latn''',
'''Ilocano''': '''ilo_Latn''',
'''Indonesian''': '''ind_Latn''',
'''Icelandic''': '''isl_Latn''',
'''Italian''': '''ita_Latn''',
'''Javanese''': '''jav_Latn''',
'''Japanese''': '''jpn_Jpan''',
'''Kabyle''': '''kab_Latn''',
'''Jingpho''': '''kac_Latn''',
'''Kamba''': '''kam_Latn''',
'''Kannada''': '''kan_Knda''',
'''Kashmiri Arabic''': '''kas_Arab''',
'''Kashmiri Devanagari''': '''kas_Deva''',
'''Georgian''': '''kat_Geor''',
'''Central Kanuri Arabic''': '''knc_Arab''',
'''Central Kanuri Latin''': '''knc_Latn''',
'''Kazakh''': '''kaz_Cyrl''',
'''Kabiyè''': '''kbp_Latn''',
'''Kabuverdianu''': '''kea_Latn''',
'''Khmer''': '''khm_Khmr''',
'''Kikuyu''': '''kik_Latn''',
'''Kinyarwanda''': '''kin_Latn''',
'''Kyrgyz''': '''kir_Cyrl''',
'''Kimbundu''': '''kmb_Latn''',
'''Northern Kurdish''': '''kmr_Latn''',
'''Kikongo''': '''kon_Latn''',
'''Korean''': '''kor_Hang''',
'''Lao''': '''lao_Laoo''',
'''Ligurian''': '''lij_Latn''',
'''Limburgish''': '''lim_Latn''',
'''Lingala''': '''lin_Latn''',
'''Lithuanian''': '''lit_Latn''',
'''Lombard''': '''lmo_Latn''',
'''Latgalian''': '''ltg_Latn''',
'''Luxembourgish''': '''ltz_Latn''',
'''Luba-Kasai''': '''lua_Latn''',
'''Ganda''': '''lug_Latn''',
'''Luo''': '''luo_Latn''',
'''Mizo''': '''lus_Latn''',
'''Standard Latvian''': '''lvs_Latn''',
'''Magahi''': '''mag_Deva''',
'''Maithili''': '''mai_Deva''',
'''Malayalam''': '''mal_Mlym''',
'''Marathi''': '''mar_Deva''',
'''Minangkabau Arabic ''': '''min_Arab''',
'''Minangkabau Latin''': '''min_Latn''',
'''Macedonian''': '''mkd_Cyrl''',
'''Plateau Malagasy''': '''plt_Latn''',
'''Maltese''': '''mlt_Latn''',
'''Meitei Bengali''': '''mni_Beng''',
'''Halh Mongolian''': '''khk_Cyrl''',
'''Mossi''': '''mos_Latn''',
'''Maori''': '''mri_Latn''',
'''Burmese''': '''mya_Mymr''',
'''Dutch''': '''nld_Latn''',
'''Norwegian Nynorsk''': '''nno_Latn''',
'''Norwegian Bokmål''': '''nob_Latn''',
'''Nepali''': '''npi_Deva''',
'''Northern Sotho''': '''nso_Latn''',
'''Nuer''': '''nus_Latn''',
'''Nyanja''': '''nya_Latn''',
'''Occitan''': '''oci_Latn''',
'''West Central Oromo''': '''gaz_Latn''',
'''Odia''': '''ory_Orya''',
'''Pangasinan''': '''pag_Latn''',
'''Eastern Panjabi''': '''pan_Guru''',
'''Papiamento''': '''pap_Latn''',
'''Western Persian''': '''pes_Arab''',
'''Polish''': '''pol_Latn''',
'''Portuguese''': '''por_Latn''',
'''Dari''': '''prs_Arab''',
'''Southern Pashto''': '''pbt_Arab''',
'''Ayacucho Quechua''': '''quy_Latn''',
'''Romanian''': '''ron_Latn''',
'''Rundi''': '''run_Latn''',
'''Russian''': '''rus_Cyrl''',
'''Sango''': '''sag_Latn''',
'''Sanskrit''': '''san_Deva''',
'''Santali''': '''sat_Olck''',
'''Sicilian''': '''scn_Latn''',
'''Shan''': '''shn_Mymr''',
'''Sinhala''': '''sin_Sinh''',
'''Slovak''': '''slk_Latn''',
'''Slovenian''': '''slv_Latn''',
'''Samoan''': '''smo_Latn''',
'''Shona''': '''sna_Latn''',
'''Sindhi''': '''snd_Arab''',
'''Somali''': '''som_Latn''',
'''Southern Sotho''': '''sot_Latn''',
'''Spanish''': '''spa_Latn''',
'''Tosk Albanian''': '''als_Latn''',
'''Sardinian''': '''srd_Latn''',
'''Serbian''': '''srp_Cyrl''',
'''Swati''': '''ssw_Latn''',
'''Sundanese''': '''sun_Latn''',
'''Swedish''': '''swe_Latn''',
'''Swahili''': '''swh_Latn''',
'''Silesian''': '''szl_Latn''',
'''Tamil''': '''tam_Taml''',
'''Tatar''': '''tat_Cyrl''',
'''Telugu''': '''tel_Telu''',
'''Tajik''': '''tgk_Cyrl''',
'''Tagalog''': '''tgl_Latn''',
'''Thai''': '''tha_Thai''',
'''Tigrinya''': '''tir_Ethi''',
'''Tamasheq Latin''': '''taq_Latn''',
'''Tamasheq Tifinagh''': '''taq_Tfng''',
'''Tok Pisin''': '''tpi_Latn''',
'''Tswana''': '''tsn_Latn''',
'''Tsonga''': '''tso_Latn''',
'''Turkmen''': '''tuk_Latn''',
'''Tumbuka''': '''tum_Latn''',
'''Turkish''': '''tur_Latn''',
'''Twi''': '''twi_Latn''',
'''Central Atlas Tamazight''': '''tzm_Tfng''',
'''Uyghur''': '''uig_Arab''',
'''Ukrainian''': '''ukr_Cyrl''',
'''Umbundu''': '''umb_Latn''',
'''Urdu''': '''urd_Arab''',
'''Northern Uzbek''': '''uzn_Latn''',
'''Venetian''': '''vec_Latn''',
'''Vietnamese''': '''vie_Latn''',
'''Waray''': '''war_Latn''',
'''Wolof''': '''wol_Latn''',
'''Xhosa''': '''xho_Latn''',
'''Eastern Yiddish''': '''ydd_Hebr''',
'''Yoruba''': '''yor_Latn''',
'''Yue Chinese''': '''yue_Hant''',
'''Chinese Simplified''': '''zho_Hans''',
'''Chinese Traditional''': '''zho_Hant''',
'''Standard Malay''': '''zsm_Latn''',
'''Zulu''': '''zul_Latn''',
}
class TranslationTool(PipelineTool):
    default_checkpoint = "facebook/nllb-200-distilled-600M"
    description = (
        "This is a tool that translates text from a language to another. It takes three inputs: `text`, which should "
        "be the text to translate, `src_lang`, which should be the language of the text to translate and `tgt_lang`, "
        "which should be the language for the desired ouput language. Both `src_lang` and `tgt_lang` are written in "
        "plain English, such as 'Romanian', or 'Albanian'. It returns the text translated in `tgt_lang`."
    )
    name = "translator"
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSeq2SeqLM
    lang_to_code = LANGUAGE_CODES

    inputs = ["text", "text", "text"]
    outputs = ["text"]

    def encode(self, text, src_lang, tgt_lang):
        if src_lang not in self.lang_to_code:
            raise ValueError(f"{src_lang} is not a supported language.")
        if tgt_lang not in self.lang_to_code:
            raise ValueError(f"{tgt_lang} is not a supported language.")
        src_lang = self.lang_to_code[src_lang]
        tgt_lang = self.lang_to_code[tgt_lang]
        return self.pre_processor._build_translation_inputs(
            text, return_tensors="pt", src_lang=src_lang, tgt_lang=tgt_lang
        )

    def forward(self, inputs):
        return self.model.generate(**inputs)

    def decode(self, outputs):
        return self.post_processor.decode(outputs[0].tolist(), skip_special_tokens=True)
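

# Minimal usage sketch (downloads the NLLB checkpoint on first use; the exact
# call signature is inherited from PipelineTool.__call__, so treat this as an
# illustration rather than a tested API):
#
#   from transformers.tools import TranslationTool
#
#   translator = TranslationTool()
#   translator("How are you?", src_lang="English", tgt_lang="French")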
| 220 | 1 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class TFGPT2Tokenizer(metaclass=DummyObject):
    _backends = ["keras_nlp"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["keras_nlp"])
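

# With this placeholder, importing the class always succeeds, while
# instantiating it without `keras_nlp` installed raises an ImportError that
# names the missing backend, roughly:
#
#   tokenizer = TFGPT2Tokenizer()  # ImportError: ... requires the keras_nlp library ...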
| 37 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/xlm-roberta-xl": "https://huggingface.co/facebook/xlm-roberta-xl/resolve/main/config.json",
    "facebook/xlm-roberta-xxl": "https://huggingface.co/facebook/xlm-roberta-xxl/resolve/main/config.json",
    # See all XLM-RoBERTa-XL models at https://huggingface.co/models?filter=xlm-roberta-xl
}


class XLMRobertaXLConfig(PretrainedConfig):
    model_type = "xlm-roberta-xl"

    def __init__(
        self,
        vocab_size=250880,
        hidden_size=2560,
        num_hidden_layers=36,
        num_attention_heads=32,
        intermediate_size=10240,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=514,
        type_vocab_size=1,
        initializer_range=0.02,
        layer_norm_eps=1e-05,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class XLMRobertaXLOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
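

# Minimal usage sketch (instantiates the default XL layout; configs only
# describe the architecture, no weights are downloaded here):
#
#   from transformers import XLMRobertaXLConfig
#
#   config = XLMRobertaXLConfig()
#   print(config.num_hidden_layers, config.hidden_size)  # 36 2560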
| 37 | 1 |
"""simple docstring"""
class EditDistance:
    """
    Minimum edit distance between two strings, solved both top-down
    (memoized recursion) and bottom-up (tabulation).
    """

    def __init__(self):
        self.word1 = ""
        self.word2 = ""
        self.dp = []

    def __min_dist_top_down_dp(self, m, n):
        if m == -1:
            return n + 1
        elif n == -1:
            return m + 1
        elif self.dp[m][n] > -1:
            return self.dp[m][n]
        else:
            if self.word1[m] == self.word2[n]:
                self.dp[m][n] = self.__min_dist_top_down_dp(m - 1, n - 1)
            else:
                insert = self.__min_dist_top_down_dp(m, n - 1)
                delete = self.__min_dist_top_down_dp(m - 1, n)
                replace = self.__min_dist_top_down_dp(m - 1, n - 1)
                self.dp[m][n] = 1 + min(insert, delete, replace)

            return self.dp[m][n]

    def min_dist_top_down(self, word1: str, word2: str) -> int:
        self.word1 = word1
        self.word2 = word2
        self.dp = [[-1 for _ in range(len(word2))] for _ in range(len(word1))]

        return self.__min_dist_top_down_dp(len(word1) - 1, len(word2) - 1)

    def min_dist_bottom_up(self, word1: str, word2: str) -> int:
        self.word1 = word1
        self.word2 = word2
        m = len(word1)
        n = len(word2)
        self.dp = [[0 for _ in range(n + 1)] for _ in range(m + 1)]

        for i in range(m + 1):
            for j in range(n + 1):
                if i == 0:  # first string is empty
                    self.dp[i][j] = j
                elif j == 0:  # second string is empty
                    self.dp[i][j] = i
                elif word1[i - 1] == word2[j - 1]:  # last characters are equal
                    self.dp[i][j] = self.dp[i - 1][j - 1]
                else:
                    insert = self.dp[i][j - 1]
                    delete = self.dp[i - 1][j]
                    replace = self.dp[i - 1][j - 1]
                    self.dp[i][j] = 1 + min(insert, delete, replace)
        return self.dp[m][n]


if __name__ == "__main__":
    solver = EditDistance()

    print("****************** Testing Edit Distance DP Algorithm ******************")
    print()

    S1 = input("Enter the first string: ").strip()
    S2 = input("Enter the second string: ").strip()

    print()
    print(f"The minimum edit distance is: {solver.min_dist_top_down(S1, S2)}")
    print(f"The minimum edit distance is: {solver.min_dist_bottom_up(S1, S2)}")
    print()
    print("*************** End of Testing Edit Distance DP Algorithm ***************")
| 126 |
"""simple docstring"""
from __future__ import annotations


def mean(nums: list) -> float:
    if not nums:
        raise ValueError("List is empty")
    return sum(nums) / len(nums)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 126 | 1 |
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import evaluate
import numpy as np
import torch
from datasets import load_dataset
from PIL import Image
from torchvision.transforms import (
CenterCrop,
Compose,
Normalize,
RandomHorizontalFlip,
RandomResizedCrop,
Resize,
ToTensor,
)
import transformers
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
AutoConfig,
AutoImageProcessor,
AutoModelForImageClassification,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
logger = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.31.0")
require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/image-classification/requirements.txt")
MODEL_CONFIG_CLASSES = list(MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
def pil_loader ( path : str ):
    '''Open an image file and return it converted to RGB.'''
    with open(path , '''rb''' ) as f:
        im = Image.open(f )
        return im.convert('''RGB''' )
@dataclass
class DataTrainingArguments :
    dataset_name : Optional[str] =field(
        default=None , metadata={
            '''help''': '''Name of a dataset from the hub (could be your own, possibly private dataset hosted on the hub).'''
        } , )
    dataset_config_name : Optional[str] =field(
        default=None , metadata={'''help''': '''The configuration name of the dataset to use (via the datasets library).'''} )
    train_dir : Optional[str] =field(default=None , metadata={'''help''': '''A folder containing the training data.'''} )
    validation_dir : Optional[str] =field(default=None , metadata={'''help''': '''A folder containing the validation data.'''} )
    train_val_split : Optional[float] =field(
        default=0.15 , metadata={'''help''': '''Percent to split off of train for validation.'''} )
    max_train_samples : Optional[int] =field(
        default=None , metadata={
            '''help''': (
                '''For debugging purposes or quicker training, truncate the number of training examples to this '''
                '''value if set.'''
            )
        } , )
    max_eval_samples : Optional[int] =field(
        default=None , metadata={
            '''help''': (
                '''For debugging purposes or quicker training, truncate the number of evaluation examples to this '''
                '''value if set.'''
            )
        } , )
    def __post_init__( self):
if self.dataset_name is None and (self.train_dir is None and self.validation_dir is None):
raise ValueError(
'''You must specify either a dataset name from the hub or a train and/or validation directory.''')
@dataclass
class ModelArguments :
    model_name_or_path : str =field(
        default='''google/vit-base-patch16-224-in21k''' , metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} , )
    model_type : Optional[str] =field(
        default=None , metadata={'''help''': '''If training from scratch, pass a model type from the list: ''' + ''', '''.join(MODEL_TYPES )} , )
    config_name : Optional[str] =field(
        default=None , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} )
    cache_dir : Optional[str] =field(
        default=None , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from s3'''} )
    model_revision : str =field(
        default='''main''' , metadata={'''help''': '''The specific model version to use (can be a branch name, tag name or commit id).'''} , )
    image_processor_name : str =field(default=None , metadata={'''help''': '''Name or path of preprocessor config.'''} )
    use_auth_token : bool =field(
        default=False , metadata={
            '''help''': (
                '''Will use the token generated when running `huggingface-cli login` (necessary to use this script '''
                '''with private models).'''
            )
        } , )
    ignore_mismatched_sizes : bool =field(
        default=False , metadata={'''help''': '''Will enable to load a pretrained model whose head dimensions are different.'''} , )
def collate_fn ( examples ):
    '''Stack image tensors and labels into a training batch.'''
    pixel_values = torch.stack([example['''pixel_values'''] for example in examples] )
    labels = torch.tensor([example['''labels'''] for example in examples] )
    return {"pixel_values": pixel_values, "labels": labels}
def main ( ):
'''simple docstring'''
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
    if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args , data_args , training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
    else:
        model_args , data_args , training_args = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry('''run_image_classification''' , model_args , data_args )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level )
    transformers.utils.logging.set_verbosity(log_level )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
    logger.warning(
        f'Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, '
        + f'distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fp16}' )
logger.info(f'Training/evaluation parameters {training_args}' )
# Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f'Output directory ({training_args.output_dir}) already exists and is not empty. '
'''Use --overwrite_output_dir to overcome.''' )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
f'Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '
'''the `--output_dir` or add `--overwrite_output_dir` to train from scratch.''' )
# Set seed before initializing model.
set_seed(training_args.seed )
# Initialize our dataset and prepare it for the 'image-classification' task.
    if data_args.dataset_name is not None:
        dataset = load_dataset(
            data_args.dataset_name , data_args.dataset_config_name , cache_dir=model_args.cache_dir , task='''image-classification''' , use_auth_token=True if model_args.use_auth_token else None , )
    else:
        data_files = {}
        if data_args.train_dir is not None:
            data_files['''train'''] = os.path.join(data_args.train_dir , '''**''' )
        if data_args.validation_dir is not None:
            data_files['''validation'''] = os.path.join(data_args.validation_dir , '''**''' )
        dataset = load_dataset(
            '''imagefolder''' , data_files=data_files , cache_dir=model_args.cache_dir , task='''image-classification''' , )
# If we don't have a validation split, split off a percentage of train as validation.
    data_args.train_val_split = None if '''validation''' in dataset.keys() else data_args.train_val_split
    if isinstance(data_args.train_val_split , float ) and data_args.train_val_split > 0.0:
        split = dataset['''train'''].train_test_split(data_args.train_val_split )
        dataset['''train'''] = split['''train''']
        dataset['''validation'''] = split['''test''']
# Prepare label mappings.
# We'll include these in the model's config to get human readable labels in the Inference API.
    labels = dataset['''train'''].features['''labels'''].names
    label2id , id2label = {}, {}
    for i, label in enumerate(labels ):
        label2id[label] = str(i )
        id2label[str(i )] = label
# Load the accuracy metric from the datasets package
    metric = evaluate.load('''accuracy''' )
# Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
# predictions and label_ids field) and has to return a dictionary string to float.
    def compute_metrics(p ):
        return metric.compute(predictions=np.argmax(p.predictions , axis=1 ) , references=p.label_ids )
    config = AutoConfig.from_pretrained(
        model_args.config_name or model_args.model_name_or_path , num_labels=len(labels ) , label2id=label2id , id2label=id2label , finetuning_task='''image-classification''' , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
    model = AutoModelForImageClassification.from_pretrained(
        model_args.model_name_or_path , from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) , config=config , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ignore_mismatched_sizes=model_args.ignore_mismatched_sizes , )
    image_processor = AutoImageProcessor.from_pretrained(
        model_args.image_processor_name or model_args.model_name_or_path , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# Define torchvision transforms to be applied to each image.
if "shortest_edge" in image_processor.size:
lowercase = image_processor.size['''shortest_edge''']
else:
lowercase = (image_processor.size['''height'''], image_processor.size['''width'''])
lowercase = Normalize(mean=image_processor.image_mean , std=image_processor.image_std )
lowercase = Compose(
[
RandomResizedCrop(lowerCAmelCase__ ),
RandomHorizontalFlip(),
ToTensor(),
normalize,
] )
lowercase = Compose(
[
Resize(lowerCAmelCase__ ),
CenterCrop(lowerCAmelCase__ ),
ToTensor(),
normalize,
] )
    def train_transforms(example_batch ):
        example_batch['''pixel_values'''] = [
            _train_transforms(pil_img.convert('''RGB''' ) ) for pil_img in example_batch['''image''']
        ]
        return example_batch
    def val_transforms(example_batch ):
        example_batch['''pixel_values'''] = [_val_transforms(pil_img.convert('''RGB''' ) ) for pil_img in example_batch['''image''']]
        return example_batch
if training_args.do_train:
if "train" not in dataset:
raise ValueError('''--do_train requires a train dataset''' )
        if data_args.max_train_samples is not None:
            dataset['''train'''] = (
                dataset['''train'''].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) )
            )
        # Set the training transforms
        dataset["train"].set_transform(train_transforms )
if training_args.do_eval:
if "validation" not in dataset:
raise ValueError('''--do_eval requires a validation dataset''' )
        if data_args.max_eval_samples is not None:
            dataset['''validation'''] = (
                dataset['''validation'''].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) )
            )
        # Set the validation transforms
        dataset["validation"].set_transform(val_transforms )
    # Initialize our trainer
    trainer = Trainer(
        model=model , args=training_args , train_dataset=dataset['''train'''] if training_args.do_train else None , eval_dataset=dataset['''validation'''] if training_args.do_eval else None , compute_metrics=compute_metrics , tokenizer=image_processor , data_collator=collate_fn , )
# Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint )
        trainer.save_model()
        trainer.log_metrics('''train''' , train_result.metrics )
        trainer.save_metrics('''train''' , train_result.metrics )
        trainer.save_state()
# Evaluation
    if training_args.do_eval:
        metrics = trainer.evaluate()
        trainer.log_metrics('''eval''' , metrics )
        trainer.save_metrics('''eval''' , metrics )
    # Write model card and (optionally) push to hub
    kwargs = {
        '''finetuned_from''': model_args.model_name_or_path,
        '''tasks''': '''image-classification''',
        '''dataset''': data_args.dataset_name,
        '''tags''': ['''image-classification''', '''vision'''],
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs )
    else:
        trainer.create_model_card(**kwargs )
if __name__ == "__main__":
main()
| 101 |
import json
import os
import unittest
from transformers.models.roc_bert.tokenization_roc_bert import (
VOCAB_FILES_NAMES,
RoCBertBasicTokenizer,
RoCBertTokenizer,
RoCBertWordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class lowercase ( TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class =RoCBertTokenizer
    rust_tokenizer_class =None
    test_rust_tokenizer =False
    space_between_special_tokens =True
    from_pretrained_filter =filter_non_english
    def setUp(self):
        super().setUp()
        vocab_tokens = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''[PAD]''', '''[MASK]''', '''你''', '''好''', '''是''', '''谁''', '''a''', '''b''', '''c''', '''d''']
        word_shape = {}
        word_pronunciation = {}
        for i, value in enumerate(vocab_tokens):
            word_shape[value] = i
            word_pronunciation[value] = i
        self.vocab_file = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['''vocab_file'''])
        self.word_shape_file = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['''word_shape_file'''])
        self.word_pronunciation_file = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['''word_pronunciation_file'''])
        with open(self.vocab_file ,'''w''' ,encoding='''utf-8''') as vocab_writer:
            vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens]))
        with open(self.word_shape_file ,'''w''' ,encoding='''utf-8''') as word_shape_writer:
            json.dump(word_shape ,word_shape_writer ,ensure_ascii=False)
        with open(self.word_pronunciation_file ,'''w''' ,encoding='''utf-8''') as word_pronunciation_writer:
            json.dump(word_pronunciation ,word_pronunciation_writer ,ensure_ascii=False)
    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file ,self.word_shape_file ,self.word_pronunciation_file)
        tokens = tokenizer.tokenize('''你好[SEP]你是谁''')
        self.assertListEqual(tokens ,['''你''', '''好''', '''[SEP]''', '''你''', '''是''', '''谁'''])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens) ,[5, 6, 2, 5, 7, 8])
        self.assertListEqual(tokenizer.convert_tokens_to_shape_ids(tokens) ,[5, 6, 2, 5, 7, 8])
        self.assertListEqual(tokenizer.convert_tokens_to_pronunciation_ids(tokens) ,[5, 6, 2, 5, 7, 8])
    def test_chinese(self):
        tokenizer = RoCBertBasicTokenizer()
self.assertListEqual(tokenizer.tokenize('''ah\u535A\u63A8zz''') ,['''ah''', '''\u535A''', '''\u63A8''', '''zz'''])
    def test_basic_tokenizer_lower(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=True)
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? ''') ,['''hello''', '''!''', '''how''', '''are''', '''you''', '''?'''])
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''') ,['''hello'''])
    def test_basic_tokenizer_lower_strip_accents_false(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=True ,strip_accents=False)
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''') ,['''hällo''', '''!''', '''how''', '''are''', '''you''', '''?'''])
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''') ,['''h\u00E9llo'''])
    def test_basic_tokenizer_lower_strip_accents_true(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=True ,strip_accents=True)
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''') ,['''hallo''', '''!''', '''how''', '''are''', '''you''', '''?'''])
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''') ,['''hello'''])
    def test_basic_tokenizer_lower_strip_accents_default(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=True)
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''') ,['''hallo''', '''!''', '''how''', '''are''', '''you''', '''?'''])
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''') ,['''hello'''])
    def test_basic_tokenizer_no_lower(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=False)
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? ''') ,['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''])
    def test_basic_tokenizer_no_lower_strip_accents_false(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=False ,strip_accents=False)
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''') ,['''HäLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''])
    def test_basic_tokenizer_no_lower_strip_accents_true(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=False ,strip_accents=True)
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''') ,['''HaLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''])
    def test_basic_tokenizer_respects_never_split_tokens(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=False ,never_split=['''[UNK]'''])
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? [UNK]''') ,['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?''', '''[UNK]'''])
    def test_wordpiece_tokenizer(self):
        vocab_tokens = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''want''', '''##want''', '''##ed''', '''wa''', '''un''', '''runn''', '''##ing''']
        vocab = {}
        for i, token in enumerate(vocab_tokens):
            vocab[token] = i
        tokenizer = RoCBertWordpieceTokenizer(vocab=vocab ,unk_token='''[UNK]''')
self.assertListEqual(tokenizer.tokenize('''''') ,[])
self.assertListEqual(tokenizer.tokenize('''unwanted running''') ,['''un''', '''##want''', '''##ed''', '''runn''', '''##ing'''])
self.assertListEqual(tokenizer.tokenize('''unwantedX running''') ,['''[UNK]''', '''runn''', '''##ing'''])
    def test_is_whitespace(self):
self.assertTrue(_is_whitespace(''' '''))
self.assertTrue(_is_whitespace('''\t'''))
self.assertTrue(_is_whitespace('''\r'''))
self.assertTrue(_is_whitespace('''\n'''))
self.assertTrue(_is_whitespace('''\u00A0'''))
self.assertFalse(_is_whitespace('''A'''))
self.assertFalse(_is_whitespace('''-'''))
    def test_is_control(self):
self.assertTrue(_is_control('''\u0005'''))
self.assertFalse(_is_control('''A'''))
self.assertFalse(_is_control(''' '''))
self.assertFalse(_is_control('''\t'''))
self.assertFalse(_is_control('''\r'''))
    def test_is_punctuation(self):
self.assertTrue(_is_punctuation('''-'''))
self.assertTrue(_is_punctuation('''$'''))
self.assertTrue(_is_punctuation('''`'''))
self.assertTrue(_is_punctuation('''.'''))
self.assertFalse(_is_punctuation('''A'''))
self.assertFalse(_is_punctuation(''' '''))
    def test_clean_text(self):
        tokenizer = self.get_tokenizer()
        # Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
        self.assertListEqual([tokenizer.tokenize(t) for t in ['''Test''', '''\xad''', '''test''']] ,[['''[UNK]'''], [], ['''[UNK]''']])
        if self.test_rust_tokenizer:
            rust_tokenizer = self.get_rust_tokenizer()
            self.assertListEqual(
                [rust_tokenizer.tokenize(t) for t in ['''Test''', '''\xad''', '''test''']] ,[['''[UNK]'''], [], ['''[UNK]''']])
    def test_offsets_with_special_characters(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})'):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name ,**kwargs)
                sentence = f'A, naïve {tokenizer_r.mask_token} AllenNLP sentence.'
                tokens = tokenizer_r.encode_plus(
                    sentence ,return_attention_mask=False ,return_token_type_ids=False ,return_offsets_mapping=True ,add_special_tokens=True ,)
                do_lower_case = tokenizer_r.do_lower_case if hasattr(tokenizer_r ,'''do_lower_case''') else False
                expected_results = (
[
((0, 0), tokenizer_r.cls_token),
((0, 1), '''A'''),
((1, 2), ''','''),
((3, 5), '''na'''),
((5, 6), '''##ï'''),
((6, 8), '''##ve'''),
((9, 1_5), tokenizer_r.mask_token),
((1_6, 2_1), '''Allen'''),
((2_1, 2_3), '''##NL'''),
((2_3, 2_4), '''##P'''),
((2_5, 3_3), '''sentence'''),
((3_3, 3_4), '''.'''),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), '''a'''),
((1, 2), ''','''),
((3, 8), '''naive'''),
((9, 1_5), tokenizer_r.mask_token),
((1_6, 2_1), '''allen'''),
((2_1, 2_3), '''##nl'''),
((2_3, 2_4), '''##p'''),
((2_5, 3_3), '''sentence'''),
((3_3, 3_4), '''.'''),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results] ,tokenizer_r.convert_ids_to_tokens(tokens['''input_ids''']))
self.assertEqual([e[0] for e in expected_results] ,tokens['''offset_mapping'''])
    def test_change_tokenize_chinese_chars(self):
        list_of_commun_chinese_char = ['''的''', '''人''', '''有''']
        text_with_chinese_char = ''''''.join(list_of_commun_chinese_char)
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})'):
                kwargs['''tokenize_chinese_chars'''] = True
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name ,**kwargs)
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name ,**kwargs)
                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char ,add_special_tokens=False)
                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char ,add_special_tokens=False)
                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r)
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p)
                # it is expected that each Chinese character is not preceded by "##"
                self.assertListEqual(tokens_without_spe_char_p ,list_of_commun_chinese_char)
                self.assertListEqual(tokens_without_spe_char_r ,list_of_commun_chinese_char)
                kwargs['''tokenize_chinese_chars'''] = False
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name ,**kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name ,**kwargs)
                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char ,add_special_tokens=False)
                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char ,add_special_tokens=False)
                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r)
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p)
                # it is expected that only the first Chinese character is not preceded by "##".
                expected_tokens = [
                    f'##{token}' if idx != 0 else token for idx, token in enumerate(list_of_commun_chinese_char)
                ]
                self.assertListEqual(tokens_without_spe_char_p ,expected_tokens)
                self.assertListEqual(tokens_without_spe_char_r ,expected_tokens)
    @slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class(self.vocab_file ,self.word_shape_file ,self.word_pronunciation_file)
        text = tokenizer.encode('''你好''' ,add_special_tokens=False)
        text_a = tokenizer.encode('''你是谁''' ,add_special_tokens=False)
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text ,text_a)
        assert encoded_sentence == [1] + text + [2]
        assert encoded_pair == [1] + text + [2] + text_a + [2]
    def test_prepare_for_model(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f'{tokenizer.__class__.__name__}'):
                string_sequence = '''你好,你是谁'''
                tokens = tokenizer.tokenize(string_sequence)
                tokens_ids = tokenizer.convert_tokens_to_ids(tokens)
                tokens_shape_ids = tokenizer.convert_tokens_to_shape_ids(tokens)
                tokens_proun_ids = tokenizer.convert_tokens_to_pronunciation_ids(tokens)
                prepared_input_dict = tokenizer.prepare_for_model(
                    tokens_ids ,tokens_shape_ids ,tokens_proun_ids ,add_special_tokens=True)
                input_dict = tokenizer.encode_plus(string_sequence ,add_special_tokens=True)
                self.assertEqual(input_dict ,prepared_input_dict)
| 101 | 1 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..utils import cached_file
# docstyle-ignore
__UpperCAmelCase = '\nHuman: <<task>>\n\nAssistant: '
__UpperCAmelCase = 'huggingface-tools/default-prompts'
__UpperCAmelCase = {'chat': 'chat_prompt_template.txt', 'run': 'run_prompt_template.txt'}
def download_prompt ( prompt_or_repo_id , agent_name , mode="run" ) -> str:
    '''Download a prompt template from the hub, or return the raw prompt as-is.'''
    if prompt_or_repo_id is None:
        prompt_or_repo_id = DEFAULT_PROMPTS_REPO
    # prompt is considered a repo ID when it does not contain any kind of space
    if re.search("""\\s""" , prompt_or_repo_id ) is not None:
        return prompt_or_repo_id
    prompt_file = cached_file(
        prompt_or_repo_id , PROMPT_FILES[mode] , repo_type="""dataset""" , user_agent={"""agent""": agent_name} )
    with open(prompt_file , """r""" , encoding="""utf-8""" ) as f:
        return f.read()
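# A minimal usage sketch (illustrative; the agent name is a made-up value,
# and passing None falls back to the module's DEFAULT_PROMPTS_REPO):
#
#   run_template = download_prompt(None, "my-agent", mode="run")
#   chat_template = download_prompt(None, "my-agent", mode="chat")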
| 28 |
from math import ceil
def solution ( n : int = 1001 ) -> int:
    '''
    Sum of the numbers on the diagonals of an n x n number spiral
    (Project Euler problem 28). For ring i (side length 2*i + 1) the four
    corners sum to 4*(2*i + 1)**2 - 12*i, accumulated below as
    4 * odd**2 - 6 * even.
    '''
    total = 1
    for i in range(1 , int(ceil(n / 2.0 ) ) ):
        odd = 2 * i + 1
        even = 2 * i
        total = total + 4 * odd**2 - 6 * even
    return total
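# Quick check against the problem statement's worked example: the diagonals
# of a 5 x 5 spiral sum to 1 + 3 + 5 + 7 + 9 + 13 + 17 + 21 + 25 = 101,
# so solution(5) == 101.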
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
print(solution())
else:
        try:
            n = int(sys.argv[1])
            print(solution(n))
except ValueError:
print('Invalid entry - please enter a number')
| 28 | 1 |
"""simple docstring"""
from __future__ import annotations
class XORCipher:
    '''simple docstring'''

    def __init__( self , key : int = 0):
        '''Store an optional default key, used when none is passed to a method.'''
        self.__key = key

    def encrypt( self , content : str , key : int):
        '''simple docstring'''
        assert isinstance(key , int) and isinstance(content , str)
        key = key or self.__key or 1
        # make sure key is an appropriate size
        key %= 255
        return [chr(ord(ch) ^ key) for ch in content]

    def decrypt( self , content : str , key : int):
        '''simple docstring'''
        assert isinstance(key , int) and isinstance(content , str)
        key = key or self.__key or 1
        # make sure key is an appropriate size
        key %= 255
        return [chr(ord(ch) ^ key) for ch in content]

    def encrypt_string( self , content : str , key : int = 0):
        '''simple docstring'''
        assert isinstance(key , int) and isinstance(content , str)
        key = key or self.__key or 1
        # make sure key can be any size
        while key > 255:
            key -= 255
        # This will be returned
        ans = ''''''
        for ch in content:
            ans += chr(ord(ch) ^ key)
        return ans

    def decrypt_string( self , content : str , key : int = 0):
        '''simple docstring'''
        assert isinstance(key , int) and isinstance(content , str)
        key = key or self.__key or 1
        # make sure key can be any size
        while key > 255:
            key -= 255
        # This will be returned
        ans = ''''''
        for ch in content:
            ans += chr(ord(ch) ^ key)
        return ans

    def encrypt_file( self , file : str , key : int = 0):
        '''simple docstring'''
        assert isinstance(file , str) and isinstance(key , int)
        try:
            with open(file) as fin, open('''encrypt.out''' , '''w+''') as fout:
                # actual encrypt-process
                for line in fin:
                    fout.write(self.encrypt_string(line , key))
        except OSError:
            return False
        return True

    def decrypt_file( self , file : str , key : int):
        '''simple docstring'''
        assert isinstance(file , str) and isinstance(key , int)
        try:
            with open(file) as fin, open('''decrypt.out''' , '''w+''') as fout:
                # actual decrypt-process
                for line in fin:
                    fout.write(self.decrypt_string(line , key))
        except OSError:
            return False
        return True
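# Round-trip sanity check (illustrative): XOR-ing twice with the same key is
# the identity, since (x ^ k) ^ k == x.
#
#   cipher = XORCipher()
#   assert cipher.decrypt_string(cipher.encrypt_string("hallo welt", 67), 67) == "hallo welt"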
# Tests
# crypt = XORCipher()
# key = 67
# # test encrypt
# print(crypt.encrypt("hallo welt",key))
# # test decrypt
# print(crypt.decrypt(crypt.encrypt("hallo welt",key), key))
# # test encrypt_string
# print(crypt.encrypt_string("hallo welt",key))
# # test decrypt_string
# print(crypt.decrypt_string(crypt.encrypt_string("hallo welt",key),key))
# if (crypt.encrypt_file("test.txt",key)):
# print("encrypt successful")
# else:
# print("encrypt unsuccessful")
# if (crypt.decrypt_file("encrypt.out",key)):
# print("decrypt successful")
# else:
# print("decrypt unsuccessful")
| 91 |
import json
import multiprocessing as mp
import re
from collections import defaultdict
from functools import partial
from typing import Dict, List, Optional, Set, Tuple, Type
from datasets import Dataset
from datasketch import MinHash, MinHashLSH
from dpu_utils.utils.iterators import ThreadedIterator
from tqdm import tqdm
NON_ALPHA = re.compile("[^A-Za-z_0-9]")
# parameters used in DuplicationIndex
MIN_NUM_TOKENS = 10
NUM_PERM = 256
def get_min_hash( tokens : List[str] ) -> Optional[MinHash]:
    """Compute a MinHash signature for a list of tokens, or None if too short."""
    if len(tokens ) < MIN_NUM_TOKENS:
        return None
    min_hash = MinHash(num_perm=NUM_PERM )
    for token in set(tokens ):
        min_hash.update(token.encode() )
    return min_hash
def get_tokens( code : str ) -> Set[str]:
    """Split code into non-empty tokens on non-alphanumeric characters."""
    return {t for t in NON_ALPHA.split(code ) if len(t.strip() ) > 0}
class DuplicationIndex:
    """simple docstring"""
    def __init__(self , *,
        duplication_jaccard_threshold : float = 0.85 , ):
        """simple docstring"""
        self._duplication_jaccard_threshold = duplication_jaccard_threshold
        self._num_perm = NUM_PERM
        self._index = MinHashLSH(threshold=self._duplication_jaccard_threshold , num_perm=self._num_perm )
        self._duplicate_clusters = defaultdict(set )
    def add(self , code_key , min_hash ):
        """Insert a key with its MinHash and attach near-duplicates to a cluster."""
        close_duplicates = self._index.query(min_hash )
        if code_key in self._index.keys:
            print(F'''Duplicate key {code_key}''' )
            return
        self._index.insert(code_key , min_hash )
        if len(close_duplicates ) > 0:
            for base_duplicate in close_duplicates:
                if base_duplicate in self._duplicate_clusters:
                    self._duplicate_clusters[base_duplicate].add(code_key )
                    break
            else:
                self._duplicate_clusters[close_duplicates[0]].add(code_key )
    def get_duplicate_clusters(self ):
        """simple docstring"""
        duplicate_clusters = []
        for base, duplicates in self._duplicate_clusters.items():
            cluster = [base] + list(duplicates )
            # reformat the cluster to be a list of dict
            cluster = [{"base_index": el[0], "repo_name": el[1], "path": el[2]} for el in cluster]
            duplicate_clusters.append(cluster )
        return duplicate_clusters
    def save(self , filepath ):
        """simple docstring"""
        duplicate_clusters = self.get_duplicate_clusters()
        with open(filepath , "w" ) as f:
            json.dump(duplicate_clusters , f )
def _compute_min_hash( element : Any ) -> List[Any]:
    """simple docstring"""
    index, data = element
    min_hash = get_min_hash([t for t in NON_ALPHA.split(data["content"] ) if len(t.strip() ) > 0] )
    if min_hash is not None:
        return (index, data["repo_name"], data["path"]), min_hash
def minhash_iter( dataset_iterator : Type[Dataset] ) -> List[Any]:
    """simple docstring"""
    with mp.Pool() as pool:
        for data in pool.imap_unordered(
            _compute_min_hash , ThreadedIterator(dataset_iterator , max_queue_size=1_0000 ) , chunksize=100 , ):
            if data is not None:
                yield data
def make_duplicate_clusters( dataset_iterator : Type[Dataset] , jaccard_threshold : float ) -> Dict:
    """simple docstring"""
    di = DuplicationIndex(duplication_jaccard_threshold=jaccard_threshold )
    for filename, min_hash in tqdm(ThreadedIterator(minhash_iter(enumerate(dataset_iterator ) ) , max_queue_size=100 ) ):
        di.add(filename , min_hash )
    # Returns a List[Cluster] where Cluster is List[str] with the filenames.
    return di.get_duplicate_clusters()
def jaccard_similarity( codea : str , codeb : str ) -> float:
    """simple docstring"""
    tokensa = get_tokens(codea )
    tokensb = get_tokens(codeb )
    return len(tokensa & tokensb ) / len(tokensa | tokensb )
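# Worked example (illustrative): for "a b c" and "b c d" the token sets are
# {a, b, c} and {b, c, d}, so the Jaccard similarity is
# |{b, c}| / |{a, b, c, d}| = 2 / 4 = 0.5.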
_lowercase: int = None
def _find_cluster_extremes_shared( cluster : str , jaccard_threshold : Tuple ) -> int:
    """simple docstring"""
    extremes = []
    for elementa in cluster:
        codea = _shared_dataset[elementa["base_index"]]["content"]
        for elementb in extremes:
            codeb = _shared_dataset[elementb["base_index"]]["content"]
            if jaccard_similarity(codea , codeb ) >= jaccard_threshold:
                elementb["copies"] += 1
                break
        else:
            elementa["copies"] = 1
            extremes.append(elementa )
    return extremes
def find_extremes( cluster_list : str , dataset : List[str] , jaccard_threshold : int ) -> Tuple:
    """simple docstring"""
    global _shared_dataset
    _shared_dataset = dataset
    extremes_list = []
    f = partial(_find_cluster_extremes_shared , jaccard_threshold=jaccard_threshold )
    with mp.Pool() as pool:
        for extremes in tqdm(
            pool.imap_unordered(
                f , cluster_list , ) , total=len(cluster_list ) , ):
            extremes_list.append(extremes )
    return extremes_list
def deduplicate_dataset( dataset : Type[Dataset] , jaccard_threshold : float = 0.85 ) -> Tuple[Type[Dataset], List[List[Dict]]]:
    """simple docstring"""
    duplicate_clusters = make_duplicate_clusters(dataset , jaccard_threshold )
    duplicate_indices = {x["base_index"] for cluster in duplicate_clusters for x in cluster}
    extreme_dict = {}
    extremes_clusters = find_extremes(duplicate_clusters , dataset , jaccard_threshold )
    for extremes in extremes_clusters:
        for element in extremes:
            extreme_dict[element["base_index"]] = element
    remove_indices = duplicate_indices - set(extreme_dict.keys() )
    ds_filter = dataset.filter(lambda x , idx : idx not in remove_indices , with_indices=True )
    # update duplicate_clusters
    for cluster in duplicate_clusters:
        for element in cluster:
            element["is_extreme"] = element["base_index"] in extreme_dict
            if element["is_extreme"]:
                element["copies"] = extreme_dict[element["base_index"]]["copies"]
    print(f'''Original dataset size: {len(dataset )}''' )
    print(f'''Number of duplicate clusters: {len(duplicate_clusters )}''' )
    print(f'''Files in duplicate cluster: {len(duplicate_indices )}''' )
    print(f'''Unique files in duplicate cluster: {len(extreme_dict )}''' )
    print(f'''Filtered dataset size: {len(ds_filter )}''' )
    return ds_filter, duplicate_clusters
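# A minimal end-to-end sketch (illustrative; assumes a `datasets.Dataset`
# with "content", "repo_name" and "path" columns — the dataset id below is
# a made-up placeholder):
#
#   from datasets import load_dataset
#   ds = load_dataset("some-org/some-code-dataset", split="train")
#   ds_dedup, clusters = deduplicate_dataset(ds, jaccard_threshold=0.85)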
| 227 | 0 |
import math
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import SchedulerMixin, SchedulerOutput
class IPNDMScheduler ( SchedulerMixin , ConfigMixin ):
    order = 1

    @register_to_config
    def __init__( self , num_train_timesteps : int = 1000 , trained_betas : Optional[Union[np.ndarray, List[float]]] = None ) -> None:
        # set `betas`, `alphas`, `timesteps`
        self.set_timesteps(num_train_timesteps )
        # standard deviation of the initial noise distribution
        self.init_noise_sigma = 1.0
        # For now we only support F-PNDM, i.e. the runge-kutta method
        # For more information on the algorithm please take a look at the paper: https://arxiv.org/pdf/2202.09778.pdf
        # mainly at formula (9), (12), (13) and the Algorithm 2.
        self.pndm_order = 4
        # running values
        self.ets = []

    def set_timesteps( self , num_inference_steps : int , device = None ) -> None:
        self.num_inference_steps = num_inference_steps
        steps = torch.linspace(1 , 0 , num_inference_steps + 1 )[:-1]
        steps = torch.cat([steps, torch.tensor([0.0] )] )
        if self.config.trained_betas is not None:
            self.betas = torch.tensor(self.config.trained_betas , dtype=torch.float32 )
        else:
            self.betas = torch.sin(steps * math.pi / 2 ) ** 2
        self.alphas = (1.0 - self.betas**2) ** 0.5
        timesteps = (torch.atan2(self.betas , self.alphas ) / math.pi * 2)[:-1]
        self.timesteps = timesteps.to(device )
        self.ets = []

    def step( self , model_output , timestep , sample , return_dict = True , ) -> Union[SchedulerOutput, Tuple]:
        if self.num_inference_steps is None:
            raise ValueError(
                'Number of inference steps is \'None\', you need to run \'set_timesteps\' after creating the scheduler' )
        timestep_index = (self.timesteps == timestep).nonzero().item()
        prev_timestep_index = timestep_index + 1
        ets = sample * self.betas[timestep_index] + model_output * self.alphas[timestep_index]
        self.ets.append(ets )
        # linear multistep (Adams-Bashforth style) combination, up to order 4
        if len(self.ets ) == 1:
            ets = self.ets[-1]
        elif len(self.ets ) == 2:
            ets = (3 * self.ets[-1] - self.ets[-2]) / 2
        elif len(self.ets ) == 3:
            ets = (23 * self.ets[-1] - 16 * self.ets[-2] + 5 * self.ets[-3]) / 12
        else:
            ets = (1 / 24) * (55 * self.ets[-1] - 59 * self.ets[-2] + 37 * self.ets[-3] - 9 * self.ets[-4])
        prev_sample = self._get_prev_sample(sample , timestep_index , prev_timestep_index , ets )
        if not return_dict:
            return (prev_sample,)
        return SchedulerOutput(prev_sample=prev_sample )

    def scale_model_input( self , sample , *args , **kwargs ) -> torch.FloatTensor:
        return sample

    def _get_prev_sample( self , sample , timestep_index , prev_timestep_index , ets ):
        alpha = self.alphas[timestep_index]
        sigma = self.betas[timestep_index]
        next_alpha = self.alphas[prev_timestep_index]
        next_sigma = self.betas[prev_timestep_index]
        pred = (sample - sigma * ets) / max(alpha , 1E-8 )
        prev_sample = next_alpha * pred + ets * next_sigma
        return prev_sample

    def __len__( self ):
        return self.config.num_train_timesteps
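# A minimal usage sketch (illustrative; `model` stands in for any module
# that predicts noise for a (sample, timestep) pair):
#
#   scheduler = IPNDMScheduler(num_train_timesteps=1000)
#   scheduler.set_timesteps(50)
#   sample = torch.randn(1, 3, 32, 32) * scheduler.init_noise_sigma
#   for t in scheduler.timesteps:
#       model_output = model(sample, t)
#       sample = scheduler.step(model_output, t, sample).prev_sample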
| 368 |
import warnings
from ...utils import logging
from .image_processing_flava import FlavaImageProcessor
UpperCAmelCase_ = logging.get_logger(__name__)
class FlavaFeatureExtractor ( FlavaImageProcessor ):
    def __init__( self , *args , **kwargs ) -> None:
        warnings.warn(
            'The class FlavaFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'
            ' use FlavaImageProcessor instead.' , FutureWarning , )
        super().__init__(*args , **kwargs )
| 295 | 0 |
"""simple docstring"""
def _SCREAMING_SNAKE_CASE ( input_str : str ) -> bool:
    '''
    Return True if all characters in ``input_str`` are distinct.

    >>> _SCREAMING_SNAKE_CASE("abcd")
    True
    >>> _SCREAMING_SNAKE_CASE("abab")
    False
    '''
    bitmap = 0
    for ch in input_str:
        ch_unicode = ord(ch )
        ch_bit_index_on = pow(2 , ch_unicode )
        # If we already turned on bit for current character's unicode
        if bitmap >> ch_unicode & 1 == 1:
            return False
        bitmap |= ch_bit_index_on
    return True
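# The bitmap above is an integer used as a set of seen code points:
# `bitmap >> ch_unicode & 1` reads the bit for `ch`, and OR-ing in
# pow(2, ch_unicode) (equivalently 1 << ch_unicode) marks it as seen, so the
# whole scan runs in O(len(input_str)) without an auxiliary container.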
if __name__ == "__main__":
import doctest
doctest.testmod()
| 220 |
"""simple docstring"""
import unittest
from transformers import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING, is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
    class Image:
        @staticmethod
        def open( *args , **kwargs ):
            pass
@is_pipeline_test
@require_torch
@require_vision
class VisualQuestionAnsweringPipelineTests ( unittest.TestCase ):
    model_mapping =MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING
    def get_test_pipeline( self , model , tokenizer , processor ):
        vqa_pipeline = pipeline('visual-question-answering' , model='hf-internal-testing/tiny-vilt-random-vqa' )
        examples = [
{
'image': Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ),
'question': 'How many cats are there?',
},
{
'image': './tests/fixtures/tests_samples/COCO/000000039769.png',
'question': 'How many cats are there?',
},
]
return vqa_pipeline, examples
    def run_pipeline_test( self , vqa_pipeline , examples ):
        outputs = vqa_pipeline(examples , top_k=1 )
        self.assertEqual(
            outputs , [
                [{'score': ANY(float ), 'answer': ANY(str )}],
                [{'score': ANY(float ), 'answer': ANY(str )}],
            ] , )
    @require_torch
    def test_small_model_pt( self ):
        vqa_pipeline = pipeline('visual-question-answering' , model='hf-internal-testing/tiny-vilt-random-vqa' )
        image = './tests/fixtures/tests_samples/COCO/000000039769.png'
        question = 'How many cats are there?'
        outputs = vqa_pipeline(image=image , question='How many cats are there?' , top_k=2 )
        self.assertEqual(
            outputs , [{'score': ANY(float ), 'answer': ANY(str )}, {'score': ANY(float ), 'answer': ANY(str )}] )
        outputs = vqa_pipeline({'image': image, 'question': question} , top_k=2 )
        self.assertEqual(
            outputs , [{'score': ANY(float ), 'answer': ANY(str )}, {'score': ANY(float ), 'answer': ANY(str )}] )
    @slow
    @require_torch
    def test_large_model_pt( self ):
        vqa_pipeline = pipeline('visual-question-answering' , model='dandelin/vilt-b32-finetuned-vqa' )
        image = './tests/fixtures/tests_samples/COCO/000000039769.png'
        question = 'How many cats are there?'
        outputs = vqa_pipeline(image=image , question=question , top_k=2 )
        self.assertEqual(
            nested_simplify(outputs , decimals=4 ) , [{'score': 0.8_7_9_9, 'answer': '2'}, {'score': 0.2_9_6, 'answer': '1'}] )
        outputs = vqa_pipeline({'image': image, 'question': question} , top_k=2 )
        self.assertEqual(
            nested_simplify(outputs , decimals=4 ) , [{'score': 0.8_7_9_9, 'answer': '2'}, {'score': 0.2_9_6, 'answer': '1'}] )
        outputs = vqa_pipeline(
            [{'image': image, 'question': question}, {'image': image, 'question': question}] , top_k=2 )
        self.assertEqual(
            nested_simplify(outputs , decimals=4 ) , [[{'score': 0.8_7_9_9, 'answer': '2'}, {'score': 0.2_9_6, 'answer': '1'}]] * 2 , )
@require_tf
@unittest.skip('Visual question answering not implemented in TF' )
    def test_small_model_tf( self ):
pass
| 220 | 1 |
'''simple docstring'''
DIGITS_SQUARED = [sum(int(c, 1_0) ** 2 for c in i.__str__()) for i in range(1_0_0_0_0_0)]
def next_number ( number ):
    sum_of_digits_squared = 0
    while number:
        # Increased Speed Slightly by checking every 5 digits together.
        sum_of_digits_squared += DIGITS_SQUARED[number % 10_0000]
        number //= 10_0000
    return sum_of_digits_squared
# There are 2 Chains made,
# One ends with 89 with the chain member 58 being the one which when declared first,
# there will be the least number of iterations for all the members to be checked.
# The other one ends with 1 and has only one element 1.
# So 58 and 1 are chosen to be declared at the starting.
# Changed dictionary to an array to quicken the solution
CHAINS: list[bool | None] = [None] * 1_0_0_0_0_0_0_0
CHAINS[0] = True   # the chain starting at 1 ends at 1
CHAINS[57] = False  # the chain starting at 58 ends at 89
def chain ( number ):
    if CHAINS[number - 1] is not None:
        return CHAINS[number - 1]  # type: ignore
    number_chain = chain(next_number(number ) )
    CHAINS[number - 1] = number_chain
    while number < 1000_0000:
        CHAINS[number - 1] = number_chain
        number *= 10
    return number_chain
def solution ( number = 1000_0000 ):
    for i in range(1 , number ):
        if CHAINS[i] is None:
            chain(i + 1 )
    return CHAINS[:number].count(False )
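# Hedged sanity note: counting False entries counts the chains that reach 89;
# for Project Euler 92 the expected value of solution() is 8581146.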
if __name__ == "__main__":
import doctest
doctest.testmod()
print(F"{solution() = }") | 96 |
'''simple docstring'''
import re
import string
from collections import Counter
import sacrebleu
import sacremoses
from packaging import version
import datasets
_CITATION = '\n@inproceedings{xu-etal-2016-optimizing,\n title = {Optimizing Statistical Machine Translation for Text Simplification},\n authors={Xu, Wei and Napoles, Courtney and Pavlick, Ellie and Chen, Quanze and Callison-Burch, Chris},\n journal = {Transactions of the Association for Computational Linguistics},\n volume = {4},\n year={2016},\n url = {https://www.aclweb.org/anthology/Q16-1029},\n pages = {401--415\n},\n@inproceedings{post-2018-call,\n title = "A Call for Clarity in Reporting {BLEU} Scores",\n author = "Post, Matt",\n booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",\n month = oct,\n year = "2018",\n address = "Belgium, Brussels",\n publisher = "Association for Computational Linguistics",\n url = "https://www.aclweb.org/anthology/W18-6319",\n pages = "186--191",\n}\n'
_DESCRIPTION = '\\nWIKI_SPLIT is the combination of three metrics SARI, EXACT and SACREBLEU\nIt can be used to evaluate the quality of machine-generated texts.\n'
_KWARGS_DESCRIPTION = '\nCalculates sari score (between 0 and 100) given a list of source and predicted\nsentences, and a list of lists of reference sentences. It also computes the BLEU score as well as the exact match score.\nArgs:\n sources: list of source sentences where each sentence should be a string.\n predictions: list of predicted sentences where each sentence should be a string.\n references: list of lists of reference sentences where each sentence should be a string.\nReturns:\n sari: sari score\n sacrebleu: sacrebleu score\n exact: exact score\n\nExamples:\n >>> sources=["About 95 species are currently accepted ."]\n >>> predictions=["About 95 you now get in ."]\n >>> references=[["About 95 species are currently known ."]]\n >>> wiki_split = datasets.load_metric("wiki_split")\n >>> results = wiki_split.compute(sources=sources, predictions=predictions, references=references)\n >>> print(results)\n {\'sari\': 21.805555555555557, \'sacrebleu\': 14.535768424205482, \'exact\': 0.0}\n'
def normalize_answer ( text ):
    def remove_articles(text ):
        regex = re.compile(r'\b(a|an|the)\b' , re.UNICODE )
        return re.sub(regex , ' ' , text )
    def white_space_fix(text ):
        return " ".join(text.split() )
    def remove_punc(text ):
        exclude = set(string.punctuation )
        return "".join(ch for ch in text if ch not in exclude )
    def lower(text ):
        return text.lower()
    return white_space_fix(remove_articles(remove_punc(lower(text ) ) ) )
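# Illustrative example: normalize_answer("The  Quick, brown-fox!") lowers the
# text, strips punctuation and articles, and collapses whitespace, yielding
# "quick brownfox" (the hyphen is punctuation, so it is removed too).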
def compute_exact ( a_gold , a_pred ):
    return int(normalize_answer(a_gold ) == normalize_answer(a_pred ) )
def compute_em ( predictions , references ):
    scores = [any(compute_exact(ref , pred ) for ref in refs ) for pred, refs in zip(predictions , references )]
    return (sum(scores ) / len(scores )) * 100
def SARIngram ( sgrams , cgrams , rgramslist , numref ):
    rgramsall = [rgram for rgrams in rgramslist for rgram in rgrams]
    rgramcounter = Counter(rgramsall )
    sgramcounter = Counter(sgrams )
    sgramcounter_rep = Counter()
    for sgram, scount in sgramcounter.items():
        sgramcounter_rep[sgram] = scount * numref
    cgramcounter = Counter(cgrams )
    cgramcounter_rep = Counter()
    for cgram, ccount in cgramcounter.items():
        cgramcounter_rep[cgram] = ccount * numref
    # KEEP
    keepgramcounter_rep = sgramcounter_rep & cgramcounter_rep
    keepgramcountergood_rep = keepgramcounter_rep & rgramcounter
    keepgramcounterall_rep = sgramcounter_rep & rgramcounter
    keeptmpscorea = 0
    keeptmpscoreb = 0
    for keepgram in keepgramcountergood_rep:
        keeptmpscorea += keepgramcountergood_rep[keepgram] / keepgramcounter_rep[keepgram]
        # Fix an alleged bug [2] in the keep score computation.
        # keeptmpscore2 += keepgramcountergood_rep[keepgram] / keepgramcounterall_rep[keepgram]
        keeptmpscoreb += keepgramcountergood_rep[keepgram]
    # Define 0/0=1 instead of 0 to give higher scores for predictions that match
    # a target exactly.
    keepscore_precision = 1
    keepscore_recall = 1
    if len(keepgramcounter_rep ) > 0:
        keepscore_precision = keeptmpscorea / len(keepgramcounter_rep )
    if len(keepgramcounterall_rep ) > 0:
        # Fix an alleged bug [2] in the keep score computation.
        # keepscore_recall = keeptmpscore2 / len(keepgramcounterall_rep)
        keepscore_recall = keeptmpscoreb / sum(keepgramcounterall_rep.values() )
    keepscore = 0
    if keepscore_precision > 0 or keepscore_recall > 0:
        keepscore = 2 * keepscore_precision * keepscore_recall / (keepscore_precision + keepscore_recall)
    # DELETION
    delgramcounter_rep = sgramcounter_rep - cgramcounter_rep
    delgramcountergood_rep = delgramcounter_rep - rgramcounter
    delgramcounterall_rep = sgramcounter_rep - rgramcounter
    deltmpscorea = 0
    deltmpscoreb = 0
    for delgram in delgramcountergood_rep:
        deltmpscorea += delgramcountergood_rep[delgram] / delgramcounter_rep[delgram]
        deltmpscoreb += delgramcountergood_rep[delgram] / delgramcounterall_rep[delgram]
    # Define 0/0=1 instead of 0 to give higher scores for predictions that match
    # a target exactly.
    delscore_precision = 1
    if len(delgramcounter_rep ) > 0:
        delscore_precision = deltmpscorea / len(delgramcounter_rep )
    # ADDITION
    addgramcounter = set(cgramcounter ) - set(sgramcounter )
    addgramcountergood = set(addgramcounter ) & set(rgramcounter )
    addgramcounterall = set(rgramcounter ) - set(sgramcounter )
    addtmpscore = 0
    for addgram in addgramcountergood:
        addtmpscore += 1
    # Define 0/0=1 instead of 0 to give higher scores for predictions that match
    # a target exactly.
    addscore_precision = 1
    addscore_recall = 1
    if len(addgramcounter ) > 0:
        addscore_precision = addtmpscore / len(addgramcounter )
    if len(addgramcounterall ) > 0:
        addscore_recall = addtmpscore / len(addgramcounterall )
    addscore = 0
    if addscore_precision > 0 or addscore_recall > 0:
        addscore = 2 * addscore_precision * addscore_recall / (addscore_precision + addscore_recall)
    return (keepscore, delscore_precision, addscore)
def SARIsent ( ssent , csent , rsents ):
    numref = len(rsents )
    sagrams = ssent.split(' ' )
    cagrams = csent.split(' ' )
    sbgrams = []
    cbgrams = []
    scgrams = []
    ccgrams = []
    sdgrams = []
    cdgrams = []
    ragramslist = []
    rbgramslist = []
    rcgramslist = []
    rdgramslist = []
    for rsent in rsents:
        ragrams = rsent.split(' ' )
        rbgrams = []
        rcgrams = []
        rdgrams = []
        ragramslist.append(ragrams )
        for i in range(0 , len(ragrams ) - 1 ):
            if i < len(ragrams ) - 1:
                rbgram = ragrams[i] + ' ' + ragrams[i + 1]
                rbgrams.append(rbgram )
            if i < len(ragrams ) - 2:
                rcgram = ragrams[i] + ' ' + ragrams[i + 1] + ' ' + ragrams[i + 2]
                rcgrams.append(rcgram )
            if i < len(ragrams ) - 3:
                rdgram = ragrams[i] + ' ' + ragrams[i + 1] + ' ' + ragrams[i + 2] + ' ' + ragrams[i + 3]
                rdgrams.append(rdgram )
        rbgramslist.append(rbgrams )
        rcgramslist.append(rcgrams )
        rdgramslist.append(rdgrams )
    for i in range(0 , len(sagrams ) - 1 ):
        if i < len(sagrams ) - 1:
            sbgram = sagrams[i] + ' ' + sagrams[i + 1]
            sbgrams.append(sbgram )
        if i < len(sagrams ) - 2:
            scgram = sagrams[i] + ' ' + sagrams[i + 1] + ' ' + sagrams[i + 2]
            scgrams.append(scgram )
        if i < len(sagrams ) - 3:
            sdgram = sagrams[i] + ' ' + sagrams[i + 1] + ' ' + sagrams[i + 2] + ' ' + sagrams[i + 3]
            sdgrams.append(sdgram )
    for i in range(0 , len(cagrams ) - 1 ):
        if i < len(cagrams ) - 1:
            cbgram = cagrams[i] + ' ' + cagrams[i + 1]
            cbgrams.append(cbgram )
        if i < len(cagrams ) - 2:
            ccgram = cagrams[i] + ' ' + cagrams[i + 1] + ' ' + cagrams[i + 2]
            ccgrams.append(ccgram )
        if i < len(cagrams ) - 3:
            cdgram = cagrams[i] + ' ' + cagrams[i + 1] + ' ' + cagrams[i + 2] + ' ' + cagrams[i + 3]
            cdgrams.append(cdgram )
    (keepascore , delascore , addascore) = SARIngram(sagrams , cagrams , ragramslist , numref )
    (keepbscore , delbscore , addbscore) = SARIngram(sbgrams , cbgrams , rbgramslist , numref )
    (keepcscore , delcscore , addcscore) = SARIngram(scgrams , ccgrams , rcgramslist , numref )
    (keepdscore , deldscore , adddscore) = SARIngram(sdgrams , cdgrams , rdgramslist , numref )
    avgkeepscore = sum([keepascore, keepbscore, keepcscore, keepdscore] ) / 4
    avgdelscore = sum([delascore, delbscore, delcscore, deldscore] ) / 4
    avgaddscore = sum([addascore, addbscore, addcscore, adddscore] ) / 4
    finalscore = (avgkeepscore + avgdelscore + avgaddscore) / 3
    return finalscore
def normalize ( sentence , lowercase = True , tokenizer = "13a" , return_str = True ):
    # Normalization is required for the ASSET dataset (one of the primary
    # datasets in sentence simplification) to allow using space
    # to split the sentence. Even though Wiki-Auto and TURK datasets,
    # do not require normalization, we do it for consistency.
    # Code adapted from the EASSE library [1] written by the authors of the ASSET dataset.
    # [1] https://github.com/feralvam/easse/blob/580bba7e1378fc8289c663f864e0487188fe8067/easse/utils/preprocessing.py#L7
    if lowercase:
        sentence = sentence.lower()
    if tokenizer in ["13a", "intl"]:
        if version.parse(sacrebleu.__version__ ).major >= 2:
            normalized_sent = sacrebleu.metrics.bleu._get_tokenizer(tokenizer )()(sentence )
        else:
            normalized_sent = sacrebleu.TOKENIZERS[tokenizer]()(sentence )
    elif tokenizer == "moses":
        normalized_sent = sacremoses.MosesTokenizer().tokenize(sentence , return_str=True , escape=False )
    elif tokenizer == "penn":
        normalized_sent = sacremoses.MosesTokenizer().penn_tokenize(sentence , return_str=True )
    else:
        normalized_sent = sentence
    if not return_str:
        normalized_sent = normalized_sent.split()
    return normalized_sent
def compute_sari ( sources , predictions , references ):
    if not (len(sources ) == len(predictions ) == len(references )):
        raise ValueError('Sources length must match predictions and references lengths.' )
    sari_score = 0
    for src, pred, refs in zip(sources , predictions , references ):
        sari_score += SARIsent(normalize(src ) , normalize(pred ) , [normalize(sent ) for sent in refs] )
    sari_score = sari_score / len(predictions )
    return 100 * sari_score
def lowerCamelCase__ ( _A , _A , _A="exp" , _A=None , _A=False , _A=False , _A=False , ):
a : Optional[int] = len(references[0] )
if any(len(_A ) != references_per_prediction for refs in references ):
raise ValueError('Sacrebleu requires the same number of references for each prediction' )
a : List[Any] = [[refs[i] for refs in references] for i in range(_A )]
a : Optional[Any] = sacrebleu.corpus_bleu(
_A , _A , smooth_method=_A , smooth_value=_A , force=_A , lowercase=_A , use_effective_order=_A , )
return output.score
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class a__( datasets.Metric ):
    def _info( self ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('string' , id='sequence' ),
'references': datasets.Sequence(datasets.Value('string' , id='sequence' ) , id='references' ),
} ) , codebase_urls=[
'https://github.com/huggingface/transformers/blob/master/src/transformers/data/metrics/squad_metrics.py',
'https://github.com/cocoxu/simplification/blob/master/SARI.py',
'https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/utils/sari_hook.py',
'https://github.com/mjpost/sacreBLEU',
] , reference_urls=[
'https://www.aclweb.org/anthology/Q16-1029.pdf',
'https://github.com/mjpost/sacreBLEU',
'https://en.wikipedia.org/wiki/BLEU',
'https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213',
] , )
    def _compute( self , sources , predictions , references ):
        result = {}
        result.update({'sari': compute_sari(sources=sources , predictions=predictions , references=references )} )
        result.update({'sacrebleu': compute_sacrebleu(predictions=predictions , references=references )} )
        result.update({'exact': compute_em(predictions=predictions , references=references )} )
return result | 96 | 1 |
"""simple docstring"""
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OpenAIGPTConfig,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTForSequenceClassification,
OpenAIGPTLMHeadModel,
OpenAIGPTModel,
)
class OpenAIGPTModelTester :
"""simple docstring"""
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_token_type_ids=True , use_labels=True , vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , num_labels=3 , num_choices=4 , scope=None , ):
"""simple docstring"""
lowerCamelCase__ : Any =parent
lowerCamelCase__ : Union[str, Any] =batch_size
lowerCamelCase__ : Dict =seq_length
lowerCamelCase__ : List[str] =is_training
lowerCamelCase__ : List[Any] =use_token_type_ids
lowerCamelCase__ : Union[str, Any] =use_labels
lowerCamelCase__ : Optional[Any] =vocab_size
lowerCamelCase__ : List[Any] =hidden_size
lowerCamelCase__ : Optional[int] =num_hidden_layers
lowerCamelCase__ : Tuple =num_attention_heads
lowerCamelCase__ : Optional[Any] =intermediate_size
lowerCamelCase__ : Optional[int] =hidden_act
lowerCamelCase__ : List[Any] =hidden_dropout_prob
lowerCamelCase__ : str =attention_probs_dropout_prob
lowerCamelCase__ : Tuple =max_position_embeddings
lowerCamelCase__ : Union[str, Any] =type_vocab_size
lowerCamelCase__ : Dict =type_sequence_label_size
lowerCamelCase__ : str =initializer_range
lowerCamelCase__ : Any =num_labels
lowerCamelCase__ : int =num_choices
lowerCamelCase__ : List[str] =scope
lowerCamelCase__ : List[str] =self.vocab_size - 1
    def prepare_config_and_inputs( self ):
        """simple docstring"""
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            choice_labels = ids_tensor([self.batch_size] , self.num_choices )
        config = OpenAIGPTConfig(
            vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , pad_token_id=self.pad_token_id , )
        head_mask = ids_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 )
        return (
            config,
            input_ids,
            head_mask,
            token_type_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        )
    def create_and_check_openai_gpt_model( self , config , input_ids , head_mask , token_type_ids , *args ):
        """simple docstring"""
        model = OpenAIGPTModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , token_type_ids=token_type_ids , head_mask=head_mask )
        result = model(input_ids , token_type_ids=token_type_ids )
        result = model(input_ids )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_lm_head_model(self, config, input_ids, head_mask, token_type_ids, *args):
        model = OpenAIGPTLMHeadModel(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, token_type_ids=token_type_ids, labels=input_ids)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_double_lm_head_model(self, config, input_ids, head_mask, token_type_ids, *args):
        model = OpenAIGPTDoubleHeadsModel(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, token_type_ids=token_type_ids, labels=input_ids)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_openai_gpt_for_sequence_classification(
        self, config, input_ids, head_mask, token_type_ids, *args
    ):
        config.num_labels = self.num_labels
        model = OpenAIGPTForSequenceClassification(config)
        model.to(torch_device)
        model.eval()

        sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        result = model(input_ids, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            head_mask,
            token_type_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
            "head_mask": head_mask,
        }

        return config, inputs_dict
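# For reference, `ids_tensor` (imported from the shared test utilities above) just draws
# random integer ids. A minimal sketch of an equivalent helper -- hypothetical name, and
# not the helper's real signature, which also accepts an rng and a name:
def _ids_tensor_sketch(shape, vocab_size):
    # Random int64 tensor with values in [0, vocab_size), matching how the tester
    # builds input_ids, token_type_ids, and the label tensors above.
    return torch.randint(low=0, high=vocab_size, size=tuple(shape), dtype=torch.long)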
@require_torch
class OpenAIGPTModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification)
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (
        (OpenAIGPTLMHeadModel,) if is_torch_available() else ()
    )  # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly
    pipeline_model_mapping = (
        {
            "feature-extraction": OpenAIGPTModel,
            "text-classification": OpenAIGPTForSequenceClassification,
            "text-generation": OpenAIGPTLMHeadModel,
            "zero-shot": OpenAIGPTForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
            # Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
            # `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a
            # tiny config could not be created.
            return True

        return False
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class.__name__ == "OpenAIGPTDoubleHeadsModel":
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length),
                    dtype=torch.long,
                    device=torch_device,
                )
                inputs_dict["input_ids"] = inputs_dict["labels"]
                inputs_dict["token_type_ids"] = inputs_dict["labels"]
                inputs_dict["mc_token_ids"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.num_choices),
                    dtype=torch.long,
                    device=torch_device,
                )
                inputs_dict["mc_labels"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict
    def setUp(self):
        self.model_tester = OpenAIGPTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=OpenAIGPTConfig, n_embd=37)

    def test_config(self):
        self.config_tester.run_common_tests()
    def test_openai_gpt_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_openai_gpt_model(*config_and_inputs)

    def test_openai_gpt_lm_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_lm_head_model(*config_and_inputs)

    def test_openai_gpt_double_lm_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_double_lm_head_model(*config_and_inputs)

    def test_openai_gpt_classification_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*config_and_inputs)
    @slow
    def test_model_from_pretrained(self):
        for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = OpenAIGPTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
class OPENAIGPTModelLanguageGenerationTest(unittest.TestCase):
    @slow
    def test_lm_generate_openai_gpt(self):
        model = OpenAIGPTLMHeadModel.from_pretrained("openai-gpt")
        model.to(torch_device)
        input_ids = torch.tensor([[481, 4735, 544]], dtype=torch.long, device=torch_device)  # the president is
        expected_output_ids = [
            481,
            4735,
            544,
            246,
            963,
            870,
            762,
            239,
            244,
            40477,
            244,
            249,
            719,
            881,
            487,
            544,
            240,
            244,
            603,
            481,
        ]  # the president is a very good man. " \n " i'm sure he is, " said the

        output_ids = model.generate(input_ids, do_sample=False)
        self.assertListEqual(output_ids[0].tolist(), expected_output_ids) | 126 |
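# Sanity check for the reference sequence above: decoding the expected ids reproduces the
# annotated text. A hedged sketch, assuming the `openai-gpt` tokenizer can be downloaded:
if __name__ == "__main__":
    from transformers import OpenAIGPTTokenizer

    _tokenizer = OpenAIGPTTokenizer.from_pretrained("openai-gpt")
    _reference_ids = [481, 4735, 544, 246, 963, 870, 762, 239, 244, 40477, 244, 249, 719, 881, 487, 544, 240, 244, 603, 481]
    print(_tokenizer.decode(_reference_ids))
    # -> the president is a very good man. " \n " i'm sure he is, " said the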
"""simple docstring"""
import torch
from transformers import AutoModel
class A_ ( torch.nn.Module ):
"""simple docstring"""
def __init__( self :Optional[Any] , lowerCamelCase_ :Dict="sayef/fsner-bert-base-uncased" ):
"""simple docstring"""
super(lowerCamelCase_ , self ).__init__()
lowerCamelCase__ : Dict =AutoModel.from_pretrained(lowerCamelCase_ , return_dict=lowerCamelCase_ )
lowerCamelCase__ : Union[str, Any] =torch.nn.CosineSimilarity(3 , 1e-08 )
lowerCamelCase__ : int =torch.nn.Softmax(dim=1 )
def UpperCAmelCase__ ( self :str , **lowerCamelCase_ :Optional[Any] ):
"""simple docstring"""
return self.bert(**lowerCamelCase_ ).last_hidden_state
def UpperCAmelCase__ ( self :Optional[Any] , lowerCamelCase_ :int ):
"""simple docstring"""
return token_embeddings.sum(2 , keepdim=lowerCamelCase_ )
def UpperCAmelCase__ ( self :Optional[int] , lowerCamelCase_ :Any , lowerCamelCase_ :Optional[Any] , lowerCamelCase_ :int=1 ):
"""simple docstring"""
return self.softmax(T * self.cos(lowerCamelCase_ , lowerCamelCase_ ) )
def UpperCAmelCase__ ( self :Optional[int] , lowerCamelCase_ :Optional[int] , lowerCamelCase_ :str ):
"""simple docstring"""
lowerCamelCase__ : List[str] =W_supports['sizes'].tolist()
lowerCamelCase__ : Tuple =W_supports['start_token_id'].item()
lowerCamelCase__ : Optional[Any] =W_supports['end_token_id'].item()
del W_supports["sizes"]
del W_supports["start_token_id"]
del W_supports["end_token_id"]
lowerCamelCase__ : int =self.BERT(**lowerCamelCase_ )
lowerCamelCase__ : Dict =self.BERT(**lowerCamelCase_ )
lowerCamelCase__ : List[str] =None
lowerCamelCase__ : Any =None
lowerCamelCase__ : Any =W_supports['input_ids'] == start_token_id
lowerCamelCase__ : Union[str, Any] =W_supports['input_ids'] == end_token_id
for i, size in enumerate(lowerCamelCase_ ):
if i == 0:
lowerCamelCase__ : Optional[Any] =0
else:
lowerCamelCase__ : Union[str, Any] =support_sizes[i - 1]
lowerCamelCase__ : List[Any] =S[s : s + size][start_token_masks[s : s + size]]
lowerCamelCase__ : Dict =S[s : s + size][end_token_masks[s : s + size]]
lowerCamelCase__ : str =torch.matmul(q[i] , s_start.T ).sum(1 ).softmax(0 )
lowerCamelCase__ : Union[str, Any] =torch.matmul(q[i] , s_end.T ).sum(1 ).softmax(0 )
if p_starts is not None:
lowerCamelCase__ : Optional[Any] =torch.vstack((p_starts, p_start) )
lowerCamelCase__ : List[Any] =torch.vstack((p_ends, p_end) )
else:
lowerCamelCase__ : Any =p_start
lowerCamelCase__ : Any =p_end
return p_starts, p_ends | 126 | 1 |
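# A hedged usage sketch for FSNERModel. The original project ships its own tokenizer
# utilities that build these dicts (including the [E] / [/E] entity markers and the
# `sizes` / `start_token_id` / `end_token_id` bookkeeping); everything below is
# illustrative, not the project's actual API:
if __name__ == "__main__":
    from transformers import AutoTokenizer

    _tokenizer = AutoTokenizer.from_pretrained("sayef/fsner-bert-base-uncased")
    _query = ["does acme corp have a branch in houston?"]
    _supports = ["[E] sayef [/E] lives in houston.", "the office is run by [E] acme corp [/E]."]

    W_query = dict(_tokenizer(_query, padding=True, return_tensors="pt"))
    W_supports = dict(_tokenizer(_supports, padding=True, return_tensors="pt"))
    # Bookkeeping fields the forward pass expects (values here are illustrative):
    W_supports["sizes"] = torch.tensor([len(_supports)])
    W_supports["start_token_id"] = torch.tensor(_tokenizer.convert_tokens_to_ids("[E]"))
    W_supports["end_token_id"] = torch.tensor(_tokenizer.convert_tokens_to_ids("[/E]"))

    model = FSNERModel()
    p_starts, p_ends = model(W_query, W_supports)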
"""simple docstring"""
import inspect
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNetaDModel, VQModel
from ...schedulers import DDIMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class __A ( SCREAMING_SNAKE_CASE_ ):
def __init__( self , a__ , a__ , a__ ):
super().__init__()
self.register_modules(vqvae=a__ , unet=a__ , scheduler=a__ )
@torch.no_grad()
def __call__( self , a__ = 1 , a__ = None , a__ = 0.0 , a__ = 50 , a__ = "pil" , a__ = True , **a__ , ):
_lowerCAmelCase : str = randn_tensor(
(batch_size, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size) , generator=a__ , )
_lowerCAmelCase : List[str] = latents.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
_lowerCAmelCase : Optional[int] = latents * self.scheduler.init_noise_sigma
self.scheduler.set_timesteps(a__ )
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
_lowerCAmelCase : List[Any] = """eta""" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
_lowerCAmelCase : Dict = {}
if accepts_eta:
_lowerCAmelCase : List[Any] = eta
for t in self.progress_bar(self.scheduler.timesteps ):
_lowerCAmelCase : Union[str, Any] = self.scheduler.scale_model_input(a__ , a__ )
# predict the noise residual
_lowerCAmelCase : Any = self.unet(a__ , a__ ).sample
# compute the previous noisy sample x_t -> x_t-1
_lowerCAmelCase : List[str] = self.scheduler.step(a__ , a__ , a__ , **a__ ).prev_sample
# decode the image latents with the VAE
_lowerCAmelCase : List[Any] = self.vqvae.decode(a__ ).sample
_lowerCAmelCase : Any = (image / 2 + 0.5).clamp(0 , 1 )
_lowerCAmelCase : Union[str, Any] = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
_lowerCAmelCase : str = self.numpy_to_pil(a__ )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=a__ )
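# A hedged usage sketch for the pipeline above, following the diffusers docs and assuming
# the unconditional `CompVis/ldm-celebahq-256` checkpoint pairs with this pipeline:
if __name__ == "__main__":
    from diffusers import LDMPipeline

    pipe = LDMPipeline.from_pretrained("CompVis/ldm-celebahq-256")
    image = pipe(num_inference_steps=50).images[0]
    image.save("ldm_generated_image.png")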
| 370 | # This file is autogenerated by the command `make fix-copies`, do not edit.
from ..utils import DummyObject, requires_backends
class __A(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class __A(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class __A(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class __A(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class __A(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class __A(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class __A(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class __A(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class __A(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class __A(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class __A(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class __A(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class __A(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class __A(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class __A(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class __A(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class __A(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class __A(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class __A(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class __A(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class __A(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class __A(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class __A(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class __A(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class __A(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class __A(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class __A(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class __A(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class __A(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class __A(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class __A(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"]) | 126 | 0 |
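# For context, `DummyObject` is the metaclass that makes these placeholders raise a
# helpful ImportError whenever sentencepiece is missing. A simplified sketch of the
# mechanism (the real `requires_backends` first checks backend availability before
# raising; this hypothetical variant always raises):
def _requires_backends_sketch(obj, backends):
    name = obj.__name__ if hasattr(obj, "__name__") else obj.__class__.__name__
    raise ImportError(f"{name} requires the {backends} backend(s), which are not installed.")


class _DummyObjectSketch(type):
    def __getattribute__(cls, key):
        # Private attributes resolve normally; anything else triggers the backend check,
        # so users get a clear install hint instead of an AttributeError.
        if key.startswith("_"):
            return super().__getattribute__(key)
        _requires_backends_sketch(cls, cls._backends)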
| 126 | 0 |