code (string, 86 to 54.5k chars) | code_codestyle (int64, 0 to 371) | style_context (string, 87 to 49.2k chars) | style_context_codestyle (int64, 0 to 349) | label (int64, 0 to 1)
---|---|---|---|---
'''
Given a matrix in which every row and every column is sorted in decreasing
order, count the number of negative numbers it contains.
'''
def generate_large_matrix():
    """Generate a 1000 x 1000 grid sorted in decreasing order along both axes."""
    return [list(range(1_000 - i, -1_000 - i, -1)) for i in range(1_000)]
grid = generate_large_matrix()
test_grids = (
    [[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]],
    [[3, 2], [1, 0]],
    [[7, 7, 6]],
    [[7, 7, 6], [-1, -2, -3]],
    grid,
)
def validate_grid(grid):
    """Validate that the rows and columns of the grid are sorted in decreasing order."""
    assert all(row == sorted(row, reverse=True) for row in grid)
    assert all(list(col) == sorted(col, reverse=True) for col in zip(*grid))
def find_negative_index(array):
    """Binary-search a decreasing row for the index of its first negative number."""
    left = 0
    right = len(array) - 1
    # Edge cases such as no values or all numbers are negative.
    if not array or array[0] < 0:
        return 0
    while right + 1 > left:
        mid = (left + right) // 2
        num = array[mid]
        # num must be negative and the previous element must be >= 0.
        if num < 0 and array[mid - 1] >= 0:
            return mid
        if num >= 0:
            left = mid + 1
        else:
            right = mid - 1
    # No negative numbers, so return the last index of the array + 1, i.e. the length.
    return len(array)
def count_negatives_binary_search(grid):
    """O(m log n): binary-search each row, shrinking the search bound row by row."""
    total = 0
    bound = len(grid[0])
    for i in range(len(grid)):
        bound = find_negative_index(grid[i][:bound])
        total += bound
    return (len(grid) * len(grid[0])) - total
def count_negatives_brute_force(grid):
    """O(m * n): inspect every cell of the grid."""
    return len([number for row in grid for number in row if number < 0])
def count_negatives_brute_force_with_break(grid):
    """Brute force, but break out of each row at its first negative number."""
    total = 0
    for row in grid:
        for i, number in enumerate(row):
            if number < 0:
                total += len(row) - i
                break
    return total
def benchmark():
    """Benchmark the three counting strategies against each other."""
    from timeit import timeit
    print('Running benchmarks')
    setup = (
        'from __main__ import count_negatives_binary_search, '
        'count_negatives_brute_force, count_negatives_brute_force_with_break, grid'
    )
    for func in (
        "count_negatives_binary_search",  # took 0.7727 seconds
        "count_negatives_brute_force_with_break",  # took 4.6505 seconds
        "count_negatives_brute_force",  # took 12.8160 seconds
    ):
        time = timeit(f'{func}(grid=grid)', setup=setup, number=500)
        print(f'{func}() took {time:0.4f} seconds')
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    benchmark()
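# Added sanity check (a minimal sketch, not part of the original module): the
# three counting strategies should agree on the first small test grid, which
# contains eight negative numbers.
if __name__ == "__main__":
    assert count_negatives_binary_search(test_grids[0]) == 8
    assert count_negatives_brute_force(test_grids[0]) == 8
    assert count_negatives_brute_force_with_break(test_grids[0]) == 8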
| 211 |
'''simple docstring'''
import argparse
from transformers import (
TapasConfig,
TapasForMaskedLM,
TapasForQuestionAnswering,
TapasForSequenceClassification,
TapasModel,
TapasTokenizer,
load_tf_weights_in_tapas,
)
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(
    task, reset_position_index_per_cell, tf_checkpoint_path, tapas_config_file, pytorch_dump_path
):
    '''Convert a TAPAS TensorFlow checkpoint into a PyTorch model for the given task.'''
    config = TapasConfig.from_json_file(tapas_config_file)
    # set absolute/relative position embeddings parameter
    config.reset_position_index_per_cell = reset_position_index_per_cell
    # set remaining parameters of TapasConfig as well as the model based on the task
    if task == "SQA":
        model = TapasForQuestionAnswering(config=config)
    elif task == "WTQ":
        # run_task_main.py hparams
        config.num_aggregation_labels = 4
        config.use_answer_as_supervision = True
        # hparam_utils.py hparams
        config.answer_loss_cutoff = 0.664694
        config.cell_selection_preference = 0.207951
        config.huber_loss_delta = 0.121194
        config.init_cell_selection_weights_to_zero = True
        config.select_one_column = True
        config.allow_empty_column_selection = False
        config.temperature = 0.0352513
        model = TapasForQuestionAnswering(config=config)
    elif task == "WIKISQL_SUPERVISED":
        # run_task_main.py hparams
        config.num_aggregation_labels = 4
        config.use_answer_as_supervision = False
        # hparam_utils.py hparams
        config.answer_loss_cutoff = 36.4519
        config.cell_selection_preference = 0.903421
        config.huber_loss_delta = 222.088
        config.init_cell_selection_weights_to_zero = True
        config.select_one_column = True
        config.allow_empty_column_selection = True
        config.temperature = 0.763141
        model = TapasForQuestionAnswering(config=config)
    elif task == "TABFACT":
        model = TapasForSequenceClassification(config=config)
    elif task == "MLM":
        model = TapasForMaskedLM(config=config)
    elif task == "INTERMEDIATE_PRETRAINING":
        model = TapasModel(config=config)
    else:
        raise ValueError(f'Task {task} not supported.')
    print(f'Building PyTorch model from configuration: {config}')
    # Load weights from tf checkpoint
    load_tf_weights_in_tapas(model, config, tf_checkpoint_path)
    # Save pytorch-model (weights and configuration)
    print(f'Save PyTorch model to {pytorch_dump_path}')
    model.save_pretrained(pytorch_dump_path)
    # Save tokenizer files
    print(f'Save tokenizer files to {pytorch_dump_path}')
    tokenizer = TapasTokenizer(vocab_file=tf_checkpoint_path[:-10] + "vocab.txt", model_max_length=512)
    tokenizer.save_pretrained(pytorch_dump_path)
    print("Used relative position embeddings:", model.config.reset_position_index_per_cell)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--task''', default='''SQA''', type=str, help='''Model task for which to convert a checkpoint. Defaults to SQA.'''
)
parser.add_argument(
'''--reset_position_index_per_cell''',
default=False,
action='''store_true''',
help='''Whether to use relative position embeddings or not. Defaults to True.''',
)
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--tapas_config_file''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained TAPAS model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.task,
args.reset_position_index_per_cell,
args.tf_checkpoint_path,
args.tapas_config_file,
args.pytorch_dump_path,
)
| 22 | 0 |
"""simple docstring"""
import os
import tempfile
import unittest
from transformers.models.marian.convert_marian_tatoeba_to_pytorch import DEFAULT_REPO, TatoebaConverter
from transformers.testing_utils import slow
from transformers.utils import cached_property
@unittest.skipUnless(os.path.exists(DEFAULT_REPO), 'Tatoeba directory does not exist.')
class TatoebaConversionTester(unittest.TestCase):
    @cached_property
    def resolver(self):
        '''Converter writing into a fresh temporary directory.'''
        tmp_dir = tempfile.mkdtemp()
        return TatoebaConverter(save_dir=tmp_dir)
    @slow
    def test_resolver(self):
        '''Converting a single language pair should run end to end.'''
        self.resolver.convert_models(["heb-eng"])
    @slow
    def test_model_card(self):
        '''A dry-run model card should carry the resolved language pair.'''
        mmeta = self.resolver.write_model_card("opus-mt-he-en", dry_run=True)
        assert mmeta["long_pair"] == "heb-eng"
| 370 |
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class BaseTransformersCLICommand(ABC):
    @staticmethod
    @abstractmethod
    def register_subcommand(parser: ArgumentParser):
        '''Attach this command's own subparser to the root argument parser.'''
        raise NotImplementedError()
    @abstractmethod
    def run(self):
        '''Execute the command.'''
        raise NotImplementedError()
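# Minimal usage sketch (hypothetical command, not part of the original file):
# a concrete subclass wires itself into the root parser and implements run().
class HelloCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        hello_parser = parser.add_parser("hello", help="Print a greeting.")
        hello_parser.set_defaults(func=lambda args: HelloCommand())
    def run(self):
        print("hello")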
| 300 | 0 |
import os
import string
import sys
ARROW_KEY_FLAG = 1 << 8
KEYMAP = {
    "tab": ord("\t"),
    "newline": ord("\r"),
    "esc": 27,
    "up": 65 + ARROW_KEY_FLAG,
    "down": 66 + ARROW_KEY_FLAG,
    "right": 67 + ARROW_KEY_FLAG,
    "left": 68 + ARROW_KEY_FLAG,
    "mod_int": 91,
    "undefined": sys.maxsize,
    "interrupt": 3,
    "insert": 50,
    "delete": 51,
    "pg_up": 53,
    "pg_down": 54,
}
KEYMAP["arrow_begin"] = KEYMAP["up"]
KEYMAP["arrow_end"] = KEYMAP["left"]
if sys.platform == "win32":
    WIN_CH_BUFFER = []
    WIN_KEYMAP = {
        b"\xe0H": KEYMAP["up"] - ARROW_KEY_FLAG,
        b"\x00H": KEYMAP["up"] - ARROW_KEY_FLAG,
        b"\xe0P": KEYMAP["down"] - ARROW_KEY_FLAG,
        b"\x00P": KEYMAP["down"] - ARROW_KEY_FLAG,
        b"\xe0M": KEYMAP["right"] - ARROW_KEY_FLAG,
        b"\x00M": KEYMAP["right"] - ARROW_KEY_FLAG,
        b"\xe0K": KEYMAP["left"] - ARROW_KEY_FLAG,
        b"\x00K": KEYMAP["left"] - ARROW_KEY_FLAG,
    }
for i in range(10):
    KEYMAP[str(i)] = ord(str(i))
def get_raw_chars():
    """Get raw characters from the input (Windows console or POSIX terminal)."""
    if os.name == "nt":
        import msvcrt
        encoding = "mbcs"
        # Flush the keyboard buffer
        while msvcrt.kbhit():
            msvcrt.getch()
        if len(WIN_CH_BUFFER) == 0:
            # Read the keystroke
            ch = msvcrt.getch()
            # If it is a prefix char, get second part
            if ch in (b"\x00", b"\xe0"):
                cha = ch + msvcrt.getch()
                # Translate actual Win chars to bullet char types
                try:
                    chx = chr(WIN_KEYMAP[cha])
                    WIN_CH_BUFFER.append(chr(KEYMAP["mod_int"]))
                    WIN_CH_BUFFER.append(chx)
                    if ord(chx) in (
                        KEYMAP["insert"] - 1 << 9,
                        KEYMAP["delete"] - 1 << 9,
                        KEYMAP["pg_up"] - 1 << 9,
                        KEYMAP["pg_down"] - 1 << 9,
                    ):
                        WIN_CH_BUFFER.append(chr(126))
                    ch = chr(KEYMAP["esc"])
                except KeyError:
                    ch = cha[1]
            else:
                ch = ch.decode(encoding)
        else:
            ch = WIN_CH_BUFFER.pop(0)
    elif os.name == "posix":
        import termios
        import tty
        fd = sys.stdin.fileno()
        old_settings = termios.tcgetattr(fd)
        try:
            tty.setraw(fd)
            ch = sys.stdin.read(1)
        finally:
            termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
    return ch
def get_character():
    """Get a character from the input, translating arrow escapes to flagged keys."""
    char = get_raw_chars()
    if ord(char) in [KEYMAP["interrupt"], KEYMAP["newline"]]:
        return char
    elif ord(char) == KEYMAP["esc"]:
        combo = get_raw_chars()
        if ord(combo) == KEYMAP["mod_int"]:
            key = get_raw_chars()
            if ord(key) >= KEYMAP["arrow_begin"] - ARROW_KEY_FLAG and ord(key) <= KEYMAP["arrow_end"] - ARROW_KEY_FLAG:
                return chr(ord(key) + ARROW_KEY_FLAG)
            else:
                return KEYMAP["undefined"]
        else:
            return get_raw_chars()
    else:
        if char in string.printable:
            return char
        else:
            return KEYMAP["undefined"]
| 99 |
from typing import List, Optional, Tuple, Union
import torch
from ...utils import logging, randn_tensor
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
class DanceDiffusionPipeline(DiffusionPipeline):
    """Pipeline for unconditional audio generation from a 1D UNet and a scheduler."""
    def __init__(self, unet, scheduler):
        '''Register the denoising UNet and the noise scheduler as modules.'''
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)
    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        num_inference_steps: int = 100,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        audio_length_in_s: Optional[float] = None,
        return_dict: bool = True,
    ) -> Union[AudioPipelineOutput, Tuple]:
        '''Run the full denoising loop and return the generated audio.'''
        if audio_length_in_s is None:
            audio_length_in_s = self.unet.config.sample_size / self.unet.config.sample_rate
        sample_size = audio_length_in_s * self.unet.config.sample_rate
        down_scale_factor = 2 ** len(self.unet.up_blocks)
        if sample_size < 3 * down_scale_factor:
            raise ValueError(
                f'{audio_length_in_s} is too small. Make sure it\'s bigger or equal to'
                f' {3 * down_scale_factor / self.unet.config.sample_rate}.')
        original_sample_size = int(sample_size)
        if sample_size % down_scale_factor != 0:
            sample_size = (
                (audio_length_in_s * self.unet.config.sample_rate) // down_scale_factor + 1
            ) * down_scale_factor
            logger.info(
                f'{audio_length_in_s} is increased to {sample_size / self.unet.config.sample_rate} so that it can be handled'
                f' by the model. It will be cut to {original_sample_size / self.unet.config.sample_rate} after the denoising'
                ' process.')
        sample_size = int(sample_size)
        dtype = next(iter(self.unet.parameters())).dtype
        shape = (batch_size, self.unet.config.in_channels, sample_size)
        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f'You have passed a list of generators of length {len(generator)}, but requested an effective batch'
                f' size of {batch_size}. Make sure the batch size matches the length of the generators.')
        audio = randn_tensor(shape, generator=generator, device=self.device, dtype=dtype)
        # set step values
        self.scheduler.set_timesteps(num_inference_steps, device=audio.device)
        self.scheduler.timesteps = self.scheduler.timesteps.to(dtype)
        for t in self.progress_bar(self.scheduler.timesteps):
            # 1. predict noise model_output
            model_output = self.unet(audio, t).sample
            # 2. compute previous audio sample: x_t -> x_t-1
            audio = self.scheduler.step(model_output, t, audio).prev_sample
        audio = audio.clamp(-1, 1).float().cpu().numpy()
        audio = audio[:, :, :original_sample_size]
        if not return_dict:
            return (audio,)
        return AudioPipelineOutput(audios=audio)
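# Minimal usage sketch (the checkpoint name is an assumption, not part of this
# file; any checkpoint pairing a 1D UNet with a compatible scheduler works):
if __name__ == "__main__":
    from diffusers import DiffusionPipeline
    pipe = DiffusionPipeline.from_pretrained("harmonai/maestro-150k")
    output = pipe(batch_size=1, num_inference_steps=100)
    generated = output.audios[0]  # numpy array of shape (channels, samples)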
| 99 | 1 |
import logging
from transformers import PretrainedConfig
logger = logging.getLogger(__name__)
BERTABS_FINETUNED_CONFIG_ARCHIVE_MAP = {
    """bertabs-finetuned-cnndm""": """https://huggingface.co/remi/bertabs-finetuned-cnndm-extractive-abstractive-summarization/resolve/main/config.json""",
}
class BertAbsConfig(PretrainedConfig):
    model_type = 'bertabs'
    def __init__(
        self,
        vocab_size=30_522,
        max_pos=512,
        enc_layers=6,
        enc_hidden_size=512,
        enc_heads=8,
        enc_ff_size=512,
        enc_dropout=0.2,
        dec_layers=6,
        dec_hidden_size=768,
        dec_heads=8,
        dec_ff_size=2_048,
        dec_dropout=0.2,
        **kwargs,
    ):
        '''Store the configuration of the BertAbs encoder-decoder model.'''
        super().__init__(**kwargs)
        self.vocab_size = vocab_size
        self.max_pos = max_pos
        self.enc_layers = enc_layers
        self.enc_hidden_size = enc_hidden_size
        self.enc_heads = enc_heads
        self.enc_ff_size = enc_ff_size
        self.enc_dropout = enc_dropout
        self.dec_layers = dec_layers
        self.dec_hidden_size = dec_hidden_size
        self.dec_heads = dec_heads
        self.dec_ff_size = dec_ff_size
        self.dec_dropout = dec_dropout
| 140 |
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import tensorflow as tf
from transformers import AutoTokenizer, TFAutoModelForSeq2SeqLM
@require_tf
@require_sentencepiece
@require_tokenizers
class TFMT5ModelIntegrationTest(unittest.TestCase):
    @slow
    def test_small_integration_test(self):
        '''Compare the loss of google/mt5-small on a toy example to a reference value.'''
        model = TFAutoModelForSeq2SeqLM.from_pretrained("google/mt5-small")
        tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")
        input_ids = tokenizer("Hello there", return_tensors="tf").input_ids
        labels = tokenizer("Hi I am", return_tensors="tf").input_ids
        loss = model(input_ids, labels=labels).loss
        mtf_score = -tf.math.reduce_mean(loss).numpy()
        EXPECTED_SCORE = -21.228168
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 2e-4)
| 140 | 1 |
"""simple docstring"""
from ....configuration_utils import PretrainedConfig
from ....utils import logging
logger = logging.get_logger(__name__)
# TODO: upload to AWS
RETRIBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "yjernite/retribert-base-uncased": (
        "https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/config.json"
    ),
}
class RetriBertConfig(PretrainedConfig):
    model_type = """retribert"""
    def __init__(
        self,
        vocab_size=30_522,
        hidden_size=768,
        num_hidden_layers=8,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        share_encoders=True,
        projection_dim=128,
        pad_token_id=0,
        **kwargs,
    ):
        """Configuration of the RetriBERT dual-encoder model."""
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.share_encoders = share_encoders
        self.projection_dim = projection_dim
| 148 |
"""simple docstring"""
import copy
from typing import Dict, List, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/mask2former-swin-small-coco-instance": (
        "https://huggingface.co/facebook/mask2former-swin-small-coco-instance/blob/main/config.json"
    )
    # See all Mask2Former models at https://huggingface.co/models?filter=mask2former
}
logger = logging.get_logger(__name__)
class Mask2FormerConfig(PretrainedConfig):
    model_type = """mask2former"""
    backbones_supported = ["""swin"""]
    attribute_map = {"""hidden_size""": """hidden_dim"""}
    def __init__(
        self,
        backbone_config: Optional[Dict] = None,
        feature_size: int = 256,
        mask_feature_size: int = 256,
        hidden_dim: int = 256,
        encoder_feedforward_dim: int = 1_024,
        activation_function: str = "relu",
        encoder_layers: int = 6,
        decoder_layers: int = 10,
        num_attention_heads: int = 8,
        dropout: float = 0.0,
        dim_feedforward: int = 2_048,
        pre_norm: bool = False,
        enforce_input_projection: bool = False,
        common_stride: int = 4,
        ignore_value: int = 255,
        num_queries: int = 100,
        no_object_weight: float = 0.1,
        class_weight: float = 2.0,
        mask_weight: float = 5.0,
        dice_weight: float = 5.0,
        train_num_points: int = 12_544,
        oversample_ratio: float = 3.0,
        importance_sample_ratio: float = 0.75,
        init_std: float = 0.02,
        init_xavier_std: float = 1.0,
        use_auxiliary_loss: bool = True,
        feature_strides: List[int] = [4, 8, 16, 32],
        output_auxiliary_logits: bool = None,
        **kwargs,
    ):
        """Configuration of a Mask2Former model and its backbone."""
        if backbone_config is None:
            logger.info("`backbone_config` is `None`. Initializing the config with the default `Swin` backbone." )
            backbone_config = CONFIG_MAPPING["swin"](
                image_size=224, in_channels=3, patch_size=4, embed_dim=96, depths=[2, 2, 18, 2], num_heads=[3, 6, 12, 24], window_size=7, drop_path_rate=0.3, use_absolute_embeddings=False, out_features=["stage1", "stage2", "stage3", "stage4"], )
        if isinstance(backbone_config, dict):
            backbone_model_type = backbone_config.pop("model_type")
            config_class = CONFIG_MAPPING[backbone_model_type]
            backbone_config = config_class.from_dict(backbone_config)
        # verify that the backbone is supported
        if backbone_config.model_type not in self.backbones_supported:
            logger.warning_once(
                f'''Backbone {backbone_config.model_type} is not a supported model and may not be compatible with Mask2Former. '''
                f'''Supported model types: {','.join(self.backbones_supported)}''' )
        self.backbone_config = backbone_config
        self.feature_size = feature_size
        self.mask_feature_size = mask_feature_size
        self.hidden_dim = hidden_dim
        self.encoder_feedforward_dim = encoder_feedforward_dim
        self.activation_function = activation_function
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers
        self.num_attention_heads = num_attention_heads
        self.dropout = dropout
        self.dim_feedforward = dim_feedforward
        self.pre_norm = pre_norm
        self.enforce_input_projection = enforce_input_projection
        self.common_stride = common_stride
        self.ignore_value = ignore_value
        self.num_queries = num_queries
        self.no_object_weight = no_object_weight
        self.class_weight = class_weight
        self.mask_weight = mask_weight
        self.dice_weight = dice_weight
        self.train_num_points = train_num_points
        self.oversample_ratio = oversample_ratio
        self.importance_sample_ratio = importance_sample_ratio
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.use_auxiliary_loss = use_auxiliary_loss
        self.feature_strides = feature_strides
        self.output_auxiliary_logits = output_auxiliary_logits
        self.num_hidden_layers = decoder_layers
        super().__init__(**kwargs)
    @classmethod
    def from_backbone_config(cls, backbone_config: PretrainedConfig, **kwargs):
        """Instantiate a Mask2FormerConfig from a pre-trained backbone configuration."""
        return cls(
            backbone_config=backbone_config, **kwargs, )
    def to_dict(self):
        """Serialize this instance to a Python dictionary."""
        output = copy.deepcopy(self.__dict__)
        output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
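# Minimal usage sketch (not in the original file): instantiating with no
# arguments falls back to the default Swin backbone built above.
if __name__ == "__main__":
    config = Mask2FormerConfig()
    print(config.backbone_config.model_type)  # "swin"
    print(config.num_hidden_layers == config.decoder_layers)  # True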
| 148 | 1 |
from sklearn.metrics import mean_squared_error
import datasets
_UpperCAmelCase : Optional[Any] = "\\n@article{scikit-learn,\n title={Scikit-learn: Machine Learning in {P}ython},\n author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.\n and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.\n and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and\n Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},\n journal={Journal of Machine Learning Research},\n volume={12},\n pages={2825--2830},\n year={2011}\n}\n"
_UpperCAmelCase : Optional[Any] = "\\nMean Squared Error(MSE) is the average of the square of difference between the predicted\nand actual values.\n"
_UpperCAmelCase : Tuple = "\nArgs:\n predictions: array-like of shape (n_samples,) or (n_samples, n_outputs)\n Estimated target values.\n references: array-like of shape (n_samples,) or (n_samples, n_outputs)\n Ground truth (correct) target values.\n sample_weight: array-like of shape (n_samples,), default=None\n Sample weights.\n multioutput: {\"raw_values\", \"uniform_average\"} or array-like of shape (n_outputs,), default=\"uniform_average\"\n Defines aggregating of multiple output values. Array-like value defines weights used to average errors.\n\n \"raw_values\" : Returns a full set of errors in case of multioutput input.\n\n \"uniform_average\" : Errors of all outputs are averaged with uniform weight.\n\n squared : bool, default=True\n If True returns MSE value, if False returns RMSE (Root Mean Squared Error) value.\n\nReturns:\n mse : mean squared error.\nExamples:\n\n >>> mse_metric = datasets.load_metric(\"mse\")\n >>> predictions = [2.5, 0.0, 2, 8]\n >>> references = [3, -0.5, 2, 7]\n >>> results = mse_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'mse': 0.375}\n >>> rmse_result = mse_metric.compute(predictions=predictions, references=references, squared=False)\n >>> print(rmse_result)\n {'mse': 0.6123724356957945}\n\n If you're using multi-dimensional lists, then set the config as follows :\n\n >>> mse_metric = datasets.load_metric(\"mse\", \"multilist\")\n >>> predictions = [[0.5, 1], [-1, 1], [7, -6]]\n >>> references = [[0, 2], [-1, 2], [8, -5]]\n >>> results = mse_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'mse': 0.7083333333333334}\n >>> results = mse_metric.compute(predictions=predictions, references=references, multioutput='raw_values')\n >>> print(results) # doctest: +NORMALIZE_WHITESPACE\n {'mse': array([0.41666667, 1. ])}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Mse(datasets.Metric):
    def _info(self) -> datasets.MetricInfo:
        """Describe the metric, its input features, and reference URLs."""
        return datasets.MetricInfo(
            description=_DESCRIPTION, citation=_CITATION, inputs_description=_KWARGS_DESCRIPTION, features=datasets.Features(self._get_feature_types()), reference_urls=[
                'https://scikit-learn.org/stable/modules/generated/sklearn.metrics.mean_squared_error.html'
            ], )
    def _get_feature_types(self):
        """The feature schema depends on whether inputs are flat lists or lists of lists."""
        if self.config_name == "multilist":
            return {
                "predictions": datasets.Sequence(datasets.Value('float')),
                "references": datasets.Sequence(datasets.Value('float')),
            }
        else:
            return {
                "predictions": datasets.Value('float'),
                "references": datasets.Value('float'),
            }
    def _compute(self, predictions, references, sample_weight=None, multioutput="uniform_average", squared=True):
        """Delegate to sklearn's mean_squared_error."""
        mse = mean_squared_error(
            references, predictions, sample_weight=sample_weight, multioutput=multioutput, squared=squared)
        return {"mse": mse}
| 110 |
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version(">=", "4.25.0")):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
from .pipeline_unclip import UnCLIPPipeline
from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
from .text_proj import UnCLIPTextProjModel
| 110 | 1 |
"""simple docstring"""
import torch
from diffusers import StableDiffusionPipeline
model_id = 'path-to-your-trained-model'
pipe = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16).to('cuda')
prompt = 'A photo of sks dog in a bucket'
image = pipe(prompt, num_inference_steps=50, guidance_scale=7.5).images[0]
image.save('dog-bucket.png')
| 243 |
"""simple docstring"""
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
UniSpeechConfig,
UniSpeechForCTC,
UniSpeechForPreTraining,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2PhonemeCTCTokenizer,
    Wav2Vec2Processor,
logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
MAPPING = {
    'post_extract_proj': 'feature_projection.projection',
    'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
    'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
    'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
    'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
    'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
    'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
    'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
    'fc2': 'encoder.layers.*.feed_forward.output_dense',
    'final_layer_norm': 'encoder.layers.*.final_layer_norm',
    'encoder.layer_norm': 'encoder.layer_norm',
    'w2v_model.layer_norm': 'feature_projection.layer_norm',
    'quantizer.weight_proj': 'quantizer.weight_proj',
    'quantizer.vars': 'quantizer.codevectors',
    'project_q': 'project_q',
    'final_proj': 'project_hid',
    'w2v_encoder.proj': 'ctc_proj',
    'mask_emb': 'masked_spec_embed',
}
TOP_LEVEL_KEYS = [
    'ctc_proj',
    'quantizer.weight_proj',
    'quantizer.codevectors',
    'project_q',
    'project_hid',
]
def set_recursively(hf_pointer, key, value, full_name, weight_type, is_finetuned):
    """Copy one fairseq tensor into the matching attribute of the HF model."""
    for attribute in key.split("."):
        if is_finetuned:
            if attribute in ["quantizer", "project_q", "project_hid"]:
                # those layers are only relevant for pretraining and should be dropped
                return
            if attribute == "ctc_proj":
                # we should rename `ctc_proj` to `lm_head` for fine-tuned phoneme models
                attribute = "lm_head"
        hf_pointer = getattr(hf_pointer, attribute)
    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape
    assert hf_shape == value.shape, (
        f'''Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'''
        f''' {value.shape} for {full_name}'''
    )
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value
    logger.info(f'''{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.''' )
def recursively_load_weights(fairseq_model, hf_model, is_finetuned):
    """Map every fairseq state-dict entry onto the HF UniSpeech model."""
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.unispeech.feature_extractor
    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name, value, feature_extractor, unused_weights, hf_model.config.feat_extract_norm == "group", )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = "unispeech." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type, is_finetuned)
                continue
        if not is_used:
            unused_weights.append(name)
    logger.warning(f'''Unused weights: {unused_weights}''' )
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    """Load one convolutional feature-extractor tensor from fairseq into the HF model."""
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])
    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f'''{full_name} has size {value.shape}, but'''
                f''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'''
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f'''{full_name} has size {value.shape}, but'''
                f''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'''
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f'''{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was'''
                " found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f'''{full_name} has size {value.shape}, but'''
                f''' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.'''
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_unispeech_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True):
    """Copy/paste/tweak the fairseq UniSpeech weights into the transformers design."""
    if config_path is not None:
        config = UniSpeechConfig.from_pretrained(config_path)
    else:
        config = UniSpeechConfig()
    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load_from_json(dict_path)
            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, "vocab.json")
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            vocab_dict = target_dict.indices
            # fairseq has the <pad> and <s> switched
            vocab_dict["<pad>"] = 42
            vocab_dict["<s>"] = 43
            with open(vocab_path, "w", encoding="utf-8") as vocab_handle:
                json.dump(vocab_dict, vocab_handle)
            tokenizer = Wav2Vec2PhonemeCTCTokenizer(
                vocab_path, unk_token=target_dict.unk_word, pad_token=target_dict.pad_word, bos_token=target_dict.bos_word, eos_token=target_dict.eos_word, word_delimiter_token="|", do_lower_case=False, )
            return_attention_mask = True if config.feat_extract_norm == "layer" else False
            feature_extractor = Wav2Vec2FeatureExtractor(
                feature_size=1, sampling_rate=16_000, padding_value=0, do_normalize=True, return_attention_mask=return_attention_mask, )
            processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)
        hf_unispeech = UniSpeechForCTC(config)
    else:
        hf_unispeech = UniSpeechForPreTraining(config)
    if is_finetuned:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1]), "w2v_path": checkpoint_path})
    else:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path])
    model = model[0].eval()
    recursively_load_weights(model, hf_unispeech, is_finetuned)
    hf_unispeech.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--not_finetuned', action='store_true', help='Whether the model to convert is a fine-tuned model or not'
)
    args = parser.parse_args()
convert_unispeech_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
    )
| 243 | 1 |
"""simple docstring"""
import argparse
import torch
from torch import nn
from transformers import M2M100Config, M2M100ForConditionalGeneration
def remove_ignore_keys_(state_dict):
    """Drop fairseq bookkeeping keys that have no counterpart in transformers."""
    ignore_keys = [
        '''encoder.version''',
        '''decoder.version''',
        '''model.encoder.version''',
        '''model.decoder.version''',
        '''decoder.output_projection.weight''',
        '''_float_tensor''',
        '''encoder.embed_positions._float_tensor''',
        '''decoder.embed_positions._float_tensor''',
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)
def make_linear_from_emb(emb):
    """Build a bias-free linear layer tied to the embedding weights."""
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def convert_fairseq_m2m100_checkpoint_from_disk(checkpoint_path):
    """Rebuild a fairseq M2M100 checkpoint as a transformers model."""
    m2m_100 = torch.load(checkpoint_path, map_location="""cpu""")
    args = m2m_100['''args'''] or m2m_100['''cfg''']['''model''']
    state_dict = m2m_100['''model''']
    remove_ignore_keys_(state_dict)
    vocab_size = state_dict['''encoder.embed_tokens.weight'''].shape[0]
    config = M2M100Config(
        vocab_size=vocab_size, max_position_embeddings=1024, encoder_layers=args.encoder_layers, decoder_layers=args.decoder_layers, encoder_attention_heads=args.encoder_attention_heads, decoder_attention_heads=args.decoder_attention_heads, encoder_ffn_dim=args.encoder_ffn_embed_dim, decoder_ffn_dim=args.decoder_ffn_embed_dim, d_model=args.encoder_embed_dim, encoder_layerdrop=args.encoder_layerdrop, decoder_layerdrop=args.decoder_layerdrop, dropout=args.dropout, attention_dropout=args.attention_dropout, activation_dropout=args.activation_dropout, activation_function="""relu""", )
    state_dict['''shared.weight'''] = state_dict['''decoder.embed_tokens.weight''']
    model = M2M100ForConditionalGeneration(config)
    model.model.load_state_dict(state_dict, strict=False)
    model.lm_head = make_linear_from_emb(model.model.shared)
    return model
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("fairseq_path", type=str, help="path to a model.pt on local filesystem.")
    parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()
    model = convert_fairseq_m2m100_checkpoint_from_disk(args.fairseq_path)
    model.save_pretrained(args.pytorch_dump_folder_path)
| 366 |
"""simple docstring"""
def infix_2_postfix(infix):
    """Convert an infix expression to postfix with a shunting-yard style stack."""
    stack = []
    post_fix = []
    priority = {
        """^""": 3,
        """*""": 2,
        """/""": 2,
        """%""": 2,
        """+""": 1,
        """-""": 1,
    }  # Priority of each operator
    print_width = len(infix) if (len(infix) > 7) else 7
    # Print table header for output
    print(
        """Symbol""".center(8), """Stack""".center(print_width), """Postfix""".center(print_width), sep=""" | """, )
    print("""-""" * (print_width * 3 + 7))
    for x in infix:
        if x.isalpha() or x.isdigit():
            post_fix.append(x)  # if x is Alphabet / Digit, add it to Postfix
        elif x == "(":
            stack.append(x)  # if x is "(" push to Stack
        elif x == ")":  # if x is ")" pop stack until "(" is encountered
            while stack[-1] != "(":
                post_fix.append(stack.pop())  # Pop stack & add the content to Postfix
            stack.pop()
        else:
            if len(stack) == 0:
                stack.append(x)  # If stack is empty, push x to stack
            else:  # while priority of x is not > priority of element in the stack
                while len(stack) > 0 and priority[x] <= priority[stack[-1]]:
                    post_fix.append(stack.pop())  # pop stack & add to Postfix
                stack.append(x)  # push x to stack
        print(
            x.center(8), ("""""".join(stack)).ljust(print_width), ("""""".join(post_fix)).ljust(print_width), sep=""" | """, )  # Output in tabular format
    while len(stack) > 0:  # while stack is not empty
        post_fix.append(stack.pop())  # pop stack & add to Postfix
        print(
            """ """.center(8), ("""""".join(stack)).ljust(print_width), ("""""".join(post_fix)).ljust(print_width), sep=""" | """, )  # Output in tabular format
    return "".join(post_fix)  # return Postfix as str
def infix_2_prefix(infix):
    """Convert infix to prefix: reverse, swap parentheses, build postfix, reverse."""
    infix = list(infix[::-1])  # reverse the infix equation
    for i in range(len(infix)):
        if infix[i] == "(":
            infix[i] = """)"""  # change "(" to ")"
        elif infix[i] == ")":
            infix[i] = """("""  # change ")" to "("
    return (infix_2_postfix("""""".join(infix)))[
        ::-1
    ]  # call infix_2_postfix on Infix, return reverse of Postfix
if __name__ == "__main__":
    Infix = input("\nEnter an Infix Equation = ")  # Input an Infix equation
    Infix = "".join(Infix.split())  # Remove spaces from the input
    print("\n\t", Infix, "(Infix) -> ", infix_2_prefix(Infix), "(Prefix)")
| 56 | 0 |
"""simple docstring"""
import math
import numpy as np
import qiskit
from qiskit import Aer, ClassicalRegister, QuantumCircuit, QuantumRegister, execute
def quantum_fourier_transform(number_of_qubits: int = 3) -> qiskit.result.counts.Counts:
    """Build a QFT circuit, simulate it, and return the measurement counts."""
    if isinstance(number_of_qubits, str):
        raise TypeError("""number of qubits must be a integer.""")
    if number_of_qubits <= 0:
        raise ValueError("""number of qubits must be > 0.""")
    if math.floor(number_of_qubits) != number_of_qubits:
        raise ValueError("""number of qubits must be exact integer.""")
    if number_of_qubits > 10:
        raise ValueError("""number of qubits too large to simulate(>10).""")
    qr = QuantumRegister(number_of_qubits, """qr""")
    cr = ClassicalRegister(number_of_qubits, """cr""")
    quantum_circuit = QuantumCircuit(qr, cr)
    counter = number_of_qubits
    for i in range(number_of_qubits):
        quantum_circuit.h(number_of_qubits - i - 1)
        counter -= 1
        for j in range(counter):
            quantum_circuit.cp(np.pi / 2 ** (counter - j), j, counter)
    for k in range(number_of_qubits // 2):
        quantum_circuit.swap(k, number_of_qubits - k - 1)
    # measure all the qubits
    quantum_circuit.measure(qr, cr)
    # simulate with 10000 shots
    backend = Aer.get_backend("""qasm_simulator""")
    job = execute(quantum_circuit, backend, shots=1_0000)
    return job.result().get_counts(quantum_circuit)
if __name__ == "__main__":
print(
f"""Total count for quantum fourier transform state is: \
{quantum_fourier_transform(3)}"""
)
| 335 |
import json
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from transformers import OneFormerImageProcessor
from transformers.models.oneformer.image_processing_oneformer import binary_mask_to_rle
from transformers.models.oneformer.modeling_oneformer import OneFormerForUniversalSegmentationOutput
if is_vision_available():
from PIL import Image
def prepare_metadata(class_info_file, repo_path="shi-labs/oneformer_demo"):
    """Download the class-info file and build the metadata dict used by the processor."""
    with open(hf_hub_download(repo_path, class_info_file, repo_type="dataset"), "r") as f:
        class_info = json.load(f)
    metadata = {}
    class_names = []
    thing_ids = []
    for key, info in class_info.items():
        metadata[key] = info["name"]
        class_names.append(info["name"])
        if info["isthing"]:
            thing_ids.append(int(key))
    metadata["thing_ids"] = thing_ids
    metadata["class_names"] = class_names
    return metadata
class OneFormerImageProcessorTester(unittest.TestCase):
    """Holds the shared configuration and helpers for the image-processor tests."""
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        size=None,
        do_resize=True,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        num_labels=10,
        do_reduce_labels=False,
        ignore_index=255,
        repo_path="shi-labs/oneformer_demo",
        class_info_file="ade20k_panoptic.json",
        num_text=10,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = {"shortest_edge": 32, "longest_edge": 1_333} if size is None else size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.class_info_file = class_info_file
        self.metadata = prepare_metadata(class_info_file, repo_path)
        self.num_text = num_text
        self.repo_path = repo_path
        # for the post_process_functions
        self.batch_size = 2
        self.num_queries = 10
        self.num_classes = 10
        self.height = 3
        self.width = 4
        self.num_labels = num_labels
        self.do_reduce_labels = do_reduce_labels
        self.ignore_index = ignore_index
    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "num_labels": self.num_labels,
            "do_reduce_labels": self.do_reduce_labels,
            "ignore_index": self.ignore_index,
            "class_info_file": self.class_info_file,
            "metadata": self.metadata,
            "num_text": self.num_text,
        }
    def get_expected_values(self, image_inputs, batched=False):
        """Compute the expected output height and width after resizing."""
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]
        return expected_height, expected_width
    def get_fake_oneformer_outputs(self):
        return OneFormerForUniversalSegmentationOutput(
            # +1 for null class
            class_queries_logits=torch.randn((self.batch_size, self.num_queries, self.num_classes + 1)), masks_queries_logits=torch.randn((self.batch_size, self.num_queries, self.height, self.width)), )
@require_torch
@require_vision
class OneFormerImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    """Tests the OneFormer image processor's call paths and post-processing."""
    image_processing_class = OneFormerImageProcessor if (is_vision_available() and is_torch_available()) else None
    # only for test_image_processing_common.test_image_proc_to_json_string
    feature_extraction_class = image_processing_class
    def setUp(self):
        self.image_processing_tester = OneFormerImageProcessorTester(self)
    @property
    def image_processor_dict(self):
        return self.image_processing_tester.prepare_image_processor_dict()
    def test_image_proc_properties(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processor, "image_mean"))
        self.assertTrue(hasattr(image_processor, "image_std"))
        self.assertTrue(hasattr(image_processor, "do_normalize"))
        self.assertTrue(hasattr(image_processor, "do_resize"))
        self.assertTrue(hasattr(image_processor, "size"))
        self.assertTrue(hasattr(image_processor, "ignore_index"))
        self.assertTrue(hasattr(image_processor, "class_info_file"))
        self.assertTrue(hasattr(image_processor, "num_text"))
        self.assertTrue(hasattr(image_processor, "repo_path"))
        self.assertTrue(hasattr(image_processor, "metadata"))
        self.assertTrue(hasattr(image_processor, "do_reduce_labels"))
    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processing_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)
        # Test not batched input
        encoded_images = image_processor(image_inputs[0], ["semantic"], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processing_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape, (1, self.image_processing_tester.num_channels, expected_height, expected_width), )
        # Test batched
        expected_height, expected_width = self.image_processing_tester.get_expected_values(image_inputs, batched=True)
        encoded_images = image_processor(
            image_inputs, ["semantic"] * len(image_inputs), return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape, (
                self.image_processing_tester.batch_size,
                self.image_processing_tester.num_channels,
                expected_height,
                expected_width,
            ), )
    def test_call_numpy(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processing_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)
        # Test not batched input
        encoded_images = image_processor(image_inputs[0], ["semantic"], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processing_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape, (1, self.image_processing_tester.num_channels, expected_height, expected_width), )
        # Test batched
        expected_height, expected_width = self.image_processing_tester.get_expected_values(image_inputs, batched=True)
        encoded_images = image_processor(
            image_inputs, ["semantic"] * len(image_inputs), return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape, (
                self.image_processing_tester.batch_size,
                self.image_processing_tester.num_channels,
                expected_height,
                expected_width,
            ), )
    def test_call_pytorch(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processing_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)
        # Test not batched input
        encoded_images = image_processor(image_inputs[0], ["semantic"], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processing_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape, (1, self.image_processing_tester.num_channels, expected_height, expected_width), )
        # Test batched
        expected_height, expected_width = self.image_processing_tester.get_expected_values(image_inputs, batched=True)
        encoded_images = image_processor(
            image_inputs, ["semantic"] * len(image_inputs), return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape, (
                self.image_processing_tester.batch_size,
                self.image_processing_tester.num_channels,
                expected_height,
                expected_width,
            ), )
    def comm_get_image_processor_inputs(self, with_segmentation_maps=False, is_instance_map=False, segmentation_type="np"):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # prepare image and target
        num_labels = self.image_processing_tester.num_labels
        annotations = None
        instance_id_to_semantic_id = None
        image_inputs = prepare_image_inputs(self.image_processing_tester, equal_resolution=False)
        if with_segmentation_maps:
            high = num_labels
            if is_instance_map:
                labels_expanded = list(range(num_labels)) * 2
                instance_id_to_semantic_id = dict(enumerate(labels_expanded))
            annotations = [
                np.random.randint(0, high * 2, (img.size[1], img.size[0])).astype(np.uint8) for img in image_inputs
            ]
            if segmentation_type == "pil":
                annotations = [Image.fromarray(annotation) for annotation in annotations]
        inputs = image_processor(
            image_inputs, ["semantic"] * len(image_inputs), annotations, return_tensors="pt", instance_id_to_semantic_id=instance_id_to_semantic_id, pad_and_return_pixel_mask=True, )
        return inputs
    def test_init_without_params(self):
        pass
    def test_call_with_segmentation_maps(self):
        def common(is_instance_map=False, segmentation_type=None):
            inputs = self.comm_get_image_processor_inputs(
                with_segmentation_maps=True, is_instance_map=is_instance_map, segmentation_type=segmentation_type)
            mask_labels = inputs["mask_labels"]
            class_labels = inputs["class_labels"]
            pixel_values = inputs["pixel_values"]
            text_inputs = inputs["text_inputs"]
            # check the batch_size
            for mask_label, class_label, text_input in zip(mask_labels, class_labels, text_inputs):
                self.assertEqual(mask_label.shape[0], class_label.shape[0])
                # this ensure padding has happened
                self.assertEqual(mask_label.shape[1:], pixel_values.shape[2:])
                self.assertEqual(len(text_input), self.image_processing_tester.num_text)
        common()
        common(is_instance_map=True)
        common(is_instance_map=False, segmentation_type="pil")
        common(is_instance_map=True, segmentation_type="pil")
    def test_binary_mask_to_rle(self):
        fake_binary_mask = np.zeros((20, 50))
        fake_binary_mask[0, 20:] = 1
        fake_binary_mask[1, :15] = 1
        fake_binary_mask[5, :10] = 1
        rle = binary_mask_to_rle(fake_binary_mask)
        self.assertEqual(len(rle), 4)
        self.assertEqual(rle[0], 21)
        self.assertEqual(rle[1], 45)
    def test_post_process_semantic_segmentation(self):
        image_processor = self.image_processing_class(
            num_labels=self.image_processing_tester.num_classes, max_seq_length=77, task_seq_length=77, class_info_file="ade20k_panoptic.json", num_text=self.image_processing_tester.num_text, repo_path="shi-labs/oneformer_demo", )
        outputs = self.image_processing_tester.get_fake_oneformer_outputs()
        segmentation = image_processor.post_process_semantic_segmentation(outputs)
        self.assertEqual(len(segmentation), self.image_processing_tester.batch_size)
        self.assertEqual(
            segmentation[0].shape, (
                self.image_processing_tester.height,
                self.image_processing_tester.width,
            ), )
        target_sizes = [(1, 4) for i in range(self.image_processing_tester.batch_size)]
        segmentation = image_processor.post_process_semantic_segmentation(outputs, target_sizes=target_sizes)
        self.assertEqual(segmentation[0].shape, target_sizes[0])
    def test_post_process_instance_segmentation(self):
        image_processor = self.image_processing_class(
            num_labels=self.image_processing_tester.num_classes, max_seq_length=77, task_seq_length=77, class_info_file="ade20k_panoptic.json", num_text=self.image_processing_tester.num_text, repo_path="shi-labs/oneformer_demo", )
        outputs = self.image_processing_tester.get_fake_oneformer_outputs()
        segmentation = image_processor.post_process_instance_segmentation(outputs, threshold=0)
        self.assertTrue(len(segmentation) == self.image_processing_tester.batch_size)
        for el in segmentation:
            self.assertTrue("segmentation" in el)
            self.assertTrue("segments_info" in el)
            self.assertEqual(type(el["segments_info"]), list)
            self.assertEqual(
                el["segmentation"].shape, (self.image_processing_tester.height, self.image_processing_tester.width))
    def test_post_process_panoptic_segmentation(self):
        image_processor = self.image_processing_class(
            num_labels=self.image_processing_tester.num_classes, max_seq_length=77, task_seq_length=77, class_info_file="ade20k_panoptic.json", num_text=self.image_processing_tester.num_text, repo_path="shi-labs/oneformer_demo", )
        outputs = self.image_processing_tester.get_fake_oneformer_outputs()
        segmentation = image_processor.post_process_panoptic_segmentation(outputs, threshold=0)
        self.assertTrue(len(segmentation) == self.image_processing_tester.batch_size)
        for el in segmentation:
            self.assertTrue("segmentation" in el)
            self.assertTrue("segments_info" in el)
            self.assertEqual(type(el["segments_info"]), list)
            self.assertEqual(
                el["segmentation"].shape, (self.image_processing_tester.height, self.image_processing_tester.width))
| 300 | 0 |
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
import tensorflow as tf
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
logger = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS)
class ImageClassificationPipeline(Pipeline):
    '''Image classification pipeline that predicts the class of an image.'''
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        requires_backends(self, '''vision''')
        self.check_model_type(
            TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
            if self.framework == '''tf'''
            else MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING)
    def _sanitize_parameters(self, top_k=None):
        postprocess_params = {}
        if top_k is not None:
            postprocess_params['''top_k'''] = top_k
        return {}, {}, postprocess_params
    def __call__(self, images, **kwargs):
        return super().__call__(images, **kwargs)
    def preprocess(self, image):
        image = load_image(image)
        model_inputs = self.image_processor(images=image, return_tensors=self.framework)
        return model_inputs
    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs
    def postprocess(self, model_outputs, top_k=5):
        if top_k > self.model.config.num_labels:
            top_k = self.model.config.num_labels
        if self.framework == "pt":
            probs = model_outputs.logits.softmax(-1)[0]
            scores, ids = probs.topk(top_k)
        elif self.framework == "tf":
            probs = stable_softmax(model_outputs.logits, axis=-1)[0]
            topk = tf.math.top_k(probs, k=top_k)
            scores, ids = topk.values.numpy(), topk.indices.numpy()
        else:
            raise ValueError(f"""Unsupported framework: {self.framework}""")
        scores = scores.tolist()
        ids = ids.tolist()
        return [{"score": score, "label": self.model.config.id2label[_id]} for score, _id in zip(scores, ids)]
| 360 |
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
logger = logging.get_logger(__name__)

GPTJ_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''EleutherAI/gpt-j-6B''': '''https://huggingface.co/EleutherAI/gpt-j-6B/resolve/main/config.json''',
# See all GPT-J models at https://huggingface.co/models?filter=gpt_j
}
class GPTJConfig(PretrainedConfig):
    """Configuration class to store the configuration of a GPT-J model."""

    model_type = "gptj"
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=50_400,
        n_positions=2_048,
        n_embd=4_096,
        n_layer=28,
        n_head=16,
        rotary_dim=64,
        n_inner=None,
        activation_function="gelu_new",
        resid_pdrop=0.0,
        embd_pdrop=0.0,
        attn_pdrop=0.0,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        use_cache=True,
        bos_token_id=50_256,
        eos_token_id=50_256,
        tie_word_embeddings=False,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.rotary_dim = rotary_dim
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        super().__init__(
            bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs
        )


class GPTJOnnxConfig(OnnxConfigWithPast):
    def __init__(
        self,
        config: PretrainedConfig,
        task: str = "default",
        patching_specs: List[PatchingSpec] = None,
        use_past: bool = False,
    ):
        super().__init__(config, task=task, patching_specs=patching_specs, use_past=use_past)
        if not getattr(self._config, "pad_token_id", None):
            # TODO: how to do that better?
            self._config.pad_token_id = 0

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}})
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")
            common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"}
        else:
            common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}

        return common_inputs

    @property
    def num_layers(self) -> int:
        return self._config.n_layer

    @property
    def num_attention_heads(self) -> int:
        return self._config.n_head

    def generate_dummy_inputs(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )

        # We need to order the input in the way they appears in the forward()
        ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]})

        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch

                batch, seqlen = common_inputs["input_ids"].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs["past_key_values"] = [
                    (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(self.num_layers)
                ]

        ordered_inputs["attention_mask"] = common_inputs["attention_mask"]
        if self.use_past:
            mask_dtype = ordered_inputs["attention_mask"].dtype
            ordered_inputs["attention_mask"] = torch.cat(
                [ordered_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )

        return ordered_inputs

    @property
    def default_onnx_opset(self) -> int:
        return 13
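# Illustrative sketch, not part of the original module: generating dummy ONNX inputs
# with the config above. The tiny hyperparameters and the tokenizer checkpoint are
# assumptions for the example.
if __name__ == "__main__":
    from transformers import AutoTokenizer

    config = GPTJConfig(n_layer=2, n_head=4, n_embd=64, n_positions=128)
    onnx_config = GPTJOnnxConfig(config)
    tokenizer = AutoTokenizer.from_pretrained("EleutherAI/gpt-j-6B")
    dummy_inputs = onnx_config.generate_dummy_inputs(
        tokenizer, batch_size=2, seq_length=8, framework=TensorType.PYTORCH
    )
    print({name: tuple(tensor.shape) for name, tensor in dummy_inputs.items()})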
| 105 | 0 |
import math_equivalence # From: git+https://github.com/hendrycks/math.git
import datasets
_CITATION = """\
@article{hendrycksmath2021,
title={Measuring Mathematical Problem Solving With the MATH Dataset},
author={Dan Hendrycks
and Collin Burns
and Saurav Kadavath
and Akul Arora
and Steven Basart
and Eric Tang
and Dawn Song
and Jacob Steinhardt},
journal={arXiv preprint arXiv:2103.03874},
year={2021}
}
"""
_DESCRIPTION = """\
This metric is used to assess performance on the Mathematics Aptitude Test of Heuristics (MATH) dataset.
It first canonicalizes the inputs (e.g., converting \"1/2\" to \"\\frac{1}{2}\") and then computes accuracy.
"""
_KWARGS_DESCRIPTION = r"""
Calculates accuracy after canonicalizing inputs.
Args:
predictions: list of predictions to score. Each prediction
is a string that contains natural language and LaTex.
references: list of reference for each prediction. Each
reference is a string that contains natural language
and LaTex.
Returns:
accuracy: accuracy after canonicalizing inputs
(e.g., converting \"1/2\" to \"\\frac{1}{2}\")
Examples:
>>> metric = datasets.load_metric(\"competition_math\")
>>> results = metric.compute(references=[\"\\frac{1}{2}\"], predictions=[\"1/2\"])
>>> print(results)
{'accuracy': 1.0}
"""
@datasets.utils.file_utils.add_end_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class CompetitionMathMetric(datasets.Metric):
    """Accuracy metric for the MATH dataset (hendrycks/math)."""

    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string"),
                    "references": datasets.Value("string"),
                }
            ),
            homepage="https://github.com/hendrycks/math",
            codebase_urls=["https://github.com/hendrycks/math"],
        )

    def _compute(self, predictions, references):
        """Returns accuracy after canonicalizing predictions and references."""
        n_correct = 0.0
        for i, j in zip(predictions, references):
            n_correct += 1.0 if math_equivalence.is_equiv(i, j) else 0.0
        accuracy = n_correct / len(predictions)
return {
"accuracy": accuracy,
}
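# Illustrative sketch, not part of the original file: the underlying equivalence
# check canonicalizes LaTeX before comparing, e.g. "1/2" versus "\\frac{1}{2}".
if __name__ == "__main__":
    print(math_equivalence.is_equiv("1/2", "\\frac{1}{2}"))  # True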
| 140 |
import logging
from transformers import PretrainedConfig
logger = logging.getLogger(__name__)

BERTABS_FINETUNED_CONFIG_MAP = {
"""bertabs-finetuned-cnndm""": """https://huggingface.co/remi/bertabs-finetuned-cnndm-extractive-abstractive-summarization/resolve/main/config.json""",
}
class BertAbsConfig(PretrainedConfig):
    """Configuration for the BertAbs abstractive summarization model."""

    model_type = "bertabs"

    def __init__(
        self,
        vocab_size=30_522,
        max_pos=512,
        enc_layers=6,
        enc_hidden_size=512,
        enc_heads=8,
        enc_ff_size=512,
        enc_dropout=0.2,
        dec_layers=6,
        dec_hidden_size=768,
        dec_heads=8,
        dec_ff_size=2_048,
        dec_dropout=0.2,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.vocab_size = vocab_size
        self.max_pos = max_pos

        self.enc_layers = enc_layers
        self.enc_hidden_size = enc_hidden_size
        self.enc_heads = enc_heads
        self.enc_ff_size = enc_ff_size
        self.enc_dropout = enc_dropout

        self.dec_layers = dec_layers
        self.dec_hidden_size = dec_hidden_size
        self.dec_heads = dec_heads
        self.dec_ff_size = dec_ff_size
        self.dec_dropout = dec_dropout
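# Illustrative sketch, not part of the original file: defaults versus an override.
if __name__ == "__main__":
    config = BertAbsConfig()
    print(config.enc_layers, config.dec_hidden_size)  # 6 768
    wider = BertAbsConfig(dec_ff_size=4096)
    print(wider.dec_ff_size)  # 4096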
| 140 | 1 |
class FlowNetwork:
    def __init__(self, graph, sources, sinks):
        self.source_index = None
        self.sink_index = None
        self.graph = graph

        self._normalize_graph(sources, sinks)
        self.verticies_count = len(graph)
        self.maximum_flow_algorithm = None

    # make only one source and one sink
    def _normalize_graph(self, sources, sinks):
        if isinstance(sources, int):
            sources = [sources]
        if isinstance(sinks, int):
            sinks = [sinks]

        if len(sources) == 0 or len(sinks) == 0:
            return

        self.source_index = sources[0]
        self.sink_index = sinks[0]

        # make fake vertex if there are more
        # than one source or sink
        if len(sources) > 1 or len(sinks) > 1:
            max_input_flow = 0
            for i in sources:
                max_input_flow += sum(self.graph[i])

            size = len(self.graph) + 1
            for room in self.graph:
                room.insert(0, 0)
            self.graph.insert(0, [0] * size)
            for i in sources:
                self.graph[0][i + 1] = max_input_flow
            self.source_index = 0

            size = len(self.graph) + 1
            for room in self.graph:
                room.append(0)
            self.graph.append([0] * size)
            for i in sinks:
                self.graph[i + 1][size - 1] = max_input_flow
            self.sink_index = size - 1

    def find_maximum_flow(self):
        if self.maximum_flow_algorithm is None:
            raise Exception("You need to set maximum flow algorithm before.")
        if self.source_index is None or self.sink_index is None:
            return 0

        self.maximum_flow_algorithm.execute()
        return self.maximum_flow_algorithm.getMaximumFlow()

    def set_maximum_flow_algorithm(self, algorithm):
        self.maximum_flow_algorithm = algorithm(self)


class FlowNetworkAlgorithmExecutor:
    def __init__(self, flow_network):
        self.flow_network = flow_network
        self.verticies_count = flow_network.verticies_count
        self.source_index = flow_network.source_index
        self.sink_index = flow_network.sink_index
        # it's just a reference, so you shouldn't change
        # it in your algorithms, use deep copy before doing that
        self.graph = flow_network.graph
        self.executed = False

    def execute(self):
        if not self.executed:
            self._algorithm()
            self.executed = True

    # You should override this method in subclasses
    def _algorithm(self):
        pass


class MaximumFlowAlgorithmExecutor(FlowNetworkAlgorithmExecutor):
    def __init__(self, flow_network):
        super().__init__(flow_network)
        # use this to save your result
        self.maximum_flow = -1

    def getMaximumFlow(self):
        if not self.executed:
            raise Exception("You should execute algorithm before using its result!")
        return self.maximum_flow


class PushRelabelExecutor(MaximumFlowAlgorithmExecutor):
    def __init__(self, flow_network):
        super().__init__(flow_network)

        self.preflow = [[0] * self.verticies_count for i in range(self.verticies_count)]

        self.heights = [0] * self.verticies_count
        self.excesses = [0] * self.verticies_count

    def _algorithm(self):
        self.heights[self.source_index] = self.verticies_count

        # push some substance to graph
        for nextvertex_index, bandwidth in enumerate(self.graph[self.source_index]):
            self.preflow[self.source_index][nextvertex_index] += bandwidth
            self.preflow[nextvertex_index][self.source_index] -= bandwidth
            self.excesses[nextvertex_index] += bandwidth

        # Relabel-to-front selection rule
        vertices_list = [
            i
            for i in range(self.verticies_count)
            if i != self.source_index and i != self.sink_index
        ]

        # move through list
        i = 0
        while i < len(vertices_list):
            vertex_index = vertices_list[i]
            previous_height = self.heights[vertex_index]
            self.process_vertex(vertex_index)
            if self.heights[vertex_index] > previous_height:
                # if it was relabeled, swap elements
                # and start from 0 index
                vertices_list.insert(0, vertices_list.pop(i))
                i = 0
            else:
                i += 1

        self.maximum_flow = sum(self.preflow[self.source_index])

    def process_vertex(self, vertex_index):
        while self.excesses[vertex_index] > 0:
            for neighbour_index in range(self.verticies_count):
                # if it's neighbour and current vertex is higher
                if (
                    self.graph[vertex_index][neighbour_index]
                    - self.preflow[vertex_index][neighbour_index]
                    > 0
                    and self.heights[vertex_index] > self.heights[neighbour_index]
                ):
                    self.push(vertex_index, neighbour_index)

            self.relabel(vertex_index)

    def push(self, from_index, to_index):
        preflow_delta = min(
            self.excesses[from_index],
            self.graph[from_index][to_index] - self.preflow[from_index][to_index],
        )
        self.preflow[from_index][to_index] += preflow_delta
        self.preflow[to_index][from_index] -= preflow_delta
        self.excesses[from_index] -= preflow_delta
        self.excesses[to_index] += preflow_delta

    def relabel(self, vertex_index):
        min_height = None
        for to_index in range(self.verticies_count):
            if (
                self.graph[vertex_index][to_index]
                - self.preflow[vertex_index][to_index]
                > 0
            ) and (min_height is None or self.heights[to_index] < min_height):
                min_height = self.heights[to_index]

        if min_height is not None:
            self.heights[vertex_index] = min_height + 1
if __name__ == "__main__":
    entrances = [0]
    exits = [3]
    # graph = [
    #     [0, 0, 4, 6, 0, 0],
    #     [0, 0, 5, 2, 0, 0],
    #     [0, 0, 0, 0, 4, 4],
    #     [0, 0, 0, 0, 6, 6],
    #     [0, 0, 0, 0, 0, 0],
    #     [0, 0, 0, 0, 0, 0],
    # ]
    graph = [[0, 7, 0, 0], [0, 0, 6, 0], [0, 0, 0, 8], [9, 0, 0, 0]]

    # prepare our network
    flow_network = FlowNetwork(graph, entrances, exits)
    # set algorithm
    flow_network.set_maximum_flow_algorithm(PushRelabelExecutor)
    # and calculate
    maximum_flow = flow_network.find_maximum_flow()
    print(f"maximum flow is {maximum_flow}")
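    # Illustrative sketch, not part of the original file: exercise the super-source /
    # super-sink normalization above with two entrances; the capacities are made up.
    multi_graph = [
        [0, 0, 5, 0],
        [0, 0, 4, 0],
        [0, 0, 0, 8],
        [0, 0, 0, 0],
    ]
    multi_network = FlowNetwork(multi_graph, [0, 1], [3])
    multi_network.set_maximum_flow_algorithm(PushRelabelExecutor)
    # the sources can push 5 + 4 = 9 into vertex 2, but the edge into the sink caps it at 8
    print(f"multi-source maximum flow is {multi_network.find_maximum_flow()}")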
| 117 |
import warnings
from ...utils import logging
from .image_processing_imagegpt import ImageGPTImageProcessor
logger = logging.get_logger(__name__)


class ImageGPTFeatureExtractor(ImageGPTImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class ImageGPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use ImageGPTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
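# Illustrative sketch, not part of the original file: instantiating the shim emits a
# FutureWarning but otherwise behaves like ImageGPTImageProcessor.
if __name__ == "__main__":
    with warnings.catch_warnings(record=True) as caught:
        warnings.simplefilter("always")
        ImageGPTFeatureExtractor()
    print(caught[0].category.__name__)  # FutureWarning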
| 117 | 1 |
import os
from math import log10


def solution(base_exp: str = "base_exp.txt") -> int:
    """
    Project Euler 99: return the 1-based line number of the base,exponent pair with
    the greatest value base**exponent. Since log10 is monotonic, comparing
    exponent * log10(base) ranks the pairs without computing the huge powers.
    """
    largest: float = 0
    result = 0
    for i, line in enumerate(open(os.path.join(os.path.dirname(__file__), base_exp))):
        a, x = list(map(int, line.split(",")))
        if x * log10(a) > largest:
            largest = x * log10(a)
            result = i + 1

    return result
if __name__ == "__main__":
print(solution())
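    # Illustrative sketch, not part of the original file, with made-up data: the
    # log comparison ranks pairs exactly like comparing a**x directly.
    pairs = [(2, 11), (3, 7), (6, 5)]  # (base, exponent)
    best = max(range(len(pairs)), key=lambda i: pairs[i][1] * log10(pairs[i][0]))
    print(best + 1)  # 3, since 6**5 = 7776 beats 2**11 = 2048 and 3**7 = 2187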
| 110 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import LevitImageProcessor
class LevitImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_center_crop=True,
        crop_size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
    ):
        size = size if size is not None else {"shortest_edge": 18}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std

    def prepare_image_processor_dict(self):
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"do_center_crop": self.do_center_crop,
"size": self.size,
"crop_size": self.crop_size,
}
@require_torch
@require_vision
class LevitImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = LevitImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = LevitImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
def lowerCamelCase_ ( self: Union[str, Any] ) -> str:
"""simple docstring"""
lowercase__ = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(UpperCamelCase_ , '''image_mean''' ) )
self.assertTrue(hasattr(UpperCamelCase_ , '''image_std''' ) )
self.assertTrue(hasattr(UpperCamelCase_ , '''do_normalize''' ) )
self.assertTrue(hasattr(UpperCamelCase_ , '''do_resize''' ) )
self.assertTrue(hasattr(UpperCamelCase_ , '''do_center_crop''' ) )
self.assertTrue(hasattr(UpperCamelCase_ , '''size''' ) )
def lowerCamelCase_ ( self: str ) -> Tuple:
"""simple docstring"""
lowercase__ = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'''shortest_edge''': 18} )
self.assertEqual(image_processor.crop_size , {'''height''': 18, '''width''': 18} )
lowercase__ = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
self.assertEqual(image_processor.size , {'''shortest_edge''': 42} )
self.assertEqual(image_processor.crop_size , {'''height''': 84, '''width''': 84} )
def lowerCamelCase_ ( self: List[str] ) -> Dict:
"""simple docstring"""
pass
def lowerCamelCase_ ( self: List[str] ) -> int:
"""simple docstring"""
lowercase__ = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
lowercase__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase_ )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase_ , Image.Image )
# Test not batched input
lowercase__ = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
lowercase__ = image_processing(UpperCamelCase_ , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
def lowerCamelCase_ ( self: Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
lowercase__ = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
lowercase__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase_ , numpify=UpperCamelCase_ )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase_ , np.ndarray )
# Test not batched input
lowercase__ = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
lowercase__ = image_processing(UpperCamelCase_ , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
def lowerCamelCase_ ( self: Any ) -> Optional[Any]:
"""simple docstring"""
lowercase__ = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
lowercase__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase_ , torchify=UpperCamelCase_ )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase_ , torch.Tensor )
# Test not batched input
lowercase__ = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
lowercase__ = image_processing(UpperCamelCase_ , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
| 110 | 1 |
'''simple docstring'''
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "tokenizer_file": {
        "EleutherAI/gpt-neox-20b": "https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "gpt-neox-20b": 2048,
}


class GPTNeoXTokenizerFast(PreTrainedTokenizerFast):
    """Fast GPT-NeoX-20B tokenizer, backed by HuggingFace's tokenizers library."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        unk_token="<|endoftext|>",
        bos_token="<|endoftext|>",
        eos_token="<|endoftext|>",
        add_prefix_space=False,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )

        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text, add_special_tokens=False) + [self.eos_token_id])

        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
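# Illustrative sketch, not part of the original module: round-tripping text through
# the fast tokenizer; the checkpoint matches the archive map above.
if __name__ == "__main__":
    tok = GPTNeoXTokenizerFast.from_pretrained("EleutherAI/gpt-neox-20b")
    ids = tok("Hello world")["input_ids"]
    print(ids, "->", tok.decode(ids))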
| 352 |
'''simple docstring'''
import argparse
import shutil
from pathlib import Path
from tqdm import tqdm
from transformers import AutoTokenizer
def pack_examples(tok, src_examples, tgt_examples, max_tokens=1024):
    """Greedily merge neighbouring examples until the tokenized result would exceed max_tokens."""
    finished_src, finished_tgt = [], []

    sorted_examples = list(zip(src_examples, tgt_examples))
    new_src, new_tgt = sorted_examples[0]

    def is_too_big(strang):
        return tok(strang, return_tensors="pt").input_ids.shape[1] > max_tokens

    for src, tgt in tqdm(sorted_examples[1:]):
        cand_src = new_src + " " + src
        cand_tgt = new_tgt + " " + tgt
        if is_too_big(cand_src) or is_too_big(cand_tgt):  # cant fit, finalize example
            finished_src.append(new_src)
            finished_tgt.append(new_tgt)
            new_src, new_tgt = src, tgt
        else:  # can fit, keep adding
            new_src, new_tgt = cand_src, cand_tgt

    # cleanup
    if new_src:
        assert new_tgt
        finished_src.append(new_src)
        finished_tgt.append(new_tgt)
    return finished_src, finished_tgt
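# Illustrative sketch, not part of the original script: a whitespace "tokenizer" stub
# is enough to watch pack_examples merge neighbouring rows (call this by hand).
def _pack_examples_demo():
    from types import SimpleNamespace

    class ToyTok:
        def __call__(self, text, return_tensors=None):
            # mimic the call shape used in is_too_big: .input_ids.shape[1]
            return SimpleNamespace(input_ids=SimpleNamespace(shape=(1, len(text.split()))))

    src = ["a b", "c", "d e f"]
    tgt = ["x", "y", "z"]
    print(pack_examples(ToyTok(), src, tgt, max_tokens=3))
    # -> (['a b c', 'd e f'], ['x y', 'z'])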
def pack_data_dir(tok, data_dir: Path, max_tokens, save_path):
    save_path = Path(save_path)
    save_path.mkdir(exist_ok=True)
    for split in ["train"]:
        src_path, tgt_path = data_dir / f"{split}.source", data_dir / f"{split}.target"
        src_docs = [x.rstrip() for x in Path(src_path).open().readlines()]
        tgt_docs = [x.rstrip() for x in Path(tgt_path).open().readlines()]
        packed_src, packed_tgt = pack_examples(tok, src_docs, tgt_docs, max_tokens)
        print(f"packed {split} split from {len(src_docs)} examples -> {len(packed_src)}.")
        Path(save_path / f"{split}.source").open("w").write("\n".join(packed_src))
        Path(save_path / f"{split}.target").open("w").write("\n".join(packed_tgt))
    for split in ["val", "test"]:
        src_path, tgt_path = data_dir / f"{split}.source", data_dir / f"{split}.target"
        shutil.copyfile(src_path, save_path / f"{split}.source")
        shutil.copyfile(tgt_path, save_path / f"{split}.target")


def packer_cli():
    parser = argparse.ArgumentParser()
    parser.add_argument("--tok_name", type=str, help="like facebook/bart-large-cnn,t5-base, etc.")
    parser.add_argument("--max_seq_len", type=int, default=128)
    parser.add_argument("--data_dir", type=str)
    parser.add_argument("--save_path", type=str)
    args = parser.parse_args()
    tokenizer = AutoTokenizer.from_pretrained(args.tok_name)
    return pack_data_dir(tokenizer, Path(args.data_dir), args.max_seq_len, args.save_path)
if __name__ == "__main__":
packer_cli()
| 98 | 0 |
"""simple docstring"""
def _A ( lowercase , lowercase ):
"""simple docstring"""
if b == 0:
return 1
if (b % 2) == 0:
return actual_power(lowercase , int(b / 2 ) ) * actual_power(lowercase , int(b / 2 ) )
else:
return a * actual_power(lowercase , int(b / 2 ) ) * actual_power(lowercase , int(b / 2 ) )
def _A ( lowercase , lowercase ):
"""simple docstring"""
if b < 0:
return 1 / actual_power(lowercase , lowercase )
return actual_power(lowercase , lowercase )
if __name__ == "__main__":
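    # Illustrative sanity checks, not part of the original file:
    assert power(2, 10) == 1024
    assert power(5, 0) == 1
    assert power(2, -3) == 0.125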
    print(power(-2, -3))
| 81 |
'''simple docstring'''
import unittest
from transformers import MPNetConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MPNetForMaskedLM,
MPNetForMultipleChoice,
MPNetForQuestionAnswering,
MPNetForSequenceClassification,
MPNetForTokenClassification,
MPNetModel,
)
class MPNetModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=64,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=64,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def get_large_model_config(self):
        return MPNetConfig.from_pretrained("microsoft/mpnet-base")

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()
        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return MPNetConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
        )

    def create_and_check_mpnet_model(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MPNetModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_mpnet_for_question_answering(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MPNetForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_mpnet_for_sequence_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MPNetForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_mpnet_for_multiple_choice(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = MPNetForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def create_and_check_mpnet_for_token_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MPNetForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class MPNetModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
(
MPNetForMaskedLM,
MPNetForMultipleChoice,
MPNetForQuestionAnswering,
MPNetForSequenceClassification,
MPNetForTokenClassification,
MPNetModel,
)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{
"feature-extraction": MPNetModel,
"fill-mask": MPNetForMaskedLM,
"question-answering": MPNetForQuestionAnswering,
"text-classification": MPNetForSequenceClassification,
"token-classification": MPNetForTokenClassification,
"zero-shot": MPNetForSequenceClassification,
}
if is_torch_available()
else {}
)
    test_pruning = False
    test_resize_embeddings = True
    def setUp(self):
        self.model_tester = MPNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MPNetConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_mpnet_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_model(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_for_sequence_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_for_multiple_choice(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_for_token_classification(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_for_question_answering(*config_and_inputs)
@require_torch
class MPNetModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = MPNetModel.from_pretrained("microsoft/mpnet-base")
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        output = model(input_ids)[0]
        expected_shape = torch.Size((1, 11, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-0.0550, 0.1943, -0.0740], [-0.0562, 0.2211, -0.0579], [-0.0437, 0.3337, -0.0641]]]
        )
        # compare the actual values for a slice.
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
| 56 | 0 |
"""simple docstring"""
import random
import unittest
from torch.utils.data import BatchSampler, DataLoader, IterableDataset
from accelerate import Accelerator
from accelerate.data_loader import (
BatchSamplerShard,
DataLoaderDispatcher,
DataLoaderShard,
IterableDatasetShard,
SkipBatchSampler,
SkipDataLoader,
skip_first_batches,
)
class RandomIterableDataset(IterableDataset):
    # Dummy iterable dataset with a random stopping point
    def __init__(self, p_stop=0.01, max_length=1000):
        self.p_stop = p_stop
        self.max_length = max_length

    def __iter__(self):
        count = 0
        stop = False
        while not stop and count < self.max_length:
            yield count
            count += 1
            stop = random.random() < self.p_stop


class DataLoaderTester(unittest.TestCase):
    def check_batch_sampler_shards(self, batch_sampler, expected, split_batches=False, even_batches=True):
        batch_sampler_shards = [
            BatchSamplerShard(batch_sampler, 2, i, split_batches=split_batches, even_batches=even_batches)
            for i in range(2)
        ]
        batch_sampler_lists = [list(batch_sampler_shard) for batch_sampler_shard in batch_sampler_shards]
        if not split_batches:
            # The last batch may not have the same number of items on every shard
            self.assertListEqual([len(shard) for shard in batch_sampler_shards], [len(e) for e in expected])
        self.assertListEqual(batch_sampler_lists, expected)
def _SCREAMING_SNAKE_CASE ( self : Optional[int]):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Any = BatchSampler(range(24) , batch_size=3 , drop_last=lowercase_)
SCREAMING_SNAKE_CASE_ : Dict = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]],
]
self.check_batch_sampler_shards(lowercase_ , lowercase_)
SCREAMING_SNAKE_CASE_ : str = BatchSampler(range(24) , batch_size=3 , drop_last=lowercase_)
# Expected shouldn't change
self.check_batch_sampler_shards(lowercase_ , lowercase_)
# Check the shards when the dataset is a round multiple of batch size but not total batch size.
SCREAMING_SNAKE_CASE_ : int = BatchSampler(range(21) , batch_size=3 , drop_last=lowercase_)
SCREAMING_SNAKE_CASE_ : Tuple = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [0, 1, 2]],
]
self.check_batch_sampler_shards(lowercase_ , lowercase_)
SCREAMING_SNAKE_CASE_ : Union[str, Any] = BatchSampler(range(21) , batch_size=3 , drop_last=lowercase_)
SCREAMING_SNAKE_CASE_ : int = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(lowercase_ , lowercase_)
# Check the shards when the dataset is not a round multiple of batch size but has a multiple of
# num_processes batch.
SCREAMING_SNAKE_CASE_ : int = BatchSampler(range(22) , batch_size=3 , drop_last=lowercase_)
SCREAMING_SNAKE_CASE_ : Dict = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 0, 1]],
]
self.check_batch_sampler_shards(lowercase_ , lowercase_)
SCREAMING_SNAKE_CASE_ : str = BatchSampler(range(22) , batch_size=3 , drop_last=lowercase_)
SCREAMING_SNAKE_CASE_ : Union[str, Any] = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(lowercase_ , lowercase_)
# Check the shards when the dataset is not a round multiple of batch size but and has not a multiple of
# num_processes batch.
SCREAMING_SNAKE_CASE_ : Any = BatchSampler(range(20) , batch_size=3 , drop_last=lowercase_)
SCREAMING_SNAKE_CASE_ : Optional[Any] = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 0]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [1, 2, 3]],
]
self.check_batch_sampler_shards(lowercase_ , lowercase_)
SCREAMING_SNAKE_CASE_ : List[str] = BatchSampler(range(20) , batch_size=3 , drop_last=lowercase_)
SCREAMING_SNAKE_CASE_ : List[Any] = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(lowercase_ , lowercase_)
# Check the shards when the dataset is very small.
SCREAMING_SNAKE_CASE_ : Dict = BatchSampler(range(2) , batch_size=3 , drop_last=lowercase_)
SCREAMING_SNAKE_CASE_ : List[str] = [[[0, 1, 0]], [[1, 0, 1]]]
self.check_batch_sampler_shards(lowercase_ , lowercase_)
SCREAMING_SNAKE_CASE_ : Optional[Any] = BatchSampler(range(2) , batch_size=3 , drop_last=lowercase_)
SCREAMING_SNAKE_CASE_ : Dict = [[], []]
self.check_batch_sampler_shards(lowercase_ , lowercase_)
def _SCREAMING_SNAKE_CASE ( self : List[Any]):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Dict = BatchSampler(range(24) , batch_size=4 , drop_last=lowercase_)
SCREAMING_SNAKE_CASE_ : List[Any] = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]],
]
self.check_batch_sampler_shards(lowercase_ , lowercase_ , split_batches=lowercase_)
SCREAMING_SNAKE_CASE_ : str = BatchSampler(range(24) , batch_size=4 , drop_last=lowercase_)
# Expected shouldn't change
self.check_batch_sampler_shards(lowercase_ , lowercase_ , split_batches=lowercase_)
# Check the shards when the dataset is not a round multiple of batch size.
SCREAMING_SNAKE_CASE_ : List[str] = BatchSampler(range(22) , batch_size=4 , drop_last=lowercase_)
SCREAMING_SNAKE_CASE_ : List[str] = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [0, 1]],
]
self.check_batch_sampler_shards(lowercase_ , lowercase_ , split_batches=lowercase_)
SCREAMING_SNAKE_CASE_ : Dict = BatchSampler(range(22) , batch_size=4 , drop_last=lowercase_)
SCREAMING_SNAKE_CASE_ : Dict = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(lowercase_ , lowercase_ , split_batches=lowercase_)
# Check the shards when the dataset is not a round multiple of batch size or num_processes.
SCREAMING_SNAKE_CASE_ : Any = BatchSampler(range(21) , batch_size=4 , drop_last=lowercase_)
SCREAMING_SNAKE_CASE_ : Optional[int] = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 0]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [1, 2]],
]
self.check_batch_sampler_shards(lowercase_ , lowercase_ , split_batches=lowercase_)
SCREAMING_SNAKE_CASE_ : int = BatchSampler(range(21) , batch_size=4 , drop_last=lowercase_)
SCREAMING_SNAKE_CASE_ : Tuple = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(lowercase_ , lowercase_ , split_batches=lowercase_)
# Check the shards when the dataset is very small.
SCREAMING_SNAKE_CASE_ : Dict = BatchSampler(range(2) , batch_size=4 , drop_last=lowercase_)
SCREAMING_SNAKE_CASE_ : str = [[[0, 1]], [[0, 1]]]
self.check_batch_sampler_shards(lowercase_ , lowercase_ , split_batches=lowercase_)
SCREAMING_SNAKE_CASE_ : Any = BatchSampler(range(2) , batch_size=4 , drop_last=lowercase_)
SCREAMING_SNAKE_CASE_ : int = [[], []]
self.check_batch_sampler_shards(lowercase_ , lowercase_ , split_batches=lowercase_)
def _SCREAMING_SNAKE_CASE ( self : List[Any]):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Any = BatchSampler(range(24) , batch_size=3 , drop_last=lowercase_)
SCREAMING_SNAKE_CASE_ : List[Any] = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]],
]
self.check_batch_sampler_shards(lowercase_ , lowercase_ , even_batches=lowercase_)
SCREAMING_SNAKE_CASE_ : str = BatchSampler(range(24) , batch_size=3 , drop_last=lowercase_)
# Expected shouldn't change
self.check_batch_sampler_shards(lowercase_ , lowercase_ , even_batches=lowercase_)
# Check the shards when the dataset is a round multiple of batch size but not total batch size.
SCREAMING_SNAKE_CASE_ : Optional[int] = BatchSampler(range(21) , batch_size=3 , drop_last=lowercase_)
SCREAMING_SNAKE_CASE_ : Dict = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(lowercase_ , lowercase_ , even_batches=lowercase_)
SCREAMING_SNAKE_CASE_ : Any = BatchSampler(range(21) , batch_size=3 , drop_last=lowercase_)
SCREAMING_SNAKE_CASE_ : Tuple = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(lowercase_ , lowercase_ , even_batches=lowercase_)
# Check the shards when the dataset is not a round multiple of batch size but has a multiple of
# num_processes batch.
SCREAMING_SNAKE_CASE_ : List[Any] = BatchSampler(range(22) , batch_size=3 , drop_last=lowercase_)
SCREAMING_SNAKE_CASE_ : Dict = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21]],
]
self.check_batch_sampler_shards(lowercase_ , lowercase_ , even_batches=lowercase_)
SCREAMING_SNAKE_CASE_ : Any = BatchSampler(range(22) , batch_size=3 , drop_last=lowercase_)
SCREAMING_SNAKE_CASE_ : int = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(lowercase_ , lowercase_ , even_batches=lowercase_)
# Check the shards when the dataset is not a round multiple of batch size but and has not a multiple of
# num_processes batch.
SCREAMING_SNAKE_CASE_ : List[Any] = BatchSampler(range(20) , batch_size=3 , drop_last=lowercase_)
SCREAMING_SNAKE_CASE_ : List[Any] = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(lowercase_ , lowercase_ , even_batches=lowercase_)
SCREAMING_SNAKE_CASE_ : List[Any] = BatchSampler(range(20) , batch_size=3 , drop_last=lowercase_)
SCREAMING_SNAKE_CASE_ : List[Any] = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(lowercase_ , lowercase_ , even_batches=lowercase_)
# Check the shards when the dataset is very small.
SCREAMING_SNAKE_CASE_ : int = BatchSampler(range(2) , batch_size=3 , drop_last=lowercase_)
SCREAMING_SNAKE_CASE_ : Dict = [[[0, 1]], []]
self.check_batch_sampler_shards(lowercase_ , lowercase_ , even_batches=lowercase_)
SCREAMING_SNAKE_CASE_ : int = BatchSampler(range(2) , batch_size=3 , drop_last=lowercase_)
SCREAMING_SNAKE_CASE_ : Optional[Any] = [[], []]
self.check_batch_sampler_shards(lowercase_ , lowercase_ , even_batches=lowercase_)
def _SCREAMING_SNAKE_CASE ( self : Dict):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Union[str, Any] = BatchSampler(range(24) , batch_size=4 , drop_last=lowercase_)
SCREAMING_SNAKE_CASE_ : List[Any] = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]],
]
self.check_batch_sampler_shards(lowercase_ , lowercase_ , split_batches=lowercase_ , even_batches=lowercase_)
SCREAMING_SNAKE_CASE_ : Optional[Any] = BatchSampler(range(24) , batch_size=4 , drop_last=lowercase_)
# Expected shouldn't change
self.check_batch_sampler_shards(lowercase_ , lowercase_ , split_batches=lowercase_ , even_batches=lowercase_)
# Check the shards when the dataset is not a round multiple of batch size.
SCREAMING_SNAKE_CASE_ : List[Any] = BatchSampler(range(22) , batch_size=4 , drop_last=lowercase_)
SCREAMING_SNAKE_CASE_ : List[str] = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(lowercase_ , lowercase_ , split_batches=lowercase_ , even_batches=lowercase_)
SCREAMING_SNAKE_CASE_ : Optional[int] = BatchSampler(range(22) , batch_size=4 , drop_last=lowercase_)
SCREAMING_SNAKE_CASE_ : List[Any] = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(lowercase_ , lowercase_ , split_batches=lowercase_ , even_batches=lowercase_)
# Check the shards when the dataset is not a round multiple of batch size or num_processes.
SCREAMING_SNAKE_CASE_ : int = BatchSampler(range(21) , batch_size=4 , drop_last=lowercase_)
SCREAMING_SNAKE_CASE_ : Union[str, Any] = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(lowercase_ , lowercase_ , split_batches=lowercase_ , even_batches=lowercase_)
SCREAMING_SNAKE_CASE_ : Optional[int] = BatchSampler(range(21) , batch_size=4 , drop_last=lowercase_)
SCREAMING_SNAKE_CASE_ : Dict = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(lowercase_ , lowercase_ , split_batches=lowercase_ , even_batches=lowercase_)
# Check the shards when the dataset is very small.
SCREAMING_SNAKE_CASE_ : Optional[int] = BatchSampler(range(2) , batch_size=4 , drop_last=lowercase_)
SCREAMING_SNAKE_CASE_ : Dict = [[[0, 1]], []]
self.check_batch_sampler_shards(lowercase_ , lowercase_ , split_batches=lowercase_ , even_batches=lowercase_)
SCREAMING_SNAKE_CASE_ : Dict = BatchSampler(range(2) , batch_size=4 , drop_last=lowercase_)
SCREAMING_SNAKE_CASE_ : Optional[Any] = [[], []]
self.check_batch_sampler_shards(lowercase_ , lowercase_ , split_batches=lowercase_ , even_batches=lowercase_)
    def test_batch_sampler_with_varying_batch_size(self):
        batch_sampler = [[0, 1, 2], [3, 4], [5, 6, 7, 8], [9, 10, 11], [12, 13]]
        batch_sampler_shards = [BatchSamplerShard(batch_sampler, 2, i, even_batches=False) for i in range(2)]

        self.assertEqual(len(batch_sampler_shards[0]), 3)
        self.assertEqual(len(batch_sampler_shards[1]), 2)

        self.assertListEqual(list(batch_sampler_shards[0]), [[0, 1, 2], [5, 6, 7, 8], [12, 13]])
        self.assertListEqual(list(batch_sampler_shards[1]), [[3, 4], [9, 10, 11]])
    def check_iterable_dataset_shards(
        self, dataset, seed, batch_size, drop_last=False, num_processes=2, split_batches=False
    ):
        random.seed(seed)
        reference = list(dataset)

        iterable_dataset_shards = [
            IterableDatasetShard(
                dataset,
                batch_size=batch_size,
                drop_last=drop_last,
                num_processes=num_processes,
                process_index=i,
                split_batches=split_batches,
            )
            for i in range(num_processes)
        ]
        iterable_dataset_lists = []
        for iterable_dataset_shard in iterable_dataset_shards:
            # Since our random iterable dataset will be... random... we need to use a seed to get reproducible results.
            random.seed(seed)
            iterable_dataset_lists.append(list(iterable_dataset_shard))

        shard_batch_size = batch_size // num_processes if split_batches else batch_size
        # All iterable dataset shards should have the same length, a round multiple of shard_batch_size
        first_list = iterable_dataset_lists[0]
        for l in iterable_dataset_lists[1:]:
            self.assertEqual(len(l), len(first_list))
            self.assertTrue(len(l) % shard_batch_size == 0)

        observed = []
        for idx in range(0, len(first_list), shard_batch_size):
            for l in iterable_dataset_lists:
                observed += l[idx : idx + shard_batch_size]

        if not drop_last:
            while len(reference) < len(observed):
                reference += reference
        self.assertListEqual(observed, reference[: len(observed)])
    def test_iterable_dataset_shard(self):
        seed = 42
        dataset = RandomIterableDataset()

        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=False)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=False)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=True)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=True)

        # Edge case with a very small dataset
        dataset = RandomIterableDataset(max_length=2)

        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=False)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=False)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=True)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=True)
    def test_skip_batch_sampler(self):
        batch_sampler = BatchSampler(range(16), batch_size=4, drop_last=False)
        new_batch_sampler = SkipBatchSampler(batch_sampler, 2)
        self.assertListEqual(list(new_batch_sampler), [[8, 9, 10, 11], [12, 13, 14, 15]])

    def test_skip_data_loader(self):
        dataloader = SkipDataLoader(list(range(16)), batch_size=4, skip_batches=2)
        self.assertListEqual([t.tolist() for t in dataloader], [[8, 9, 10, 11], [12, 13, 14, 15]])

    def test_skip_first_batches(self):
        dataloader = DataLoader(list(range(16)), batch_size=4)
        new_dataloader = skip_first_batches(dataloader, num_batches=2)
        self.assertListEqual([t.tolist() for t in new_dataloader], [[8, 9, 10, 11], [12, 13, 14, 15]])

    def test_end_of_dataloader(self):
        dataloader = DataLoaderShard(list(range(16)), batch_size=4)
        for idx, _ in enumerate(dataloader):
            self.assertEqual(dataloader.end_of_dataloader, idx == 3)

        # Test it also works on the second iteration
        for idx, _ in enumerate(dataloader):
            self.assertEqual(dataloader.end_of_dataloader, idx == 3)

    def test_end_of_dataloader_dispatcher(self):
        Accelerator()
        dataloader = DataLoaderDispatcher(range(16), batch_size=4)
        for idx, _ in enumerate(dataloader):
            self.assertEqual(dataloader.end_of_dataloader, idx == 3)

        # Test it also works on the second iteration
        for idx, _ in enumerate(dataloader):
            self.assertEqual(dataloader.end_of_dataloader, idx == 3)
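# Illustrative sketch, not part of the original test file: how BatchSamplerShard
# splits one batch sampler across two processes, mirroring the expectations above.
if __name__ == "__main__":
    sampler = BatchSampler(range(12), batch_size=3, drop_last=False)
    shards = [BatchSamplerShard(sampler, 2, i) for i in range(2)]
    print([list(shard) for shard in shards])
    # [[[0, 1, 2], [6, 7, 8]], [[3, 4, 5], [9, 10, 11]]]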
| 318 |
"""simple docstring"""
import json
import multiprocessing
import os
import re
from collections import defaultdict
import torch
from accelerate import Accelerator
from accelerate.utils import set_seed
from arguments import HumanEvalArguments
from datasets import load_dataset, load_metric
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from tqdm import tqdm
import transformers
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, StoppingCriteria, StoppingCriteriaList
EOF_STRINGS = ["\nclass", "\ndef", "\n#", "\n@", "\nprint", "\nif"]
class TokenizedDataset(IterableDataset):
    """Tokenize and preprocess the dataset; the same prompt is yielded n_copies times per task."""

    def __init__(self, tokenizer, dataset, n_tasks=None, n_copies=1):
        self.tokenizer = tokenizer
        self.dataset = dataset
        self.n_tasks = len(dataset) if n_tasks is None else n_tasks
        self.n_copies = n_copies

    def __iter__(self):
        prompts = []
        for task in range(self.n_tasks):
            # without strip, the model generate commented codes ...
            prompts.append(self.tokenizer.eos_token + self.dataset[task]["prompt"].strip())
        outputs = self.tokenizer(prompts, padding=True, return_tensors="pt")
        for task in range(self.n_tasks):
            for _ in range(self.n_copies):
                yield {
                    "ids": outputs.input_ids[task],
                    "task_id": task,
                    "input_len": outputs.attention_mask[task].sum(),
                }


class EndOfFunctionCriteria(StoppingCriteria):
    """Custom `StoppingCriteria` which checks if all generated functions in the batch are completed."""

    def __init__(self, start_length, eof_strings, tokenizer):
        self.start_length = start_length
        self.eof_strings = eof_strings
        self.tokenizer = tokenizer

    def __call__(self, input_ids, scores, **kwargs):
        """Returns True if all generated sequences contain any of the end-of-function strings."""
        decoded_generations = self.tokenizer.batch_decode(input_ids[:, self.start_length :])
        done = []
        for decoded_generation in decoded_generations:
            done.append(any(stop_string in decoded_generation for stop_string in self.eof_strings))
        return all(done)
def remove_last_block(string):
    """Remove the last block of the code containing EOF_STRINGS."""
    string_list = re.split("(%s)" % "|".join(EOF_STRINGS), string)
    # last string should be ""
    return "".join(string_list[:-2])
def complete_code(accelerator, model, tokenizer, dataloader, n_tasks, batch_size=20, **gen_kwargs):
    """Generate multiple completions for each task in the dataset, gathering across processes."""
    gen_token_dict = defaultdict(list)  # dict of list of generated tokens
    for step, batch in tqdm(enumerate(dataloader)):
        with torch.no_grad():
            gen_kwargs["stopping_criteria"][0].start_length = batch["ids"].shape[-1]
            generated_tokens = accelerator.unwrap_model(model).generate(
                input_ids=batch["ids"][:, : batch["input_len"]], num_return_sequences=batch_size, **gen_kwargs
            )
            # each task is generated batch_size times
            generated_tasks = batch["task_id"].repeat(batch_size)
            generated_tokens = accelerator.pad_across_processes(
                generated_tokens, dim=1, pad_index=tokenizer.pad_token_id
            )

            generated_tokens, generated_tasks = accelerator.gather((generated_tokens, generated_tasks))
            generated_tokens = generated_tokens.cpu().numpy()
            generated_tasks = generated_tasks.cpu().numpy()

            for task, generated_tokens in zip(generated_tasks, generated_tokens):
                gen_token_dict[task].append(generated_tokens)

    code_gens = [[] for _ in range(n_tasks)]
    for task, generated_tokens in gen_token_dict.items():
        for s in generated_tokens:
            gen_code = tokenizer.decode(s, skip_special_tokens=True, clean_up_tokenization_spaces=True)
            code_gens[task].append(remove_last_block(gen_code))
    return code_gens
def main():
    # Setup configuration (HumanEvalArguments is the dataclass defined earlier in this script)
    parser = HfArgumentParser(HumanEvalArguments)
    args = parser.parse_args()

    transformers.logging.set_verbosity_error()
    # enables code execution in code_eval metric
    os.environ["HF_ALLOW_CODE_EVAL"] = args.HF_ALLOW_CODE_EVAL
    # make sure tokenizer plays nice with multiprocessing
    os.environ["TOKENIZERS_PARALLELISM"] = "false"

    if args.num_workers is None:
        args.num_workers = multiprocessing.cpu_count()

    # Use dataset load to feed to accelerate
    accelerator = Accelerator()
    set_seed(args.seed, device_specific=True)

    # Load model and tokenizer
    tokenizer = AutoTokenizer.from_pretrained(args.model_ckpt)
    tokenizer.pad_token = tokenizer.eos_token
    model = AutoModelForCausalLM.from_pretrained(args.model_ckpt)

    # Generation settings
    gen_kwargs = {
        "do_sample": args.do_sample,
        "temperature": args.temperature,
        "max_new_tokens": args.max_new_tokens,
        "top_p": args.top_p,
        "top_k": args.top_k,
        "stopping_criteria": StoppingCriteriaList([EndOfFunctionCriteria(0, EOF_STRINGS, tokenizer)]),
    }

    # Load evaluation dataset and metric
    human_eval = load_dataset("openai_humaneval")
    code_eval_metric = load_metric("code_eval")

    n_tasks = args.num_tasks if args.num_tasks is not None else len(human_eval["test"])
    n_copies = args.n_samples // args.batch_size

    human_eval_tokenized = TokenizedDataset(tokenizer, human_eval["test"], n_copies=n_copies, n_tasks=n_tasks)
    # do not confuse args.batch_size, which is actually the num_return_sequences
    human_eval_loader = DataLoader(human_eval_tokenized, batch_size=1)

    # Run a quick test to see if code evaluation is enabled
    try:
        _ = code_eval_metric.compute(references=[""], predictions=[[""]])
    except ValueError as exception:
        print(
            'Code evaluation not enabled. Read the warning below carefully and then use `--HF_ALLOW_CODE_EVAL="1"`'
            " flag to enable code evaluation."
        )
        raise exception

    model, human_eval_loader = accelerator.prepare(model, human_eval_loader)

    generations = complete_code(
        accelerator, model, tokenizer, human_eval_loader, n_tasks=n_tasks, batch_size=args.batch_size, **gen_kwargs
    )

    if accelerator.is_main_process:
        references = []
        for task in tqdm(range(n_tasks)):
            test_func = human_eval["test"][task]["test"]
            entry_point = f'check({human_eval["test"][task]["entry_point"]})'
            references.append("\n" + test_func + "\n" + entry_point)

        # Evaluate completions with "code_eval" metric
        pass_at_k, _ = code_eval_metric.compute(
            references=references, predictions=generations, num_workers=args.num_workers
        )
        print(f"Results: {pass_at_k}")

        # Save results to json file
        with open(args.output_file, "w") as fp:
            json.dump(pass_at_k, fp)


# For some reason the following seems to be necessary sometimes for code_eval to work nice with multiprocessing
# https://stackoverflow.com/questions/60804599/python-multiprocessing-keeps-spawning-the-whole-script
if __name__ == "__main__":
    main()
| 318 | 1 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DetrConfig, DetrForObjectDetection, DetrForSegmentation, DetrImageProcessor, ResNetConfig
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_detr_config(model_name: str):
    """Build the HF DetrConfig (plus a panoptic flag) for the given model name."""
    # initialize config
    if "resnet-50" in model_name:
        backbone_config = ResNetConfig.from_pretrained('microsoft/resnet-50')
    elif "resnet-101" in model_name:
        backbone_config = ResNetConfig.from_pretrained('microsoft/resnet-101')
    else:
        raise ValueError('Model name should include either resnet50 or resnet101')

    config = DetrConfig(use_timm_backbone=False, backbone_config=backbone_config)

    # set label attributes
    is_panoptic = 'panoptic' in model_name
    if is_panoptic:
        config.num_labels = 250
    else:
        config.num_labels = 91
    repo_id = 'huggingface/label-files'
    filename = 'coco-detection-id2label.json'
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type='dataset'), 'r'))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
return config, is_panoptic
def create_rename_keys(config):
    """Build (original key, HF key) pairs for everything except the q/k/v projections."""
    rename_keys = []
# stem
# fmt: off
rename_keys.append(('backbone.0.body.conv1.weight', 'backbone.conv_encoder.model.embedder.embedder.convolution.weight') )
rename_keys.append(('backbone.0.body.bn1.weight', 'backbone.conv_encoder.model.embedder.embedder.normalization.weight') )
rename_keys.append(('backbone.0.body.bn1.bias', 'backbone.conv_encoder.model.embedder.embedder.normalization.bias') )
rename_keys.append(('backbone.0.body.bn1.running_mean', 'backbone.conv_encoder.model.embedder.embedder.normalization.running_mean') )
rename_keys.append(('backbone.0.body.bn1.running_var', 'backbone.conv_encoder.model.embedder.embedder.normalization.running_var') )
# stages
for stage_idx in range(len(config.backbone_config.depths ) ):
for layer_idx in range(config.backbone_config.depths[stage_idx] ):
# shortcut
if layer_idx == 0:
rename_keys.append(
(
F"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.0.weight",
F"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.convolution.weight",
) )
rename_keys.append(
(
F"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.weight",
F"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.weight",
) )
rename_keys.append(
(
F"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.bias",
F"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.bias",
) )
rename_keys.append(
(
F"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.running_mean",
F"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.running_mean",
) )
rename_keys.append(
(
F"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.running_var",
F"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.running_var",
) )
# 3 convs
for i in range(3 ):
rename_keys.append(
(
F"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.conv{i+1}.weight",
F"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.convolution.weight",
) )
rename_keys.append(
(
F"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.weight",
F"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.weight",
) )
rename_keys.append(
(
F"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.bias",
F"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.bias",
) )
rename_keys.append(
(
F"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.running_mean",
F"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.running_mean",
) )
rename_keys.append(
(
F"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.running_var",
F"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.running_var",
) )
# fmt: on
for i in range(config.encoder_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(
F"transformer.encoder.layers.{i}.self_attn.out_proj.weight",
F"encoder.layers.{i}.self_attn.out_proj.weight",
) )
rename_keys.append(
(F"transformer.encoder.layers.{i}.self_attn.out_proj.bias", F"encoder.layers.{i}.self_attn.out_proj.bias") )
rename_keys.append((F"transformer.encoder.layers.{i}.linear1.weight", F"encoder.layers.{i}.fc1.weight") )
rename_keys.append((F"transformer.encoder.layers.{i}.linear1.bias", F"encoder.layers.{i}.fc1.bias") )
rename_keys.append((F"transformer.encoder.layers.{i}.linear2.weight", F"encoder.layers.{i}.fc2.weight") )
rename_keys.append((F"transformer.encoder.layers.{i}.linear2.bias", F"encoder.layers.{i}.fc2.bias") )
rename_keys.append(
(F"transformer.encoder.layers.{i}.norm1.weight", F"encoder.layers.{i}.self_attn_layer_norm.weight") )
rename_keys.append(
(F"transformer.encoder.layers.{i}.norm1.bias", F"encoder.layers.{i}.self_attn_layer_norm.bias") )
rename_keys.append(
(F"transformer.encoder.layers.{i}.norm2.weight", F"encoder.layers.{i}.final_layer_norm.weight") )
rename_keys.append((F"transformer.encoder.layers.{i}.norm2.bias", F"encoder.layers.{i}.final_layer_norm.bias") )
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(
F"transformer.decoder.layers.{i}.self_attn.out_proj.weight",
F"decoder.layers.{i}.self_attn.out_proj.weight",
) )
rename_keys.append(
(F"transformer.decoder.layers.{i}.self_attn.out_proj.bias", F"decoder.layers.{i}.self_attn.out_proj.bias") )
rename_keys.append(
(
F"transformer.decoder.layers.{i}.multihead_attn.out_proj.weight",
F"decoder.layers.{i}.encoder_attn.out_proj.weight",
) )
rename_keys.append(
(
F"transformer.decoder.layers.{i}.multihead_attn.out_proj.bias",
F"decoder.layers.{i}.encoder_attn.out_proj.bias",
) )
rename_keys.append((F"transformer.decoder.layers.{i}.linear1.weight", F"decoder.layers.{i}.fc1.weight") )
rename_keys.append((F"transformer.decoder.layers.{i}.linear1.bias", F"decoder.layers.{i}.fc1.bias") )
rename_keys.append((F"transformer.decoder.layers.{i}.linear2.weight", F"decoder.layers.{i}.fc2.weight") )
rename_keys.append((F"transformer.decoder.layers.{i}.linear2.bias", F"decoder.layers.{i}.fc2.bias") )
rename_keys.append(
(F"transformer.decoder.layers.{i}.norm1.weight", F"decoder.layers.{i}.self_attn_layer_norm.weight") )
rename_keys.append(
(F"transformer.decoder.layers.{i}.norm1.bias", F"decoder.layers.{i}.self_attn_layer_norm.bias") )
rename_keys.append(
(F"transformer.decoder.layers.{i}.norm2.weight", F"decoder.layers.{i}.encoder_attn_layer_norm.weight") )
rename_keys.append(
(F"transformer.decoder.layers.{i}.norm2.bias", F"decoder.layers.{i}.encoder_attn_layer_norm.bias") )
rename_keys.append(
(F"transformer.decoder.layers.{i}.norm3.weight", F"decoder.layers.{i}.final_layer_norm.weight") )
rename_keys.append((F"transformer.decoder.layers.{i}.norm3.bias", F"decoder.layers.{i}.final_layer_norm.bias") )
# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
rename_keys.extend(
[
('input_proj.weight', 'input_projection.weight'),
('input_proj.bias', 'input_projection.bias'),
('query_embed.weight', 'query_position_embeddings.weight'),
('transformer.decoder.norm.weight', 'decoder.layernorm.weight'),
('transformer.decoder.norm.bias', 'decoder.layernorm.bias'),
('class_embed.weight', 'class_labels_classifier.weight'),
('class_embed.bias', 'class_labels_classifier.bias'),
('bbox_embed.layers.0.weight', 'bbox_predictor.layers.0.weight'),
('bbox_embed.layers.0.bias', 'bbox_predictor.layers.0.bias'),
('bbox_embed.layers.1.weight', 'bbox_predictor.layers.1.weight'),
('bbox_embed.layers.1.bias', 'bbox_predictor.layers.1.bias'),
('bbox_embed.layers.2.weight', 'bbox_predictor.layers.2.weight'),
('bbox_embed.layers.2.bias', 'bbox_predictor.layers.2.bias'),
] )
return rename_keys
def rename_key(state_dict, old, new):
    """Pop `old` from the state dict and re-insert its value under `new`."""
    val = state_dict.pop(old)
    state_dict[new] = val
def read_in_q_k_v(state_dict, is_panoptic=False):
    """Split each fused in_proj matrix into separate q/k/v projections (HF naming)."""
    prefix = ''
    if is_panoptic:
        prefix = 'detr.'
    # first: transformer encoder
    for i in range(6):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        # (256 is DETR's hidden size, so q/k/v are stacked 256-row blocks)
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
    # next: transformer decoder (which is a bit more complex because it also includes cross-attention)
    for i in range(6):
        # read in weights + bias of input projection layer of self-attention
        in_proj_weight = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"decoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"decoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"decoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"decoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"decoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"decoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
        # read in weights + bias of input projection layer of cross-attention
        in_proj_weight_cross_attn = state_dict.pop(
            f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight"
        )
        in_proj_bias_cross_attn = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias")
        # next, add query, keys and values (in that order) of cross-attention to the state dict
        state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.weight"] = in_proj_weight_cross_attn[:256, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.bias"] = in_proj_bias_cross_attn[:256]
        state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.weight"] = in_proj_weight_cross_attn[256:512, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.bias"] = in_proj_bias_cross_attn[256:512]
        state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.weight"] = in_proj_weight_cross_attn[-256:, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.bias"] = in_proj_bias_cross_attn[-256:]
def prepare_img():
    """Fetch the standard COCO test image used to sanity-check conversions."""
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_detr_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
    """Copy/paste/tweak the original DETR weights into the HF DETR structure."""
    # load default config
    config, is_panoptic = get_detr_config(model_name)

    # load original model from torch hub
    model_name_to_original_name = {
        'detr-resnet-50': 'detr_resnet50',
        'detr-resnet-101': 'detr_resnet101',
    }
    logger.info(f"Converting model {model_name}...")
    detr = torch.hub.load('facebookresearch/detr', model_name_to_original_name[model_name], pretrained=True).eval()
    state_dict = detr.state_dict()
    # rename keys
    for src, dest in create_rename_keys(config):
        if is_panoptic:
            src = 'detr.' + src
        rename_key(state_dict, src, dest)
    # query, key and value matrices need special treatment
    read_in_q_k_v(state_dict, is_panoptic=is_panoptic)
    # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
    prefix = 'detr.model.' if is_panoptic else 'model.'
    for key in state_dict.copy().keys():
        if is_panoptic:
            if (
                key.startswith('detr')
                and not key.startswith('class_labels_classifier')
                and not key.startswith('bbox_predictor')
            ):
                val = state_dict.pop(key)
                state_dict['detr.model' + key[4:]] = val
            elif "class_labels_classifier" in key or "bbox_predictor" in key:
                val = state_dict.pop(key)
                state_dict['detr.' + key] = val
            elif key.startswith('bbox_attention') or key.startswith('mask_head'):
                continue
            else:
                val = state_dict.pop(key)
                state_dict[prefix + key] = val
        else:
            if not key.startswith('class_labels_classifier') and not key.startswith('bbox_predictor'):
                val = state_dict.pop(key)
                state_dict[prefix + key] = val
    # finally, create HuggingFace model and load state dict
    model = DetrForSegmentation(config) if is_panoptic else DetrForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()
    # verify our conversion on an image
    format = 'coco_panoptic' if is_panoptic else 'coco_detection'
    processor = DetrImageProcessor(format=format)

    encoding = processor(images=prepare_img(), return_tensors='pt')
    pixel_values = encoding['pixel_values']

    original_outputs = detr(pixel_values)
    outputs = model(pixel_values)

    assert torch.allclose(outputs.logits, original_outputs['pred_logits'], atol=1E-3)
    assert torch.allclose(outputs.pred_boxes, original_outputs['pred_boxes'], atol=1E-3)
    if is_panoptic:
        assert torch.allclose(outputs.pred_masks, original_outputs['pred_masks'], atol=1E-4)
    print('Looks ok!')

    if pytorch_dump_folder_path is not None:
        # Save model and image processor
        logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        # Upload model and image processor to the hub
        logger.info('Uploading PyTorch model and image processor to the hub...')
        model.push_to_hub(f"nielsr/{model_name}")
        processor.push_to_hub(f"nielsr/{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--model_name',
default='detr-resnet-50',
type=str,
choices=['detr-resnet-50', 'detr-resnet-101'],
help='Name of the DETR model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.'
)
parser.add_argument('--push_to_hub', action='store_true', help='Whether to push the model to the hub or not.')
    args = parser.parse_args()
convert_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
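# Example invocation (the script path and output folder are placeholders, not
# from the original file; the flags are the ones defined above):
#   python <path-to-this-script> --model_name detr-resnet-50 \
#       --pytorch_dump_folder_path ./detr-resnet-50-converted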
| 29 |
"""simple docstring"""
from decimal import Decimal, getcontext
from math import ceil, factorial
def pi(precision: int) -> str:
    '''Compute pi to `precision` digits using the Chudnovsky algorithm.'''
    if not isinstance(precision, int):
        raise TypeError("Undefined for non-integers")
    elif precision < 1:
        raise ValueError("Undefined for non-natural numbers")
    getcontext().prec = precision
    num_iterations = ceil(precision / 14)  # each term adds roughly 14 digits
    constant_term = 42_6880 * Decimal(1_0005).sqrt()
    exponential_term = 1
    linear_term = 1359_1409
    partial_sum = Decimal(linear_term)
    for k in range(1, num_iterations):
        multinomial_term = factorial(6 * k) // (factorial(3 * k) * factorial(k) ** 3)
        linear_term += 5_4514_0134
        exponential_term *= -26_2537_4126_4076_8000
        partial_sum += Decimal(multinomial_term * linear_term) / exponential_term
    return str(constant_term / partial_sum)[:-1]
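# The series evaluated above (Chudnovsky brothers, 1988):
#   1/pi = 12 * sum_{k>=0} (-1)^k * (6k)! * (13591409 + 545140134*k)
#          / ((3k)! * (k!)^3 * 640320^(3k + 3/2))
# Here 426880*sqrt(10005) = 640320^(3/2)/12 and -262537412640768000 = (-640320)^3,
# which is why those constants appear in the loop.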
if __name__ == "__main__":
    n = 50
print(F'''The first {n} digits of pi is: {pi(n)}''')
| 105 | 0 |
"""simple docstring"""
import argparse
import os
import torch
from transformers import (
XLNetConfig,
XLNetForQuestionAnswering,
XLNetForSequenceClassification,
XLNetLMHeadModel,
load_tf_weights_in_xlnet,
)
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
GLUE_TASKS_NUM_LABELS = {
"cola": 2,
"mnli": 3,
"mrpc": 2,
"sst-2": 2,
"sts-b": 1,
"qqp": 2,
"qnli": 2,
"rte": 2,
"wnli": 2,
}
logging.set_verbosity_info()
def convert_xlnet_checkpoint_to_pytorch(
    tf_checkpoint_path, xlnet_config_file, pytorch_dump_folder_path, finetuning_task=None
):
    '''Convert a TensorFlow XLNet checkpoint into the PyTorch format.'''
    config = XLNetConfig.from_json_file(xlnet_config_file)

    finetuning_task = finetuning_task.lower() if finetuning_task is not None else ''
    if finetuning_task in GLUE_TASKS_NUM_LABELS:
        print(F"Building PyTorch XLNetForSequenceClassification model from configuration: {config}")
        config.finetuning_task = finetuning_task
        config.num_labels = GLUE_TASKS_NUM_LABELS[finetuning_task]
        model = XLNetForSequenceClassification(config)
    elif "squad" in finetuning_task:
        config.finetuning_task = finetuning_task
        model = XLNetForQuestionAnswering(config)
    else:
        model = XLNetLMHeadModel(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_xlnet(model, config, tf_checkpoint_path)

    # Save pytorch-model
    pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
    pytorch_config_dump_path = os.path.join(pytorch_dump_folder_path, CONFIG_NAME)
    print(F"Save PyTorch model to {os.path.abspath(pytorch_weights_dump_path)}")
    torch.save(model.state_dict(), pytorch_weights_dump_path)
    print(F"Save configuration file to {os.path.abspath(pytorch_config_dump_path)}")
    with open(pytorch_config_dump_path, 'w', encoding='utf-8') as f:
        f.write(config.to_json_string())
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--xlnet_config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained XLNet model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_folder_path",
default=None,
type=str,
required=True,
help="Path to the folder to store the PyTorch model or dataset/vocab.",
)
parser.add_argument(
"--finetuning_task",
default=None,
type=str,
help="Name of a task on which the XLNet TensorFlow model was fine-tuned",
)
    args = parser.parse_args()
print(args)
convert_xlnet_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.xlnet_config_file, args.pytorch_dump_folder_path, args.finetuning_task
)
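    # Example invocation (all paths are placeholders, not from the original file):
    #   python <path-to-this-script> \
    #       --tf_checkpoint_path ./xlnet_model.ckpt \
    #       --xlnet_config_file ./xlnet_config.json \
    #       --pytorch_dump_folder_path ./xlnet-pytorch \
    #       --finetuning_task sts-b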
| 350 |
"""simple docstring"""
import inspect
import unittest
from transformers import SegformerConfig, is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_MAPPING,
SegformerForImageClassification,
SegformerForSemanticSegmentation,
SegformerModel,
)
from transformers.models.segformer.modeling_segformer import SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import SegformerImageProcessor
class SegformerConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, 'hidden_sizes'))
        self.parent.assertTrue(hasattr(config, 'num_attention_heads'))
        self.parent.assertTrue(hasattr(config, 'num_encoder_blocks'))
class SegformerModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=64,
        num_channels=3,
        num_encoder_blocks=4,
        depths=[2, 2, 2, 2],
        sr_ratios=[8, 4, 2, 1],
        hidden_sizes=[16, 32, 64, 1_28],
        downsampling_rates=[1, 4, 8, 16],
        num_attention_heads=[1, 2, 4, 8],
        is_training=True,
        use_labels=True,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_encoder_blocks = num_encoder_blocks
        self.sr_ratios = sr_ratios
        self.depths = depths
        self.hidden_sizes = hidden_sizes
        self.downsampling_rates = downsampling_rates
        self.num_attention_heads = num_attention_heads
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.scope = scope

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return SegformerConfig(
            image_size=self.image_size,
            num_channels=self.num_channels,
            num_encoder_blocks=self.num_encoder_blocks,
            depths=self.depths,
            hidden_sizes=self.hidden_sizes,
            num_attention_heads=self.num_attention_heads,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = SegformerModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        expected_height = expected_width = self.image_size // (self.downsampling_rates[-1] * 2)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.hidden_sizes[-1], expected_height, expected_width)
        )

    def create_and_check_for_image_segmentation(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = SegformerForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4)
        )
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4)
        )
        self.parent.assertGreater(result.loss, 0.0)

    def create_and_check_for_binary_image_segmentation(self, config, pixel_values, labels):
        config.num_labels = 1
        model = SegformerForSemanticSegmentation(config=config)
        model.to(torch_device)
        model.eval()
        labels = torch.randint(0, 1, (self.batch_size, self.image_size, self.image_size)).to(torch_device)
        result = model(pixel_values, labels=labels)
        self.parent.assertGreater(result.loss, 0.0)

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_torch
class SegformerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            SegformerModel,
            SegformerForSemanticSegmentation,
            SegformerForImageClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": SegformerModel,
            "image-classification": SegformerForImageClassification,
            "image-segmentation": SegformerForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )

    fx_compatible = True
    test_head_masking = False
    test_pruning = False
    test_resize_embeddings = False

    def setUp(self):
        self.model_tester = SegformerModelTester(self)
        self.config_tester = SegformerConfigTester(self, config_class=SegformerConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_binary_image_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_binary_image_segmentation(*config_and_inputs)

    def test_image_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_segmentation(*config_and_inputs)

    @unittest.skip('SegFormer does not use inputs_embeds')
    def test_inputs_embeds(self):
        pass

    @unittest.skip('SegFormer does not have get_input_embeddings method and get_output_embeddings methods')
    def test_model_common_attributes(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ['pixel_values']
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = False
            config.return_dict = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions

            expected_num_attentions = sum(self.model_tester.depths)
            self.assertEqual(len(attentions), expected_num_attentions)

            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions
            self.assertEqual(len(attentions), expected_num_attentions)

            # verify the first attentions (first block, first layer)
            expected_seq_len = (self.model_tester.image_size // 4) ** 2
            expected_reduced_seq_len = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2
            self.assertListEqual(
                list(attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len],
            )
            # verify the last attentions (last block, last layer)
            expected_seq_len = (self.model_tester.image_size // 32) ** 2
            expected_reduced_seq_len = (self.model_tester.image_size // (32 * self.model_tester.sr_ratios[-1])) ** 2
            self.assertListEqual(
                list(attentions[-1].shape[-3:]),
                [self.model_tester.num_attention_heads[-1], expected_seq_len, expected_reduced_seq_len],
            )
            out_len = len(outputs)

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(out_len + 1, len(outputs))

            self_attentions = outputs.attentions
            self.assertEqual(len(self_attentions), expected_num_attentions)
            # verify the first attentions (first block, first layer)
            expected_seq_len = (self.model_tester.image_size // 4) ** 2
            expected_reduced_seq_len = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2
            self.assertListEqual(
                list(self_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len],
            )
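    # Note: SegFormer's efficient self-attention downsamples the keys/values by
    # each block's sr_ratio, which is why the expected attention width above is
    # (image_size / (stride * sr_ratio))**2 rather than the full sequence length.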
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states

            expected_num_layers = self.model_tester.num_encoder_blocks
            self.assertEqual(len(hidden_states), expected_num_layers)

            # verify the first hidden states (first block)
            self.assertListEqual(
                list(hidden_states[0].shape[-3:]),
                [
                    self.model_tester.hidden_sizes[0],
                    self.model_tester.image_size // 4,
                    self.model_tester.image_size // 4,
                ],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)
    def test_training(self):
        if not self.model_tester.is_training:
            return

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        for model_class in self.all_model_classes:
            if model_class in get_values(MODEL_MAPPING):
                continue

            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.')
    def test_model_is_small(self):
pass
    @slow
    def test_model_from_pretrained(self):
        for model_name in SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = SegformerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    '''Load the local COCO fixture image used in the integration tests.'''
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
    return image
@require_torch
class SegformerModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_image_segmentation_ade(self):
        # only resize + normalize
        image_processor = SegformerImageProcessor(
            image_scale=(5_12, 5_12), keep_ratio=False, align=False, do_random_crop=False
        )
        model = SegformerForSemanticSegmentation.from_pretrained('nvidia/segformer-b0-finetuned-ade-512-512').to(
            torch_device
        )

        image = prepare_img()
        encoded_inputs = image_processor(images=image, return_tensors='pt')
        pixel_values = encoded_inputs.pixel_values.to(torch_device)

        with torch.no_grad():
            outputs = model(pixel_values)

        expected_shape = torch.Size((1, model.config.num_labels, 1_28, 1_28))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [
                [[-4.6_310, -5.5_232, -6.2_356], [-5.1_921, -6.1_444, -6.5_996], [-5.4_424, -6.2_790, -6.7_574]],
                [[-12.1_391, -13.3_122, -13.9_554], [-12.8_732, -13.9_352, -14.3_563], [-12.9_438, -13.8_226, -14.2_513]],
                [[-12.5_134, -13.4_686, -14.4_915], [-12.8_669, -14.4_343, -14.7_758], [-13.2_523, -14.5_819, -15.0_694]],
            ]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3], expected_slice, atol=1E-4))

    @slow
    def test_inference_image_segmentation_city(self):
        # only resize + normalize
        image_processor = SegformerImageProcessor(
            image_scale=(5_12, 5_12), keep_ratio=False, align=False, do_random_crop=False
        )
        model = SegformerForSemanticSegmentation.from_pretrained(
            'nvidia/segformer-b1-finetuned-cityscapes-1024-1024'
        ).to(torch_device)

        image = prepare_img()
        encoded_inputs = image_processor(images=image, return_tensors='pt')
        pixel_values = encoded_inputs.pixel_values.to(torch_device)

        with torch.no_grad():
            outputs = model(pixel_values)

        expected_shape = torch.Size((1, model.config.num_labels, 1_28, 1_28))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [
                [[-13.5_748, -13.9_111, -12.6_500], [-14.3_500, -15.3_683, -14.2_328], [-14.7_532, -16.0_424, -15.6_087]],
                [[-17.1_651, -15.8_725, -12.9_653], [-17.2_580, -17.3_718, -14.8_223], [-16.6_058, -16.8_783, -16.7_452]],
                [[-3.6_456, -3.0_209, -1.4_203], [-3.0_797, -3.1_959, -2.0_000], [-1.8_757, -1.9_217, -1.6_997]],
            ]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3], expected_slice, atol=1E-1))

    @slow
    def test_post_processing_semantic_segmentation(self):
        # only resize + normalize
        image_processor = SegformerImageProcessor(
            image_scale=(5_12, 5_12), keep_ratio=False, align=False, do_random_crop=False
        )
        model = SegformerForSemanticSegmentation.from_pretrained('nvidia/segformer-b0-finetuned-ade-512-512').to(
            torch_device
        )

        image = prepare_img()
        encoded_inputs = image_processor(images=image, return_tensors='pt')
        pixel_values = encoded_inputs.pixel_values.to(torch_device)

        with torch.no_grad():
            outputs = model(pixel_values)

        outputs.logits = outputs.logits.detach().cpu()

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs, target_sizes=[(5_00, 3_00)])
        expected_shape = torch.Size((5_00, 3_00))
        self.assertEqual(segmentation[0].shape, expected_shape)

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs)
        expected_shape = torch.Size((1_28, 1_28))
        self.assertEqual(segmentation[0].shape, expected_shape)
| 95 | 0 |
def bead_sort(sequence: list) -> list:
    '''Bead ("gravity") sort for lists of non-negative integers.'''
    if any(not isinstance(x, int) or x < 0 for x in sequence):
        raise TypeError('''Sequence must be list of non-negative integers''')
    for _ in range(len(sequence)):
        for i, (rod_upper, rod_lower) in enumerate(zip(sequence, sequence[1:])):
            if rod_upper > rod_lower:
                sequence[i] -= rod_upper - rod_lower
                sequence[i + 1] += rod_upper - rod_lower
    return sequence
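# Each outer pass lets every excess "bead" fall by at most one rod, so
# len(sequence) passes are a conservative bound; e.g. bead_sort([3, 1, 2]) == [1, 2, 3].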
if __name__ == "__main__":
assert bead_sort([5, 4, 3, 2, 1]) == [1, 2, 3, 4, 5]
assert bead_sort([7, 9, 4, 3, 5]) == [3, 4, 5, 7, 9]
| 117 |
import argparse
import torch
from transformers import BertForMaskedLM
if __name__ == "__main__":
    parser = argparse.ArgumentParser(
description=(
'Extraction some layers of the full BertForMaskedLM or RObertaForMaskedLM for Transfer Learned'
' Distillation'
)
)
parser.add_argument('--model_type', default='bert', choices=['bert'])
parser.add_argument('--model_name', default='bert-base-uncased', type=str)
parser.add_argument('--dump_checkpoint', default='serialization_dir/tf_bert-base-uncased_0247911.pth', type=str)
parser.add_argument('--vocab_transform', action='store_true')
    args = parser.parse_args()

    if args.model_type == "bert":
        model = BertForMaskedLM.from_pretrained(args.model_name)
        prefix = 'bert'
    else:
        raise ValueError('args.model_type should be "bert".')

    state_dict = model.state_dict()
    compressed_sd = {}

    # the student key names below follow DistilBERT's parameter naming
    for w in ["word_embeddings", "position_embeddings"]:
        compressed_sd[f'distilbert.embeddings.{w}.weight'] = state_dict[f'{prefix}.embeddings.{w}.weight']
    for w in ["weight", "bias"]:
        compressed_sd[f'distilbert.embeddings.LayerNorm.{w}'] = state_dict[f'{prefix}.embeddings.LayerNorm.{w}']

    std_idx = 0
    for teacher_idx in [0, 2, 4, 7, 9, 11]:
        for w in ["weight", "bias"]:
            compressed_sd[f'distilbert.transformer.layer.{std_idx}.attention.q_lin.{w}'] = state_dict[
                f'{prefix}.encoder.layer.{teacher_idx}.attention.self.query.{w}'
            ]
            compressed_sd[f'distilbert.transformer.layer.{std_idx}.attention.k_lin.{w}'] = state_dict[
                f'{prefix}.encoder.layer.{teacher_idx}.attention.self.key.{w}'
            ]
            compressed_sd[f'distilbert.transformer.layer.{std_idx}.attention.v_lin.{w}'] = state_dict[
                f'{prefix}.encoder.layer.{teacher_idx}.attention.self.value.{w}'
            ]
            compressed_sd[f'distilbert.transformer.layer.{std_idx}.attention.out_lin.{w}'] = state_dict[
                f'{prefix}.encoder.layer.{teacher_idx}.attention.output.dense.{w}'
            ]
            compressed_sd[f'distilbert.transformer.layer.{std_idx}.sa_layer_norm.{w}'] = state_dict[
                f'{prefix}.encoder.layer.{teacher_idx}.attention.output.LayerNorm.{w}'
            ]
            compressed_sd[f'distilbert.transformer.layer.{std_idx}.ffn.lin1.{w}'] = state_dict[
                f'{prefix}.encoder.layer.{teacher_idx}.intermediate.dense.{w}'
            ]
            compressed_sd[f'distilbert.transformer.layer.{std_idx}.ffn.lin2.{w}'] = state_dict[
                f'{prefix}.encoder.layer.{teacher_idx}.output.dense.{w}'
            ]
            compressed_sd[f'distilbert.transformer.layer.{std_idx}.output_layer_norm.{w}'] = state_dict[
                f'{prefix}.encoder.layer.{teacher_idx}.output.LayerNorm.{w}'
            ]
        std_idx += 1

    compressed_sd['vocab_projector.weight'] = state_dict['cls.predictions.decoder.weight']
    compressed_sd['vocab_projector.bias'] = state_dict['cls.predictions.bias']
    if args.vocab_transform:
        for w in ["weight", "bias"]:
            compressed_sd[f'vocab_transform.{w}'] = state_dict[f'cls.predictions.transform.dense.{w}']
            compressed_sd[f'vocab_layer_norm.{w}'] = state_dict[f'cls.predictions.transform.LayerNorm.{w}']
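    # The mapping above copies teacher layers [0, 2, 4, 7, 9, 11], giving the
    # 6-layer student a spread of low-, mid- and high-level layers of the
    # 12-layer teacher.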
print(f'N layers selected for distillation: {std_idx}')
print(f'Number of params transferred for distillation: {len(compressed_sd.keys())}')
print(f'Save transferred checkpoint to {args.dump_checkpoint}.')
torch.save(compressed_sd, args.dump_checkpoint)
| 117 | 1 |
import argparse
import json
import os
import sys
import tempfile
import unittest
from argparse import Namespace
from dataclasses import dataclass, field
from enum import Enum
from pathlib import Path
from typing import List, Literal, Optional
import yaml
from transformers import HfArgumentParser, TrainingArguments
from transformers.hf_argparser import make_choice_type_function, string_to_bool
# Since Python 3.10, we can use the builtin `|` operator for Union types
# See PEP 604: https://peps.python.org/pep-0604
is_python_no_less_than_3_10 = sys.version_info >= (3, 10)
def list_field(default=None, metadata=None):
    return field(default_factory=lambda: default, metadata=metadata)
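# dataclasses reject mutable defaults such as lists, so list-valued fields must
# be declared through default_factory instead of a plain default value.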
@dataclass
class BasicExample:
    foo: int
    bar: float
    baz: str
    flag: bool


@dataclass
class WithDefaultExample:
    foo: int = 42
    baz: str = field(default="toto", metadata={"help": "help message"})


@dataclass
class WithDefaultBoolExample:
    foo: bool = False
    baz: bool = True
    opt: Optional[bool] = None


class BasicEnum(Enum):
    titi = "titi"
    toto = "toto"


class MixedTypeEnum(Enum):
    titi = "titi"
    toto = "toto"
    fourtytwo = 42


@dataclass
class EnumExample:
    foo: BasicEnum = "toto"

    def __post_init__(self):
        self.foo = BasicEnum(self.foo)


@dataclass
class MixedTypeEnumExample:
    foo: MixedTypeEnum = "toto"

    def __post_init__(self):
        self.foo = MixedTypeEnum(self.foo)


@dataclass
class OptionalExample:
    foo: Optional[int] = None
    bar: Optional[float] = field(default=None, metadata={"help": "help message"})
    baz: Optional[str] = None
    ces: Optional[List[str]] = list_field(default=[])
    des: Optional[List[int]] = list_field(default=[])


@dataclass
class ListExample:
    foo_int: List[int] = list_field(default=[])
    bar_int: List[int] = list_field(default=[1, 2, 3])
    foo_str: List[str] = list_field(default=["Hallo", "Bonjour", "Hello"])
    foo_float: List[float] = list_field(default=[0.1, 0.2, 0.3])


@dataclass
class RequiredExample:
    required_list: List[int] = field()
    required_str: str = field()
    required_enum: BasicEnum = field()

    def __post_init__(self):
        self.required_enum = BasicEnum(self.required_enum)


@dataclass
class StringLiteralAnnotationExample:
    foo: int
    required_enum: "BasicEnum" = field()
    opt: "Optional[bool]" = None
    baz: "str" = field(default="toto", metadata={"help": "help message"})
    foo_str: "List[str]" = list_field(default=["Hallo", "Bonjour", "Hello"])


if is_python_no_less_than_3_10:

    @dataclass
    class WithDefaultBoolExamplePep604:
        foo: bool = False
        baz: bool = True
        opt: bool | None = None

    @dataclass
    class OptionalExamplePep604:
        foo: int | None = None
        bar: float | None = field(default=None, metadata={"help": "help message"})
        baz: str | None = None
        ces: list[str] | None = list_field(default=[])
        des: list[int] | None = list_field(default=[])
class HfArgumentParserTest(unittest.TestCase):
    def argparsersEqual(self, a: argparse.ArgumentParser, b: argparse.ArgumentParser):
        """Small helper to check pseudo-equality of parsed arguments on `ArgumentParser` instances."""
        self.assertEqual(len(a._actions), len(b._actions))
        for x, y in zip(a._actions, b._actions):
            xx = {k: v for k, v in vars(x).items() if k != "container"}
            yy = {k: v for k, v in vars(y).items() if k != "container"}

            # Choices with mixed type have custom function as "type"
            # So we need to compare results directly for equality
            if xx.get("choices", None) and yy.get("choices", None):
                for expected_choice in yy["choices"] + xx["choices"]:
                    self.assertEqual(xx["type"](expected_choice), yy["type"](expected_choice))
                del xx["type"], yy["type"]

            self.assertEqual(xx, yy)
    def test_basic(self):
        parser = HfArgumentParser(BasicExample)

        expected = argparse.ArgumentParser()
        expected.add_argument("--foo", type=int, required=True)
        expected.add_argument("--bar", type=float, required=True)
        expected.add_argument("--baz", type=str, required=True)
        expected.add_argument("--flag", type=string_to_bool, default=False, const=True, nargs="?")
        self.argparsersEqual(parser, expected)

        args = ["--foo", "1", "--baz", "quux", "--bar", "0.5"]
        (example,) = parser.parse_args_into_dataclasses(args, look_for_args_file=False)
        self.assertFalse(example.flag)

    def test_with_default(self):
        parser = HfArgumentParser(WithDefaultExample)

        expected = argparse.ArgumentParser()
        expected.add_argument("--foo", default=42, type=int)
        expected.add_argument("--baz", default="toto", type=str, help="help message")
        self.argparsersEqual(parser, expected)

    def test_with_default_bool(self):
        expected = argparse.ArgumentParser()
        expected.add_argument("--foo", type=string_to_bool, default=False, const=True, nargs="?")
        expected.add_argument("--baz", type=string_to_bool, default=True, const=True, nargs="?")
        # A boolean no_* argument always has to come after its "default: True" regular counter-part
        # and its default must be set to False
        expected.add_argument("--no_baz", action="store_false", default=False, dest="baz")
        expected.add_argument("--opt", type=string_to_bool, default=None)

        dataclass_types = [WithDefaultBoolExample]
        if is_python_no_less_than_3_10:
            dataclass_types.append(WithDefaultBoolExamplePep604)

        for dataclass_type in dataclass_types:
            parser = HfArgumentParser(dataclass_type)
            self.argparsersEqual(parser, expected)

            args = parser.parse_args([])
            self.assertEqual(args, Namespace(foo=False, baz=True, opt=None))

            args = parser.parse_args(["--foo", "--no_baz"])
            self.assertEqual(args, Namespace(foo=True, baz=False, opt=None))

            args = parser.parse_args(["--foo", "--baz"])
            self.assertEqual(args, Namespace(foo=True, baz=True, opt=None))

            args = parser.parse_args(["--foo", "True", "--baz", "True", "--opt", "True"])
            self.assertEqual(args, Namespace(foo=True, baz=True, opt=True))

            args = parser.parse_args(["--foo", "False", "--baz", "False", "--opt", "False"])
            self.assertEqual(args, Namespace(foo=False, baz=False, opt=False))

    def test_with_enum(self):
        parser = HfArgumentParser(MixedTypeEnumExample)

        expected = argparse.ArgumentParser()
        expected.add_argument(
            "--foo",
            default="toto",
            choices=["titi", "toto", 42],
            type=make_choice_type_function(["titi", "toto", 42]),
        )
        self.argparsersEqual(parser, expected)

        args = parser.parse_args([])
        self.assertEqual(args.foo, "toto")
        enum_ex = parser.parse_args_into_dataclasses([])[0]
        self.assertEqual(enum_ex.foo, MixedTypeEnum.toto)

        args = parser.parse_args(["--foo", "titi"])
        self.assertEqual(args.foo, "titi")
        enum_ex = parser.parse_args_into_dataclasses(["--foo", "titi"])[0]
        self.assertEqual(enum_ex.foo, MixedTypeEnum.titi)

        args = parser.parse_args(["--foo", "42"])
        self.assertEqual(args.foo, 42)
        enum_ex = parser.parse_args_into_dataclasses(["--foo", "42"])[0]
        self.assertEqual(enum_ex.foo, MixedTypeEnum.fourtytwo)
    def test_with_literal(self):
        @dataclass
        class LiteralExample:
            foo: Literal["titi", "toto", 42] = "toto"

        parser = HfArgumentParser(LiteralExample)

        expected = argparse.ArgumentParser()
        expected.add_argument(
            "--foo",
            default="toto",
            choices=("titi", "toto", 42),
            type=make_choice_type_function(["titi", "toto", 42]),
        )
        self.argparsersEqual(parser, expected)

        args = parser.parse_args([])
        self.assertEqual(args.foo, "toto")

        args = parser.parse_args(["--foo", "titi"])
        self.assertEqual(args.foo, "titi")

        args = parser.parse_args(["--foo", "42"])
        self.assertEqual(args.foo, 42)

    def test_with_list(self):
        parser = HfArgumentParser(ListExample)

        expected = argparse.ArgumentParser()
        expected.add_argument("--foo_int", nargs="+", default=[], type=int)
        expected.add_argument("--bar_int", nargs="+", default=[1, 2, 3], type=int)
        expected.add_argument("--foo_str", nargs="+", default=["Hallo", "Bonjour", "Hello"], type=str)
        expected.add_argument("--foo_float", nargs="+", default=[0.1, 0.2, 0.3], type=float)
        self.argparsersEqual(parser, expected)

        args = parser.parse_args([])
        self.assertEqual(
            args,
            Namespace(foo_int=[], bar_int=[1, 2, 3], foo_str=["Hallo", "Bonjour", "Hello"], foo_float=[0.1, 0.2, 0.3]),
        )

        args = parser.parse_args("--foo_int 1 --bar_int 2 3 --foo_str a b c --foo_float 0.1 0.7".split())
        self.assertEqual(args, Namespace(foo_int=[1], bar_int=[2, 3], foo_str=["a", "b", "c"], foo_float=[0.1, 0.7]))

    def test_with_optional(self):
        expected = argparse.ArgumentParser()
        expected.add_argument("--foo", default=None, type=int)
        expected.add_argument("--bar", default=None, type=float, help="help message")
        expected.add_argument("--baz", default=None, type=str)
        expected.add_argument("--ces", nargs="+", default=[], type=str)
        expected.add_argument("--des", nargs="+", default=[], type=int)

        dataclass_types = [OptionalExample]
        if is_python_no_less_than_3_10:
            dataclass_types.append(OptionalExamplePep604)

        for dataclass_type in dataclass_types:
            parser = HfArgumentParser(dataclass_type)
            self.argparsersEqual(parser, expected)

            args = parser.parse_args([])
            self.assertEqual(args, Namespace(foo=None, bar=None, baz=None, ces=[], des=[]))

            args = parser.parse_args("--foo 12 --bar 3.14 --baz 42 --ces a b c --des 1 2 3".split())
            self.assertEqual(args, Namespace(foo=12, bar=3.14, baz="42", ces=["a", "b", "c"], des=[1, 2, 3]))

    def test_with_required(self):
        parser = HfArgumentParser(RequiredExample)

        expected = argparse.ArgumentParser()
        expected.add_argument("--required_list", nargs="+", type=int, required=True)
        expected.add_argument("--required_str", type=str, required=True)
        expected.add_argument(
            "--required_enum",
            type=make_choice_type_function(["titi", "toto"]),
            choices=["titi", "toto"],
            required=True,
        )
        self.argparsersEqual(parser, expected)

    def test_with_string_literal_annotation(self):
        parser = HfArgumentParser(StringLiteralAnnotationExample)

        expected = argparse.ArgumentParser()
        expected.add_argument("--foo", type=int, required=True)
        expected.add_argument(
            "--required_enum",
            type=make_choice_type_function(["titi", "toto"]),
            choices=["titi", "toto"],
            required=True,
        )
        expected.add_argument("--opt", type=string_to_bool, default=None)
        expected.add_argument("--baz", default="toto", type=str, help="help message")
        expected.add_argument("--foo_str", nargs="+", default=["Hallo", "Bonjour", "Hello"], type=str)
        self.argparsersEqual(parser, expected)
    def test_parse_dict(self):
        parser = HfArgumentParser(BasicExample)

        args_dict = {
            "foo": 12,
            "bar": 3.14,
            "baz": "42",
            "flag": True,
        }

        parsed_args = parser.parse_dict(args_dict)[0]
        args = BasicExample(**args_dict)
        self.assertEqual(parsed_args, args)

    def test_parse_dict_extra_key(self):
        parser = HfArgumentParser(BasicExample)

        args_dict = {
            "foo": 12,
            "bar": 3.14,
            "baz": "42",
            "flag": True,
            "extra": 42,
        }

        self.assertRaises(ValueError, parser.parse_dict, args_dict, allow_extra_keys=False)

    def test_parse_json(self):
        parser = HfArgumentParser(BasicExample)

        args_dict_for_json = {
            "foo": 12,
            "bar": 3.14,
            "baz": "42",
            "flag": True,
        }
        with tempfile.TemporaryDirectory() as tmp_dir:
            temp_local_path = os.path.join(tmp_dir, "temp_json")
            os.mkdir(temp_local_path)
            with open(temp_local_path + ".json", "w+") as f:
                json.dump(args_dict_for_json, f)
            parsed_args = parser.parse_json_file(Path(temp_local_path + ".json"))[0]
            args = BasicExample(**args_dict_for_json)
            self.assertEqual(parsed_args, args)

    def test_parse_yaml(self):
        parser = HfArgumentParser(BasicExample)

        args_dict_for_yaml = {
            "foo": 12,
            "bar": 3.14,
            "baz": "42",
            "flag": True,
        }
        with tempfile.TemporaryDirectory() as tmp_dir:
            temp_local_path = os.path.join(tmp_dir, "temp_yaml")
            os.mkdir(temp_local_path)
            with open(temp_local_path + ".yaml", "w+") as f:
                yaml.dump(args_dict_for_yaml, f)
            parsed_args = parser.parse_yaml_file(Path(temp_local_path + ".yaml"))[0]
            args = BasicExample(**args_dict_for_yaml)
            self.assertEqual(parsed_args, args)

    def test_integration_training_args(self):
        parser = HfArgumentParser(TrainingArguments)
        self.assertIsNotNone(parser)
| 370 |
from collections import Counter
import numpy as np
from sklearn import datasets
from sklearn.model_selection import train_test_split
data = datasets.load_iris()

X = np.array(data['data'])
y = np.array(data['target'])
classes = data['target_names']

X_train, X_test, y_train, y_test = train_test_split(X, y)
def euclidean_distance(a, b) -> float:
    """Euclidean distance between two feature vectors."""
    return np.linalg.norm(np.array(a) - np.array(b))


def classifier(train_data, train_target, classes, point, k=5) -> str:
    """Classify `point` by majority vote among its k nearest neighbours."""
    data = zip(train_data, train_target)
    # List of distances of all points from the point to be classified
    distances = []
    for data_point in data:
        distance = euclidean_distance(data_point[0], point)
        distances.append((distance, data_point[1]))
    # Choosing 'k' points with the least distances.
    votes = [i[1] for i in sorted(distances)[:k]]
    # Most commonly occurring class among them
    # is the class into which the point is classified
    result = Counter(votes).most_common(1)[0][0]
    return classes[result]
if __name__ == "__main__":
print(classifier(X_train, y_train, classes, [4.4, 3.1, 1.3, 1.4]))
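    # A second, purely illustrative query point (the random train/test split
    # makes the neighbour set, and hence the vote, non-deterministic):
    print(classifier(X_train, y_train, classes, [6.7, 3.0, 5.2, 2.3]))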
| 124 | 0 |
import functools
def min_distance_up_bottom(word1: str, word2: str) -> int:
    """Levenshtein (edit) distance via memoized top-down recursion."""
    len_word1 = len(word1)
    len_word2 = len(word2)

    @functools.cache
    def min_distance(index1: int, index2: int) -> int:
        # if first word index is overflow - delete all from the second word
        if index1 >= len_word1:
            return len_word2 - index2
        # if second word index is overflow - delete all from the first word
        if index2 >= len_word2:
            return len_word1 - index1
        diff = int(word1[index1] != word2[index2])  # current letters not identical
        return min(
            1 + min_distance(index1 + 1, index2),
            1 + min_distance(index1, index2 + 1),
            diff + min_distance(index1 + 1, index2 + 1),
        )

    return min_distance(0, 0)
if __name__ == "__main__":
import doctest
doctest.testmod()
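    # Illustrative checks (not in the original file): classic Levenshtein examples
    assert min_distance_up_bottom("kitten", "sitting") == 3
    assert min_distance_up_bottom("", "abc") == 3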
| 337 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
lowerCAmelCase__ : str = {
'configuration_roc_bert': ['ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'RoCBertConfig'],
'tokenization_roc_bert': ['RoCBertTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
pass
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ : List[str] = [
'ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'RoCBertForCausalLM',
'RoCBertForMaskedLM',
'RoCBertForMultipleChoice',
'RoCBertForPreTraining',
'RoCBertForQuestionAnswering',
'RoCBertForSequenceClassification',
'RoCBertForTokenClassification',
'RoCBertLayer',
'RoCBertModel',
'RoCBertPreTrainedModel',
'load_tf_weights_in_roc_bert',
]
if TYPE_CHECKING:
from .configuration_roc_bert import ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RoCBertConfig
from .tokenization_roc_bert import RoCBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        pass  # the fast-tokenizer import was elided upstream; raising here would break the tokenizers-available path
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roc_bert import (
ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
RoCBertForCausalLM,
RoCBertForMaskedLM,
RoCBertForMultipleChoice,
RoCBertForPreTraining,
RoCBertForQuestionAnswering,
RoCBertForSequenceClassification,
RoCBertForTokenClassification,
RoCBertLayer,
RoCBertModel,
RoCBertPreTrainedModel,
load_tf_weights_in_roc_bert,
)
else:
import sys
lowerCAmelCase__ : str = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
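# _LazyModule defers the heavy (torch-dependent) imports until an attribute is
# first accessed, keeping import-time cost low when only configs or tokenizers
# are needed.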
| 98 | 0 |
"""simple docstring"""
def binary_or(a: int, b: int) -> str:
    """Return the bitwise OR of two non-negative integers as a binary string."""
    if a < 0 or b < 0:
        raise ValueError("""the value of both inputs must be positive""")
    a_binary = str(bin(a))[2:]  # remove the leading "0b"
    b_binary = str(bin(b))[2:]
    max_len = max(len(a_binary), len(b_binary))
    return "0b" + "".join(
        str(int("""1""" in (char_a, char_b)))
        for char_a, char_b in zip(a_binary.zfill(max_len), b_binary.zfill(max_len))
    )
if __name__ == "__main__":
import doctest
doctest.testmod()
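    # A quick worked example: 25 = 0b11001, 32 = 0b100000, so OR gives 0b111001
    assert binary_or(25, 32) == "0b111001"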
| 371 |
"""simple docstring"""
def bin_to_octal(bin_string: str) -> str:
    """Convert a binary string to its octal representation."""
    if not all(char in """01""" for char in bin_string):
        raise ValueError("""Non-binary value was passed to the function""")
    if not bin_string:
        raise ValueError("""Empty string was passed to the function""")
    oct_string = """"""
    while len(bin_string) % 3 != 0:
        bin_string = """0""" + bin_string
    bin_string_in_3_list = [
        bin_string[index : index + 3]
        for index in range(len(bin_string))
        if index % 3 == 0
    ]
    for bin_group in bin_string_in_3_list:
        oct_val = 0
        for index, val in enumerate(bin_group):
            oct_val += int(2 ** (2 - index) * int(val))
        oct_string += str(oct_val)
    return oct_string
if __name__ == "__main__":
from doctest import testmod
testmod()
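    # Example for the sketch above (`bin_to_octal` is the name assumed in this cleanup):
    print(bin_to_octal("101010"))  # 52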
| 212 | 0 |
'''simple docstring'''
from __future__ import annotations
import numpy as np
from numpy import float64
from numpy.typing import NDArray
def jacobi_iteration_method(
    coefficient_matrix: NDArray[float64],
    constant_matrix: NDArray[float64],
    init_val: list[float],
    iterations: int,
) -> list[float]:
    """Solve Ax = b iteratively with the Jacobi method."""
    rows1, cols1 = coefficient_matrix.shape
    rows2, cols2 = constant_matrix.shape

    if rows1 != cols1:
        msg = f"Coefficient matrix dimensions must be nxn but received {rows1}x{cols1}"
        raise ValueError(msg)

    if cols2 != 1:
        msg = f"Constant matrix must be nx1 but received {rows2}x{cols2}"
        raise ValueError(msg)

    if rows1 != rows2:
        msg = (
            "Coefficient and constant matrices dimensions must be nxn and nx1 but "
            f"received {rows1}x{cols1} and {rows2}x{cols2}"
        )
        raise ValueError(msg)

    if len(init_val) != rows1:
        msg = (
            "Number of initial values must be equal to number of rows in coefficient "
            f"matrix but received {len(init_val)} and {rows1}"
        )
        raise ValueError(msg)

    if iterations <= 0:
        raise ValueError("Iterations must be at least 1")

    table: NDArray[float64] = np.concatenate(
        (coefficient_matrix, constant_matrix), axis=1
    )
    rows, cols = table.shape

    strictly_diagonally_dominant(table)

    # Iterate the whole matrix for the given number of times
    for _ in range(iterations):
        new_val = []
        for row in range(rows):
            temp = 0
            for col in range(cols):
                if col == row:
                    denom = table[row][col]
                elif col == cols - 1:
                    val = table[row][col]
                else:
                    temp += (-1) * table[row][col] * init_val[col]
            temp = (temp + val) / denom
            new_val.append(temp)
        init_val = new_val

    return [float(i) for i in new_val]


def strictly_diagonally_dominant(table: NDArray[float64]) -> bool:
    """Raise if the coefficient part of the augmented matrix is not strictly diagonally dominant."""
    rows, cols = table.shape

    is_diagonally_dominant = True

    for i in range(rows):
        total = 0
        for j in range(cols - 1):
            if i == j:
                continue
            else:
                total += table[i][j]

        if table[i][i] <= total:
            raise ValueError("Coefficient matrix is not strictly diagonally dominant")

    return is_diagonally_dominant
# Test Cases
if __name__ == "__main__":
import doctest
doctest.testmod()
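    # Example run for the sketch above, assuming a strictly diagonally dominant system:
    coefficient = np.array([[4.0, 1.0, 1.0], [1.0, 5.0, 2.0], [1.0, 2.0, 4.0]])
    constant = np.array([[2.0], [-6.0], [-4.0]])
    print(jacobi_iteration_method(coefficient, constant, [0.5, -0.5, -0.5], 3))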
| 318 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileViTConfig,
MobileViTForImageClassification,
MobileViTForSemanticSegmentation,
MobileViTImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_mobilevit_config(mobilevit_name):
    config = MobileViTConfig()

    # size of the architecture
    if "mobilevit_s" in mobilevit_name:
        config.hidden_sizes = [144, 192, 240]
        config.neck_hidden_sizes = [16, 32, 64, 96, 128, 160, 640]
    elif "mobilevit_xs" in mobilevit_name:
        config.hidden_sizes = [96, 120, 144]
        config.neck_hidden_sizes = [16, 32, 48, 64, 80, 96, 384]
    elif "mobilevit_xxs" in mobilevit_name:
        config.hidden_sizes = [64, 80, 96]
        config.neck_hidden_sizes = [16, 16, 24, 48, 64, 80, 320]
        config.hidden_dropout_prob = 0.05
        config.expand_ratio = 2.0

    if mobilevit_name.startswith("deeplabv3_"):
        config.image_size = 512
        config.output_stride = 16
        config.num_labels = 21
        filename = "pascal-voc-id2label.json"
    else:
        config.num_labels = 1_000
        filename = "imagenet-1k-id2label.json"

    repo_id = "huggingface/label-files"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config
def rename_key(name, base_model=False):
    for i in range(1, 6):
        if f"layer_{i}." in name:
            name = name.replace(f"layer_{i}.", f"encoder.layer.{i - 1}.")

    if "conv_1." in name:
        name = name.replace("conv_1.", "conv_stem.")
    if ".block." in name:
        name = name.replace(".block.", ".")
    if "exp_1x1" in name:
        name = name.replace("exp_1x1", "expand_1x1")
    if "red_1x1" in name:
        name = name.replace("red_1x1", "reduce_1x1")
    if ".local_rep.conv_3x3." in name:
        name = name.replace(".local_rep.conv_3x3.", ".conv_kxk.")
    if ".local_rep.conv_1x1." in name:
        name = name.replace(".local_rep.conv_1x1.", ".conv_1x1.")
    if ".norm." in name:
        name = name.replace(".norm.", ".normalization.")
    if ".conv." in name:
        name = name.replace(".conv.", ".convolution.")
    if ".conv_proj." in name:
        name = name.replace(".conv_proj.", ".conv_projection.")

    for i in range(0, 2):
        for j in range(0, 4):
            if f".{i}.{j}." in name:
                name = name.replace(f".{i}.{j}.", f".{i}.layer.{j}.")

    for i in range(2, 6):
        for j in range(0, 4):
            if f".{i}.{j}." in name:
                name = name.replace(f".{i}.{j}.", f".{i}.")
                if "expand_1x1" in name:
                    name = name.replace("expand_1x1", "downsampling_layer.expand_1x1")
                if "conv_3x3" in name:
                    name = name.replace("conv_3x3", "downsampling_layer.conv_3x3")
                if "reduce_1x1" in name:
                    name = name.replace("reduce_1x1", "downsampling_layer.reduce_1x1")

    for i in range(2, 5):
        if f".global_rep.{i}.weight" in name:
            name = name.replace(f".global_rep.{i}.weight", ".layernorm.weight")
        if f".global_rep.{i}.bias" in name:
            name = name.replace(f".global_rep.{i}.bias", ".layernorm.bias")

    if ".global_rep." in name:
        name = name.replace(".global_rep.", ".transformer.")
    if ".pre_norm_mha.0." in name:
        name = name.replace(".pre_norm_mha.0.", ".layernorm_before.")
    if ".pre_norm_mha.1.out_proj." in name:
        name = name.replace(".pre_norm_mha.1.out_proj.", ".attention.output.dense.")
    if ".pre_norm_ffn.0." in name:
        name = name.replace(".pre_norm_ffn.0.", ".layernorm_after.")
    if ".pre_norm_ffn.1." in name:
        name = name.replace(".pre_norm_ffn.1.", ".intermediate.dense.")
    if ".pre_norm_ffn.4." in name:
        name = name.replace(".pre_norm_ffn.4.", ".output.dense.")
    if ".transformer." in name:
        name = name.replace(".transformer.", ".transformer.layer.")

    if ".aspp_layer." in name:
        name = name.replace(".aspp_layer.", ".")
    if ".aspp_pool." in name:
        name = name.replace(".aspp_pool.", ".")
    if "seg_head." in name:
        name = name.replace("seg_head.", "segmentation_head.")
    if "segmentation_head.classifier.classifier." in name:
        name = name.replace("segmentation_head.classifier.classifier.", "segmentation_head.classifier.")

    if "classifier.fc." in name:
        name = name.replace("classifier.fc.", "classifier.")
    elif (not base_model) and ("segmentation_head." not in name):
        name = "mobilevit." + name

    return name
def convert_state_dict(orig_state_dict, model, base_model=False):
    if base_model:
        model_prefix = ""
    else:
        model_prefix = "mobilevit."

    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)
        if key[:8] == "encoder.":
            key = key[8:]

        if "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[0][6:]) - 1
            transformer_num = int(key_split[3])
            layer = model.get_submodule(f"{model_prefix}encoder.layer.{layer_num}")
            dim = layer.transformer.layer[transformer_num].attention.attention.all_head_size
            prefix = (
                f"{model_prefix}encoder.layer.{layer_num}.transformer.layer.{transformer_num}.attention.attention."
            )
            if "weight" in key:
                orig_state_dict[prefix + "query.weight"] = val[:dim, :]
                orig_state_dict[prefix + "key.weight"] = val[dim : dim * 2, :]
                orig_state_dict[prefix + "value.weight"] = val[-dim:, :]
            else:
                orig_state_dict[prefix + "query.bias"] = val[:dim]
                orig_state_dict[prefix + "key.bias"] = val[dim : dim * 2]
                orig_state_dict[prefix + "value.bias"] = val[-dim:]
        else:
            orig_state_dict[rename_key(key, base_model)] = val

    return orig_state_dict
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_movilevit_checkpoint(mobilevit_name, checkpoint_path, pytorch_dump_folder_path, push_to_hub=False):
    """
    Copy/paste/tweak model's weights to our MobileViT structure.
    """
    config = get_mobilevit_config(mobilevit_name)

    # load original state_dict
    state_dict = torch.load(checkpoint_path, map_location="cpu")

    # load 🤗 model
    if mobilevit_name.startswith("deeplabv3_"):
        model = MobileViTForSemanticSegmentation(config).eval()
    else:
        model = MobileViTForImageClassification(config).eval()

    new_state_dict = convert_state_dict(state_dict, model)
    model.load_state_dict(new_state_dict)

    # Check outputs on an image, prepared by MobileViTImageProcessor
    image_processor = MobileViTImageProcessor(crop_size=config.image_size, size=config.image_size + 32)
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    outputs = model(**encoding)
    logits = outputs.logits
if mobilevit_name.startswith('''deeplabv3_''' ):
assert logits.shape == (1, 21, 32, 32)
if mobilevit_name == "deeplabv3_mobilevit_s":
            expected_logits = torch.tensor(
[
[[6.20_65, 6.12_92, 6.20_70], [6.10_79, 6.12_54, 6.17_47], [6.00_42, 6.10_71, 6.10_34]],
[[-6.92_53, -6.86_53, -7.03_98], [-7.32_18, -7.39_83, -7.36_70], [-7.19_61, -7.24_82, -7.15_69]],
[[-4.47_23, -4.43_48, -4.37_69], [-5.36_29, -5.46_32, -5.45_98], [-5.15_87, -5.34_02, -5.50_59]],
] )
elif mobilevit_name == "deeplabv3_mobilevit_xs":
            expected_logits = torch.tensor(
[
[[5.44_49, 5.57_33, 5.63_14], [5.18_15, 5.39_30, 5.59_63], [5.16_56, 5.43_33, 5.48_53]],
[[-9.44_23, -9.77_66, -9.67_14], [-9.15_81, -9.57_20, -9.55_19], [-9.10_06, -9.64_58, -9.57_03]],
[[-7.77_21, -7.37_16, -7.15_83], [-8.45_99, -8.06_24, -7.79_44], [-8.41_72, -7.83_66, -7.50_25]],
] )
elif mobilevit_name == "deeplabv3_mobilevit_xxs":
            expected_logits = torch.tensor(
[
[[6.98_11, 6.97_43, 7.31_23], [7.17_77, 7.19_31, 7.39_38], [7.56_33, 7.80_50, 7.89_01]],
[[-10.55_36, -10.23_32, -10.29_24], [-10.23_36, -9.86_24, -9.59_64], [-10.88_40, -10.81_58, -10.66_59]],
[[-3.49_38, -3.06_31, -2.86_20], [-3.42_05, -2.81_35, -2.68_75], [-3.41_79, -2.79_45, -2.87_50]],
] )
else:
raise ValueError(F"""Unknown mobilevit_name: {mobilevit_name}""" )
        assert torch.allclose(logits[0, :3, :3, :3], expected_logits, atol=1e-4)
else:
assert logits.shape == (1, 1_000)
if mobilevit_name == "mobilevit_s":
            expected_logits = torch.tensor([-0.9866, 0.2392, -1.1241])
elif mobilevit_name == "mobilevit_xs":
            expected_logits = torch.tensor([-2.4761, -0.9399, -1.9587])
elif mobilevit_name == "mobilevit_xxs":
            expected_logits = torch.tensor([-1.9364, -1.2327, -0.4653])
else:
raise ValueError(F"""Unknown mobilevit_name: {mobilevit_name}""" )
        assert torch.allclose(logits[0, :3], expected_logits, atol=1e-4)
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {mobilevit_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if push_to_hub:
        model_mapping = {
'''mobilevit_s''': '''mobilevit-small''',
'''mobilevit_xs''': '''mobilevit-x-small''',
'''mobilevit_xxs''': '''mobilevit-xx-small''',
'''deeplabv3_mobilevit_s''': '''deeplabv3-mobilevit-small''',
'''deeplabv3_mobilevit_xs''': '''deeplabv3-mobilevit-x-small''',
'''deeplabv3_mobilevit_xxs''': '''deeplabv3-mobilevit-xx-small''',
}
print('''Pushing to the hub...''' )
        model_name = model_mapping[mobilevit_name]
        image_processor.push_to_hub(model_name, organization="apple")
        model.push_to_hub(model_name, organization="apple")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--mobilevit_name''',
default='''mobilevit_s''',
type=str,
help=(
'''Name of the MobileViT model you\'d like to convert. Should be one of \'mobilevit_s\', \'mobilevit_xs\','''
''' \'mobilevit_xxs\', \'deeplabv3_mobilevit_s\', \'deeplabv3_mobilevit_xs\', \'deeplabv3_mobilevit_xxs\'.'''
),
)
parser.add_argument(
'''--checkpoint_path''', required=True, type=str, help='''Path to the original state dict (.pt file).'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', required=True, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
    args = parser.parse_args()
convert_movilevit_checkpoint(
args.mobilevit_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
| 318 | 1 |
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class AltCLIPProcessor(ProcessorMixin):
    """Wraps a CLIP image processor and an XLM-Roberta tokenizer into a single processor."""

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "CLIPImageProcessor"
    tokenizer_class = ("XLMRobertaTokenizer", "XLMRobertaTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
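# Hypothetical usage sketch (the class name above is an assumption of this cleanup;
# requires network access and a pretrained AltCLIP checkpoint):
# processor = AltCLIPProcessor.from_pretrained("BAAI/AltCLIP")
# inputs = processor(text=["a photo of a cat"], images=image, return_tensors="pt")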
| 354 |
'''simple docstring'''
import requests
lowercase__ = "" # <-- Put your OpenWeatherMap appid here!
lowercase__ = "https://api.openweathermap.org/data/2.5/"
def _UpperCamelCase ( SCREAMING_SNAKE_CASE__ = "Chicago" , SCREAMING_SNAKE_CASE__ = APPID ) -> dict:
'''simple docstring'''
return requests.get(URL_BASE + '''weather''' , params=locals() ).json()
def _UpperCamelCase ( SCREAMING_SNAKE_CASE__ = "Kolkata, India" , SCREAMING_SNAKE_CASE__ = APPID ) -> dict:
'''simple docstring'''
return requests.get(URL_BASE + '''forecast''' , params=locals() ).json()
def _UpperCamelCase ( SCREAMING_SNAKE_CASE__ = 55.68 , SCREAMING_SNAKE_CASE__ = 12.57 , SCREAMING_SNAKE_CASE__ = APPID ) -> dict:
'''simple docstring'''
return requests.get(URL_BASE + '''onecall''' , params=locals() ).json()
if __name__ == "__main__":
from pprint import pprint
while True:
        location = input("Enter a location:").strip()
if location:
pprint(current_weather(location))
else:
break
| 83 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

VISUAL_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""uclanlp/visualbert-vqa""": """https://huggingface.co/uclanlp/visualbert-vqa/resolve/main/config.json""",
"""uclanlp/visualbert-vqa-pre""": """https://huggingface.co/uclanlp/visualbert-vqa-pre/resolve/main/config.json""",
"""uclanlp/visualbert-vqa-coco-pre""": (
"""https://huggingface.co/uclanlp/visualbert-vqa-coco-pre/resolve/main/config.json"""
),
"""uclanlp/visualbert-vcr""": """https://huggingface.co/uclanlp/visualbert-vcr/resolve/main/config.json""",
"""uclanlp/visualbert-vcr-pre""": """https://huggingface.co/uclanlp/visualbert-vcr-pre/resolve/main/config.json""",
"""uclanlp/visualbert-vcr-coco-pre""": (
"""https://huggingface.co/uclanlp/visualbert-vcr-coco-pre/resolve/main/config.json"""
),
"""uclanlp/visualbert-nlvr2""": """https://huggingface.co/uclanlp/visualbert-nlvr2/resolve/main/config.json""",
"""uclanlp/visualbert-nlvr2-pre""": """https://huggingface.co/uclanlp/visualbert-nlvr2-pre/resolve/main/config.json""",
"""uclanlp/visualbert-nlvr2-coco-pre""": (
"""https://huggingface.co/uclanlp/visualbert-nlvr2-coco-pre/resolve/main/config.json"""
)
# See all VisualBERT models at https://huggingface.co/models?filter=visual_bert
}
class VisualBertConfig(PretrainedConfig):
    model_type = "visual_bert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        visual_embedding_dim=512,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        bypass_transformer=False,
        special_visual_initialize=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.visual_embedding_dim = visual_embedding_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.bypass_transformer = bypass_transformer
        self.special_visual_initialize = special_visual_initialize
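
# A minimal sanity check of the cleaned-up config class above (added in this edit):
if __name__ == "__main__":
    config = VisualBertConfig()
    print(config.hidden_size, config.visual_embedding_dim)  # 768 512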
| 178 |
import numpy as np
def sigmoid(vector: np.ndarray) -> np.ndarray:
    """Map each element through the logistic function 1 / (1 + exp(-x))."""
    return 1 / (1 + np.exp(-vector))
if __name__ == "__main__":
import doctest
doctest.testmod()
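    # Example: the sigmoid is 0.5 at 0 and bounded in (0, 1)
    print(sigmoid(np.array([-1.0, 0.0, 1.0])))  # [0.26894142 0.5        0.73105858]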
| 95 | 0 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import tensorflow as tf
    from transformers import AutoTokenizer, TFAutoModelForSeq2SeqLM
@require_tf
@require_sentencepiece
@require_tokenizers
class TFMT5ModelIntegrationTest(unittest.TestCase):
    @slow
    def test_small_integration_test(self):
        model = TFAutoModelForSeq2SeqLM.from_pretrained("google/mt5-small")
        tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")

        input_ids = tokenizer("Hello there", return_tensors="tf").input_ids
        labels = tokenizer("Hi I am", return_tensors="tf").input_ids

        loss = model(input_ids, labels=labels).loss
        mtf_score = -tf.math.reduce_mean(loss).numpy()

        EXPECTED_SCORE = -21.228168
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 2e-4)
| 367 |
'''simple docstring'''
import copy
import inspect
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import VideoMAEConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING,
VideoMAEForPreTraining,
VideoMAEForVideoClassification,
VideoMAEModel,
)
from transformers.models.videomae.modeling_videomae import VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from transformers import VideoMAEImageProcessor
class VideoMAEModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=10,
        num_channels=3,
        patch_size=2,
        tubelet_size=2,
        num_frames=2,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        mask_ratio=0.9,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.tubelet_size = tubelet_size
        self.num_frames = num_frames
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.mask_ratio = mask_ratio
        self.scope = scope

        # in VideoMAE, the number of tokens equals num_frames/tubelet_size * num_patches per frame
        self.num_patches_per_frame = (image_size // patch_size) ** 2
        self.seq_length = (num_frames // tubelet_size) * self.num_patches_per_frame

        # use this variable to define bool_masked_pos
        self.num_masks = int(mask_ratio * self.seq_length)
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor(
            [self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size]
        )

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels
    def get_config(self):
        return VideoMAEConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            num_frames=self.num_frames,
            tubelet_size=self.tubelet_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )
    def create_and_check_model(self, config, pixel_values, labels):
        model = VideoMAEModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_for_pretraining(self, config, pixel_values, labels):
        model = VideoMAEForPreTraining(config)
        model.to(torch_device)
        model.eval()
        # important: each video needs to have the same number of masked patches
        # hence we define a single mask, which we then repeat for each example in the batch
        mask = torch.ones((self.num_masks,))
        mask = torch.cat([mask, torch.zeros(self.seq_length - mask.size(0))])
        bool_masked_pos = mask.expand(self.batch_size, -1).bool()

        result = model(pixel_values, bool_masked_pos)
        # model only returns predictions for masked patches
        num_masked_patches = mask.sum().item()
        decoder_num_labels = 3 * self.tubelet_size * self.patch_size**2
        self.parent.assertEqual(result.logits.shape, (self.batch_size, num_masked_patches, decoder_num_labels))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class VideoMAEModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """
    Here we also overwrite some of the tests of test_modeling_common.py, as VideoMAE does not use
    input_ids, inputs_embeds, attention_mask and seq_length.
    """

    all_model_classes = (
        (VideoMAEModel, VideoMAEForPreTraining, VideoMAEForVideoClassification) if is_torch_available() else ()
    )
    pipeline_model_mapping = (
        {"feature-extraction": VideoMAEModel, "video-classification": VideoMAEForVideoClassification}
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False
    def setUp(self):
        self.model_tester = VideoMAEModelTester(self)
        self.config_tester = ConfigTester(self, config_class=VideoMAEConfig, has_text_modality=False, hidden_size=37)
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = copy.deepcopy(inputs_dict)

        if model_class == VideoMAEForPreTraining:
            # important: each video needs to have the same number of masked patches
            # hence we define a single mask, which we then repeat for each example in the batch
            mask = torch.ones((self.model_tester.num_masks,))
            mask = torch.cat([mask, torch.zeros(self.model_tester.seq_length - mask.size(0))])
            bool_masked_pos = mask.expand(self.model_tester.batch_size, -1).bool()
            inputs_dict["bool_masked_pos"] = bool_masked_pos.to(torch_device)

        if return_labels:
            if model_class in [
                *get_values(MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING),
            ]:
                inputs_dict["labels"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )

        return inputs_dict
    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="VideoMAE does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass
    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))
    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = VideoMAEModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
    def test_attention_outputs(self):
        if not self.has_attentions:
            pass
        else:
            config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
            config.return_dict = True

            for model_class in self.all_model_classes:
                num_visible_patches = self.model_tester.seq_length - self.model_tester.num_masks
                seq_len = (
                    num_visible_patches if model_class == VideoMAEForPreTraining else self.model_tester.seq_length
                )

                inputs_dict["output_attentions"] = True
                inputs_dict["output_hidden_states"] = False
                config.return_dict = True
                model = model_class(config)
                model.to(torch_device)
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict, model_class))
                attentions = outputs.attentions
                self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

                # check that output_attentions also work using config
                del inputs_dict["output_attentions"]
                config.output_attentions = True
                model = model_class(config)
                model.to(torch_device)
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict, model_class))
                attentions = outputs.attentions
                self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

                self.assertListEqual(
                    list(attentions[0].shape[-3:]),
                    [self.model_tester.num_attention_heads, seq_len, seq_len],
                )
                out_len = len(outputs)

                # Check attention is always last and order is fine
                inputs_dict["output_attentions"] = True
                inputs_dict["output_hidden_states"] = True
                model = model_class(config)
                model.to(torch_device)
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict, model_class))

                self.assertEqual(out_len + 1, len(outputs))

                self_attentions = outputs.attentions

                self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers)
                self.assertListEqual(
                    list(self_attentions[0].shape[-3:]),
                    [self.model_tester.num_attention_heads, seq_len, seq_len],
                )
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states
            expected_num_layers = self.model_tester.num_hidden_layers + 1
            self.assertEqual(len(hidden_states), expected_num_layers)

            num_visible_patches = self.model_tester.seq_length - self.model_tester.num_masks
            seq_length = num_visible_patches if model_class == VideoMAEForPreTraining else self.model_tester.seq_length

            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [seq_length, self.model_tester.hidden_size],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)
    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass
# We will verify our results on a video of eating spaghetti
def prepare_video():
    video_file = hf_hub_download(
        repo_id="hf-internal-testing/spaghetti-video", filename="eating_spaghetti.npy", repo_type="dataset"
    )
    video = np.load(video_file)
    return list(video)
@require_torch
@require_vision
class VideoMAEModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5])
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_for_video_classification(self):
        model = VideoMAEForVideoClassification.from_pretrained("MCG-NJU/videomae-base-finetuned-kinetics").to(
            torch_device
        )

        image_processor = self.default_image_processor
        video = prepare_video()
        inputs = image_processor(video, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 400))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([0.3669, -0.0688, -0.2421]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_for_pretraining(self):
        model = VideoMAEForPreTraining.from_pretrained("MCG-NJU/videomae-base-short").to(torch_device)

        image_processor = self.default_image_processor
        video = prepare_video()
        inputs = image_processor(video, return_tensors="pt").to(torch_device)

        # add boolean mask, indicating which patches to mask
        local_path = hf_hub_download(repo_id="hf-internal-testing/bool-masked-pos", filename="bool_masked_pos.pt")
        inputs["bool_masked_pos"] = torch.load(local_path)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size([1, 1408, 1536])
        expected_slice = torch.tensor(
            [[0.7994, 0.9612, 0.8508], [0.7401, 0.8958, 0.8302], [0.5862, 0.7468, 0.7325]], device=torch_device
        )
        self.assertEqual(outputs.logits.shape, expected_shape)
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3], expected_slice, atol=1e-4))

        # verify the loss (`config.norm_pix_loss` = `True`)
        expected_loss = torch.tensor([0.5142], device=torch_device)
        self.assertTrue(torch.allclose(outputs.loss, expected_loss, atol=1e-4))

        # verify the loss (`config.norm_pix_loss` = `False`)
        model = VideoMAEForPreTraining.from_pretrained("MCG-NJU/videomae-base-short", norm_pix_loss=False).to(
            torch_device
        )

        with torch.no_grad():
            outputs = model(**inputs)

        expected_loss = torch.tensor(torch.tensor([0.6469]), device=torch_device)
        self.assertTrue(torch.allclose(outputs.loss, expected_loss, atol=1e-4))
| 83 | 0 |
"""simple docstring"""
import sys
from collections import defaultdict
class Heap:
    def __init__(self):
        self.node_position = []

    def get_position(self, vertex):
        return self.node_position[vertex]

    def set_position(self, vertex, pos):
        self.node_position[vertex] = pos

    def top_to_bottom(self, heap, start, size, positions):
        if start > size // 2 - 1:
            return
        else:
            if 2 * start + 2 >= size:
                smallest_child = 2 * start + 1
            else:
                if heap[2 * start + 1] < heap[2 * start + 2]:
                    smallest_child = 2 * start + 1
                else:
                    smallest_child = 2 * start + 2
        if heap[smallest_child] < heap[start]:
            temp, temp1 = heap[smallest_child], positions[smallest_child]
            heap[smallest_child], positions[smallest_child] = (
                heap[start],
                positions[start],
            )
            heap[start], positions[start] = temp, temp1

            temp = self.get_position(positions[smallest_child])
            self.set_position(
                positions[smallest_child], self.get_position(positions[start])
            )
            self.set_position(positions[start], temp)

            self.top_to_bottom(heap, smallest_child, size, positions)

    def bottom_to_top(self, val, index, heap, position):
        temp = position[index]

        while index != 0:
            parent = int((index - 2) / 2) if index % 2 == 0 else int((index - 1) / 2)

            if val < heap[parent]:
                heap[index] = heap[parent]
                position[index] = position[parent]
                self.set_position(position[parent], index)
            else:
                heap[index] = val
                position[index] = temp
                self.set_position(temp, index)
                break
            index = parent
        else:
            heap[0] = val
            position[0] = temp
            self.set_position(temp, 0)

    def heapify(self, heap, positions):
        start = len(heap) // 2 - 1
        for i in range(start, -1, -1):
            self.top_to_bottom(heap, i, len(heap), positions)

    def delete_minimum(self, heap, positions):
        temp = positions[0]
        heap[0] = sys.maxsize
        self.top_to_bottom(heap, 0, len(heap), positions)
        return temp


def prisms_algorithm(adjacency_list):
    heap = Heap()

    visited = [0] * len(adjacency_list)
    nbr_tv = [-1] * len(adjacency_list)  # Neighboring Tree Vertex of selected vertex
    # Minimum Distance of explored vertex with neighboring vertex of partial tree
    # formed in graph
    distance_tv = []  # Heap of Distance of vertices from their neighboring vertex
    positions = []

    for vertex in range(len(adjacency_list)):
        distance_tv.append(sys.maxsize)
        positions.append(vertex)
        heap.node_position.append(vertex)

    tree_edges = []
    visited[0] = 1
    distance_tv[0] = sys.maxsize
    for neighbor, distance in adjacency_list[0]:
        nbr_tv[neighbor] = 0
        distance_tv[neighbor] = distance
    heap.heapify(distance_tv, positions)

    for _ in range(1, len(adjacency_list)):
        vertex = heap.delete_minimum(distance_tv, positions)
        if visited[vertex] == 0:
            tree_edges.append((nbr_tv[vertex], vertex))
            visited[vertex] = 1
            for neighbor, distance in adjacency_list[vertex]:
                if (
                    visited[neighbor] == 0
                    and distance < distance_tv[heap.get_position(neighbor)]
                ):
                    distance_tv[heap.get_position(neighbor)] = distance
                    heap.bottom_to_top(
                        distance, heap.get_position(neighbor), distance_tv, positions
                    )
                    nbr_tv[neighbor] = vertex
    return tree_edges
if __name__ == "__main__": # pragma: no cover
# < --------- Prims Algorithm --------- >
    edges_number = int(input("Enter number of edges: ").strip())
    adjacency_list = defaultdict(list)
    for _ in range(edges_number):
        edge = [int(x) for x in input().strip().split()]
adjacency_list[edge[0]].append([edge[1], edge[2]])
adjacency_list[edge[1]].append([edge[0], edge[2]])
print(prisms_algorithm(adjacency_list))
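    # Example session for the sketch above (hypothetical input):
    #   Enter number of edges: 3
    #   0 1 1
    #   1 2 2
    #   0 2 4
    # expected output: [(0, 1), (1, 2)]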
| 17 |
def generate_large_matrix() -> list[list[int]]:
    """Generate a 1000x1000 grid whose rows and columns are sorted in decreasing order."""
    return [list(range(1000 - i, -1000 - i, -1)) for i in range(1000)]


grid = generate_large_matrix()
test_grids = (
    [[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]],
    [[3, 2], [1, 0]],
    [[7, 7, 6]],
    [[7, 7, 6], [-1, -2, -3]],
    grid,
)


def validate_grid(grid: list[list[int]]) -> None:
    """Validate that the rows and columns of the grid are sorted in decreasing order."""
    assert all(row == sorted(row, reverse=True) for row in grid)
    assert all(list(col) == sorted(col, reverse=True) for col in zip(*grid))


def find_negative_index(array: list[int]) -> int:
    """Find the index of the first negative number with binary search."""
    left = 0
    right = len(array) - 1

    # Edge cases such as no values or all numbers are negative.
    if not array or array[0] < 0:
        return 0

    while right + 1 > left:
        mid = (left + right) // 2
        num = array[mid]

        # Num must be negative and the index must be greater than or equal to 0.
        if num < 0 and array[mid - 1] >= 0:
            return mid

        if num >= 0:
            left = mid + 1
        else:
            right = mid - 1
    # No negative numbers so return the last index of the array + 1 which is the length.
    return len(array)


def count_negatives_binary_search(grid: list[list[int]]) -> int:
    """Count the negatives, narrowing the search bound row by row."""
    total = 0
    bound = len(grid[0])

    for i in range(len(grid)):
        bound = find_negative_index(grid[i][:bound])
        total += bound
    return (len(grid) * len(grid[0])) - total


def count_negatives_brute_force(grid: list[list[int]]) -> int:
    """Count the negatives by checking every value."""
    return len([number for row in grid for number in row if number < 0])


def count_negatives_brute_force_with_break(grid: list[list[int]]) -> int:
    """Count the negatives row by row, breaking at the first negative in each row."""
    total = 0
    for row in grid:
        for i, number in enumerate(row):
            if number < 0:
                total += len(row) - i
                break
    return total


def benchmark() -> None:
    """Benchmark the three counting functions against each other."""
    from timeit import timeit

    print("Running benchmarks")
    setup = (
        "from __main__ import count_negatives_binary_search, "
        "count_negatives_brute_force, count_negatives_brute_force_with_break, grid"
    )
    for func in (
        "count_negatives_binary_search",  # took 0.7727 seconds
        "count_negatives_brute_force_with_break",  # took 4.6505 seconds
        "count_negatives_brute_force",  # took 12.8160 seconds
    ):
        time = timeit(f"{func}(grid=grid)", setup=setup, number=500)
        print(f"{func}() took {time:0.4f} seconds")
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
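    # Quick sanity checks on two of the test grids (function names taken from this file):
    assert count_negatives_binary_search(grid=[[3, 2], [1, 0]]) == 0
    assert count_negatives_binary_search(grid=[[7, 7, 6], [-1, -2, -3]]) == 3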
| 124 | 0 |
"""simple docstring"""
from sklearn.metrics import f1_score
import datasets
_DESCRIPTION = '''
The F1 score is the harmonic mean of the precision and recall. It can be computed with the equation:
F1 = 2 * (precision * recall) / (precision + recall)
'''
_KWARGS_DESCRIPTION = '''
Args:
predictions (`list` of `int`): Predicted labels.
references (`list` of `int`): Ground truth labels.
labels (`list` of `int`): The set of labels to include when `average` is not set to `\'binary\'`, and the order of the labels if `average` is `None`. Labels present in the data can be excluded, for example to calculate a multiclass average ignoring a majority negative class. Labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in `predictions` and `references` are used in sorted order. Defaults to None.
pos_label (`int`): The class to be considered the positive class, in the case where `average` is set to `binary`. Defaults to 1.
average (`string`): This parameter is required for multiclass/multilabel targets. If set to `None`, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `\'binary\'`.
- \'binary\': Only report results for the class specified by `pos_label`. This is applicable only if the classes found in `predictions` and `references` are binary.
- \'micro\': Calculate metrics globally by counting the total true positives, false negatives and false positives.
- \'macro\': Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.
- \'weighted\': Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `\'macro\'` to account for label imbalance. This option can result in an F-score that is not between precision and recall.
- \'samples\': Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).
sample_weight (`list` of `float`): Sample weights Defaults to None.
Returns:
f1 (`float` or `array` of `float`): F1 score or list of f1 scores, depending on the value passed to `average`. Minimum possible value is 0. Maximum possible value is 1. Higher f1 scores are better.
Examples:
Example 1-A simple binary example
>>> f1_metric = datasets.load_metric(\"f1\")
>>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0])
>>> print(results)
{\'f1\': 0.5}
Example 2-The same simple binary example as in Example 1, but with `pos_label` set to `0`.
>>> f1_metric = datasets.load_metric(\"f1\")
>>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], pos_label=0)
>>> print(round(results[\'f1\'], 2))
0.67
Example 3-The same simple binary example as in Example 1, but with `sample_weight` included.
>>> f1_metric = datasets.load_metric(\"f1\")
>>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], sample_weight=[0.9, 0.5, 3.9, 1.2, 0.3])
>>> print(round(results[\'f1\'], 2))
0.35
Example 4-A multiclass example, with different values for the `average` input.
>>> predictions = [0, 2, 1, 0, 0, 1]
>>> references = [0, 1, 2, 0, 1, 2]
>>> results = f1_metric.compute(predictions=predictions, references=references, average=\"macro\")
>>> print(round(results[\'f1\'], 2))
0.27
>>> results = f1_metric.compute(predictions=predictions, references=references, average=\"micro\")
>>> print(round(results[\'f1\'], 2))
0.33
>>> results = f1_metric.compute(predictions=predictions, references=references, average=\"weighted\")
>>> print(round(results[\'f1\'], 2))
0.27
>>> results = f1_metric.compute(predictions=predictions, references=references, average=None)
>>> print(results)
{\'f1\': array([0.8, 0. , 0. ])}
'''
_CITATION = '''
@article{scikit-learn,
title={Scikit-learn: Machine Learning in {P}ython},
author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.
and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.
and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and
Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},
journal={Journal of Machine Learning Research},
volume={12},
pages={2825--2830},
year={2011}
}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class F1(datasets.Metric):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Sequence(datasets.Value("int32" ) ),
"references": datasets.Sequence(datasets.Value("int32" ) ),
}
if self.config_name == "multilabel"
else {
"predictions": datasets.Value("int32" ),
"references": datasets.Value("int32" ),
} ) , reference_urls=["https://scikit-learn.org/stable/modules/generated/sklearn.metrics.f1_score.html"] , )
    def _compute(self, predictions, references, labels=None, pos_label=1, average="binary", sample_weight=None):
        score = f1_score(
            references, predictions, labels=labels, pos_label=pos_label, average=average, sample_weight=sample_weight
        )
        return {"f1": float(score) if score.size == 1 else score}
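
if __name__ == "__main__":
    # A minimal check against scikit-learn directly (example added in this cleanup):
    print(f1_score([0, 1, 0, 1, 0], [0, 0, 1, 1, 0]))  # 0.5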
| 364 |
import pytest
from datasets import inspect_metric, list_metrics, load_metric
@pytest.fixture
def mock_emitted_deprecation_warnings(monkeypatch):
    monkeypatch.setattr("datasets.utils.deprecation_utils._emitted_deprecation_warnings", set())


# Used by list_metrics
@pytest.fixture
def mock_hfh(monkeypatch):
    class MetricMock:
        def __init__(self, metric_id):
            self.id = metric_id

    class HfhMock:
        _metrics = [MetricMock(metric_id) for metric_id in ["accuracy", "mse", "precision", "codeparrot/apps_metric"]]

        def list_metrics(self):
            return self._metrics

    monkeypatch.setattr("datasets.inspect.huggingface_hub", HfhMock())


@pytest.mark.parametrize(
    "func, args", [(load_metric, ("metrics/mse",)), (list_metrics, ()), (inspect_metric, ("metrics/mse", "tmp_path"))]
)
def test_metric_deprecation_warning(func, args, mock_emitted_deprecation_warnings, mock_hfh, tmp_path):
    if "tmp_path" in args:
        args = tuple(arg if arg != "tmp_path" else tmp_path for arg in args)
    with pytest.warns(FutureWarning, match="https://huggingface.co/docs/evaluate"):
        func(*args)
| 42 | 0 |
"""simple docstring"""
import os
import torch
from ..logging import get_logger
from .constants import FSDP_PYTORCH_VERSION, MODEL_NAME, OPTIMIZER_NAME
from .versions import is_torch_version
if is_torch_version('>=', FSDP_PYTORCH_VERSION):
import torch.distributed.checkpoint as dist_cp
from torch.distributed.checkpoint.default_planner import DefaultLoadPlanner, DefaultSavePlanner
from torch.distributed.checkpoint.optimizer import load_sharded_optimizer_state_dict
from torch.distributed.fsdp.fully_sharded_data_parallel import FullyShardedDataParallel as FSDP
from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType
logger = get_logger(__name__)
def save_fsdp_model(fsdp_plugin, accelerator, model, output_dir, model_index=0):
    os.makedirs(output_dir, exist_ok=True)
    with FSDP.state_dict_type(
        model, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config
    ):
        state_dict = model.state_dict()
        if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
            weights_name = f"{MODEL_NAME}.bin" if model_index == 0 else f"{MODEL_NAME}_{model_index}.bin"
            output_model_file = os.path.join(output_dir, weights_name)
            if accelerator.process_index == 0:
                logger.info(f"Saving model to {output_model_file}")
                torch.save(state_dict, output_model_file)
                logger.info(f"Model saved to {output_model_file}")
        elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT:
            weights_name = (
                f"{MODEL_NAME}_rank{accelerator.process_index}.bin"
                if model_index == 0
                else f"{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin"
            )
            output_model_file = os.path.join(output_dir, weights_name)
            logger.info(f"Saving model to {output_model_file}")
            torch.save(state_dict, output_model_file)
            logger.info(f"Model saved to {output_model_file}")
        elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT:
            ckpt_dir = os.path.join(output_dir, f"{MODEL_NAME}_{model_index}")
            os.makedirs(ckpt_dir, exist_ok=True)
            logger.info(f"Saving model to {ckpt_dir}")
            state_dict = {"model": state_dict}
            dist_cp.save_state_dict(
                state_dict=state_dict,
                storage_writer=dist_cp.FileSystemWriter(ckpt_dir),
                planner=DefaultSavePlanner(),
            )
            logger.info(f"Model saved to {ckpt_dir}")
def load_fsdp_model(fsdp_plugin, accelerator, model, input_dir, model_index=0):
    accelerator.wait_for_everyone()
    with FSDP.state_dict_type(
        model, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config
    ):
        if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
            if type(model) != FSDP and accelerator.process_index != 0:
                if not fsdp_plugin.sync_module_states:
                    raise ValueError(
                        "Set the `sync_module_states` flag to `True` so that model states are synced across processes when "
                        "initializing FSDP object"
                    )
                return
            weights_name = f"{MODEL_NAME}.bin" if model_index == 0 else f"{MODEL_NAME}_{model_index}.bin"
            input_model_file = os.path.join(input_dir, weights_name)
            logger.info(f"Loading model from {input_model_file}")
            state_dict = torch.load(input_model_file)
            logger.info(f"Model loaded from {input_model_file}")
        elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT:
            weights_name = (
                f"{MODEL_NAME}_rank{accelerator.process_index}.bin"
                if model_index == 0
                else f"{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin"
            )
            input_model_file = os.path.join(input_dir, weights_name)
            logger.info(f"Loading model from {input_model_file}")
            state_dict = torch.load(input_model_file)
            logger.info(f"Model loaded from {input_model_file}")
        elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT:
            ckpt_dir = (
                os.path.join(input_dir, f"{MODEL_NAME}_{model_index}")
                if f"{MODEL_NAME}" not in input_dir
                else input_dir
            )
            logger.info(f"Loading model from {ckpt_dir}")
            state_dict = {"model": model.state_dict()}
            dist_cp.load_state_dict(
                state_dict=state_dict,
                storage_reader=dist_cp.FileSystemReader(ckpt_dir),
                planner=DefaultLoadPlanner(),
            )
            state_dict = state_dict["model"]
            logger.info(f"Model loaded from {ckpt_dir}")
        model.load_state_dict(state_dict)
def save_fsdp_optimizer(fsdp_plugin, accelerator, optimizer, model, output_dir, optimizer_index=0):
    os.makedirs(output_dir, exist_ok=True)
    with FSDP.state_dict_type(
        model, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config
    ):
        optim_state = FSDP.optim_state_dict(model, optimizer)
        if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
            if accelerator.process_index == 0:
                optim_state_name = (
                    f"{OPTIMIZER_NAME}.bin" if optimizer_index == 0 else f"{OPTIMIZER_NAME}_{optimizer_index}.bin"
                )
                output_optimizer_file = os.path.join(output_dir, optim_state_name)
                logger.info(f"Saving Optimizer state to {output_optimizer_file}")
                torch.save(optim_state, output_optimizer_file)
                logger.info(f"Optimizer state saved in {output_optimizer_file}")
        else:
            ckpt_dir = os.path.join(output_dir, f"{OPTIMIZER_NAME}_{optimizer_index}")
            os.makedirs(ckpt_dir, exist_ok=True)
            logger.info(f"Saving Optimizer state to {ckpt_dir}")
            dist_cp.save_state_dict(
                state_dict={"optimizer": optim_state},
                storage_writer=dist_cp.FileSystemWriter(ckpt_dir),
                planner=DefaultSavePlanner(),
            )
            logger.info(f"Optimizer state saved in {ckpt_dir}")
def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase=0 ) -> Any:
accelerator.wait_for_everyone()
with FSDP.state_dict_type(
__lowerCamelCase , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ):
if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
lowercase__ : Optional[Any] = None
            # below check should work but currently it isn't working (mostly a pytorch issue),
            # in the meantime disabling it at the cost of excess memory usage
# if accelerator.process_index == 0 or not fsdp_plugin.optim_state_dict_config.rank0_only:
lowercase__ : Optional[int] = (
f"""{OPTIMIZER_NAME}.bin""" if optimizer_index == 0 else f"""{OPTIMIZER_NAME}_{optimizer_index}.bin"""
)
lowercase__ : Any = os.path.join(__lowerCamelCase , __lowerCamelCase )
logger.info(f"""Loading Optimizer state from {input_optimizer_file}""" )
lowercase__ : Tuple = torch.load(__lowerCamelCase )
logger.info(f"""Optimizer state loaded from {input_optimizer_file}""" )
else:
lowercase__ : Any = (
os.path.join(__lowerCamelCase , f"""{OPTIMIZER_NAME}_{optimizer_index}""" )
if f"""{OPTIMIZER_NAME}""" not in input_dir
else input_dir
)
logger.info(f"""Loading Optimizer from {ckpt_dir}""" )
lowercase__ : List[str] = load_sharded_optimizer_state_dict(
model_state_dict=model.state_dict() , optimizer_key='''optimizer''' , storage_reader=dist_cp.FileSystemReader(__lowerCamelCase ) , )
lowercase__ : int = optim_state['''optimizer''']
logger.info(f"""Optimizer loaded from {ckpt_dir}""" )
lowercase__ : List[Any] = FSDP.optim_state_dict_to_load(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
optimizer.load_state_dict(__lowerCamelCase )
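# Note: for SHARDED_STATE_DICT the optimizer state loaded above is re-keyed for
# the local FSDP instance via FSDP.optim_state_dict_to_load before it is handed
# to optimizer.load_state_dict, so each rank only materializes its own shard.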
| 16 |
import argparse
import gdown
import numpy as np
import torch
from huggingface_hub import hf_hub_download
from transformers import (
CLIPTokenizer,
CLIPTokenizerFast,
VideoMAEImageProcessor,
XCLIPConfig,
XCLIPModel,
XCLIPProcessor,
XCLIPTextConfig,
XCLIPVisionConfig,
)
def get_xclip_config( model_name , num_frames ):
    text_config = XCLIPTextConfig()

    # derive patch size from model name
    start_idx = model_name.find('patch' )
    patch_size = int(model_name[start_idx + len('patch' ) : start_idx + len('patch' ) + 2] )
    vision_config = XCLIPVisionConfig(patch_size=patch_size , num_frames=num_frames )
if "large" in model_name:
lowerCAmelCase__ : List[str] = 768
lowerCAmelCase__ : Optional[int] = 3_072
lowerCAmelCase__ : Any = 12
lowerCAmelCase__ : Optional[int] = 1_024
lowerCAmelCase__ : List[Any] = 4_096
lowerCAmelCase__ : Optional[Any] = 16
lowerCAmelCase__ : Any = 24
lowerCAmelCase__ : Dict = 768
lowerCAmelCase__ : List[str] = 3_072
if model_name == "xclip-large-patch14-16-frames":
lowerCAmelCase__ : str = 336
lowerCAmelCase__ : Any = XCLIPConfig.from_text_vision_configs(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
if "large" in model_name:
lowerCAmelCase__ : List[Any] = 768
return config
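# Illustration of the patch-size parsing above: for "xclip-base-patch32" the
# two characters following the substring "patch" are "32", so patch_size
# becomes 32; the same trick covers patch14 and patch16 checkpoints.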
def rename_key( name ):
    # text encoder
    if name == "token_embedding.weight":
        name = name.replace('token_embedding.weight' , 'text_model.embeddings.token_embedding.weight' )
    if name == "positional_embedding":
        name = name.replace('positional_embedding' , 'text_model.embeddings.position_embedding.weight' )
    if "ln_1" in name:
        name = name.replace('ln_1' , 'layer_norm1' )
    if "ln_2" in name:
        name = name.replace('ln_2' , 'layer_norm2' )
    if "c_fc" in name:
        name = name.replace('c_fc' , 'fc1' )
    if "c_proj" in name:
        name = name.replace('c_proj' , 'fc2' )
    if name.startswith('transformer.resblocks' ):
        name = name.replace('transformer.resblocks' , 'text_model.encoder.layers' )
    if "attn.out_proj" in name and "message" not in name:
        name = name.replace('attn.out_proj' , 'self_attn.out_proj' )
    if "ln_final" in name:
        name = name.replace('ln_final' , 'text_model.final_layer_norm' )
    # visual encoder
    if name == "visual.class_embedding":
        name = name.replace('visual.class_embedding' , 'vision_model.embeddings.class_embedding' )
    if name == "visual.positional_embedding":
        name = name.replace('visual.positional_embedding' , 'vision_model.embeddings.position_embedding.weight' )
    if name.startswith('visual.transformer.resblocks' ):
        name = name.replace('visual.transformer.resblocks' , 'vision_model.encoder.layers' )
    if "visual.conv1" in name:
        name = name.replace('visual.conv1' , 'vision_model.embeddings.patch_embedding' )
    if "visual.ln_pre" in name:
        name = name.replace('visual.ln_pre' , 'vision_model.pre_layernorm' )
    if "visual.ln_post" in name:
        name = name.replace('visual.ln_post' , 'vision_model.post_layernorm' )
    if "visual.proj" in name:
        name = name.replace('visual.proj' , 'visual_projection.weight' )
    if "text_projection" in name:
        name = name.replace('text_projection' , 'text_projection.weight' )
    # things on top
    if "prompts_visual_proj" in name:
        name = name.replace('prompts_visual_proj' , 'prompts_visual_projection' )
    if "prompts_visual_ln" in name:
        name = name.replace('prompts_visual_ln' , 'prompts_visual_layernorm' )
    # mit
    if name == "mit.positional_embedding":
        name = name.replace('positional' , 'position' )
    if name.startswith('mit.resblocks' ):
        name = name.replace('mit.resblocks' , 'mit.encoder.layers' )
    # prompts generator
    if name.startswith('prompts_generator.norm' ):
        name = name.replace('prompts_generator.norm' , 'prompts_generator.layernorm' )
    return name
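# Example of the renaming above (composing two of the rules):
#   "visual.transformer.resblocks.0.ln_1.weight"
#       -> "vision_model.encoder.layers.0.layer_norm1.weight"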
def convert_state_dict( orig_state_dict , config ):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key )

        if "attn.in_proj" in key:
            key_split = key.split('.' )
            if key.startswith('visual' ):
                layer_num = key_split[3]
                dim = config.vision_config.hidden_size
if "message_attn" in key:
if "weight" in key:
lowerCAmelCase__ : Any = val[
:dim, :
]
lowerCAmelCase__ : Optional[Any] = val[
dim : dim * 2, :
]
lowerCAmelCase__ : int = val[
-dim:, :
]
else:
lowerCAmelCase__ : str = val[
:dim
]
lowerCAmelCase__ : Union[str, Any] = val[
dim : dim * 2
]
lowerCAmelCase__ : Any = val[
-dim:
]
else:
if "weight" in key:
lowerCAmelCase__ : str = val[
:dim, :
]
lowerCAmelCase__ : Tuple = val[
dim : dim * 2, :
]
lowerCAmelCase__ : int = val[
-dim:, :
]
else:
lowerCAmelCase__ : List[Any] = val[:dim]
lowerCAmelCase__ : Optional[Any] = val[
dim : dim * 2
]
lowerCAmelCase__ : int = val[-dim:]
            elif key.startswith('mit' ):
                layer_num = key_split[2]
                dim = config.vision_config.mit_hidden_size
if "weight" in key:
lowerCAmelCase__ : List[Any] = val[:dim, :]
lowerCAmelCase__ : List[Any] = val[dim : dim * 2, :]
lowerCAmelCase__ : Any = val[-dim:, :]
else:
lowerCAmelCase__ : int = val[:dim]
lowerCAmelCase__ : Optional[Any] = val[dim : dim * 2]
lowerCAmelCase__ : List[str] = val[-dim:]
            else:
                layer_num = key_split[2]
                dim = config.text_config.hidden_size
if "weight" in key:
lowerCAmelCase__ : int = val[:dim, :]
lowerCAmelCase__ : List[str] = val[
dim : dim * 2, :
]
lowerCAmelCase__ : Dict = val[-dim:, :]
else:
lowerCAmelCase__ : List[str] = val[:dim]
lowerCAmelCase__ : int = val[
dim : dim * 2
]
lowerCAmelCase__ : Optional[int] = val[-dim:]
        else:
            new_key_name = rename_key(key )
            if new_key_name in ["visual_projection.weight", "text_projection.weight"]:
                val = val.T
            orig_state_dict[new_key_name] = val

    return orig_state_dict
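# For intuition: the in_proj branches above slice a fused QKV projection of
# shape (3 * dim, dim) into query val[:dim, :], key val[dim : dim * 2, :] and
# value val[-dim:, :] (plus the matching 1-D slices for biases), one block per
# attention projection.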
def prepare_video( num_frames ):
    if num_frames == 8:
        filename = 'eating_spaghetti_8_frames.npy'
    elif num_frames == 16:
        filename = 'eating_spaghetti.npy'
    elif num_frames == 32:
        filename = 'eating_spaghetti_32_frames.npy'
    file = hf_hub_download(
        repo_id='hf-internal-testing/spaghetti-video' , filename=filename , repo_type='dataset' , )
    video = np.load(file )
    return list(video )
def convert_xclip_checkpoint( model_name , pytorch_dump_folder_path=None , push_to_hub=False ):
    model_to_url = {
# fully supervised kinetics-400 checkpoints
'xclip-base-patch32': 'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_8.pth',
'xclip-base-patch32-16-frames': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_16.pth'
),
'xclip-base-patch16': 'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_8.pth',
'xclip-base-patch16-16-frames': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_16.pth'
),
'xclip-large-patch14': 'https://drive.google.com/u/0/uc?id=1NUOImq0o5DlQTST17iIP3vG7DgmHQuCx&export=download&confirm=t&uuid=b26caedc-88e2-473e-830a-9d158b653cdb',
'xclip-large-patch14-16-frames': 'https://drive.google.com/u/0/uc?id=1FOYgnJc097OJ4lGwtRCCydQyVPJEOH7d&export=download&confirm=t&uuid=538fa810-e671-4050-b385-9a623f89804f',
# fully supervised kinetics-600 checkpoints
'xclip-base-patch16-kinetics-600': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_8.pth'
),
'xclip-base-patch16-kinetics-600-16-frames': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_16.pth'
),
'xclip-large-patch14-kinetics-600': 'https://drive.google.com/u/0/uc?id=1FV8C1INuM91sLAN4ImjzePLIlpMSihwV&export=download&confirm=t&uuid=141d4977-4a65-44ae-864f-4b0c19f838be',
# few shot
'xclip-base-patch16-hmdb-2-shot': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_2.pth'
),
'xclip-base-patch16-hmdb-4-shot': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_4.pth'
),
'xclip-base-patch16-hmdb-8-shot': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_8.pth'
),
'xclip-base-patch16-hmdb-16-shot': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_16.pth'
),
'xclip-base-patch16-ucf-2-shot': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_2.pth'
),
'xclip-base-patch16-ucf-4-shot': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_4.pth'
),
'xclip-base-patch16-ucf-8-shot': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_8.pth'
),
'xclip-base-patch16-ucf-16-shot': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_16.pth'
),
# zero shot
'xclip-base-patch16-zero-shot': 'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/zero.pth',
}
    checkpoint_url = model_to_url[model_name]
    num_frames = 8
    if "16-frames" in model_name:
        num_frames = 16
    elif "shot" in model_name:
        num_frames = 32

    config = get_xclip_config(model_name , num_frames )
    model = XCLIPModel(config )
    model.eval()

    if "drive" in checkpoint_url:
        output = 'pytorch_model.bin'
        gdown.cached_download(checkpoint_url , output , quiet=False )
        state_dict = torch.load(output , map_location='cpu' )['model']
    else:
        state_dict = torch.hub.load_state_dict_from_url(checkpoint_url )['model']

    state_dict = convert_state_dict(state_dict , config )

    model = XCLIPModel(config )
    missing_keys , unexpected_keys = model.load_state_dict(state_dict , strict=False )
    assert missing_keys == ["text_model.embeddings.position_ids", "vision_model.embeddings.position_ids"]
    model.eval()

    image_size = 336 if model_name == 'xclip-large-patch14-16-frames' else 224
    image_processor = VideoMAEImageProcessor(size=image_size )
    slow_tokenizer = CLIPTokenizer.from_pretrained('openai/clip-vit-base-patch32' )
    fast_tokenizer = CLIPTokenizerFast.from_pretrained('openai/clip-vit-base-patch32' )
    processor = XCLIPProcessor(image_processor=image_processor , tokenizer=fast_tokenizer )

    video = prepare_video(num_frames )
    inputs = processor(
        text=['playing sports', 'eating spaghetti', 'go shopping'] , videos=video , return_tensors='pt' , padding=True )
print('Shape of pixel values:' , inputs.pixel_values.shape )
with torch.no_grad():
        outputs = model(**inputs )
# Verify outputs
    logits_per_video = outputs.logits_per_video
    probs = logits_per_video.softmax(dim=1 )
    print('Probs:' , probs )
    # kinetics-400
    if model_name == "xclip-base-patch32":
        expected_probs = torch.tensor([[0.0019, 0.9951, 0.0030]] )
    elif model_name == "xclip-base-patch32-16-frames":
        expected_probs = torch.tensor([[7.0999e-04, 9.9883e-01, 4.5580e-04]] )
    elif model_name == "xclip-base-patch16":
        expected_probs = torch.tensor([[0.0083, 0.9681, 0.0236]] )
    elif model_name == "xclip-base-patch16-16-frames":
        expected_probs = torch.tensor([[7.6937e-04, 9.9728e-01, 1.9473e-03]] )
    elif model_name == "xclip-large-patch14":
        expected_probs = torch.tensor([[0.0062, 0.9864, 0.0075]] )
    elif model_name == "xclip-large-patch14-16-frames":
        expected_probs = torch.tensor([[3.3877e-04, 9.9937e-01, 2.8888e-04]] )
    # kinetics-600
    elif model_name == "xclip-base-patch16-kinetics-600":
        expected_probs = torch.tensor([[0.0555, 0.8914, 0.0531]] )
    elif model_name == "xclip-base-patch16-kinetics-600-16-frames":
        expected_probs = torch.tensor([[3.8554e-04, 9.9929e-01, 3.2754e-04]] )
    elif model_name == "xclip-large-patch14-kinetics-600":
        expected_probs = torch.tensor([[0.0036, 0.9920, 0.0045]] )
    # few shot
    elif model_name == "xclip-base-patch16-hmdb-2-shot":
        expected_probs = torch.tensor([[7.1890e-06, 9.9994e-01, 5.6559e-05]] )
    elif model_name == "xclip-base-patch16-hmdb-4-shot":
        expected_probs = torch.tensor([[1.0320e-05, 9.9993e-01, 6.2435e-05]] )
    elif model_name == "xclip-base-patch16-hmdb-8-shot":
        expected_probs = torch.tensor([[4.1377e-06, 9.9990e-01, 9.8386e-05]] )
    elif model_name == "xclip-base-patch16-hmdb-16-shot":
        expected_probs = torch.tensor([[4.1347e-05, 9.9962e-01, 3.3411e-04]] )
    elif model_name == "xclip-base-patch16-ucf-2-shot":
        expected_probs = torch.tensor([[8.5857e-05, 9.9928e-01, 6.3291e-04]] )
    elif model_name == "xclip-base-patch16-ucf-4-shot":
        expected_probs = torch.tensor([[8.5857e-05, 9.9928e-01, 6.3291e-04]] )
    elif model_name == "xclip-base-patch16-ucf-8-shot":
        expected_probs = torch.tensor([[0.0027, 0.9904, 0.0070]] )
    elif model_name == "xclip-base-patch16-ucf-16-shot":
        expected_probs = torch.tensor([[9.8219e-04, 9.9593e-01, 3.0863e-03]] )
    # zero shot
    elif model_name == "xclip-base-patch16-zero-shot":
        expected_probs = torch.tensor([[3.5082e-04, 9.9785e-01, 1.7966e-03]] )
    else:
        raise ValueError(F'''Model name {model_name} not supported''' )
    assert torch.allclose(probs , expected_probs , atol=1e-3 )
print('Looks ok!' )
if pytorch_dump_folder_path is not None:
print(F'''Saving model {model_name} to {pytorch_dump_folder_path}''' )
        model.save_pretrained(pytorch_dump_folder_path )
if push_to_hub:
print('Pushing model, processor and slow tokenizer files to the hub...' )
        model.push_to_hub(model_name , organization='nielsr' )
        processor.push_to_hub(model_name , organization='nielsr' )
        slow_tokenizer.push_to_hub(model_name , organization='nielsr' )
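# Example invocation (a sketch; the script filename is an assumption):
#
#   python convert_x_clip_original_pytorch_to_hf.py \
#       --model_name xclip-base-patch32 \
#       --pytorch_dump_folder_path ./xclip-base-patch32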
if __name__ == "__main__":
lowerCamelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""xclip-base-patch32""",
type=str,
help="""Name of the model.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
lowerCamelCase__ = parser.parse_args()
convert_xclip_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub) | 212 | 0 |
import contextlib
import os
import sqlite3
import pytest
from datasets import Dataset, Features, Value
from datasets.io.sql import SqlDatasetReader, SqlDatasetWriter
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases, require_sqlalchemy
def _check_sql_dataset( dataset , expected_features ):
    """simple docstring"""
    assert isinstance(dataset , Dataset )
    assert dataset.num_rows == 4
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_1", "col_2", "col_3"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype
@require_sqlalchemy
@pytest.mark.parametrize("""keep_in_memory""" , [False, True] )
def test_dataset_from_sql_keep_in_memory( keep_in_memory , sqlite_path , tmp_path ):
    """simple docstring"""
    cache_dir = tmp_path / """cache"""
    expected_features = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = SqlDatasetReader(
            """dataset""" , """sqlite:///""" + sqlite_path , cache_dir=cache_dir , keep_in_memory=keep_in_memory ).read()
    _check_sql_dataset(dataset , expected_features )
@require_sqlalchemy
@pytest.mark.parametrize(
"""features""" , [
None,
{"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""},
{"""col_1""": """string""", """col_2""": """string""", """col_3""": """string"""},
{"""col_1""": """int32""", """col_2""": """int32""", """col_3""": """int32"""},
{"""col_1""": """float32""", """col_2""": """float32""", """col_3""": """float32"""},
] , )
def test_dataset_from_sql_features( features , sqlite_path , tmp_path ):
    """simple docstring"""
    cache_dir = tmp_path / """cache"""
    default_expected_features = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype ) for feature, dtype in features.items()} ) if features is not None else None
    )
    dataset = SqlDatasetReader("""dataset""" , """sqlite:///""" + sqlite_path , features=features , cache_dir=cache_dir ).read()
    _check_sql_dataset(dataset , expected_features )
def iter_sql_file( sqlite_path ):
    """simple docstring"""
    with contextlib.closing(sqlite3.connect(sqlite_path ) ) as con:
        cur = con.cursor()
        cur.execute("""SELECT * FROM dataset""" )
        for row in cur:
            yield row
@require_sqlalchemy
def test_dataset_to_sql( sqlite_path , tmp_path ):
    """simple docstring"""
    cache_dir = tmp_path / """cache"""
    output_sqlite_path = os.path.join(tmp_path , """tmp.sql""" )
    dataset = SqlDatasetReader("""dataset""" , """sqlite:///""" + sqlite_path , cache_dir=cache_dir ).read()
    SqlDatasetWriter(dataset , """dataset""" , """sqlite:///""" + output_sqlite_path , num_proc=1 ).write()

    original_rows = iter_sql_file(sqlite_path )
    expected_rows = iter_sql_file(output_sqlite_path )
    for row1, row2 in zip(original_rows , expected_rows ):
        assert row1 == row2
@require_sqlalchemy
def test_dataset_to_sql_multiproc( sqlite_path , tmp_path ):
    """simple docstring"""
    cache_dir = tmp_path / """cache"""
    output_sqlite_path = os.path.join(tmp_path , """tmp.sql""" )
    dataset = SqlDatasetReader("""dataset""" , """sqlite:///""" + sqlite_path , cache_dir=cache_dir ).read()
    SqlDatasetWriter(dataset , """dataset""" , """sqlite:///""" + output_sqlite_path , num_proc=2 ).write()

    original_rows = iter_sql_file(sqlite_path )
    expected_rows = iter_sql_file(output_sqlite_path )
    for row1, row2 in zip(original_rows , expected_rows ):
        assert row1 == row2
@require_sqlalchemy
def test_dataset_to_sql_invalidproc( sqlite_path , tmp_path ):
    """simple docstring"""
    cache_dir = tmp_path / """cache"""
    output_sqlite_path = os.path.join(tmp_path , """tmp.sql""" )
    dataset = SqlDatasetReader("""dataset""" , """sqlite:///""" + sqlite_path , cache_dir=cache_dir ).read()
    with pytest.raises(ValueError ):
        SqlDatasetWriter(dataset , """dataset""" , """sqlite:///""" + output_sqlite_path , num_proc=0 ).write()
| 276 |
from ...utils import (
    OptionalDependencyNotAvailable,
    is_note_seq_available,
    is_torch_available,
    is_transformers_available,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .notes_encoder import SpectrogramNotesEncoder
from .continous_encoder import SpectrogramContEncoder
from .pipeline_spectrogram_diffusion import (
SpectrogramContEncoder,
SpectrogramDiffusionPipeline,
        T5FilmDecoder,
)
try:
if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403
else:
from .midi_utils import MidiProcessor
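# The try/except guards above implement diffusers' optional-dependency pattern:
# when torch/transformers are missing, the dummy objects are re-exported in
# their place, and MidiProcessor is only exposed when note_seq is installed too.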
| 276 | 1 |
from collections import OrderedDict
from typing import List, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
A__ = logging.get_logger(__name__)
A__ = {
'google/efficientnet-b7': 'https://huggingface.co/google/efficientnet-b7/resolve/main/config.json',
}
class EfficientNetConfig( PretrainedConfig ):
    model_type = '''efficientnet'''
    def __init__( self , num_channels = 3 , image_size = 600 , width_coefficient = 2.0 , depth_coefficient = 3.1 , depth_divisor = 8 , kernel_sizes = [3, 3, 5, 3, 5, 5, 3] , in_channels = [32, 16, 24, 40, 80, 112, 192] , out_channels = [16, 24, 40, 80, 112, 192, 320] , depthwise_padding = [] , strides = [1, 2, 2, 2, 1, 2, 1] , num_block_repeats = [1, 2, 2, 3, 3, 4, 1] , expand_ratios = [1, 6, 6, 6, 6, 6, 6] , squeeze_expansion_ratio = 0.25 , hidden_act = "swish" , hidden_dim = 2560 , pooling_type = "mean" , initializer_range = 0.02 , batch_norm_eps = 0.001 , batch_norm_momentum = 0.99 , dropout_rate = 0.5 , drop_connect_rate = 0.2 , **_snake_case , ):
        """simple docstring"""
        super().__init__(**_snake_case )

        self.num_channels = num_channels
        self.image_size = image_size
        self.width_coefficient = width_coefficient
        self.depth_coefficient = depth_coefficient
        self.depth_divisor = depth_divisor
        self.kernel_sizes = kernel_sizes
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.depthwise_padding = depthwise_padding
        self.strides = strides
        self.num_block_repeats = num_block_repeats
        self.expand_ratios = expand_ratios
        self.squeeze_expansion_ratio = squeeze_expansion_ratio
        self.hidden_act = hidden_act
        self.hidden_dim = hidden_dim
        self.pooling_type = pooling_type
        self.initializer_range = initializer_range
        self.batch_norm_eps = batch_norm_eps
        self.batch_norm_momentum = batch_norm_momentum
        self.dropout_rate = dropout_rate
        self.drop_connect_rate = drop_connect_rate
        self.num_hidden_layers = sum(num_block_repeats ) * 4
class EfficientNetOnnxConfig( OnnxConfig ):
    torch_onnx_minimum_version = version.parse('''1.11''' )
    @property
    def inputs( self ):
        """simple docstring"""
        return OrderedDict(
            [
                ("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
            ] )

    @property
    def atol_for_validation( self ):
        """simple docstring"""
        return 1e-5
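# Minimal usage sketch for the config above (treating it like any other
# transformers PretrainedConfig subclass):
#
#   config = EfficientNetConfig(width_coefficient=1.0, depth_coefficient=1.0)
#   assert config.num_hidden_layers == 4 * sum(config.num_block_repeats)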
| 82 |
'''simple docstring'''
import json
import os
from typing import Optional
import numpy as np
from ...feature_extraction_utils import BatchFeature
from ...processing_utils import ProcessorMixin
from ...utils import logging
from ...utils.hub import get_file_from_repo
from ..auto import AutoTokenizer
snake_case_ : List[Any] = logging.get_logger(__name__)
class BarkProcessor( ProcessorMixin ):
    tokenizer_class = """AutoTokenizer"""
    attributes = ["""tokenizer"""]

    preset_shape = {
"""semantic_prompt""": 1,
"""coarse_prompt""": 2,
"""fine_prompt""": 2,
}
    def __init__( self ,tokenizer ,speaker_embeddings=None ):
        '''simple docstring'''
        super().__init__(tokenizer )
        self.speaker_embeddings = speaker_embeddings
    @classmethod
    def from_pretrained( cls ,pretrained_processor_name_or_path ,speaker_embeddings_dict_path="speaker_embeddings_path.json" ,**kwargs ):
        '''simple docstring'''
        if speaker_embeddings_dict_path is not None:
            speaker_embeddings_path = get_file_from_repo(
                pretrained_processor_name_or_path ,speaker_embeddings_dict_path ,subfolder=kwargs.pop('subfolder' ,None ) ,cache_dir=kwargs.pop('cache_dir' ,None ) ,force_download=kwargs.pop('force_download' ,False ) ,proxies=kwargs.pop('proxies' ,None ) ,resume_download=kwargs.pop('resume_download' ,False ) ,local_files_only=kwargs.pop('local_files_only' ,False ) ,use_auth_token=kwargs.pop('use_auth_token' ,None ) ,revision=kwargs.pop('revision' ,None ) ,)
            if speaker_embeddings_path is None:
                logger.warning(
                    F'`{os.path.join(pretrained_processor_name_or_path ,speaker_embeddings_dict_path )}` does not exists\n , no preloaded speaker embeddings will be used - Make sure to provide a correct path to the json\n dictionnary if wanted, otherwise set `speaker_embeddings_dict_path=None`.' )
                speaker_embeddings = None
            else:
                with open(speaker_embeddings_path ) as speaker_embeddings_json:
                    speaker_embeddings = json.load(speaker_embeddings_json )
        else:
            speaker_embeddings = None
        tokenizer = AutoTokenizer.from_pretrained(pretrained_processor_name_or_path ,**kwargs )
        return cls(tokenizer=tokenizer ,speaker_embeddings=speaker_embeddings )
    def save_pretrained( self ,save_directory ,speaker_embeddings_dict_path="speaker_embeddings_path.json" ,speaker_embeddings_directory="speaker_embeddings" ,push_to_hub=False ,**kwargs ,):
        '''simple docstring'''
        if self.speaker_embeddings is not None:
            os.makedirs(os.path.join(save_directory ,speaker_embeddings_directory ,'v2' ) ,exist_ok=True )

            embeddings_dict = {}
            embeddings_dict['repo_or_path'] = save_directory

            for prompt_key in self.speaker_embeddings:
                if prompt_key != "repo_or_path":
                    voice_preset = self._load_voice_preset(prompt_key )

                    tmp_dict = {}
                    for key in self.speaker_embeddings[prompt_key]:
                        np.save(
                            os.path.join(
                                embeddings_dict['repo_or_path'] ,speaker_embeddings_directory ,F'{prompt_key}_{key}' ) ,voice_preset[key] ,allow_pickle=False ,)
                        tmp_dict[key] = os.path.join(speaker_embeddings_directory ,F'{prompt_key}_{key}.npy' )

                    embeddings_dict[prompt_key] = tmp_dict

            with open(os.path.join(save_directory ,speaker_embeddings_dict_path ) ,'w' ) as fp:
                json.dump(embeddings_dict ,fp )

        super().save_pretrained(save_directory ,push_to_hub ,**kwargs )
    def _load_voice_preset( self ,voice_preset = None ,**kwargs ):
        '''simple docstring'''
        voice_preset_paths = self.speaker_embeddings[voice_preset]

        voice_preset_dict = {}
        for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
            if key not in voice_preset_paths:
                raise ValueError(
                    F'Voice preset unrecognized, missing {key} as a key in self.speaker_embeddings[{voice_preset}].' )

            path = get_file_from_repo(
                self.speaker_embeddings.get('repo_or_path' ,'/' ) ,voice_preset_paths[key] ,subfolder=kwargs.pop('subfolder' ,None ) ,cache_dir=kwargs.pop('cache_dir' ,None ) ,force_download=kwargs.pop('force_download' ,False ) ,proxies=kwargs.pop('proxies' ,None ) ,resume_download=kwargs.pop('resume_download' ,False ) ,local_files_only=kwargs.pop('local_files_only' ,False ) ,use_auth_token=kwargs.pop('use_auth_token' ,None ) ,revision=kwargs.pop('revision' ,None ) ,)
            if path is None:
                raise ValueError(
                    F'`{os.path.join(self.speaker_embeddings.get("repo_or_path" ,"/" ) ,voice_preset_paths[key] )}` does not exists\n , no preloaded voice preset will be used - Make sure to provide correct paths to the {voice_preset}\n embeddings.' )

            voice_preset_dict[key] = np.load(path )
        return voice_preset_dict
    def _validate_voice_preset_dict( self ,voice_preset: Optional[dict] = None ):
'''simple docstring'''
for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
if key not in voice_preset:
raise ValueError(F'Voice preset unrecognized, missing {key} as a key.' )
if not isinstance(voice_preset[key] ,np.ndarray ):
raise ValueError(F'{key} voice preset must be a {str(self.preset_shape[key] )}D ndarray.' )
if len(voice_preset[key].shape ) != self.preset_shape[key]:
raise ValueError(F'{key} voice preset must be a {str(self.preset_shape[key] )}D ndarray.' )
    def __call__( self ,text=None ,voice_preset=None ,return_tensors="pt" ,max_length=256 ,add_special_tokens=False ,return_attention_mask=True ,return_token_type_ids=False ,**kwargs ,):
        '''simple docstring'''
        if voice_preset is not None and not isinstance(voice_preset ,dict ):
            if (
                isinstance(voice_preset ,str )
                and self.speaker_embeddings is not None
                and voice_preset in self.speaker_embeddings
            ):
                voice_preset = self._load_voice_preset(voice_preset )

            else:
                if isinstance(voice_preset ,str ) and not voice_preset.endswith('.npz' ):
                    voice_preset = voice_preset + '.npz'

                voice_preset = np.load(voice_preset )

        if voice_preset is not None:
            self._validate_voice_preset_dict(voice_preset ,**kwargs )
            voice_preset = BatchFeature(data=voice_preset ,tensor_type=return_tensors )

        encoded_text = self.tokenizer(
            text ,return_tensors=return_tensors ,padding='max_length' ,max_length=max_length ,return_attention_mask=return_attention_mask ,return_token_type_ids=return_token_type_ids ,add_special_tokens=add_special_tokens ,**kwargs ,)

        if voice_preset is not None:
            encoded_text['history_prompt'] = voice_preset
        return encoded_text
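# Hedged usage sketch for the processor above (checkpoint and preset names are
# assumptions based on the Bark release this mirrors):
#
#   processor = BarkProcessor.from_pretrained("suno/bark-small")
#   inputs = processor("Hello, my dog is cute", voice_preset="v2/en_speaker_6")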
| 83 | 0 |
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
PNDMScheduler,
    StableDiffusionLDM3DPipeline,
    UNet2DConditionModel,
)
from diffusers.utils import nightly, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
enable_full_determinism()
class StableDiffusionLDM3DPipelineFastTests( unittest.TestCase ):
    pipeline_class = StableDiffusionLDM3DPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    def get_dummy_components( self ):
        '''simple docstring'''
        torch.manual_seed(0 )
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , )
        scheduler = DDIMScheduler(
            beta_start=0.00085 , beta_end=0.012 , beta_schedule="""scaled_linear""" , clip_sample=False , set_alpha_to_one=False , )
        torch.manual_seed(0 )
        vae = AutoencoderKL(
            block_out_channels=[32, 64] , in_channels=6 , out_channels=6 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , )
        torch.manual_seed(0 )
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
        text_encoder = CLIPTextModel(text_encoder_config )
        tokenizer = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )

        components = {
            """unet""": unet,
            """scheduler""": scheduler,
            """vae""": vae,
            """text_encoder""": text_encoder,
            """tokenizer""": tokenizer,
            """safety_checker""": None,
            """feature_extractor""": None,
        }
        return components
    def get_dummy_inputs( self , device , seed=0 ):
        '''simple docstring'''
        if str(device ).startswith("""mps""" ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        inputs = {
            """prompt""": """A painting of a squirrel eating a burger""",
            """generator""": generator,
            """num_inference_steps""": 2,
            """guidance_scale""": 6.0,
            """output_type""": """numpy""",
        }
        return inputs
    def test_stable_diffusion_ddim( self ):
        '''simple docstring'''
        device = """cpu"""  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        ldm3d_pipe = StableDiffusionLDM3DPipeline(**components )
        ldm3d_pipe = ldm3d_pipe.to(torch_device )
        ldm3d_pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs(device )
        output = ldm3d_pipe(**inputs )
        rgb , depth = output.rgb, output.depth

        image_slice_rgb = rgb[0, -3:, -3:, -1]
        image_slice_depth = depth[0, -3:, -1]

        assert rgb.shape == (1, 64, 64, 3)
        assert depth.shape == (1, 64, 64)

        expected_slice_rgb = np.array(
            [0.37338176, 0.70247, 0.74203193, 0.51643604, 0.58256793, 0.60932136, 0.4181095, 0.48355877, 0.46535262] )
        expected_slice_depth = np.array([103.46727, 85.812004, 87.849236] )
        assert np.abs(image_slice_rgb.flatten() - expected_slice_rgb ).max() < 1E-2
        assert np.abs(image_slice_depth.flatten() - expected_slice_depth ).max() < 1E-2
    def test_stable_diffusion_prompt_embeds( self ):
        '''simple docstring'''
        components = self.get_dummy_components()
        ldm3d_pipe = StableDiffusionLDM3DPipeline(**components )
        ldm3d_pipe = ldm3d_pipe.to(torch_device )
        ldm3d_pipe.set_progress_bar_config(disable=None )

        inputs = self.get_dummy_inputs(torch_device )
        inputs["""prompt"""] = 3 * [inputs["""prompt"""]]

        # forward
        output = ldm3d_pipe(**inputs )
        rgb_slice_1 , depth_slice_1 = output.rgb, output.depth
        rgb_slice_1 = rgb_slice_1[0, -3:, -3:, -1]
        depth_slice_1 = depth_slice_1[0, -3:, -1]

        inputs = self.get_dummy_inputs(torch_device )
        prompt = 3 * [inputs.pop("""prompt""" )]

        text_inputs = ldm3d_pipe.tokenizer(
            prompt , padding="""max_length""" , max_length=ldm3d_pipe.tokenizer.model_max_length , truncation=True , return_tensors="""pt""" , )
        text_inputs = text_inputs["""input_ids"""].to(torch_device )

        prompt_embeds = ldm3d_pipe.text_encoder(text_inputs )[0]
        inputs["""prompt_embeds"""] = prompt_embeds

        # forward
        output = ldm3d_pipe(**inputs )
        rgb_slice_2 , depth_slice_2 = output.rgb, output.depth
        rgb_slice_2 = rgb_slice_2[0, -3:, -3:, -1]
        depth_slice_2 = depth_slice_2[0, -3:, -1]

        assert np.abs(rgb_slice_1.flatten() - rgb_slice_2.flatten() ).max() < 1E-4
        assert np.abs(depth_slice_1.flatten() - depth_slice_2.flatten() ).max() < 1E-4
    def test_stable_diffusion_negative_prompt( self ):
        '''simple docstring'''
        device = """cpu"""  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components["""scheduler"""] = PNDMScheduler(skip_prk_steps=True )
        ldm3d_pipe = StableDiffusionLDM3DPipeline(**components )
        ldm3d_pipe = ldm3d_pipe.to(device )
        ldm3d_pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs(device )
        negative_prompt = """french fries"""
        output = ldm3d_pipe(**inputs , negative_prompt=negative_prompt )
        rgb , depth = output.rgb, output.depth
        rgb_slice = rgb[0, -3:, -3:, -1]
        depth_slice = depth[0, -3:, -1]

        assert rgb.shape == (1, 64, 64, 3)
        assert depth.shape == (1, 64, 64)

        expected_slice_rgb = np.array(
            [0.37044, 0.71811503, 0.7223251, 0.48603675, 0.5638391, 0.6364948, 0.42833704, 0.4901315, 0.47926217] )
        expected_slice_depth = np.array([107.84738, 84.62802, 89.962135] )
        assert np.abs(rgb_slice.flatten() - expected_slice_rgb ).max() < 1E-2
        assert np.abs(depth_slice.flatten() - expected_slice_depth ).max() < 1E-2
@slow
@require_torch_gpu
class StableDiffusionLDM3DPipelineSlowTests( unittest.TestCase ):
    def tearDown( self ):
        '''simple docstring'''
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def get_inputs( self , device , generator_device="cpu" , dtype=torch.float32 , seed=0 ):
        '''simple docstring'''
        generator = torch.Generator(device=generator_device ).manual_seed(seed )
        latents = np.random.RandomState(seed ).standard_normal((1, 4, 64, 64) )
        latents = torch.from_numpy(latents ).to(device=device , dtype=dtype )
        inputs = {
            """prompt""": """a photograph of an astronaut riding a horse""",
            """latents""": latents,
            """generator""": generator,
            """num_inference_steps""": 3,
            """guidance_scale""": 7.5,
            """output_type""": """numpy""",
        }
        return inputs
    def test_ldm3d_stable_diffusion( self ):
        '''simple docstring'''
        ldm3d_pipe = StableDiffusionLDM3DPipeline.from_pretrained("""Intel/ldm3d""" )
        ldm3d_pipe = ldm3d_pipe.to(torch_device )
        ldm3d_pipe.set_progress_bar_config(disable=None )
        inputs = self.get_inputs(torch_device )
        output = ldm3d_pipe(**inputs )
        rgb , depth = output.rgb, output.depth
        rgb_slice = rgb[0, -3:, -3:, -1].flatten()
        depth_slice = depth[0, -3:, -1].flatten()

        assert rgb.shape == (1, 512, 512, 3)
        assert depth.shape == (1, 512, 512)

        expected_slice_rgb = np.array(
            [0.53805465, 0.56707305, 0.5486515, 0.57012236, 0.5814511, 0.56253487, 0.54843014, 0.55092263, 0.6459706] )
        expected_slice_depth = np.array(
            [0.9263781, 0.6678672, 0.5486515, 0.92202145, 0.67831135, 0.56253487, 0.9241694, 0.7551478, 0.6459706] )
        assert np.abs(rgb_slice - expected_slice_rgb ).max() < 3E-3
        assert np.abs(depth_slice - expected_slice_depth ).max() < 3E-3
@nightly
@require_torch_gpu
class StableDiffusionLDM3DPipelineNightlyTests( unittest.TestCase ):
    def tearDown( self ):
        '''simple docstring'''
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def get_inputs( self , device , generator_device="cpu" , dtype=torch.float32 , seed=0 ):
        '''simple docstring'''
        generator = torch.Generator(device=generator_device ).manual_seed(seed )
        latents = np.random.RandomState(seed ).standard_normal((1, 4, 64, 64) )
        latents = torch.from_numpy(latents ).to(device=device , dtype=dtype )
        inputs = {
            """prompt""": """a photograph of an astronaut riding a horse""",
            """latents""": latents,
            """generator""": generator,
            """num_inference_steps""": 50,
            """guidance_scale""": 7.5,
            """output_type""": """numpy""",
        }
        return inputs
    def test_ldm3d( self ):
        '''simple docstring'''
        ldm3d_pipe = StableDiffusionLDM3DPipeline.from_pretrained("""Intel/ldm3d""" ).to(torch_device )
        ldm3d_pipe.set_progress_bar_config(disable=None )
        inputs = self.get_inputs(torch_device )
        output = ldm3d_pipe(**inputs )
        rgb , depth = output.rgb, output.depth

        expected_rgb_mean = 0.495586
        expected_rgb_std = 0.33795515
        expected_depth_mean = 112.48518
        expected_depth_std = 98.489746
        assert np.abs(expected_rgb_mean - rgb.mean() ) < 1E-3
        assert np.abs(expected_rgb_std - rgb.std() ) < 1E-3
        assert np.abs(expected_depth_mean - depth.mean() ) < 1E-3
        assert np.abs(expected_depth_std - depth.std() ) < 1E-3
    def test_ldm3d_4c( self ):
        '''simple docstring'''
        ldm3d_pipe = StableDiffusionLDM3DPipeline.from_pretrained("""Intel/ldm3d-4c""" ).to(torch_device )
        ldm3d_pipe.set_progress_bar_config(disable=None )
        inputs = self.get_inputs(torch_device )
        output = ldm3d_pipe(**inputs )
        rgb , depth = output.rgb, output.depth

        expected_rgb_mean = 0.4194127
        expected_rgb_std = 0.35375586
        expected_depth_mean = 0.5638502
        expected_depth_std = 0.34686103

        assert rgb.shape == (1, 512, 512, 3)
        assert depth.shape == (1, 512, 512, 1)
        assert np.abs(expected_rgb_mean - rgb.mean() ) < 1E-3
        assert np.abs(expected_rgb_std - rgb.std() ) < 1E-3
        assert np.abs(expected_depth_mean - depth.mean() ) < 1E-3
        assert np.abs(expected_depth_std - depth.std() ) < 1E-3
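# Unlike plain Stable Diffusion, the LDM3D pipeline returns two aligned outputs
# per prompt, an RGB image and a depth map, which is why every test above
# inspects both `output.rgb` and `output.depth`.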
| 347 |
import pytest
import datasets.config
from datasets.utils.info_utils import is_small_dataset
@pytest.mark.parametrize("""dataset_size""" , [None, 400 * 2**20, 600 * 2**20] )
@pytest.mark.parametrize("""input_in_memory_max_size""" , ["""default""", 0, 100 * 2**20, 900 * 2**20] )
def test_is_small_dataset( dataset_size , input_in_memory_max_size , monkeypatch ):
    if input_in_memory_max_size != "default":
        monkeypatch.setattr(datasets.config , """IN_MEMORY_MAX_SIZE""" , input_in_memory_max_size )
    in_memory_max_size = datasets.config.IN_MEMORY_MAX_SIZE
    if input_in_memory_max_size == "default":
        assert in_memory_max_size == 0
    else:
        assert in_memory_max_size == input_in_memory_max_size
    if dataset_size and in_memory_max_size:
        expected = dataset_size < in_memory_max_size
    else:
        expected = False
    result = is_small_dataset(dataset_size )
    assert result == expected
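# The rule exercised above: is_small_dataset(size) is True only when
# 0 < size < datasets.config.IN_MEMORY_MAX_SIZE, so a 400 MiB dataset is
# "small" under a 600 MiB cap and never "small" when the cap is 0 (disabled).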
| 347 | 1 |
"""simple docstring"""
def multiplication_table( number , number_of_terms ):
"""simple docstring"""
return "\n".join(
f'''{number} * {i} = {number * i}''' for i in range(1 , number_of_terms + 1 ) )
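# Example: multiplication_table(number=3, number_of_terms=2) returns
# "3 * 1 = 3\n3 * 2 = 6".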
if __name__ == "__main__":
print(multiplication_table(number=5, number_of_terms=1_0)) | 81 |
'''simple docstring'''
from typing import Callable, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
snake_case_ : Optional[Any] = logging.get_logger(__name__)
snake_case_ : int = {
'microsoft/xprophetnet-large-wiki100-cased': (
'https://huggingface.co/microsoft/xprophetnet-large-wiki100-cased/resolve/main/config.json'
),
}
class XLMProphetNetConfig( PretrainedConfig ):
lowercase__ = """xlm-prophetnet"""
lowercase__ = ["""past_key_values"""]
lowercase__ = {
"""num_attention_heads""": """num_encoder_attention_heads""",
}
    def __init__( self ,activation_dropout: Optional[float] = 0.1 ,activation_function: Optional[Union[str, Callable]] = "gelu" ,vocab_size: Optional[int] = 30522 ,hidden_size: Optional[int] = 1024 ,encoder_ffn_dim: Optional[int] = 4096 ,num_encoder_layers: Optional[int] = 12 ,num_encoder_attention_heads: Optional[int] = 16 ,decoder_ffn_dim: Optional[int] = 4096 ,num_decoder_layers: Optional[int] = 12 ,num_decoder_attention_heads: Optional[int] = 16 ,attention_dropout: Optional[float] = 0.1 ,dropout: Optional[float] = 0.1 ,max_position_embeddings: Optional[int] = 512 ,init_std: Optional[float] = 0.02 ,is_encoder_decoder: Optional[bool] = True ,add_cross_attention: Optional[bool] = True ,decoder_start_token_id: Optional[int] = 0 ,ngram: Optional[int] = 2 ,num_buckets: Optional[int] = 32 ,relative_max_distance: Optional[int] = 128 ,disable_ngram_loss: Optional[bool] = False ,eps: Optional[float] = 0.0 ,use_cache: Optional[bool] = True ,pad_token_id: Optional[int] = 0 ,bos_token_id: Optional[int] = 1 ,eos_token_id: Optional[int] = 2 ,**kwargs ,):
        '''simple docstring'''
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.encoder_ffn_dim = encoder_ffn_dim
        self.num_encoder_layers = num_encoder_layers
        self.num_encoder_attention_heads = num_encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.num_decoder_layers = num_decoder_layers
        self.num_decoder_attention_heads = num_decoder_attention_heads
        self.max_position_embeddings = max_position_embeddings
        self.init_std = init_std  # Normal(0, this parameter)
        self.activation_function = activation_function
        # parameters for xlmprophetnet
        self.ngram = ngram
        self.num_buckets = num_buckets
        self.relative_max_distance = relative_max_distance
        self.disable_ngram_loss = disable_ngram_loss
        self.eps = eps
        # 3 Types of Dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.dropout = dropout
        self.use_cache = use_cache
        super().__init__(
            pad_token_id=pad_token_id ,bos_token_id=bos_token_id ,eos_token_id=eos_token_id ,is_encoder_decoder=is_encoder_decoder ,add_cross_attention=add_cross_attention ,decoder_start_token_id=decoder_start_token_id ,**kwargs ,)
@property
    def num_hidden_layers( self ):
'''simple docstring'''
return self.num_encoder_layers + self.num_decoder_layers
@num_hidden_layers.setter
    def num_hidden_layers( self ,value ):
'''simple docstring'''
raise NotImplementedError(
'This model does not support the setting of `num_hidden_layers`. Please set `num_encoder_layers` and'
' `num_decoder_layers`.' )
| 83 | 0 |
"""simple docstring"""
from __future__ import annotations
from collections.abc import Iterator
class Node:
    """A binary tree node holding an integer value and optional children."""

    def __init__(self, value: int) -> None:
        self.value = value
        self.left: Node | None = None
        self.right: Node | None = None


class BinaryTreeNodeSum:
    """Sums all node values of a binary tree via a depth-first traversal."""

    def __init__(self, tree: Node) -> None:
        self.tree = tree

    def depth_first_search(self, node: Node | None) -> int:
        if node is None:
            return 0
        return node.value + (
            self.depth_first_search(node.left) + self.depth_first_search(node.right)
        )

    def __iter__(self) -> Iterator[int]:
        yield self.depth_first_search(self.tree)
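# Usage sketch: a three-node tree whose values sum to 6.
#
#   root = Node(1)
#   root.left, root.right = Node(2), Node(3)
#   assert next(iter(BinaryTreeNodeSum(root))) == 6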
if __name__ == "__main__":
import doctest
doctest.testmod()
| 351 |
"""simple docstring"""
import argparse
import logging
import sys
from unittest.mock import patch
import run_glue_deebert
from transformers.testing_utils import TestCasePlus, get_gpu_count, require_torch_non_multi_gpu, slow
logging.basicConfig(level=logging.DEBUG)
_a = logging.getLogger()
def __a ( ):
UpperCAmelCase_ : Tuple = argparse.ArgumentParser()
parser.add_argument("-f" )
UpperCAmelCase_ : Dict = parser.parse_args()
return args.f
class DeeBertTests(TestCasePlus ):
    '''simple docstring'''
    def setUp( self ):
        """simple docstring"""
        stream_handler = logging.StreamHandler(sys.stdout )
        logger.addHandler(stream_handler )
    def run_and_check( self , args ):
        """simple docstring"""
        n_gpu = get_gpu_count()

        if n_gpu > 1:
            pass
            # XXX: doesn't quite work with n_gpu > 1 https://github.com/huggingface/transformers/issues/10560
            # script = f"{self.examples_dir_str}/research_projects/deebert/run_glue_deebert.py"
            # distributed_args = f"-m torch.distributed.launch --nproc_per_node={n_gpu} {script}".split()
            # cmd = [sys.executable] + distributed_args + args
            # execute_subprocess_async(cmd, env=self.get_env())
            # XXX: test the results - need to save them first into .json file
        else:
            args.insert(0 , "run_glue_deebert.py" )
            with patch.object(sys , "argv" , args ):
                result = run_glue_deebert.main()
                for value in result.values():
                    self.assertGreaterEqual(value , 0.666 )
@slow
@require_torch_non_multi_gpu
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : Optional[Any] = "\n --model_type roberta\n --model_name_or_path roberta-base\n --task_name MRPC\n --do_train\n --do_eval\n --do_lower_case\n --data_dir ./tests/fixtures/tests_samples/MRPC/\n --max_seq_length 128\n --per_gpu_eval_batch_size=1\n --per_gpu_train_batch_size=8\n --learning_rate 2e-4\n --num_train_epochs 3\n --overwrite_output_dir\n --seed 42\n --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --plot_data_dir ./examples/deebert/results/\n --save_steps 0\n --overwrite_cache\n --eval_after_first_stage\n ".split()
self.run_and_check(lowercase_ )
UpperCAmelCase_ : Optional[Any] = "\n --model_type roberta\n --model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --task_name MRPC\n --do_eval\n --do_lower_case\n --data_dir ./tests/fixtures/tests_samples/MRPC/\n --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --plot_data_dir ./examples/deebert/results/\n --max_seq_length 128\n --eval_each_highway\n --eval_highway\n --overwrite_cache\n --per_gpu_eval_batch_size=1\n ".split()
self.run_and_check(lowercase_ )
UpperCAmelCase_ : Dict = "\n --model_type roberta\n --model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --task_name MRPC\n --do_eval\n --do_lower_case\n --data_dir ./tests/fixtures/tests_samples/MRPC/\n --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --plot_data_dir ./examples/deebert/results/\n --max_seq_length 128\n --early_exit_entropy 0.1\n --eval_highway\n --overwrite_cache\n --per_gpu_eval_batch_size=1\n ".split()
self.run_and_check(lowercase_ )
| 23 | 0 |
import os
import pickle
import unittest
from transformers import AutoTokenizer
from transformers.models.bert.tokenization_bert import BertTokenizer
from transformers.models.bert_japanese.tokenization_bert_japanese import (
VOCAB_FILES_NAMES,
BertJapaneseTokenizer,
CharacterTokenizer,
JumanppTokenizer,
MecabTokenizer,
SudachiTokenizer,
WordpieceTokenizer,
)
from transformers.testing_utils import custom_tokenizers, require_jumanpp, require_sudachi
from ...test_tokenization_common import TokenizerTesterMixin
@custom_tokenizers
class BertJapaneseTokenizationTest( TokenizerTesterMixin , unittest.TestCase ):
    """simple docstring"""

    tokenizer_class = BertJapaneseTokenizer
    test_rust_tokenizer = False
    space_between_special_tokens = True
    def setUp( self ):
super().setUp()
SCREAMING_SNAKE_CASE_: Dict = [
"[UNK]",
"[CLS]",
"[SEP]",
"こんにちは",
"こん",
"にちは",
"ばんは",
"##こん",
"##にちは",
"##ばんは",
"世界",
"##世界",
"、",
"##、",
"。",
"##。",
]
SCREAMING_SNAKE_CASE_: Tuple = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"])
with open(self.vocab_file , "w" , encoding="utf-8") as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
    def get_input_output_texts( self , tokenizer ):
        input_text = "こんにちは、世界。 \nこんばんは、世界。"
        output_text = "こんにちは 、 世界 。 こんばんは 、 世界 。"
        return input_text, output_text
    def get_clean_sequence( self , tokenizer ):
        input_text , output_text = self.get_input_output_texts(tokenizer )
        ids = tokenizer.encode(output_text , add_special_tokens=False )
        text = tokenizer.decode(ids , clean_up_tokenization_spaces=False )
        return text, ids
def _SCREAMING_SNAKE_CASE ( self : List[Any]):
pass # TODO add if relevant
def _SCREAMING_SNAKE_CASE ( self : Dict):
pass # TODO add if relevant
def _SCREAMING_SNAKE_CASE ( self : Optional[int]):
pass # TODO add if relevant
    def test_full_tokenizer( self ):
        tokenizer = self.tokenizer_class(self.vocab_file )

        tokens = tokenizer.tokenize("こんにちは、世界。\nこんばんは、世界。" )
        self.assertListEqual(tokens , ["こんにちは", "、", "世界", "。", "こん", "##ばんは", "、", "世界", "。"] )
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens ) , [3, 12, 10, 14, 4, 9, 12, 10, 14] )
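    # Note on the expectations above: WordPiece marks non-initial subwords with
    # "##", so "こんばんは" is split into "こん" + "##ばんは" while "こんにちは"
    # is a single vocabulary entry.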
    def test_pickle_mecab_tokenizer( self ):
SCREAMING_SNAKE_CASE_: int = self.tokenizer_class(self.vocab_file , word_tokenizer_type="mecab")
self.assertIsNotNone(lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Dict = "こんにちは、世界。\nこんばんは、世界。"
SCREAMING_SNAKE_CASE_: Optional[int] = tokenizer.tokenize(lowerCAmelCase__)
self.assertListEqual(lowerCAmelCase__ , ["こんにちは", "、", "世界", "。", "こん", "##ばんは", "、", "世界", "。"])
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCAmelCase__) , [3, 12, 10, 14, 4, 9, 12, 10, 14])
SCREAMING_SNAKE_CASE_: int = os.path.join(self.tmpdirname , "tokenizer.bin")
with open(lowerCAmelCase__ , "wb") as handle:
pickle.dump(lowerCAmelCase__ , lowerCAmelCase__)
with open(lowerCAmelCase__ , "rb") as handle:
SCREAMING_SNAKE_CASE_: List[Any] = pickle.load(lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Optional[Any] = tokenizer_new.tokenize(lowerCAmelCase__)
self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__)
def _SCREAMING_SNAKE_CASE ( self : Optional[Any]):
SCREAMING_SNAKE_CASE_: Union[str, Any] = MecabTokenizer(mecab_dic="ipadic")
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 ") , ["アップルストア", "で", "iPhone", "8", "が", "発売", "さ", "れ", "た", "。"] , )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any]):
try:
SCREAMING_SNAKE_CASE_: List[str] = MecabTokenizer(mecab_dic="unidic_lite")
except ModuleNotFoundError:
return
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 ") , ["アップル", "ストア", "で", "iPhone", "8", "が", "発売", "さ", "れ", "た", "。"] , )
def _SCREAMING_SNAKE_CASE ( self : str):
try:
SCREAMING_SNAKE_CASE_: Tuple = MecabTokenizer(mecab_dic="unidic")
except ModuleNotFoundError:
return
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 ") , ["アップル", "ストア", "で", "iPhone", "8", "が", "発売", "さ", "れ", "た", "。"] , )
def _SCREAMING_SNAKE_CASE ( self : Tuple):
SCREAMING_SNAKE_CASE_: Tuple = MecabTokenizer(do_lower_case=lowerCAmelCase__ , mecab_dic="ipadic")
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 ") , ["アップルストア", "で", "iphone", "8", "が", "発売", "さ", "れ", "た", "。"] , )
def _SCREAMING_SNAKE_CASE ( self : int):
try:
SCREAMING_SNAKE_CASE_: int = MecabTokenizer(
do_lower_case=lowerCAmelCase__ , normalize_text=lowerCAmelCase__ , mecab_option="-d /usr/local/lib/mecab/dic/jumandic")
except RuntimeError:
# if dict doesn't exist in the system, previous code raises this error.
return
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 ") , ["アップルストア", "で", "iPhone", "8", "が", "発売", "さ", "れた", "\u3000", "。"] , )
def _SCREAMING_SNAKE_CASE ( self : List[Any]):
SCREAMING_SNAKE_CASE_: Dict = MecabTokenizer(normalize_text=lowerCAmelCase__ , mecab_dic="ipadic")
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 ") , ["アップルストア", "で", "iPhone", "8", "が", "発売", "さ", "れ", "た", " ", "。"] , )
@require_sudachi
def _SCREAMING_SNAKE_CASE ( self : Dict):
SCREAMING_SNAKE_CASE_: Any = self.tokenizer_class(self.vocab_file , word_tokenizer_type="sudachi")
self.assertIsNotNone(lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: List[str] = "こんにちは、世界。\nこんばんは、世界。"
SCREAMING_SNAKE_CASE_: Any = tokenizer.tokenize(lowerCAmelCase__)
self.assertListEqual(lowerCAmelCase__ , ["こんにちは", "、", "世界", "。", "こん", "##ばんは", "、", "世界", "。"])
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCAmelCase__) , [3, 12, 10, 14, 4, 9, 12, 10, 14])
SCREAMING_SNAKE_CASE_: Tuple = os.path.join(self.tmpdirname , "tokenizer.bin")
with open(lowerCAmelCase__ , "wb") as handle:
pickle.dump(lowerCAmelCase__ , lowerCAmelCase__)
with open(lowerCAmelCase__ , "rb") as handle:
SCREAMING_SNAKE_CASE_: str = pickle.load(lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: str = tokenizer_new.tokenize(lowerCAmelCase__)
self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__)
@require_sudachi
def _SCREAMING_SNAKE_CASE ( self : List[str]):
SCREAMING_SNAKE_CASE_: Optional[int] = SudachiTokenizer(sudachi_dict_type="core")
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 ") , [" ", "\t", "アップル", "ストア", "で", "iPhone", "8", " ", "が", " ", " ", "\n ", "発売", "さ", "れ", "た", " ", "。", " ", " "] , )
@require_sudachi
def _SCREAMING_SNAKE_CASE ( self : Any):
SCREAMING_SNAKE_CASE_: Union[str, Any] = SudachiTokenizer(sudachi_dict_type="core" , sudachi_split_mode="A")
self.assertListEqual(tokenizer.tokenize("外国人参政権") , ["外国", "人", "参政", "権"])
@require_sudachi
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]):
SCREAMING_SNAKE_CASE_: Tuple = SudachiTokenizer(sudachi_dict_type="core" , sudachi_split_mode="B")
self.assertListEqual(tokenizer.tokenize("外国人参政権") , ["外国人", "参政権"])
@require_sudachi
def _SCREAMING_SNAKE_CASE ( self : Optional[int]):
SCREAMING_SNAKE_CASE_: Union[str, Any] = SudachiTokenizer(sudachi_dict_type="core" , sudachi_split_mode="C")
self.assertListEqual(tokenizer.tokenize("外国人参政権") , ["外国人参政権"])
@require_sudachi
def _SCREAMING_SNAKE_CASE ( self : Dict):
SCREAMING_SNAKE_CASE_: Optional[Any] = SudachiTokenizer(do_lower_case=lowerCAmelCase__ , sudachi_dict_type="core")
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 ") , [" ", "\t", "アップル", "ストア", "で", "iphone", "8", " ", "が", " ", " ", "\n ", "発売", "さ", "れ", "た", " ", "。", " ", " "] , )
@require_sudachi
def _SCREAMING_SNAKE_CASE ( self : Any):
SCREAMING_SNAKE_CASE_: str = SudachiTokenizer(normalize_text=lowerCAmelCase__ , sudachi_dict_type="core")
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 ") , [" ", "\t", "アップル", "ストア", "で", "iPhone", "8", " ", "が", " ", " ", "\n ", "発売", "さ", "れ", "た", "\u3000", "。", " ", " "] , )
@require_sudachi
def _SCREAMING_SNAKE_CASE ( self : List[str]):
SCREAMING_SNAKE_CASE_: Optional[Any] = SudachiTokenizer(trim_whitespace=lowerCAmelCase__ , sudachi_dict_type="core")
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 ") , ["アップル", "ストア", "で", "iPhone", "8", "が", "発売", "さ", "れ", "た", "。"] , )
@require_jumanpp
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]):
        tokenizer = self.tokenizer_class(self.vocab_file , word_tokenizer_type="jumanpp")
        self.assertIsNotNone(tokenizer)
        text = "こんにちは、世界。\nこんばんは、世界。"
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens , ["こんにちは", "、", "世界", "。", "こん", "##ばんは", "、", "世界", "。"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens) , [3, 12, 10, 14, 4, 9, 12, 10, 14])
        filename = os.path.join(self.tmpdirname , "tokenizer.bin")
        with open(filename , "wb") as handle:
            pickle.dump(tokenizer , handle)
        with open(filename , "rb") as handle:
            tokenizer_new = pickle.load(handle)
        tokens_loaded = tokenizer_new.tokenize(text)
        self.assertListEqual(tokens , tokens_loaded)
@require_jumanpp
def _SCREAMING_SNAKE_CASE ( self : Any):
SCREAMING_SNAKE_CASE_: Union[str, Any] = JumanppTokenizer()
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 ") , ["アップル", "ストア", "で", "iPhone", "8", "\u3000", "が", "\u3000", "\u3000", "\u3000", "発売", "さ", "れた", "\u3000", "。"] , )
@require_jumanpp
def _SCREAMING_SNAKE_CASE ( self : str):
SCREAMING_SNAKE_CASE_: List[str] = JumanppTokenizer(do_lower_case=lowerCAmelCase__)
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 ") , ["アップル", "ストア", "で", "iphone", "8", "\u3000", "が", "\u3000", "\u3000", "\u3000", "発売", "さ", "れた", "\u3000", "。"] , )
@require_jumanpp
def _SCREAMING_SNAKE_CASE ( self : Tuple):
SCREAMING_SNAKE_CASE_: Dict = JumanppTokenizer(normalize_text=lowerCAmelCase__)
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 ") , ["ア", "ッ", "フ", "゚", "ル", "ストア", "で", "iPhone", "8", "\u3000", "が", "\u3000", "\u3000", "\u3000", "発売", "さ", "れた", "\u3000", "。"] , )
@require_jumanpp
def _SCREAMING_SNAKE_CASE ( self : Dict):
SCREAMING_SNAKE_CASE_: str = JumanppTokenizer(trim_whitespace=lowerCAmelCase__)
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 ") , ["アップル", "ストア", "で", "iPhone", "8", "が", "発売", "さ", "れた", "。"] , )
@require_jumanpp
def _SCREAMING_SNAKE_CASE ( self : List[Any]):
SCREAMING_SNAKE_CASE_: Union[str, Any] = JumanppTokenizer()
self.assertListEqual(
tokenizer.tokenize("ありがとうございますm(_ _)m見つけるのが大変です。") , ["ありがとう", "ございます", "m(_ _)m", "見つける", "の", "が", "大変です", "。"] , )
def _SCREAMING_SNAKE_CASE ( self : List[str]):
SCREAMING_SNAKE_CASE_: Optional[Any] = ["[UNK]", "[CLS]", "[SEP]", "こんにちは", "こん", "にちは", "ばんは", "##こん", "##にちは", "##ばんは"]
SCREAMING_SNAKE_CASE_: Optional[int] = {}
for i, token in enumerate(lowerCAmelCase__):
SCREAMING_SNAKE_CASE_: Optional[Any] = i
SCREAMING_SNAKE_CASE_: Union[str, Any] = WordpieceTokenizer(vocab=lowerCAmelCase__ , unk_token="[UNK]")
self.assertListEqual(tokenizer.tokenize("") , [])
self.assertListEqual(tokenizer.tokenize("こんにちは") , ["こんにちは"])
self.assertListEqual(tokenizer.tokenize("こんばんは") , ["こん", "##ばんは"])
self.assertListEqual(tokenizer.tokenize("こんばんは こんばんにちは こんにちは") , ["こん", "##ばんは", "[UNK]", "こんにちは"])
def _SCREAMING_SNAKE_CASE ( self : Optional[Any]):
SCREAMING_SNAKE_CASE_: str = BertJapaneseTokenizer.from_pretrained("nlp-waseda/roberta-base-japanese-with-auto-jumanpp")
SCREAMING_SNAKE_CASE_: List[Any] = tokenizer.subword_tokenizer
SCREAMING_SNAKE_CASE_: List[Any] = subword_tokenizer.tokenize("国境 の 長い トンネル を 抜ける と 雪国 であった 。")
self.assertListEqual(lowerCAmelCase__ , ["▁国境", "▁の", "▁長い", "▁トンネル", "▁を", "▁抜ける", "▁と", "▁雪", "国", "▁であった", "▁。"])
SCREAMING_SNAKE_CASE_: str = subword_tokenizer.tokenize("こんばんは こんばん にち は こんにちは")
self.assertListEqual(lowerCAmelCase__ , ["▁こん", "ばん", "は", "▁こん", "ばん", "▁に", "ち", "▁は", "▁こんにちは"])
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]):
SCREAMING_SNAKE_CASE_: Dict = self.tokenizer_class.from_pretrained("cl-tohoku/bert-base-japanese")
SCREAMING_SNAKE_CASE_: Tuple = tokenizer.encode("ありがとう。" , add_special_tokens=lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Dict = tokenizer.encode("どういたしまして。" , add_special_tokens=lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Dict = tokenizer.build_inputs_with_special_tokens(lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: int = tokenizer.build_inputs_with_special_tokens(lowerCAmelCase__ , lowerCAmelCase__)
# 2 is for "[CLS]", 3 is for "[SEP]"
assert encoded_sentence == [2] + text + [3]
assert encoded_pair == [2] + text + [3] + text_a + [3]
@custom_tokenizers
class __lowercase ( UpperCAmelCase_ , unittest.TestCase ):
"""simple docstring"""
_UpperCAmelCase : Optional[Any] = BertJapaneseTokenizer
_UpperCAmelCase : List[str] = False
def _SCREAMING_SNAKE_CASE ( self : List[Any]):
super().setUp()
SCREAMING_SNAKE_CASE_: Any = ["[UNK]", "[CLS]", "[SEP]", "こ", "ん", "に", "ち", "は", "ば", "世", "界", "、", "。"]
SCREAMING_SNAKE_CASE_: int = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"])
with open(self.vocab_file , "w" , encoding="utf-8") as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
def _SCREAMING_SNAKE_CASE ( self : List[str] , **lowerCAmelCase__ : Union[str, Any]):
return BertJapaneseTokenizer.from_pretrained(self.tmpdirname , subword_tokenizer_type="character" , **lowerCAmelCase__)
def _SCREAMING_SNAKE_CASE ( self : Optional[int] , lowerCAmelCase__ : Union[str, Any]):
SCREAMING_SNAKE_CASE_: List[Any] = "こんにちは、世界。 \nこんばんは、世界。"
SCREAMING_SNAKE_CASE_: Union[str, Any] = "こ ん に ち は 、 世 界 。 こ ん ば ん は 、 世 界 。"
return input_text, output_text
def _SCREAMING_SNAKE_CASE ( self : Tuple):
pass # TODO add if relevant
def _SCREAMING_SNAKE_CASE ( self : int):
pass # TODO add if relevant
def _SCREAMING_SNAKE_CASE ( self : Any):
pass # TODO add if relevant
def _SCREAMING_SNAKE_CASE ( self : Optional[int]):
SCREAMING_SNAKE_CASE_: Dict = self.tokenizer_class(self.vocab_file , subword_tokenizer_type="character")
SCREAMING_SNAKE_CASE_: Optional[int] = tokenizer.tokenize("こんにちは、世界。 \nこんばんは、世界。")
self.assertListEqual(
lowerCAmelCase__ , ["こ", "ん", "に", "ち", "は", "、", "世", "界", "。", "こ", "ん", "ば", "ん", "は", "、", "世", "界", "。"])
self.assertListEqual(
tokenizer.convert_tokens_to_ids(lowerCAmelCase__) , [3, 4, 5, 6, 7, 11, 9, 10, 12, 3, 4, 8, 4, 7, 11, 9, 10, 12])
def _SCREAMING_SNAKE_CASE ( self : Optional[Any]):
SCREAMING_SNAKE_CASE_: Optional[Any] = ["[UNK]", "[CLS]", "[SEP]", "こ", "ん", "に", "ち", "は", "ば", "世", "界", "、", "。"]
SCREAMING_SNAKE_CASE_: List[Any] = {}
for i, token in enumerate(lowerCAmelCase__):
SCREAMING_SNAKE_CASE_: Dict = i
SCREAMING_SNAKE_CASE_: Optional[int] = CharacterTokenizer(vocab=lowerCAmelCase__ , unk_token="[UNK]")
self.assertListEqual(tokenizer.tokenize("") , [])
self.assertListEqual(tokenizer.tokenize("こんにちは") , ["こ", "ん", "に", "ち", "は"])
self.assertListEqual(tokenizer.tokenize("こんにちほ") , ["こ", "ん", "に", "ち", "[UNK]"])
def _SCREAMING_SNAKE_CASE ( self : Optional[Any]):
SCREAMING_SNAKE_CASE_: str = self.tokenizer_class.from_pretrained("cl-tohoku/bert-base-japanese-char")
SCREAMING_SNAKE_CASE_: Optional[int] = tokenizer.encode("ありがとう。" , add_special_tokens=lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: List[Any] = tokenizer.encode("どういたしまして。" , add_special_tokens=lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: int = tokenizer.build_inputs_with_special_tokens(lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: List[Any] = tokenizer.build_inputs_with_special_tokens(lowerCAmelCase__ , lowerCAmelCase__)
# 2 is for "[CLS]", 3 is for "[SEP]"
assert encoded_sentence == [2] + text + [3]
assert encoded_pair == [2] + text + [3] + text_a + [3]
@custom_tokenizers
class __lowercase ( unittest.TestCase ):
"""simple docstring"""
def _SCREAMING_SNAKE_CASE ( self : str):
SCREAMING_SNAKE_CASE_: List[str] = "cl-tohoku/bert-base-japanese"
SCREAMING_SNAKE_CASE_: List[str] = AutoTokenizer.from_pretrained(lowerCAmelCase__)
self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__)
class __lowercase ( unittest.TestCase ):
"""simple docstring"""
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]):
SCREAMING_SNAKE_CASE_: Union[str, Any] = "cl-tohoku/bert-base-japanese"
with self.assertLogs("transformers" , level="WARNING") as cm:
BertTokenizer.from_pretrained(lowerCAmelCase__)
self.assertTrue(
cm.records[0].message.startswith(
"The tokenizer class you load from this checkpoint is not the same type as the class this function"
" is called from."))
SCREAMING_SNAKE_CASE_: Union[str, Any] = "bert-base-cased"
with self.assertLogs("transformers" , level="WARNING") as cm:
BertJapaneseTokenizer.from_pretrained(lowerCAmelCase__)
self.assertTrue(
cm.records[0].message.startswith(
"The tokenizer class you load from this checkpoint is not the same type as the class this function"
" is called from."))
| 13 |
'''simple docstring'''
def SCREAMING_SNAKE_CASE__ ( number ) -> bool:
    return number & 1 == 0
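# Sanity check: evenness is read off the least-significant bit,
# e.g. 4 (0b100) & 1 == 0 is True, while 7 (0b111) & 1 == 0 is False.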
if __name__ == "__main__":
import doctest
doctest.testmod()
| 42 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
"configuration_biogpt": ["BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP", "BioGptConfig"],
"tokenization_biogpt": ["BioGptTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_biogpt"] = [
"BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST",
"BioGptForCausalLM",
"BioGptForTokenClassification",
"BioGptForSequenceClassification",
"BioGptModel",
"BioGptPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_biogpt import BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP, BioGptConfig
from .tokenization_biogpt import BioGptTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_biogpt import (
BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST,
BioGptForCausalLM,
BioGptForSequenceClassification,
BioGptForTokenClassification,
BioGptModel,
BioGptPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
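# _LazyModule defers the torch-dependent imports declared above until an
# attribute is actually accessed, so importing the package stays cheap when
# BioGPT is never used.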
| 344 |
def A ( limit = 100_0000 ) -> int:
    '''simple docstring'''
    phi = [i - 1 for i in range(limit + 1 )]
    for i in range(2 , limit + 1 ):
        if phi[i] == i - 1:
            for j in range(2 * i , limit + 1 , i ):
                phi[j] -= phi[j] // i
    return sum(phi[2 : limit + 1] )
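# phi[i] == i - 1 only holds when i is prime, so the inner loop runs once per
# prime i and applies the Euler product factor (1 - 1/i) to every multiple of i.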
if __name__ == "__main__":
print(solution())
| 344 | 1 |
'''simple docstring'''
from typing import List, Union
import numpy as np
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_DEPTH_ESTIMATION_MAPPING
A__: Optional[int] = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS )
class A__ ( Pipeline ):
    def __init__( self , *args , **kwargs ) -> None:
        '''simple docstring'''
        super().__init__(*args , **kwargs )
        requires_backends(self , """vision""" )
        self.check_model_type(MODEL_FOR_DEPTH_ESTIMATION_MAPPING )
    def __call__( self , images : Union[str, List[str], "Image.Image", List["Image.Image"]] , **kwargs ):
        '''simple docstring'''
        return super().__call__(images , **kwargs )
    def _sanitize_parameters( self , **kwargs ):
        '''simple docstring'''
        return {}, {}, {}
    def preprocess( self , image ):
        '''simple docstring'''
        image = load_image(image )
        self.image_size = image.size
        model_inputs = self.image_processor(images=image , return_tensors=self.framework )
        return model_inputs
    def _forward( self , model_inputs ):
        '''simple docstring'''
        model_outputs = self.model(**model_inputs )
        return model_outputs
    def postprocess( self , model_outputs ):
        '''simple docstring'''
        predicted_depth = model_outputs.predicted_depth
        prediction = torch.nn.functional.interpolate(
            predicted_depth.unsqueeze(1 ) , size=self.image_size[::-1] , mode="""bicubic""" , align_corners=False )
        output = prediction.squeeze().cpu().numpy()
        formatted = (output * 2_5_5 / np.max(output )).astype("""uint8""" )
        depth = Image.fromarray(formatted )
        output_dict = {}
        output_dict["""predicted_depth"""] = predicted_depth
        output_dict["""depth"""] = depth
        return output_dict
| 276 |
'''simple docstring'''
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, Encoder, VectorQuantizer
@dataclass
class VQEncoderOutput ( BaseOutput ):
    latents : torch.FloatTensor
class VQModel ( ModelMixin , ConfigMixin ):
@register_to_config
    def __init__( self , in_channels : int = 3 , out_channels : int = 3 , down_block_types : Tuple[str] = ("DownEncoderBlock2D",) , up_block_types : Tuple[str] = ("UpDecoderBlock2D",) , block_out_channels : Tuple[int] = (6_4,) , layers_per_block : int = 1 , act_fn : str = "silu" , latent_channels : int = 3 , sample_size : int = 3_2 , num_vq_embeddings : int = 2_5_6 , norm_num_groups : int = 3_2 , vq_embed_dim : Optional[int] = None , scaling_factor : float = 0.18_215 , norm_type : str = "group" , ) -> None:
        '''simple docstring'''
        super().__init__()
        # pass init params to Encoder
        self.encoder = Encoder(
            in_channels=in_channels , out_channels=latent_channels , down_block_types=down_block_types , block_out_channels=block_out_channels , layers_per_block=layers_per_block , act_fn=act_fn , norm_num_groups=norm_num_groups , double_z=False , )
        vq_embed_dim = vq_embed_dim if vq_embed_dim is not None else latent_channels
        self.quant_conv = nn.Conv2d(latent_channels , vq_embed_dim , 1 )
        self.quantize = VectorQuantizer(num_vq_embeddings , vq_embed_dim , beta=0.25 , remap=None , sane_index_shape=False )
        self.post_quant_conv = nn.Conv2d(vq_embed_dim , latent_channels , 1 )
        # pass init params to Decoder
        self.decoder = Decoder(
            in_channels=latent_channels , out_channels=out_channels , up_block_types=up_block_types , block_out_channels=block_out_channels , layers_per_block=layers_per_block , act_fn=act_fn , norm_num_groups=norm_num_groups , norm_type=norm_type , )
@apply_forward_hook
    def encode( self , x : torch.FloatTensor , return_dict : bool = True ) -> VQEncoderOutput:
        '''simple docstring'''
        h = self.encoder(x )
        h = self.quant_conv(h )
        if not return_dict:
            return (h,)
        return VQEncoderOutput(latents=h )
@apply_forward_hook
    def decode( self , h : torch.FloatTensor , force_not_quantize : bool = False , return_dict : bool = True ) -> Union[DecoderOutput, torch.FloatTensor]:
        '''simple docstring'''
        # also go through quantization layer
        if not force_not_quantize:
            quant , emb_loss , info = self.quantize(h )
        else:
            quant = h
        quant_out = self.post_quant_conv(quant )
        dec = self.decoder(quant_out , quant if self.config.norm_type == """spatial""" else None )
        if not return_dict:
            return (dec,)
        return DecoderOutput(sample=dec )
    def forward( self , sample : torch.FloatTensor , return_dict : bool = True ) -> Union[DecoderOutput, torch.FloatTensor]:
        '''simple docstring'''
        x = sample
        h = self.encode(x ).latents
        dec = self.decode(h ).sample
        if not return_dict:
            return (dec,)
        return DecoderOutput(sample=dec )
| 276 | 1 |
from __future__ import annotations
import math
lowerCamelCase : Dict ='''2020.9.26'''
lowerCamelCase : Optional[int] ='''xcodz-dot, cclaus, dhruvmanila'''
def convert_to_ad ( x , y , z , scale , distance ) -> tuple[float, float]:
    if not all(isinstance(val , (float, int) ) for val in locals().values() ):
        msg = f'Input values must either be float or int: {list(locals().values() )}'
        raise TypeError(msg )
    projected_x = ((x * distance) / (z + distance)) * scale
    projected_y = ((y * distance) / (z + distance)) * scale
    return projected_x, projected_y
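# Perspective projection: each coordinate is scaled by distance / (z + distance),
# so points farther from the viewer (larger z) shrink toward the origin.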
def rotate ( x , y , z , axis , angle ) -> tuple[float, float, float]:
    if not isinstance(axis , str ):
        raise TypeError("Axis must be a str" )
    input_variables = locals()
    del input_variables["axis"]
    if not all(isinstance(val , (float, int) ) for val in input_variables.values() ):
        msg = (
            "Input values except axis must either be float or int: "
            f'{list(input_variables.values() )}'
        )
        raise TypeError(msg )
    angle = (angle % 360) / 450 * 180 / math.pi
    if axis == "z":
        new_x = x * math.cos(angle ) - y * math.sin(angle )
        new_y = y * math.cos(angle ) + x * math.sin(angle )
        new_z = z
    elif axis == "x":
        new_y = y * math.cos(angle ) - z * math.sin(angle )
        new_z = z * math.cos(angle ) + y * math.sin(angle )
        new_x = x
    elif axis == "y":
        new_x = x * math.cos(angle ) - z * math.sin(angle )
        new_z = z * math.cos(angle ) + x * math.sin(angle )
        new_y = y
    else:
        raise ValueError("not a valid axis, choose one of 'x', 'y', 'z'" )
    return new_x, new_y, new_z
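# Each branch applies the standard 2D rotation matrix in the plane perpendicular
# to the chosen axis; the coordinate along the axis itself passes through unchanged.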
if __name__ == "__main__":
import doctest
doctest.testmod()
print(F"""{convert_to_ad(1.0, 2.0, 3.0, 10.0, 10.0) = }""")
print(F"""{rotate(1.0, 2.0, 3.0, "y", 90.0) = }""") | 196 |
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Value
from .base import TaskTemplate
@dataclass(frozen=True )
class __a ( TaskTemplate ):
    task : str = field(default='''language-modeling''' , metadata={'''include_in_asdict_even_if_is_default''': True} )
    input_schema : ClassVar[Features] = Features({'''text''': Value('''string''' )} )
    label_schema : ClassVar[Features] = Features({} )
    text_column : str = "text"
    @property
    def column_mapping( self ) -> Dict[str, str]:
        '''simple docstring'''
        return {self.text_column: "text"}
"""simple docstring"""
from __future__ import annotations
from fractions import Fraction
from math import gcd, sqrt
def is_sq ( number ) -> bool:
    sq = int(number**0.5 )
    return number == sq * sq
def add_three ( x_num , x_den , y_num , y_den , z_num , z_den ) -> tuple[int, int]:
    top = x_num * y_den * z_den + y_num * x_den * z_den + z_num * x_den * y_den
    bottom = x_den * y_den * z_den
    hcf = gcd(top , bottom )
    top //= hcf
    bottom //= hcf
    return top, bottom
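# Reducing by the gcd keeps every sum in lowest terms, so identical fractions
# produced by different (x, y, z) triples collapse to a single entry in the set.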
def solution ( order = 35 ) -> int:
    unique_s = set()
    total = Fraction(0 )
    for x_num in range(1 , order + 1 ):
        for x_den in range(x_num + 1 , order + 1 ):
            for y_num in range(1 , order + 1 ):
                for y_den in range(y_num + 1 , order + 1 ):
                    # n=1
                    z_num = x_num * y_den + x_den * y_num
                    z_den = x_den * y_den
                    hcf = gcd(z_num , z_den )
                    z_num //= hcf
                    z_den //= hcf
                    if 0 < z_num < z_den <= order:
                        fraction_sum = add_three(
                            x_num , x_den , y_num , y_den , z_num , z_den )
                        unique_s.add(fraction_sum )
                    # n=2
                    z_num = (
                        x_num * x_num * y_den * y_den + x_den * x_den * y_num * y_num
                    )
                    z_den = x_den * x_den * y_den * y_den
                    if is_sq(z_num ) and is_sq(z_den ):
                        z_num = int(sqrt(z_num ) )
                        z_den = int(sqrt(z_den ) )
                        hcf = gcd(z_num , z_den )
                        z_num //= hcf
                        z_den //= hcf
                        if 0 < z_num < z_den <= order:
                            fraction_sum = add_three(
                                x_num , x_den , y_num , y_den , z_num , z_den )
                            unique_s.add(fraction_sum )
                    # n=-1
                    z_num = x_num * y_num
                    z_den = x_den * y_num + x_num * y_den
                    hcf = gcd(z_num , z_den )
                    z_num //= hcf
                    z_den //= hcf
                    if 0 < z_num < z_den <= order:
                        fraction_sum = add_three(
                            x_num , x_den , y_num , y_den , z_num , z_den )
                        unique_s.add(fraction_sum )
                    # n=-2
                    z_num = x_num * x_num * y_num * y_num
                    z_den = (
                        x_den * x_den * y_num * y_num + x_num * x_num * y_den * y_den
                    )
                    if is_sq(z_num ) and is_sq(z_den ):
                        z_num = int(sqrt(z_num ) )
                        z_den = int(sqrt(z_den ) )
                        hcf = gcd(z_num , z_den )
                        z_num //= hcf
                        z_den //= hcf
                        if 0 < z_num < z_den <= order:
                            fraction_sum = add_three(
                                x_num , x_den , y_num , y_den , z_num , z_den )
                            unique_s.add(fraction_sum )
    for num, den in unique_s:
        total += Fraction(num , den )
    return total.denominator + total.numerator
if __name__ == "__main__":
print(f"""{solution() = }""")
| 347 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_mobilevit import MobileViTImageProcessor
__SCREAMING_SNAKE_CASE : int = logging.get_logger(__name__)
class __A (MobileViTImageProcessor):
    '''simple docstring'''
    def __init__( self , *args , **kwargs ) ->None:
        """simple docstring"""
        warnings.warn(
            """The class MobileViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers."""
            """ Please use MobileViTImageProcessor instead.""" , FutureWarning , )
        super().__init__(*args , **kwargs )
| 347 | 1 |
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPImageProcessor, CLIPProcessor
@require_vision
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
    def setUp( self ):
        self.tmpdirname = tempfile.mkdtemp()
        # fmt: off
        vocab = ["""l""", """o""", """w""", """e""", """r""", """s""", """t""", """i""", """d""", """n""", """lo""", """l</w>""", """w</w>""", """r</w>""", """t</w>""", """low</w>""", """er</w>""", """lowest</w>""", """newer</w>""", """wider""", """<unk>""", """<|startoftext|>""", """<|endoftext|>"""]
        # fmt: on
        vocab_tokens = dict(zip(vocab , range(len(vocab ) ) ) )
        merges = ["""#version: 0.2""", """l o""", """lo w</w>""", """e r</w>""", """"""]
        self.special_tokens_map = {"""unk_token""": """<unk>"""}
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
        self.merges_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] )
        with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
            fp.write(json.dumps(vocab_tokens ) + """\n""" )
        with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp:
            fp.write("""\n""".join(merges ) )
        image_processor_map = {
            """do_resize""": True,
            """size""": 20,
            """do_center_crop""": True,
            """crop_size""": 18,
            """do_normalize""": True,
            """image_mean""": [0.48145466, 0.4578275, 0.40821073],
            """image_std""": [0.26862954, 0.26130258, 0.27577711],
        }
        self.image_processor_file = os.path.join(self.tmpdirname , IMAGE_PROCESSOR_NAME )
        with open(self.image_processor_file , """w""" , encoding="""utf-8""" ) as fp:
            json.dump(image_processor_map , fp )
def _A ( self : str , **__lowerCamelCase : Union[str, Any] ):
return CLIPTokenizer.from_pretrained(self.tmpdirname , **__lowerCamelCase )
def _A ( self : str , **__lowerCamelCase : List[str] ):
return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **__lowerCamelCase )
def _A ( self : List[Any] , **__lowerCamelCase : Any ):
return CLIPImageProcessor.from_pretrained(self.tmpdirname , **__lowerCamelCase )
def _A ( self : List[str] ):
shutil.rmtree(self.tmpdirname )
def _A ( self : Any ):
        image_inputs = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uint8 )]
        image_inputs = [Image.fromarray(np.moveaxis(x , 0 , -1 ) ) for x in image_inputs]
        return image_inputs
def _A ( self : Optional[int] ):
UpperCamelCase :List[str] = self.get_tokenizer()
UpperCamelCase :List[Any] = self.get_rust_tokenizer()
UpperCamelCase :Any = self.get_image_processor()
UpperCamelCase :Optional[int] = CLIPProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase )
processor_slow.save_pretrained(self.tmpdirname )
UpperCamelCase :Any = CLIPProcessor.from_pretrained(self.tmpdirname , use_fast=__lowerCamelCase )
UpperCamelCase :Dict = CLIPProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase )
processor_fast.save_pretrained(self.tmpdirname )
UpperCamelCase :Union[str, Any] = CLIPProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , __lowerCamelCase )
self.assertIsInstance(processor_fast.tokenizer , __lowerCamelCase )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , __lowerCamelCase )
self.assertIsInstance(processor_fast.image_processor , __lowerCamelCase )
def _A ( self : int ):
UpperCamelCase :Union[str, Any] = CLIPProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
UpperCamelCase :Dict = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" )
UpperCamelCase :Union[str, Any] = self.get_image_processor(do_normalize=__lowerCamelCase , padding_value=1.0 )
UpperCamelCase :Union[str, Any] = CLIPProcessor.from_pretrained(
self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=__lowerCamelCase , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , __lowerCamelCase )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , __lowerCamelCase )
def _A ( self : Tuple ):
UpperCamelCase :Dict = self.get_image_processor()
UpperCamelCase :Dict = self.get_tokenizer()
UpperCamelCase :List[str] = CLIPProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase )
UpperCamelCase :str = self.prepare_image_inputs()
UpperCamelCase :List[Any] = image_processor(__lowerCamelCase , return_tensors="""np""" )
UpperCamelCase :Tuple = processor(images=__lowerCamelCase , return_tensors="""np""" )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1E-2 )
def _A ( self : Dict ):
UpperCamelCase :Any = self.get_image_processor()
UpperCamelCase :Tuple = self.get_tokenizer()
UpperCamelCase :int = CLIPProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase )
UpperCamelCase :Optional[int] = """lower newer"""
UpperCamelCase :int = processor(text=__lowerCamelCase )
UpperCamelCase :Optional[Any] = tokenizer(__lowerCamelCase )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def _A ( self : Union[str, Any] ):
UpperCamelCase :List[str] = self.get_image_processor()
UpperCamelCase :List[str] = self.get_tokenizer()
UpperCamelCase :Optional[int] = CLIPProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase )
UpperCamelCase :Optional[Any] = """lower newer"""
UpperCamelCase :Union[str, Any] = self.prepare_image_inputs()
UpperCamelCase :Optional[Any] = processor(text=__lowerCamelCase , images=__lowerCamelCase )
self.assertListEqual(list(inputs.keys() ) , ["""input_ids""", """attention_mask""", """pixel_values"""] )
# test if it raises when no input is passed
with pytest.raises(__lowerCamelCase ):
processor()
def _A ( self : Optional[int] ):
UpperCamelCase :Dict = self.get_image_processor()
UpperCamelCase :List[str] = self.get_tokenizer()
UpperCamelCase :List[Any] = CLIPProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase )
UpperCamelCase :Tuple = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
UpperCamelCase :List[str] = processor.batch_decode(__lowerCamelCase )
UpperCamelCase :Dict = tokenizer.batch_decode(__lowerCamelCase )
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
def _A ( self : int ):
UpperCamelCase :List[Any] = self.get_image_processor()
UpperCamelCase :str = self.get_tokenizer()
UpperCamelCase :int = CLIPProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase )
UpperCamelCase :Optional[int] = """lower newer"""
UpperCamelCase :Optional[int] = self.prepare_image_inputs()
UpperCamelCase :Union[str, Any] = processor(text=__lowerCamelCase , images=__lowerCamelCase )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
| 62 |
from __future__ import annotations
from collections import deque
class _SCREAMING_SNAKE_CASE :
    def __init__( self , keywords : list[str] ):
        self.adlist : list[dict] = []
        self.adlist.append(
            {"""value""": """""", """next_states""": [], """fail_state""": 0, """output""": []} )
        for keyword in keywords:
            self.add_keyword(keyword )
        self.set_fail_transitions()
    def find_next_state( self , current_state : int , char : str ):
for state in self.adlist[current_state]["next_states"]:
if char == self.adlist[state]["value"]:
return state
return None
    def add_keyword( self , keyword : str ):
        current_state = 0
        for character in keyword:
            next_state = self.find_next_state(current_state , character )
            if next_state is None:
                self.adlist.append(
                    {
                        """value""": character,
                        """next_states""": [],
                        """fail_state""": 0,
                        """output""": [],
                    } )
                self.adlist[current_state]["next_states"].append(len(self.adlist ) - 1 )
                current_state = len(self.adlist ) - 1
            else:
                current_state = next_state
        self.adlist[current_state]["output"].append(keyword )
    def set_fail_transitions( self ):
        q : deque = deque()
        for node in self.adlist[0]["next_states"]:
            q.append(node )
            self.adlist[node]["fail_state"] = 0
        while q:
            r = q.popleft()
            for child in self.adlist[r]["next_states"]:
                q.append(child )
                state = self.adlist[r]["""fail_state"""]
                while (
                    self.find_next_state(state , self.adlist[child]["""value"""] ) is None
                    and state != 0
                ):
                    state = self.adlist[state]["""fail_state"""]
                self.adlist[child]["fail_state"] = self.find_next_state(
                    state , self.adlist[child]["""value"""] )
                if self.adlist[child]["fail_state"] is None:
                    self.adlist[child]["fail_state"] = 0
                self.adlist[child]["output"] = (
                    self.adlist[child]["""output"""]
                    + self.adlist[self.adlist[child]["""fail_state"""]]["""output"""]
                )
    def search_in( self , string : str ):
        result : dict = {}  # returns a dict with keywords and list of its occurrences
        current_state = 0
        for i in range(len(string ) ):
            while (
                self.find_next_state(current_state , string[i] ) is None
                and current_state != 0
            ):
                current_state = self.adlist[current_state]["""fail_state"""]
            next_state = self.find_next_state(current_state , string[i] )
            if next_state is None:
                current_state = 0
            else:
                current_state = next_state
            for key in self.adlist[current_state]["output"]:
                if key not in result:
                    result[key] = []
                result[key].append(i - len(key ) + 1 )
        return result
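# Example (hypothetical input): an automaton built over ["what", "hat"] searching
# the text "whatever" would report {"what": [0], "hat": [1]} — each keyword mapped
# to the start indices of its occurrences.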
if __name__ == "__main__":
import doctest
doctest.testmod()
| 62 | 1 |
import unittest
from transformers.models.xlm_prophetnet.tokenization_xlm_prophetnet import SPIECE_UNDERLINE, XLMProphetNetTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir('''fixtures/test_sentencepiece.model''')
@require_sentencepiece
class lowercase_ ( A__ , unittest.TestCase ):
A__ : Any = XLMProphetNetTokenizer
A__ : Any = False
A__ : List[Any] = True
def lowerCamelCase_ ( self ):
"""simple docstring"""
super().setUp()
# We have a SentencePiece fixture for testing
        tokenizer = XLMProphetNetTokenizer(SAMPLE_VOCAB , keep_accents=True )
        tokenizer.save_pretrained(self.tmpdirname )
def lowerCamelCase_ ( self ):
"""simple docstring"""
        token = '''[PAD]'''
        token_id = 0
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token ) , token_id )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id ) , token )
def lowerCamelCase_ ( self ):
"""simple docstring"""
        vocab_keys = list(self.get_tokenizer().get_vocab().keys() )
        self.assertEqual(vocab_keys[0] , """[PAD]""" )
        self.assertEqual(vocab_keys[1] , """[CLS]""" )
        self.assertEqual(vocab_keys[-1] , """j""" )
        self.assertEqual(len(vocab_keys ) , 1_0_1_2 )
def lowerCamelCase_ ( self ):
"""simple docstring"""
self.assertEqual(self.get_tokenizer().vocab_size , 1_0_1_2 )
def lowerCamelCase_ ( self ):
"""simple docstring"""
        tokenizer = XLMProphetNetTokenizer(SAMPLE_VOCAB , keep_accents=True )
        tokens = tokenizer.tokenize("""This is a test""" )
        self.assertListEqual(tokens , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens ) , [value + tokenizer.fairseq_offset for value in [2_8_5, 4_6, 1_0, 1_7_0, 3_8_2]] , )
        tokens = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
        self.assertListEqual(
            tokens , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens )
        self.assertListEqual(
            ids , [
value + tokenizer.fairseq_offset
for value in [8, 2_1, 8_4, 5_5, 2_4, 1_9, 7, -9, 6_0_2, 3_4_7, 3_4_7, 3_4_7, 3, 1_2, 6_6, 4_6, 7_2, 8_0, 6, -9, 4]
] , )
        back_tokens = tokenizer.convert_ids_to_tokens(ids )
        self.assertListEqual(
            back_tokens , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""[UNK]""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""[UNK]""",
""".""",
] , )
@cached_property
def lowerCamelCase_ ( self ):
"""simple docstring"""
return XLMProphetNetTokenizer.from_pretrained("""microsoft/xprophetnet-large-wiki100-cased""" )
@slow
def lowerCamelCase_ ( self ):
"""simple docstring"""
        symbols = '''Hello World!'''
        original_tokenizer_encodings = [3_5_3_8_9, 6_6_7_2, 4_9, 2]
        self.assertListEqual(original_tokenizer_encodings , self.big_tokenizer.encode(symbols ) )
@slow
def lowerCamelCase_ ( self ):
"""simple docstring"""
UpperCamelCase_ = {'''input_ids''': [[1_1_0_7_3, 8_2_7_8_3, 1_8, 2_6, 8_2_7_8_3, 5_4_9, 5_1_5_4_0, 2_4_8, 1_7_2_0_9, 1_3_0_1, 2_1_7, 2_0, 2_1_5_1_8_6, 1_3_2_5, 1_4_7, 1_7_2_0_9, 1_3_0_1, 2_1_7, 2_0, 5_6_3_7_0, 5_3, 1_2_2_0_2_0, 2_0, 1_6_4_7_7, 2_7, 8_7_3_5_5, 4_5_4_8, 2_0, 4_7_2_8, 7_8_3_9_2, 1_7, 1_5_9_9_6_9, 1_8, 2_6, 2_4_4_9_1, 6_2_9, 1_5, 5_3_8, 2_2_7_0_4, 5_4_3_9, 1_5, 2_7_8_8, 2_4_4_9_1, 9_8_8_5, 1_5, 4_3_5_3_4, 6_0_5, 1_5, 8_1_4, 1_8_4_0_3, 3_3_2_0_0, 2_9, 1_5, 4_3_5_3_4, 2_4_4_5_8, 1_2_4_1_0, 1_1_1, 2_4_9_6_6, 8_3_6_6_9, 9_6_3_7, 1_4_4_0_6_8, 2_6, 8_5_0, 2_2_3_4_6, 2_7, 1_4_7, 2_4_9_6_6, 8_3_6_6_9, 8_3_4_9_0, 2_6, 3_9_1_1_3, 7_3_5, 2_7, 6_8_9, 6_5_6, 2_8_0_0, 1_3_3_9, 4_6_0_0, 5_3, 1_2_2_0_2_0, 1_1_5_7_8_5, 3_4, 8_1_6, 1_3_3_9, 4_6_8_8_7, 1_8, 1_4_7, 5_3_9_0_5, 1_9_5_1, 4_2_2_3_8, 4_1_1_7_0, 1_7_7_3_2, 8_3_4, 4_3_6, 1_5, 2_7_5_2_3, 9_8_7_3_3, 2_1_7, 1_4_7, 5_5_4_2, 4_9_8_1, 9_3_0, 1_7_3_4_7, 1_6, 2], [2_0_0_9_1, 6_2_9, 9_4, 8_2_7_8_6, 5_8, 4_9_0, 2_0, 1_5_2_8, 8_4, 5_3_9_0_5, 3_4_4, 8_0_5_9_2, 1_1_0_1_2_8, 1_8_8_2_2, 5_2_6_7, 1_3_0_6, 6_2, 1_5_2_5_3_7, 3_0_8, 7_9_9_7, 4_0_1, 1_2_4_4_2_7, 5_4_9, 3_5_4_4_2, 2_2_5, 1_0_9, 1_5_0_5_5, 2_5_7_4_8, 1_4_7, 7_1_1_9, 4_3_7_1_2, 3_4, 7_6_7, 1_3_5_3_6_6, 1_8, 1_6, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [5_9_2, 6_3_7_8_4, 1_1_9_4_6_6, 1_7, 1_4_7_8_0_8, 8_8_2_1_4, 1_8, 6_5_6, 8_1, 3_2, 3_2_9_6, 1_0_2_8_0, 1_6, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=__snake_case , model_name="""microsoft/xprophetnet-large-wiki100-cased""" , revision="""1acad1643ddd54a44df6a1b797ada8373685d90e""" , )
| 122 |
'''simple docstring'''
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNet2DModel
from ...schedulers import ScoreSdeVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class SCREAMING_SNAKE_CASE( DiffusionPipeline ):
    """simple docstring"""
    unet : UNet2DModel
    scheduler : ScoreSdeVeScheduler
    def __init__( self , unet : UNet2DModel , scheduler : ScoreSdeVeScheduler ) -> None:
        super().__init__()
        self.register_modules(unet=unet , scheduler=scheduler )
@torch.no_grad()
    def __call__( self , batch_size : int = 1 , num_inference_steps : int = 2000 , generator : Optional[Union[torch.Generator, List[torch.Generator]]] = None , output_type : Optional[str] = "pil" , return_dict : bool = True , **kwargs ) -> Union[ImagePipelineOutput, Tuple]:
        img_size = self.unet.config.sample_size
        shape = (batch_size, 3, img_size, img_size)
        model = self.unet
        sample = randn_tensor(shape , generator=generator ) * self.scheduler.init_noise_sigma
        sample = sample.to(self.device )
        self.scheduler.set_timesteps(num_inference_steps )
        self.scheduler.set_sigmas(num_inference_steps )
        for i, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ):
            sigma_t = self.scheduler.sigmas[i] * torch.ones(shape[0] , device=self.device )
            # correction step
            for _ in range(self.scheduler.config.correct_steps ):
                model_output = self.unet(sample , sigma_t ).sample
                sample = self.scheduler.step_correct(model_output , sample , generator=generator ).prev_sample
            # prediction step
            model_output = model(sample , sigma_t ).sample
            output = self.scheduler.step_pred(model_output , t , sample , generator=generator )
            sample , sample_mean = output.prev_sample, output.prev_sample_mean
        sample = sample_mean.clamp(0 , 1 )
        sample = sample.cpu().permute(0 , 2 , 3 , 1 ).numpy()
        if output_type == "pil":
            sample = self.numpy_to_pil(sample )
        if not return_dict:
            return (sample,)
        return ImagePipelineOutput(images=sample )
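# Minimal usage sketch (the checkpoint name below is illustrative, not taken from
# this file; any score-SDE VE checkpoint with a matching scheduler should work):
#   pipe = SCREAMING_SNAKE_CASE.from_pretrained("google/ncsnpp-church-256")
#   image = pipe(num_inference_steps=2000).images[0]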
| 23 | 0 |
'''simple docstring'''
import sys
import webbrowser
import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent
if __name__ == "__main__":
print('Googling.....')
url = """https://www.google.com/search?q=""" + """ """.join(sys.argv[1:])
res = requests.get(url, headers={'UserAgent': UserAgent().random})
# res.raise_for_status()
with open('project1a.html', 'wb') as out_file: # only for knowing the class
for data in res.iter_content(1_00_00):
out_file.write(data)
soup = BeautifulSoup(res.text, 'html.parser')
links = list(soup.select('.eZt8xd'))[:5]
print(len(links))
for link in links:
if link.text == "Maps":
webbrowser.open(link.get('href'))
else:
webbrowser.open(f"""https://google.com{link.get('href')}""")
'''simple docstring'''
import os
from datetime import datetime as dt
from github import Github
__A =[
'good first issue',
'good second issue',
'good difficult issue',
'enhancement',
'new pipeline/model',
'new scheduler',
'wip',
]
def main():
    g = Github(os.environ["""GITHUB_TOKEN"""] )
    repo = g.get_repo("""huggingface/diffusers""" )
    open_issues = repo.get_issues(state="""open""" )
    for issue in open_issues:
        comments = sorted(issue.get_comments() , key=lambda i : i.created_at , reverse=True )
        last_comment = comments[0] if len(comments ) > 0 else None
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and (dt.utcnow() - issue.updated_at).days > 7
and (dt.utcnow() - issue.created_at).days >= 3_0
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Closes the issue after 7 days of inactivity since the Stalebot notification.
issue.edit(state="""closed""" )
elif (
"stale" in issue.get_labels()
and last_comment is not None
and last_comment.user.login != "github-actions[bot]"
):
# Opens the issue if someone other than Stalebot commented.
issue.edit(state="""open""" )
issue.remove_from_labels("""stale""" )
elif (
(dt.utcnow() - issue.updated_at).days > 2_3
and (dt.utcnow() - issue.created_at).days >= 3_0
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Post a Stalebot notification after 23 days of inactivity.
issue.create_comment(
"""This issue has been automatically marked as stale because it has not had """
"""recent activity. If you think this still needs to be addressed """
"""please comment on this thread.\n\nPlease note that issues that do not follow the """
"""[contributing guidelines](https://github.com/huggingface/diffusers/blob/main/CONTRIBUTING.md) """
"""are likely to be ignored.""" )
issue.add_to_labels("""stale""" )
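# Branch order above matters: already-notified stale issues are closed first,
# issues with fresh human activity are un-staled, and only then is a new
# stale notice posted on quiet issues.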
if __name__ == "__main__":
main() | 283 | 0 |
'''simple docstring'''
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import BeitImageProcessor
class BeitImageProcessingTester ( unittest.TestCase ):
"""simple docstring"""
    def __init__( self , parent , batch_size=7 , num_channels=3 , image_size=18 , min_resolution=30 , max_resolution=400 , do_resize=True , size=None , do_center_crop=True , crop_size=None , do_normalize=True , image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] , do_reduce_labels=False , ) -> None:
        size = size if size is not None else {"""height""": 20, """width""": 20}
        crop_size = crop_size if crop_size is not None else {"""height""": 18, """width""": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_reduce_labels = do_reduce_labels
def UpperCAmelCase_ ( self ) -> Any:
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_reduce_labels": self.do_reduce_labels,
}
def prepare_semantic_single_inputs ( ):
    """simple docstring"""
    dataset = load_dataset("""hf-internal-testing/fixtures_ade20k""" , split="""test""" )
    image = Image.open(dataset[0]["""file"""] )
    map = Image.open(dataset[1]["""file"""] )
    return image, map
def prepare_semantic_batch_inputs ( ):
    """simple docstring"""
    ds = load_dataset("""hf-internal-testing/fixtures_ade20k""" , split="""test""" )
    image1 = Image.open(ds[0]["""file"""] )
    image2 = Image.open(ds[1]["""file"""] )
    map1 = Image.open(ds[2]["""file"""] )
    map2 = Image.open(ds[3]["""file"""] )
    return [image1, image2], [map1, map2]
@require_torch
@require_vision
class _lowerCAmelCase ( __A, unittest.TestCase ):
"""simple docstring"""
lowerCamelCase = BeitImageProcessor if is_vision_available() else None
    def setUp( self ):
        self.image_processor_tester = BeitImageProcessingTester(self )
@property
def UpperCAmelCase_ ( self ) -> Optional[int]:
return self.image_processor_tester.prepare_image_processor_dict()
def UpperCAmelCase_ ( self ) -> Optional[int]:
A_ : str = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_lowerCamelCase , """do_resize""" ) )
self.assertTrue(hasattr(_lowerCamelCase , """size""" ) )
self.assertTrue(hasattr(_lowerCamelCase , """do_center_crop""" ) )
self.assertTrue(hasattr(_lowerCamelCase , """center_crop""" ) )
self.assertTrue(hasattr(_lowerCamelCase , """do_normalize""" ) )
self.assertTrue(hasattr(_lowerCamelCase , """image_mean""" ) )
self.assertTrue(hasattr(_lowerCamelCase , """image_std""" ) )
def UpperCAmelCase_ ( self ) -> Optional[Any]:
A_ : Optional[int] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"""height""": 20, """width""": 20} )
self.assertEqual(image_processor.crop_size , {"""height""": 18, """width""": 18} )
self.assertEqual(image_processor.do_reduce_labels , _lowerCamelCase )
A_ : int = self.image_processing_class.from_dict(
self.image_processor_dict , size=42 , crop_size=84 , reduce_labels=_lowerCamelCase )
self.assertEqual(image_processor.size , {"""height""": 42, """width""": 42} )
self.assertEqual(image_processor.crop_size , {"""height""": 84, """width""": 84} )
self.assertEqual(image_processor.do_reduce_labels , _lowerCamelCase )
def UpperCAmelCase_ ( self ) -> Union[str, Any]:
pass
def UpperCAmelCase_ ( self ) -> Dict:
# Initialize image_processing
A_ : List[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
A_ : Union[str, Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(_lowerCamelCase , Image.Image )
# Test not batched input
A_ : Tuple = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
A_ : int = image_processing(_lowerCamelCase , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
def UpperCAmelCase_ ( self ) -> List[str]:
# Initialize image_processing
A_ : List[Any] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
A_ : Tuple = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowerCamelCase , numpify=_lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(_lowerCamelCase , np.ndarray )
# Test not batched input
A_ : Union[str, Any] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
A_ : List[Any] = image_processing(_lowerCamelCase , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
def UpperCAmelCase_ ( self ) -> str:
# Initialize image_processing
A_ : Tuple = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
A_ : int = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowerCamelCase , torchify=_lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(_lowerCamelCase , torch.Tensor )
# Test not batched input
A_ : Tuple = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
A_ : Union[str, Any] = image_processing(_lowerCamelCase , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
def UpperCAmelCase_ ( self ) -> Optional[int]:
# Initialize image_processing
A_ : Tuple = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
A_ : Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowerCamelCase , torchify=_lowerCamelCase )
A_ : Optional[int] = []
for image in image_inputs:
self.assertIsInstance(_lowerCamelCase , torch.Tensor )
maps.append(torch.zeros(image.shape[-2:] ).long() )
# Test not batched input
A_ : Union[str, Any] = image_processing(image_inputs[0] , maps[0] , return_tensors="""pt""" )
self.assertEqual(
encoding["""pixel_values"""].shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
self.assertEqual(
encoding["""labels"""].shape , (
1,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
self.assertEqual(encoding["""labels"""].dtype , torch.long )
self.assertTrue(encoding["""labels"""].min().item() >= 0 )
self.assertTrue(encoding["""labels"""].max().item() <= 255 )
# Test batched
A_ : Optional[Any] = image_processing(_lowerCamelCase , _lowerCamelCase , return_tensors="""pt""" )
self.assertEqual(
encoding["""pixel_values"""].shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
self.assertEqual(
encoding["""labels"""].shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
self.assertEqual(encoding["""labels"""].dtype , torch.long )
self.assertTrue(encoding["""labels"""].min().item() >= 0 )
self.assertTrue(encoding["""labels"""].max().item() <= 255 )
# Test not batched input (PIL images)
A_ , A_ : List[Any] = prepare_semantic_single_inputs()
A_ : Union[str, Any] = image_processing(_lowerCamelCase , _lowerCamelCase , return_tensors="""pt""" )
self.assertEqual(
encoding["""pixel_values"""].shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
self.assertEqual(
encoding["""labels"""].shape , (
1,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
self.assertEqual(encoding["""labels"""].dtype , torch.long )
self.assertTrue(encoding["""labels"""].min().item() >= 0 )
self.assertTrue(encoding["""labels"""].max().item() <= 255 )
# Test batched input (PIL images)
A_ , A_ : str = prepare_semantic_batch_inputs()
A_ : Any = image_processing(_lowerCamelCase , _lowerCamelCase , return_tensors="""pt""" )
self.assertEqual(
encoding["""pixel_values"""].shape , (
2,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
self.assertEqual(
encoding["""labels"""].shape , (
2,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
self.assertEqual(encoding["""labels"""].dtype , torch.long )
self.assertTrue(encoding["""labels"""].min().item() >= 0 )
self.assertTrue(encoding["""labels"""].max().item() <= 255 )
def UpperCAmelCase_ ( self ) -> Tuple:
# Initialize image_processing
A_ : Any = self.image_processing_class(**self.image_processor_dict )
# ADE20k has 150 classes, and the background is included, so labels should be between 0 and 150
A_ , A_ : Tuple = prepare_semantic_single_inputs()
A_ : str = image_processing(_lowerCamelCase , _lowerCamelCase , return_tensors="""pt""" )
self.assertTrue(encoding["""labels"""].min().item() >= 0 )
self.assertTrue(encoding["""labels"""].max().item() <= 150 )
A_ : str = True
A_ : Union[str, Any] = image_processing(_lowerCamelCase , _lowerCamelCase , return_tensors="""pt""" )
self.assertTrue(encoding["""labels"""].min().item() >= 0 )
self.assertTrue(encoding["""labels"""].max().item() <= 255 )
| 344 |
'''simple docstring'''
from typing import Dict
import numpy as np
import torch
from . import residue_constants as rc
from .tensor_utils import tensor_tree_map, tree_map
def make_atom14_masks(protein: Dict[str, torch.Tensor]) -> Dict[str, torch.Tensor]:
    """Construct denser atom positions (14 dimensions instead of 37)."""
    restype_atom14_to_atom37_list = []
    restype_atom37_to_atom14_list = []
    restype_atom14_mask_list = []
    for rt in rc.restypes:
        atom_names = rc.restype_name_to_atom14_names[rc.restype_1to3[rt]]
        restype_atom14_to_atom37_list.append([(rc.atom_order[name] if name else 0) for name in atom_names])
        atom_name_to_idx14 = {name: i for i, name in enumerate(atom_names)}
        restype_atom37_to_atom14_list.append(
            [(atom_name_to_idx14[name] if name in atom_name_to_idx14 else 0) for name in rc.atom_types]
        )
        restype_atom14_mask_list.append([(1.0 if name else 0.0) for name in atom_names])
    # Add dummy mapping for restype 'UNK'
    restype_atom14_to_atom37_list.append([0] * 14)
    restype_atom37_to_atom14_list.append([0] * 37)
    restype_atom14_mask_list.append([0.0] * 14)
    restype_atom14_to_atom37 = torch.tensor(
        restype_atom14_to_atom37_list, dtype=torch.int32, device=protein["aatype"].device
    )
    restype_atom37_to_atom14 = torch.tensor(
        restype_atom37_to_atom14_list, dtype=torch.int32, device=protein["aatype"].device
    )
    restype_atom14_mask = torch.tensor(
        restype_atom14_mask_list, dtype=torch.float32, device=protein["aatype"].device
    )
    protein_aatype = protein["aatype"].to(torch.long)
    # create the mapping for (residx, atom14) --> atom37, i.e. an array
    # with shape (num_res, 14) containing the atom37 indices for this protein
    residx_atom14_to_atom37 = restype_atom14_to_atom37[protein_aatype]
    residx_atom14_mask = restype_atom14_mask[protein_aatype]
    protein["atom14_atom_exists"] = residx_atom14_mask
    protein["residx_atom14_to_atom37"] = residx_atom14_to_atom37.long()
    # create the gather indices for mapping back
    residx_atom37_to_atom14 = restype_atom37_to_atom14[protein_aatype]
    protein["residx_atom37_to_atom14"] = residx_atom37_to_atom14.long()
    # create the corresponding mask
    restype_atom37_mask = torch.zeros([21, 37], dtype=torch.float32, device=protein["aatype"].device)
    for restype, restype_letter in enumerate(rc.restypes):
        restype_name = rc.restype_1to3[restype_letter]
        atom_names = rc.residue_atoms[restype_name]
        for atom_name in atom_names:
            atom_type = rc.atom_order[atom_name]
            restype_atom37_mask[restype, atom_type] = 1
    residx_atom37_mask = restype_atom37_mask[protein_aatype]
    protein["atom37_atom_exists"] = residx_atom37_mask
    return protein
def make_atom14_masks_np(batch: Dict[str, torch.Tensor]) -> Dict[str, np.ndarray]:
    batch = tree_map(lambda n: torch.tensor(n, device=batch["aatype"].device), batch, np.ndarray)
    out = tensor_tree_map(lambda t: np.array(t), make_atom14_masks(batch))
    return out
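# Hedged usage sketch (assumes the residue_constants tables import cleanly):
# `protein` only needs an "aatype" tensor of residue indices; the masks and
# gather indices are added to the dict in place and the dict is returned.
#
#     protein = {"aatype": torch.zeros(8, dtype=torch.long)}
#     protein = make_atom14_masks(protein)
#     assert protein["residx_atom14_to_atom37"].shape == (8, 14)
#     assert protein["atom37_atom_exists"].shape == (8, 37)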
| 344 | 1 |
from typing import List, Optional, Union
import torch
from ...models import UNet2DConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
EXAMPLE_DOC_STRING = '\n    Examples:\n        ```py\n        >>> from diffusers import KandinskyV22Pipeline, KandinskyV22PriorPipeline\n        >>> import torch\n\n        >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained("kandinsky-community/kandinsky-2-2-prior")\n        >>> pipe_prior.to("cuda")\n        >>> prompt = "red cat, 4k photo"\n        >>> out = pipe_prior(prompt)\n        >>> image_emb = out.image_embeds\n        >>> zero_image_emb = out.negative_image_embeds\n        >>> pipe = KandinskyV22Pipeline.from_pretrained("kandinsky-community/kandinsky-2-2-decoder")\n        >>> pipe.to("cuda")\n        >>> image = pipe(\n        ...     image_embeds=image_emb,\n        ...     negative_image_embeds=zero_image_emb,\n        ...     height=768,\n        ...     width=768,\n        ...     num_inference_steps=50,\n        ... ).images\n        >>> image[0].save("cat.png")\n        ```\n'
def downscale_height_and_width(height, width, scale_factor=8):
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor
class KandinskyV22Pipeline(DiffusionPipeline):
    """
    Pipeline for text-to-image generation using Kandinsky 2.2.
    """
    def __init__(self, unet: UNet2DConditionModel, scheduler: DDPMScheduler, movq: VQModel):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler, movq=movq)
        self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels) - 1)
    def prepare_latents(self, shape, dtype, device, generator, latents, scheduler):
        if latents is None:
            latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        else:
            if latents.shape != shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}")
            latents = latents.to(device)
        latents = latents * scheduler.init_noise_sigma
        return latents
    def enable_sequential_cpu_offload(self, gpu_id=0):
        r"""
        Offloads all models to CPU using accelerate, significantly reducing memory usage.
        """
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`")
        device = torch.device(f"cuda:{gpu_id}")
        models = [
            self.unet,
            self.movq,
        ]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)
    def enable_model_cpu_offload(self, gpu_id=0):
        r"""
        Offloads all models to CPU using accelerate, keeping one whole model on GPU at a time. Memory savings are
        lower than with sequential offloading, but performance is much better.
        """
        if is_accelerate_available() and is_accelerate_version(">=", "0.17.0.dev0"):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.")
        device = torch.device(f"cuda:{gpu_id}")
        if self.device.type != "cpu":
            self.to("cpu", silence_dtype_warnings=True)
            torch.cuda.empty_cache()  # otherwise we don't see the memory savings (but they probably exist)
        hook = None
        for cpu_offloaded_model in [self.unet, self.movq]:
            _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook)
        # We'll offload the last model manually.
        self.final_offload_hook = hook
    @property
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device(self):
        r"""
        Returns the device on which the pipeline's models will be executed. After calling
        `pipeline.enable_sequential_cpu_offload()` the device can only be inferred from Accelerate's module hooks.
        """
        if not hasattr(self.unet, "_hf_hook"):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(module, "_hf_hook")
                and hasattr(module._hf_hook, "execution_device")
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device
    @torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(
        self,
        image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]],
        negative_image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]],
        height: int = 512,
        width: int = 512,
        num_inference_steps: int = 100,
        guidance_scale: float = 4.0,
        num_images_per_prompt: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        latents: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ):
        """Function invoked when calling the pipeline for generation."""
        device = self._execution_device
        do_classifier_free_guidance = guidance_scale > 1.0
        if isinstance(image_embeds, list):
            image_embeds = torch.cat(image_embeds, dim=0)
        batch_size = image_embeds.shape[0] * num_images_per_prompt
        if isinstance(negative_image_embeds, list):
            negative_image_embeds = torch.cat(negative_image_embeds, dim=0)
        if do_classifier_free_guidance:
            image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            negative_image_embeds = negative_image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            image_embeds = torch.cat([negative_image_embeds, image_embeds], dim=0).to(dtype=self.unet.dtype, device=device)
        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps = self.scheduler.timesteps
        num_channels_latents = self.unet.config.in_channels
        height, width = downscale_height_and_width(height, width, self.movq_scale_factor)
        # create initial latent
        latents = self.prepare_latents(
            (batch_size, num_channels_latents, height, width), image_embeds.dtype, device, generator, latents, self.scheduler
        )
        for i, t in enumerate(self.progress_bar(timesteps)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
            added_cond_kwargs = {"image_embeds": image_embeds}
            noise_pred = self.unet(
                sample=latent_model_input, timestep=t, encoder_hidden_states=None, added_cond_kwargs=added_cond_kwargs, return_dict=False
            )[0]
            if do_classifier_free_guidance:
                noise_pred, variance_pred = noise_pred.split(latents.shape[1], dim=1)
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                _, variance_pred_text = variance_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                noise_pred = torch.cat([noise_pred, variance_pred_text], dim=1)
            if not (
                hasattr(self.scheduler.config, "variance_type")
                and self.scheduler.config.variance_type in ["learned", "learned_range"]
            ):
                noise_pred, _ = noise_pred.split(latents.shape[1], dim=1)
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents, generator=generator)[0]
        # post-processing
        image = self.movq.decode(latents, force_not_quantize=True)["sample"]
        if output_type not in ["pt", "np", "pil"]:
            raise ValueError(f"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}")
        if output_type in ["np", "pil"]:
            image = image * 0.5 + 0.5
            image = image.clamp(0, 1)
            image = image.cpu().permute(0, 2, 3, 1).float().numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=image)
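# Hand-checked example of downscale_height_and_width with the default movq
# scale factor of 8: 768 // 8**2 == 12 with no remainder, so a 768x768 request
# yields downscale_height_and_width(768, 768) == (96, 96) -- a 96x96 latent
# grid that the movq decoder upsamples back to 768x768. A non-multiple such as
# 769 rounds up: 769 // 64 == 12 with a remainder, giving (13 * 8) == 104.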
| 355 | import argparse
import json
import numpy
import torch
from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def convert_xlm_checkpoint_to_pytorch(xlm_checkpoint_path, pytorch_dump_folder_path):
    # Load checkpoint
    chkpt = torch.load(xlm_checkpoint_path, map_location="cpu")
    state_dict = chkpt["model"]
    # We have the base model one level deeper than the original XLM repository
    two_levels_state_dict = {}
    for k, v in state_dict.items():
        if "pred_layer" in k:
            two_levels_state_dict[k] = v
        else:
            two_levels_state_dict["transformer." + k] = v
    config = chkpt["params"]
    config = {n: v for n, v in config.items() if not isinstance(v, (torch.FloatTensor, numpy.ndarray))}
    vocab = chkpt["dico_word2id"]
    vocab = {s + "</w>" if s.find("@@") == -1 and i > 13 else s.replace("@@", ""): i for s, i in vocab.items()}
    # Save pytorch-model
    pytorch_weights_dump_path = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
    pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
    pytorch_vocab_dump_path = pytorch_dump_folder_path + "/" + VOCAB_FILES_NAMES["vocab_file"]
    print(f"Save PyTorch model to {pytorch_weights_dump_path}")
    torch.save(two_levels_state_dict, pytorch_weights_dump_path)
    print(f"Save configuration file to {pytorch_config_dump_path}")
    with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
        f.write(json.dumps(config, indent=2) + "\n")
    print(f"Save vocab file to {pytorch_vocab_dump_path}")
    with open(pytorch_vocab_dump_path, "w", encoding="utf-8") as f:
        f.write(json.dumps(vocab, indent=2) + "\n")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--xlm_checkpoint_path", default=None, type=str, required=True, help="Path the official PyTorch dump."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_xlm_checkpoint_to_pytorch(args.xlm_checkpoint_path, args.pytorch_dump_folder_path)
| 258 | 0 |
import warnings
from ...utils import logging
from .image_processing_beit import BeitImageProcessor
__lowerCAmelCase = logging.get_logger(__name__)
class BeitFeatureExtractor(BeitImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class BeitFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use BeitImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 196 |
def fibonacci(n: int) -> int:
    if n == 1 or not isinstance(n, int):
        return 0
    elif n == 2:
        return 1
    else:
        sequence = [0, 1]
        for i in range(2, n + 1):
            sequence.append(sequence[i - 1] + sequence[i - 2])
        return sequence[n]
def fibonacci_digits_index(n: int) -> int:
    digits = 0
    index = 2
    while digits < n:
        index += 1
        digits = len(str(fibonacci(index)))
    return index
def solution(n: int = 1000) -> int:
    return fibonacci_digits_index(n)
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
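# Quick sanity checks (hand-computed from the definitions above):
#
#     fibonacci(7) == 13                  # 0, 1, 1, 2, 3, 5, 8, 13
#     fibonacci_digits_index(3) == 12     # F(12) = 144 is the first 3-digit Fibonacci number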
| 196 | 1 |
# This code is adapted from OpenAI's release
# https://github.com/openai/human-eval/blob/master/human_eval/execution.py
import contextlib
import faulthandler
import io
import multiprocessing
import os
import platform
import signal
import tempfile
def check_correctness(check_program, timeout, task_id, completion_id):
    """
    Evaluates the functional correctness of a completion by running the test
    suite provided in the problem.
    """
    manager = multiprocessing.Manager()
    result = manager.list()
    p = multiprocessing.Process(target=unsafe_execute, args=(check_program, result, timeout))
    p.start()
    p.join(timeout=timeout + 1)
    if p.is_alive():
        p.kill()
    if not result:
        result.append("timed out")
    return {
        "task_id": task_id,
        "passed": result[0] == "passed",
        "result": result[0],
        "completion_id": completion_id,
    }
def unsafe_execute(check_program, result, timeout):
    with create_tempdir():
        # These system calls are needed when cleaning up tempdir.
        import os
        import shutil
        rmtree = shutil.rmtree
        rmdir = os.rmdir
        chdir = os.chdir
        # Disable functionalities that can make destructive changes to the test.
        reliability_guard()
        # Run program.
        try:
            exec_globals = {}
            with swallow_io():
                with time_limit(timeout):
                    exec(check_program, exec_globals)
            result.append("passed")
        except TimeoutException:
            result.append("timed out")
        except BaseException as e:
            result.append(f"failed: {e}")
        # Needed for cleaning up.
        shutil.rmtree = rmtree
        os.rmdir = rmdir
        os.chdir = chdir
@contextlib.contextmanager
def time_limit(seconds):
    def signal_handler(signum, frame):
        raise TimeoutException("Timed out!")
    signal.setitimer(signal.ITIMER_REAL, seconds)
    signal.signal(signal.SIGALRM, signal_handler)
    try:
        yield
    finally:
        signal.setitimer(signal.ITIMER_REAL, 0)
@contextlib.contextmanager
def swallow_io():
    stream = WriteOnlyStringIO()
    with contextlib.redirect_stdout(stream):
        with contextlib.redirect_stderr(stream):
            with redirect_stdin(stream):
                yield
@contextlib.contextmanager
def create_tempdir():
    with tempfile.TemporaryDirectory() as dirname:
        with chdir(dirname):
            yield dirname
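# Minimal usage sketch for time_limit (assumes a POSIX system, since it relies
# on SIGALRM-based interval timers):
#
#     try:
#         with time_limit(1.0):
#             while True:
#                 pass
#     except TimeoutException:
#         print("interrupted after one second")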
class TimeoutException(Exception):
    pass
class WriteOnlyStringIO(io.StringIO):
    """StringIO that throws an exception when it's read from."""
    def read(self, *args, **kwargs):
        raise OSError
    def readline(self, *args, **kwargs):
        raise OSError
    def readlines(self, *args, **kwargs):
        raise OSError
    def readable(self, *args, **kwargs):
        """Returns True if the IO object can be read."""
        return False
class redirect_stdin(contextlib._RedirectStream):  # type: ignore
    _stream = "stdin"
@contextlib.contextmanager
def chdir(root):
    if root == ".":
        yield
        return
    cwd = os.getcwd()
    os.chdir(root)
    try:
        yield
    except BaseException as exc:
        raise exc
    finally:
        os.chdir(cwd)
def reliability_guard(maximum_memory_bytes=None):
    """
    Disables destructive functions and prevents the generated code from
    interfering with the test (e.g. fork bombs, killing other processes,
    removing filesystem files, etc.). WARNING: this is not a security sandbox;
    untrusted code should not be blindly executed outside of one.
    """
    if maximum_memory_bytes is not None:
        import resource
        resource.setrlimit(resource.RLIMIT_AS, (maximum_memory_bytes, maximum_memory_bytes))
        resource.setrlimit(resource.RLIMIT_DATA, (maximum_memory_bytes, maximum_memory_bytes))
        if not platform.uname().system == "Darwin":
            resource.setrlimit(resource.RLIMIT_STACK, (maximum_memory_bytes, maximum_memory_bytes))
    faulthandler.disable()
    import builtins
    builtins.exit = None
    builtins.quit = None
    import os
    os.environ["OMP_NUM_THREADS"] = "1"
    os.kill = None
    os.system = None
    os.putenv = None
    os.remove = None
    os.removedirs = None
    os.rmdir = None
    os.fchdir = None
    os.setuid = None
    os.fork = None
    os.forkpty = None
    os.killpg = None
    os.rename = None
    os.renames = None
    os.truncate = None
    os.replace = None
    os.unlink = None
    os.fchmod = None
    os.fchown = None
    os.chmod = None
    os.chown = None
    os.chroot = None
    os.fchdir = None
    os.lchflags = None
    os.lchmod = None
    os.lchown = None
    os.getcwd = None
    os.chdir = None
    import shutil
    shutil.rmtree = None
    shutil.move = None
    shutil.chown = None
    import subprocess
    subprocess.Popen = None  # type: ignore
    __builtins__["help"] = None
    import sys
    sys.modules["ipdb"] = None
    sys.modules["joblib"] = None
    sys.modules["resource"] = None
    sys.modules["psutil"] = None
    sys.modules["tkinter"] = None
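# Hedged usage sketch: `check_program` is a self-contained string containing a
# candidate completion plus its test harness. The values below are illustrative.
#
#     program = "def add(a, b):\n    return a + b\n\nassert add(1, 2) == 3\n"
#     print(check_correctness(program, timeout=3.0, task_id="demo/0", completion_id=0))
#     # -> {'task_id': 'demo/0', 'passed': True, 'result': 'passed', 'completion_id': 0}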
| 365 |
from functools import reduce
N = (
'''73167176531330624919225119674426574742355349194934'''
'''96983520312774506326239578318016984801869478851843'''
'''85861560789112949495459501737958331952853208805511'''
'''12540698747158523863050715693290963295227443043557'''
'''66896648950445244523161731856403098711121722383113'''
'''62229893423380308135336276614282806444486645238749'''
'''30358907296290491560440772390713810515859307960866'''
'''70172427121883998797908792274921901699720888093776'''
'''65727333001053367881220235421809751254540594752243'''
'''52584907711670556013604839586446706324415722155397'''
'''53697817977846174064955149290862569321978468622482'''
'''83972241375657056057490261407972968652414535100474'''
'''82166370484403199890008895243450658541227588666881'''
'''16427171479924442928230863465674813919123162824586'''
'''17866458359124566529476545682848912883142607690042'''
'''24219022671055626321111109370544217506941658960408'''
'''07198403850962455444362981230987879927244284909188'''
'''84580156166097919133875499200524063689912560717606'''
'''05886116467109405077541002256983155200055935729725'''
'''71636269561882670428252483600823257530420752963450'''
)
def solution(n: str = N) -> int:
    """
    Find the thirteen adjacent digits in the 1000-digit number n that have the
    greatest product and return that product.
    """
    return max(
        # mypy cannot properly interpret reduce
        int(reduce(lambda x, y: str(int(x) * int(y)), n[i : i + 13]))
        for i in range(len(n) - 12)
    )
if __name__ == "__main__":
print(F"{solution() = }")
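# Worked mini-example of the sliding-window product above (hand-checked): for a
# 4-digit window "9989", the reduce folds left to right as
# "9" * "9" -> "81", "81" * "8" -> "648", "648" * "9" -> "5832",
# i.e. 9 * 9 * 8 * 9 == 5832.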
| 218 | 0 |
import unittest
from transformers import DebertaV2Config, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
    from transformers import (
        DebertaV2ForMaskedLM,
        DebertaV2ForMultipleChoice,
        DebertaV2ForQuestionAnswering,
        DebertaV2ForSequenceClassification,
        DebertaV2ForTokenClassification,
        DebertaV2Model,
    )
    from transformers.models.deberta_v2.modeling_deberta_v2 import DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST
class DebertaV2ModelTester(object):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        relative_attention=False,
        position_biased_input=True,
        pos_att_type="None",
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.relative_attention = relative_attention
        self.position_biased_input = position_biased_input
        self.pos_att_type = pos_att_type
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
        return DebertaV2Config(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, relative_attention=self.relative_attention, position_biased_input=self.position_biased_input, pos_att_type=self.pos_att_type)
    def check_loss_output(self, result):
        self.parent.assertListEqual(list(result.loss.size()), [])
    def create_and_check_deberta_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = DebertaV2Model(config=config)
        model.to(torch_device)
        model.eval()
        sequence_output = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)[0]
        sequence_output = model(input_ids, token_type_ids=token_type_ids)[0]
        sequence_output = model(input_ids)[0]
        self.parent.assertListEqual(list(sequence_output.size()), [self.batch_size, self.seq_length, self.hidden_size])
    def create_and_check_deberta_for_masked_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = DebertaV2ForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_deberta_for_sequence_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = DebertaV2ForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertListEqual(list(result.logits.size()), [self.batch_size, self.num_labels])
        self.check_loss_output(result)
    def create_and_check_deberta_for_token_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = DebertaV2ForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def create_and_check_deberta_for_question_answering(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = DebertaV2ForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, start_positions=sequence_labels, end_positions=sequence_labels)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def create_and_check_deberta_for_multiple_choice(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = DebertaV2ForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids, attention_mask=multiple_choice_input_mask, token_type_ids=multiple_choice_token_type_ids, labels=choice_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class DebertaV2ModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            DebertaV2Model,
            DebertaV2ForMaskedLM,
            DebertaV2ForSequenceClassification,
            DebertaV2ForTokenClassification,
            DebertaV2ForQuestionAnswering,
            DebertaV2ForMultipleChoice,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": DebertaV2Model,
            "fill-mask": DebertaV2ForMaskedLM,
            "question-answering": DebertaV2ForQuestionAnswering,
            "text-classification": DebertaV2ForSequenceClassification,
            "token-classification": DebertaV2ForTokenClassification,
            "zero-shot": DebertaV2ForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = True
    test_torchscript = False
    test_pruning = False
    test_head_masking = False
    is_encoder_decoder = False
    def setUp(self):
        self.model_tester = DebertaV2ModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DebertaV2Config, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()
    def test_deberta_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_model(*config_and_inputs)
    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_sequence_classification(*config_and_inputs)
    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_masked_lm(*config_and_inputs)
    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_question_answering(*config_and_inputs)
    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_token_classification(*config_and_inputs)
    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_multiple_choice(*config_and_inputs)
    @slow
    def test_model_from_pretrained(self):
        for model_name in DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DebertaV2Model.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
@require_sentencepiece
@require_tokenizers
class DebertaV2ModelIntegrationTest(unittest.TestCase):
    @unittest.skip(reason="Model not available yet")
    def test_inference_masked_lm(self):
        pass
    @slow
    def test_inference_no_head(self):
        model = DebertaV2Model.from_pretrained("microsoft/deberta-v2-xlarge")
        input_ids = torch.tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        # compare the actual values for a slice.
        expected_slice = torch.tensor(
            [[[0.2356, 0.1948, 0.0369], [-0.1063, 0.3586, -0.5152], [-0.6399, -0.0259, -0.2525]]]
        )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4), f"{output[:, 1:4, 1:4]}")
| 62 |
import collections
import gzip
import os
import urllib
import numpy
from tensorflow.python.framework import dtypes, random_seed
from tensorflow.python.platform import gfile
from tensorflow.python.util.deprecation import deprecated
_Datasets = collections.namedtuple("_Datasets", ["train", "validation", "test"])
# CVDF mirror of http://yann.lecun.com/exdb/mnist/
DEFAULT_SOURCE_URL = "https://storage.googleapis.com/cvdf-datasets/mnist/"
def _read32(bytestream):
    dt = numpy.dtype(numpy.uint32).newbyteorder(">")
    return numpy.frombuffer(bytestream.read(4), dtype=dt)[0]
@deprecated(None, "Please use tf.data to implement this functionality.")
def _extract_images(f):
    """Extract the images into a 4D uint8 numpy array [index, y, x, depth]."""
    print("Extracting", f.name)
    with gzip.GzipFile(fileobj=f) as bytestream:
        magic = _read32(bytestream)
        if magic != 2051:
            raise ValueError("Invalid magic number %d in MNIST image file: %s" % (magic, f.name))
        num_images = _read32(bytestream)
        rows = _read32(bytestream)
        cols = _read32(bytestream)
        buf = bytestream.read(rows * cols * num_images)
        data = numpy.frombuffer(buf, dtype=numpy.uint8)
        data = data.reshape(num_images, rows, cols, 1)
        return data
@deprecated(None, "Please use tf.one_hot on tensors.")
def _dense_to_one_hot(labels_dense, num_classes):
    """Convert class labels from scalars to one-hot vectors."""
    num_labels = labels_dense.shape[0]
    index_offset = numpy.arange(num_labels) * num_classes
    labels_one_hot = numpy.zeros((num_labels, num_classes))
    labels_one_hot.flat[index_offset + labels_dense.ravel()] = 1
    return labels_one_hot
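# Worked example (hand-checked): _dense_to_one_hot(numpy.array([0, 2]), 3)
# returns [[1, 0, 0], [0, 0, 1]] -- row i has a single 1 at column labels[i],
# because the flat index for row i is i * num_classes + labels[i].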
@deprecated(None, "Please use tf.data to implement this functionality.")
def _extract_labels(f, one_hot=False, num_classes=10):
    """Extract the labels into a 1D uint8 numpy array [index]."""
    print("Extracting", f.name)
    with gzip.GzipFile(fileobj=f) as bytestream:
        magic = _read32(bytestream)
        if magic != 2049:
            raise ValueError("Invalid magic number %d in MNIST label file: %s" % (magic, f.name))
        num_items = _read32(bytestream)
        buf = bytestream.read(num_items)
        labels = numpy.frombuffer(buf, dtype=numpy.uint8)
        if one_hot:
            return _dense_to_one_hot(labels, num_classes)
        return labels
class _DataSet:
    """Container class for a _DataSet (deprecated)."""
    @deprecated(
        None,
        "Please use alternatives such as official/mnist/_DataSet.py"
        " from tensorflow/models.",
    )
    def __init__(self, images, labels, fake_data=False, one_hot=False, dtype=dtypes.float32, reshape=True, seed=None):
        seed1, seed2 = random_seed.get_seed(seed)
        # If op level seed is not set, use whatever graph level seed is returned
        numpy.random.seed(seed1 if seed is None else seed2)
        dtype = dtypes.as_dtype(dtype).base_dtype
        if dtype not in (dtypes.uint8, dtypes.float32):
            raise TypeError("Invalid image dtype %r, expected uint8 or float32" % dtype)
        if fake_data:
            self._num_examples = 10000
            self.one_hot = one_hot
        else:
            assert (
                images.shape[0] == labels.shape[0]
            ), f"images.shape: {images.shape} labels.shape: {labels.shape}"
            self._num_examples = images.shape[0]
            # Convert shape from [num examples, rows, columns, depth]
            # to [num examples, rows*columns] (assuming depth == 1)
            if reshape:
                assert images.shape[3] == 1
                images = images.reshape(images.shape[0], images.shape[1] * images.shape[2])
            if dtype == dtypes.float32:
                # Convert from [0, 255] -> [0.0, 1.0].
                images = images.astype(numpy.float32)
                images = numpy.multiply(images, 1.0 / 255.0)
        self._images = images
        self._labels = labels
        self._epochs_completed = 0
        self._index_in_epoch = 0
    @property
    def images(self):
        return self._images
    @property
    def labels(self):
        return self._labels
    @property
    def num_examples(self):
        return self._num_examples
    @property
    def epochs_completed(self):
        return self._epochs_completed
    def next_batch(self, batch_size, fake_data=False, shuffle=True):
        """Return the next `batch_size` examples from this data set."""
        if fake_data:
            fake_image = [1] * 784
            fake_label = [1] + [0] * 9 if self.one_hot else 0
            return (
                [fake_image for _ in range(batch_size)],
                [fake_label for _ in range(batch_size)],
            )
        start = self._index_in_epoch
        # Shuffle for the first epoch
        if self._epochs_completed == 0 and start == 0 and shuffle:
            perm0 = numpy.arange(self._num_examples)
            numpy.random.shuffle(perm0)
            self._images = self.images[perm0]
            self._labels = self.labels[perm0]
        # Go to the next epoch
        if start + batch_size > self._num_examples:
            # Finished epoch
            self._epochs_completed += 1
            # Get the rest examples in this epoch
            rest_num_examples = self._num_examples - start
            images_rest_part = self._images[start : self._num_examples]
            labels_rest_part = self._labels[start : self._num_examples]
            # Shuffle the data
            if shuffle:
                perm = numpy.arange(self._num_examples)
                numpy.random.shuffle(perm)
                self._images = self.images[perm]
                self._labels = self.labels[perm]
            # Start next epoch
            start = 0
            self._index_in_epoch = batch_size - rest_num_examples
            end = self._index_in_epoch
            images_new_part = self._images[start:end]
            labels_new_part = self._labels[start:end]
            return (
                numpy.concatenate((images_rest_part, images_new_part), axis=0),
                numpy.concatenate((labels_rest_part, labels_new_part), axis=0),
            )
        else:
            self._index_in_epoch += batch_size
            end = self._index_in_epoch
            return self._images[start:end], self._labels[start:end]
@deprecated(None, "Please write your own downloading logic.")
def _maybe_download(filename, work_directory, source_url):
    """Download the data from source url, unless it's already here."""
    if not gfile.Exists(work_directory):
        gfile.MakeDirs(work_directory)
    filepath = os.path.join(work_directory, filename)
    if not gfile.Exists(filepath):
        urllib.request.urlretrieve(source_url, filepath)  # noqa: S310
        with gfile.GFile(filepath) as f:
            size = f.size()
        print("Successfully downloaded", filename, size, "bytes.")
    return filepath
@deprecated(None, "Please use alternatives such as: tensorflow_datasets.load('mnist')")
def read_data_sets(
    train_dir,
    fake_data=False,
    one_hot=False,
    dtype=dtypes.float32,
    reshape=True,
    validation_size=5000,
    seed=None,
    source_url=DEFAULT_SOURCE_URL,
):
    if fake_data:
        def fake():
            return _DataSet([], [], fake_data=True, one_hot=one_hot, dtype=dtype, seed=seed)
        train = fake()
        validation = fake()
        test = fake()
        return _Datasets(train=train, validation=validation, test=test)
    if not source_url:  # empty string check
        source_url = DEFAULT_SOURCE_URL
    train_images_file = "train-images-idx3-ubyte.gz"
    train_labels_file = "train-labels-idx1-ubyte.gz"
    test_images_file = "t10k-images-idx3-ubyte.gz"
    test_labels_file = "t10k-labels-idx1-ubyte.gz"
    local_file = _maybe_download(train_images_file, train_dir, source_url + train_images_file)
    with gfile.Open(local_file, "rb") as f:
        train_images = _extract_images(f)
    local_file = _maybe_download(train_labels_file, train_dir, source_url + train_labels_file)
    with gfile.Open(local_file, "rb") as f:
        train_labels = _extract_labels(f, one_hot=one_hot)
    local_file = _maybe_download(test_images_file, train_dir, source_url + test_images_file)
    with gfile.Open(local_file, "rb") as f:
        test_images = _extract_images(f)
    local_file = _maybe_download(test_labels_file, train_dir, source_url + test_labels_file)
    with gfile.Open(local_file, "rb") as f:
        test_labels = _extract_labels(f, one_hot=one_hot)
    if not 0 <= validation_size <= len(train_images):
        msg = (
            "Validation size should be between 0 and "
            f"{len(train_images)}. Received: {validation_size}."
        )
        raise ValueError(msg)
    validation_images = train_images[:validation_size]
    validation_labels = train_labels[:validation_size]
    train_images = train_images[validation_size:]
    train_labels = train_labels[validation_size:]
    options = {"dtype": dtype, "reshape": reshape, "seed": seed}
    train = _DataSet(train_images, train_labels, **options)
    validation = _DataSet(validation_images, validation_labels, **options)
    test = _DataSet(test_images, test_labels, **options)
    return _Datasets(train=train, validation=validation, test=test)
| 62 | 1 |
"""simple docstring"""
import argparse
import logging
import os
import sys
import numpy as np
import onnxruntime
import torch
from bart_onnx.generation_onnx import BARTBeamSearchGenerator
from bart_onnx.reduce_onnx_size import remove_dup_initializers
import transformers
from transformers import BartForConditionalGeneration, BartTokenizer
logging.basicConfig(
format='''%(asctime)s | %(levelname)s | %(name)s | [%(filename)s:%(lineno)d] %(message)s''',
datefmt='''%Y-%m-%d %H:%M:%S''',
level=os.environ.get('''LOGLEVEL''', '''INFO''').upper(),
stream=sys.stdout,
)
logger = logging.getLogger(__name__)
model_dict = {"facebook/bart-base": BartForConditionalGeneration}
tokenizer_dict = {"facebook/bart-base": BartTokenizer}
def parse_args():
    parser = argparse.ArgumentParser(description="Export Bart model + Beam Search to ONNX graph.")
    parser.add_argument(
        "--validation_file", type=str, default=None, help="A csv or a json file containing the validation data."
    )
    parser.add_argument(
        "--max_length", type=int, default=5, help="The maximum total input sequence length after tokenization."
    )
    parser.add_argument(
        "--num_beams", type=int, default=None, help=(
            "Number of beams to use for evaluation. This argument will be "
            "passed to ``model.generate``, which is used during ``evaluate`` and ``predict``."
        ),
    )
    parser.add_argument(
        "--model_name_or_path", type=str, help="Path to pretrained model or model identifier from huggingface.co/models.", required=True,
    )
    parser.add_argument(
        "--config_name", type=str, default=None, help="Pretrained config name or path if not the same as model_name",
    )
    parser.add_argument(
        "--device", type=str, default="cpu", help="Device where the model will be run",
    )
    parser.add_argument("--output_file_path", type=str, default=None, help="Where to store the final ONNX file.")
    args = parser.parse_args()
    return args
def load_model_tokenizer(model_name, device="cpu"):
    huggingface_model = model_dict[model_name].from_pretrained(model_name).to(device)
    tokenizer = tokenizer_dict[model_name].from_pretrained(model_name)
    if model_name in ["facebook/bart-base"]:
        huggingface_model.config.no_repeat_ngram_size = 0
        huggingface_model.config.forced_bos_token_id = None
        huggingface_model.config.min_length = 0
    return huggingface_model, tokenizer
def export_and_validate_model(model, tokenizer, onnx_file_path, num_beams, max_length):
    model.eval()
    ort_sess = None
    bart_script_model = torch.jit.script(BARTBeamSearchGenerator(model))
    with torch.no_grad():
        ARTICLE_TO_SUMMARIZE = "My friends are cool but they eat too many carbs."
        inputs = tokenizer([ARTICLE_TO_SUMMARIZE], max_length=1024, return_tensors="pt").to(model.device)
        summary_ids = model.generate(
            inputs["input_ids"], attention_mask=inputs["attention_mask"], num_beams=num_beams, max_length=max_length, early_stopping=True, decoder_start_token_id=model.config.decoder_start_token_id,
        )
        torch.onnx.export(
            bart_script_model,
            (
                inputs["input_ids"],
                inputs["attention_mask"],
                num_beams,
                max_length,
                model.config.decoder_start_token_id,
            ),
            onnx_file_path,
            opset_version=14,
            input_names=["input_ids", "attention_mask", "num_beams", "max_length", "decoder_start_token_id"],
            output_names=["output_ids"],
            dynamic_axes={
                "input_ids": {0: "batch", 1: "seq"},
                "output_ids": {0: "batch", 1: "seq_out"},
            },
            example_outputs=summary_ids,
        )
        logger.info("Model exported to {}".format(onnx_file_path))
        new_onnx_file_path = remove_dup_initializers(os.path.abspath(onnx_file_path))
        logger.info("Deduplicated and optimized model written to {}".format(new_onnx_file_path))
        ort_sess = onnxruntime.InferenceSession(new_onnx_file_path)
        ort_out = ort_sess.run(
            None,
            {
                "input_ids": inputs["input_ids"].cpu().numpy(),
                "attention_mask": inputs["attention_mask"].cpu().numpy(),
                "num_beams": np.array(num_beams),
                "max_length": np.array(max_length),
                "decoder_start_token_id": np.array(model.config.decoder_start_token_id),
            },
        )
        np.testing.assert_allclose(summary_ids.cpu().numpy(), ort_out[0], rtol=1e-3, atol=1e-3)
        logger.info("Model outputs from torch and ONNX Runtime are similar.")
        logger.info("Success.")
def main():
    args = parse_args()
    max_length = 5
    num_beams = 4
    # Make one log on every process with the configuration for debugging.
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO,
    )
    logger.setLevel(logging.INFO)
    transformers.utils.logging.set_verbosity_error()
    device = torch.device(args.device)
    model, tokenizer = load_model_tokenizer(args.model_name_or_path, device)
    if model.config.decoder_start_token_id is None:
        raise ValueError("Make sure that `config.decoder_start_token_id` is correctly defined")
    model.to(device)
    if args.max_length:
        max_length = args.max_length
    if args.num_beams:
        num_beams = args.num_beams
    if args.output_file_path:
        output_name = args.output_file_path
    else:
        output_name = "BART.onnx"
    logger.info("Exporting model to ONNX")
    export_and_validate_model(model, tokenizer, output_name, num_beams, max_length)
if __name__ == "__main__":
main()
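# Example invocation (hypothetical script name and paths; the flags are the
# ones parse_args defines above):
#
#     python run_onnx_exporter.py --model_name_or_path facebook/bart-base \
#         --num_beams 4 --max_length 5 --output_file_path BART.onnx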
| 360 | """simple docstring"""
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import numpy as np
from utils_multiple_choice import MultipleChoiceDataset, Split, processors
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
__UpperCamelCase = logging.getLogger(__name__)
def simple_accuracy(preds, labels):
    return (preds == labels).mean()
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
    """
    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """
    task_name: str = field(metadata={"help": "The name of the task to train on: " + ", ".join(processors.keys())})
    data_dir: str = field(metadata={"help": "Should contain the data files for the task."})
    max_seq_length: int = field(
        default=128, metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f'Output directory ({training_args.output_dir}) already exists and is not empty. Use'
' --overwrite_output_dir to overcome.' )
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
'Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fpaa , )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info('Training/evaluation parameters %s' , UpperCAmelCase )
# Set seed
set_seed(training_args.seed )
    try:
        processor = processors[data_args.task_name]()
        label_list = processor.get_labels()
        num_labels = len(label_list)
    except KeyError:
        raise ValueError("Task not found: %s" % (data_args.task_name))
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path, num_labels=num_labels, finetuning_task=data_args.task_name, cache_dir=model_args.cache_dir, )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, )
    model = AutoModelForMultipleChoice.from_pretrained(
        model_args.model_name_or_path, from_tf=bool(".ckpt" in model_args.model_name_or_path), config=config, cache_dir=model_args.cache_dir, )
    # Get datasets
    train_dataset = (
        MultipleChoiceDataset(
            data_dir=data_args.data_dir, tokenizer=tokenizer, task=data_args.task_name, max_seq_length=data_args.max_seq_length, overwrite_cache=data_args.overwrite_cache, mode=Split.train, )
        if training_args.do_train
        else None
    )
    eval_dataset = (
        MultipleChoiceDataset(
            data_dir=data_args.data_dir, tokenizer=tokenizer, task=data_args.task_name, max_seq_length=data_args.max_seq_length, overwrite_cache=data_args.overwrite_cache, mode=Split.dev, )
        if training_args.do_eval
        else None
    )
    def compute_metrics(p: EvalPrediction) -> Dict:
        preds = np.argmax(p.predictions, axis=1)
        return {"acc": simple_accuracy(preds, p.label_ids)}
    # Data collator
    data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8) if training_args.fp16 else None
    # Initialize our Trainer
    trainer = Trainer(
        model=model, args=training_args, train_dataset=train_dataset, eval_dataset=eval_dataset, compute_metrics=compute_metrics, data_collator=data_collator, )
# Training
if training_args.do_train:
trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_master():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")
        result = trainer.evaluate()
        output_eval_file = os.path.join(training_args.output_dir, "eval_results.txt")
        if trainer.is_world_master():
            with open(output_eval_file, "w") as writer:
                logger.info("***** Eval results *****")
                for key, value in result.items():
                    logger.info("  %s = %s", key, value)
                    writer.write("%s = %s\n" % (key, value))
            results.update(result)
return results
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
| 312 | 0 |
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from transformers import (
VisualBertConfig,
VisualBertForMultipleChoice,
VisualBertForPreTraining,
VisualBertForQuestionAnswering,
VisualBertForVisualReasoning,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
rename_keys_prefix = [
    ("bert.bert", "visual_bert"),
    ("bert.cls", "cls"),
    ("bert.classifier", "cls"),
    ("token_type_embeddings_visual", "visual_token_type_embeddings"),
    ("position_embeddings_visual", "visual_position_embeddings"),
    ("projection", "visual_projection"),
]
ACCEPTABLE_CHECKPOINTS = [
    "nlvr2_coco_pre_trained.th",
    "nlvr2_fine_tuned.th",
    "nlvr2_pre_trained.th",
    "vcr_coco_pre_train.th",
    "vcr_fine_tune.th",
    "vcr_pre_train.th",
    "vqa_coco_pre_trained.th",
    "vqa_fine_tuned.th",
    "vqa_pre_trained.th",
]
def load_state_dict(checkpoint_path):
    sd = torch.load(checkpoint_path, map_location="cpu")
    return sd
def get_new_dict(d, config, rename_keys_prefix=rename_keys_prefix):
    new_d = OrderedDict()
    new_d["visual_bert.embeddings.position_ids"] = torch.arange(config.max_position_embeddings).expand((1, -1))
    # detector_d = OrderedDict()
    for key in d:
        if "detector" in key:
            # detector_d[key.replace('detector.','')] = d[key]
            continue
        new_key = key
        for name_pair in rename_keys_prefix:
            new_key = new_key.replace(name_pair[0], name_pair[1])
        new_d[new_key] = d[key]
        if key == "bert.cls.predictions.decoder.weight":
            # Old bert code didn't have `decoder.bias`, but was added separately
            new_d["cls.predictions.decoder.bias"] = new_d["cls.predictions.bias"]
    return new_d
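# Hand-traced example of the prefix renaming in get_new_dict: the checkpoint key
# "bert.bert.encoder.layer.0.output.dense.weight" matches the ("bert.bert",
# "visual_bert") pair above and becomes
# "visual_bert.encoder.layer.0.output.dense.weight".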
@torch.no_grad()
def convert_visual_bert_checkpoint(checkpoint_path, pytorch_dump_folder_path):
    """
    Copy/paste/tweak the original model's weights to our VisualBERT structure.
    """
    assert (
        checkpoint_path.split("/")[-1] in ACCEPTABLE_CHECKPOINTS
    ), f"The checkpoint provided must be in {ACCEPTABLE_CHECKPOINTS}."
    # Get Config
    if "pre" in checkpoint_path:
        model_type = "pretraining"
        if "vcr" in checkpoint_path:
            config_params = {"visual_embedding_dim": 512}
        elif "vqa_advanced" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048}
        elif "vqa" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048}
        elif "nlvr" in checkpoint_path:
            config_params = {"visual_embedding_dim": 1024}
        else:
            raise NotImplementedError(f"No implementation found for `{checkpoint_path}`.")
    else:
        if "vcr" in checkpoint_path:
            config_params = {"visual_embedding_dim": 512}
            model_type = "multichoice"
        elif "vqa_advanced" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048}
            model_type = "vqa_advanced"
        elif "vqa" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048, "num_labels": 3129}
            model_type = "vqa"
        elif "nlvr" in checkpoint_path:
            config_params = {
                "visual_embedding_dim": 1024,
                "num_labels": 2,
            }
            model_type = "nlvr"
    config = VisualBertConfig(**config_params)
    # Load State Dict
    state_dict = load_state_dict(checkpoint_path)
    new_state_dict = get_new_dict(state_dict, config)
    if model_type == "pretraining":
        model = VisualBertForPreTraining(config)
    elif model_type == "vqa":
        model = VisualBertForQuestionAnswering(config)
    elif model_type == "nlvr":
        model = VisualBertForVisualReasoning(config)
    elif model_type == "multichoice":
        model = VisualBertForMultipleChoice(config)
    model.load_state_dict(new_state_dict)
    # Save Checkpoints
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument("""orig_checkpoint_path""", type=str, help="""A path to .th on local filesystem.""")
parser.add_argument("""pytorch_dump_folder_path""", type=str, help="""Path to the output PyTorch model.""")
    args = parser.parse_args()
convert_visual_bert_checkpoint(args.orig_checkpoint_path, args.pytorch_dump_folder_path)
| 73 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/mobilenet_v2_1.4_224": "https://huggingface.co/google/mobilenet_v2_1.4_224/resolve/main/config.json",
    "google/mobilenet_v2_1.0_224": "https://huggingface.co/google/mobilenet_v2_1.0_224/resolve/main/config.json",
    "google/mobilenet_v2_0.75_160": "https://huggingface.co/google/mobilenet_v2_0.75_160/resolve/main/config.json",
    "google/mobilenet_v2_0.35_96": "https://huggingface.co/google/mobilenet_v2_0.35_96/resolve/main/config.json",
    # See all MobileNetV2 models at https://huggingface.co/models?filter=mobilenet_v2
}
class MobileNetV2Config(PretrainedConfig):
    model_type = "mobilenet_v2"
    def __init__(
        self,
        num_channels=3,
        image_size=224,
        depth_multiplier=1.0,
        depth_divisible_by=8,
        min_depth=8,
        expand_ratio=6,
        output_stride=32,
        first_layer_is_expansion=True,
        finegrained_output=True,
        hidden_act="relu6",
        tf_padding=True,
        classifier_dropout_prob=0.8,
        initializer_range=0.02,
        layer_norm_eps=0.001,
        semantic_loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)
        if depth_multiplier <= 0:
            raise ValueError("depth_multiplier must be greater than zero.")
        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.depth_divisible_by = depth_divisible_by
        self.min_depth = min_depth
        self.expand_ratio = expand_ratio
        self.output_stride = output_stride
        self.first_layer_is_expansion = first_layer_is_expansion
        self.finegrained_output = finegrained_output
        self.hidden_act = hidden_act
        self.tf_padding = tf_padding
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
class MobileNetV2OnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict([("pixel_values", {0: "batch"})])
    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "image-classification":
            return OrderedDict([("logits", {0: "batch"})])
        else:
            return OrderedDict([("last_hidden_state", {0: "batch"}), ("pooler_output", {0: "batch"})])
    @property
    def atol_for_validation(self) -> float:
        return 1e-4
| 283 | 0 |
import socket
def __lowercase ( ):
UpperCamelCase_ : Optional[int] = socket.socket(socket.AF_INET , socket.SOCK_STREAM )
UpperCamelCase_ : Optional[int] = socket.gethostname()
UpperCamelCase_ : Dict = 12312
sock.connect((host, port) )
sock.send(B'Hello server!' )
with open('Received_file' , 'wb' ) as out_file:
print('File opened' )
print('Receiving data...' )
while True:
UpperCamelCase_ : List[str] = sock.recv(1024 )
if not data:
break
out_file.write(lowerCamelCase )
print('Successfully received the file' )
sock.close()
print('Connection closed' )
if __name__ == "__main__":
main()
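# A minimal sketch of the matching server side, assuming the same host and port
# as the client above; the filename 'File_to_send' is a hypothetical placeholder
# and not part of the original script.
def serve_file():
    server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    server.bind((socket.gethostname(), 12312))
    server.listen(1)
    conn, _ = server.accept()
    conn.recv(1024)  # consume the client's greeting
    with open('File_to_send', 'rb') as in_file:
        data = in_file.read(1024)
        while data:
            conn.send(data)
            data = in_file.read(1024)
    conn.close()
    server.close()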
| 50 | import tempfile
import unittest
from transformers import TaConfig, is_torch_available
from transformers.testing_utils import (
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
torch_device,
)
from ...generation.test_utils import GenerationTesterMixin
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import AutoTokenizer, UMTaForConditionalGeneration, UMTaForQuestionAnswering, UMTaModel
class _lowercase :
def __init__( self : List[Any] , snake_case : int , snake_case : Any=9_9 , snake_case : Tuple=1_3 , snake_case : str=7 , snake_case : List[str]=9 , snake_case : Optional[Any]=True , snake_case : Any=True , snake_case : Optional[Any]=False , snake_case : List[str]=3_2 , snake_case : str=5 , snake_case : Any=4 , snake_case : List[str]=3_7 , snake_case : Optional[Any]=8 , snake_case : Optional[Any]=0.1 , snake_case : Dict=0.002 , snake_case : Any=1 , snake_case : Optional[int]=0 , snake_case : List[str]=0 , snake_case : List[str]=None , snake_case : List[str]=None , ) -> Optional[int]:
"""simple docstring"""
UpperCamelCase_ : int = parent
UpperCamelCase_ : List[Any] = batch_size
UpperCamelCase_ : int = encoder_seq_length
UpperCamelCase_ : int = decoder_seq_length
# For common tests
UpperCamelCase_ : List[Any] = self.decoder_seq_length
UpperCamelCase_ : Optional[Any] = is_training
UpperCamelCase_ : Tuple = use_attention_mask
UpperCamelCase_ : int = use_labels
UpperCamelCase_ : List[str] = vocab_size
UpperCamelCase_ : Dict = hidden_size
UpperCamelCase_ : Any = num_hidden_layers
UpperCamelCase_ : Any = num_attention_heads
UpperCamelCase_ : Dict = d_ff
UpperCamelCase_ : List[Any] = relative_attention_num_buckets
UpperCamelCase_ : List[Any] = dropout_rate
UpperCamelCase_ : Dict = initializer_factor
UpperCamelCase_ : Union[str, Any] = eos_token_id
UpperCamelCase_ : Optional[int] = pad_token_id
UpperCamelCase_ : List[str] = decoder_start_token_id
UpperCamelCase_ : str = None
UpperCamelCase_ : int = decoder_layers
def SCREAMING_SNAKE_CASE__ ( self : List[Any] ) -> List[str]:
"""simple docstring"""
return TaConfig.from_pretrained('google/umt5-base' )
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] , snake_case : Optional[int] , snake_case : Any , snake_case : Optional[int] , snake_case : Optional[int]=None , snake_case : List[Any]=None , snake_case : int=None , snake_case : Optional[int]=None , snake_case : Tuple=None , ) -> List[str]:
"""simple docstring"""
if attention_mask is None:
UpperCamelCase_ : Optional[Any] = input_ids.ne(config.pad_token_id )
if decoder_attention_mask is None:
UpperCamelCase_ : Optional[int] = decoder_input_ids.ne(config.pad_token_id )
if head_mask is None:
UpperCamelCase_ : Optional[int] = torch.ones(config.num_hidden_layers , config.num_attention_heads , device=snake_case )
if decoder_head_mask is None:
UpperCamelCase_ : Dict = torch.ones(config.num_decoder_layers , config.num_attention_heads , device=snake_case )
if cross_attn_head_mask is None:
UpperCamelCase_ : Optional[Any] = torch.ones(
config.num_decoder_layers , config.num_attention_heads , device=snake_case )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
def SCREAMING_SNAKE_CASE__ ( self : Dict ) -> Union[str, Any]:
"""simple docstring"""
UpperCamelCase_ : Union[str, Any] = ids_tensor([self.batch_size, self.encoder_seq_length] , self.vocab_size )
UpperCamelCase_ : Any = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size )
# we need to clamp the input ids here to avoid having pad token in between
# this is because for NllbMoe the position_ids are prepared such that
# all pad tokens have pos id = 2 and rest are between 2..seq_length
# and the seq_length here is seq_length - num_pad_tokens
# but when using past, there is no way of knowing if the past input ids had
        # pad tokens in them, which results in an incorrect seq_length and, in turn, in
        # position_ids being off by num_pad_tokens in the past input
UpperCamelCase_ : Union[str, Any] = input_ids.clamp(self.pad_token_id + 1 )
UpperCamelCase_ : Any = decoder_input_ids.clamp(self.pad_token_id + 1 )
UpperCamelCase_ : Dict = self.get_config()
UpperCamelCase_ : Dict = config.num_attention_heads
UpperCamelCase_ : Optional[int] = self.prepare_inputs_dict(snake_case , snake_case , snake_case )
return config, input_dict
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> Tuple:
"""simple docstring"""
UpperCamelCase_, UpperCamelCase_ : Any = self.prepare_config_and_inputs()
return config, inputs_dict
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> Dict:
"""simple docstring"""
return TaConfig(
vocab_size=1_6_6 , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , )
def SCREAMING_SNAKE_CASE__ ( self : int ) -> Optional[int]:
"""simple docstring"""
return TaConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , )
def SCREAMING_SNAKE_CASE__ ( self : str , snake_case : Dict , snake_case : List[str] , snake_case : Tuple , snake_case : int , snake_case : List[str] , snake_case : Optional[Any] , ) -> Tuple:
"""simple docstring"""
UpperCamelCase_ : int = UMTaModel(config=snake_case )
model.to(snake_case )
model.eval()
UpperCamelCase_ : Any = model(
input_ids=snake_case , decoder_input_ids=snake_case , attention_mask=snake_case , decoder_attention_mask=snake_case , )
UpperCamelCase_ : List[str] = model(input_ids=snake_case , decoder_input_ids=snake_case )
UpperCamelCase_ : Optional[Any] = result.last_hidden_state
UpperCamelCase_ : Optional[Any] = result.past_key_values
UpperCamelCase_ : Optional[int] = result.encoder_last_hidden_state
self.parent.assertEqual(encoder_output.size() , (self.batch_size, self.encoder_seq_length, self.hidden_size) )
self.parent.assertEqual(decoder_output.size() , (self.batch_size, self.decoder_seq_length, self.hidden_size) )
# There should be `num_layers` key value embeddings stored in decoder_past
self.parent.assertEqual(len(snake_case ) , config.num_layers )
# There should be a self attn key, a self attn value, a cross attn key and a cross attn value stored in each decoder_past tuple
self.parent.assertEqual(len(decoder_past[0] ) , 4 )
def SCREAMING_SNAKE_CASE__ ( self : Any , snake_case : Tuple , snake_case : List[Any] , snake_case : Optional[int] , snake_case : Any , snake_case : Tuple , snake_case : str , ) -> Union[str, Any]:
"""simple docstring"""
UpperCamelCase_ : int = UMTaModel(config=snake_case ).get_decoder().to(snake_case ).eval()
# first forward pass
UpperCamelCase_ : str = model(snake_case , use_cache=snake_case )
UpperCamelCase_ : List[Any] = model(snake_case )
UpperCamelCase_ : Dict = model(snake_case , use_cache=snake_case )
self.parent.assertTrue(len(snake_case ) == len(snake_case ) )
self.parent.assertTrue(len(snake_case ) == len(snake_case ) + 1 )
UpperCamelCase_, UpperCamelCase_ : Optional[Any] = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
UpperCamelCase_ : Any = ids_tensor((self.batch_size, 1) , config.vocab_size )
# append to next input_ids and
UpperCamelCase_ : List[Any] = torch.cat([input_ids, next_tokens] , dim=-1 )
UpperCamelCase_ : List[Any] = model(snake_case )['last_hidden_state']
UpperCamelCase_ : List[str] = model(snake_case , past_key_values=snake_case )['last_hidden_state']
# select random slice
UpperCamelCase_ : List[str] = ids_tensor((1,) , output_from_past.shape[-1] ).item()
UpperCamelCase_ : Union[str, Any] = output_from_no_past[:, -1, random_slice_idx].detach()
UpperCamelCase_ : Any = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(snake_case , snake_case , atol=1e-3 ) )
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] , snake_case : Tuple , snake_case : int , ) -> Union[str, Any]:
"""simple docstring"""
UpperCamelCase_ : Optional[int] = UMTaModel(config=snake_case ).to(snake_case ).half().eval()
UpperCamelCase_ : Union[str, Any] = model(**snake_case )['last_hidden_state']
self.parent.assertFalse(torch.isnan(snake_case ).any().item() )
@require_torch
class _lowercase ( snake_case_ , snake_case_ , snake_case_ , unittest.TestCase ):
lowercase = (
(UMTaModel, UMTaForConditionalGeneration, UMTaForQuestionAnswering) if is_torch_available() else ()
)
lowercase = (UMTaForConditionalGeneration,) if is_torch_available() else ()
lowercase = (
{
'conversational': UMTaForConditionalGeneration,
'feature-extraction': UMTaModel,
'summarization': UMTaForConditionalGeneration,
'text2text-generation': UMTaForConditionalGeneration,
'translation': UMTaForConditionalGeneration,
'question-answering': UMTaForQuestionAnswering,
}
if is_torch_available()
else {}
)
lowercase = True
lowercase = False
lowercase = False
lowercase = True
lowercase = True
# The small UMT5 model needs higher percentages for CPU/MP tests
lowercase = [0.8, 0.9]
def SCREAMING_SNAKE_CASE__ ( self : Dict ) -> List[Any]:
"""simple docstring"""
UpperCamelCase_ : Optional[Any] = UMTaModelTester(self )
@unittest.skip('Test has a segmentation fault on torch 1.8.0' )
def SCREAMING_SNAKE_CASE__ ( self : str ) -> List[str]:
"""simple docstring"""
UpperCamelCase_ : List[str] = self.model_tester.prepare_config_and_inputs()
UpperCamelCase_ : List[str] = UMTaModel(config_and_inputs[0] ).to(snake_case )
with tempfile.TemporaryDirectory() as tmpdirname:
torch.onnx.export(
snake_case , (config_and_inputs[1], config_and_inputs[3], config_and_inputs[2]) , f"{tmpdirname}/t5_test.onnx" , export_params=snake_case , opset_version=9 , input_names=['input_ids', 'decoder_input_ids'] , )
@unittest.skipIf(torch_device == 'cpu' , 'Cant do half precision' )
def SCREAMING_SNAKE_CASE__ ( self : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
UpperCamelCase_ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model_fpaa_forward(*snake_case )
def SCREAMING_SNAKE_CASE__ ( self : List[Any] ) -> Optional[int]:
"""simple docstring"""
UpperCamelCase_ : Tuple = ['encoder_attentions', 'decoder_attentions', 'cross_attentions']
UpperCamelCase_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
UpperCamelCase_ : Union[str, Any] = config_and_inputs[0]
UpperCamelCase_ : Tuple = UMTaForConditionalGeneration(snake_case ).eval()
model.to(snake_case )
UpperCamelCase_ : str = {
'head_mask': torch.zeros(config.num_layers , config.num_heads , device=snake_case ),
'decoder_head_mask': torch.zeros(config.num_decoder_layers , config.num_heads , device=snake_case ),
'cross_attn_head_mask': torch.zeros(config.num_decoder_layers , config.num_heads , device=snake_case ),
}
for attn_name, (name, mask) in zip(snake_case , head_masking.items() ):
UpperCamelCase_ : Optional[int] = {name: mask}
# Explicitly pass decoder_head_mask as it is required from T5 model when head_mask specified
if name == "head_mask":
UpperCamelCase_ : Union[str, Any] = torch.ones(
config.num_decoder_layers , config.num_heads , device=snake_case )
UpperCamelCase_ : Any = model.generate(
config_and_inputs[1]['input_ids'] , num_beams=1 , max_length=3 , output_attentions=snake_case , return_dict_in_generate=snake_case , **snake_case , )
# We check the state of decoder_attentions and cross_attentions just from the last step
UpperCamelCase_ : int = out[attn_name] if attn_name == attention_names[0] else out[attn_name][-1]
self.assertEqual(sum([w.sum().item() for w in attn_weights] ) , 0.0 )
@unittest.skip('Does not work on the tiny model as we keep hitting edge cases.' )
def SCREAMING_SNAKE_CASE__ ( self : Tuple ) -> Optional[Any]:
"""simple docstring"""
pass
@require_torch
@require_sentencepiece
@require_tokenizers
class _lowercase ( unittest.TestCase ):
@slow
@unittest.skip(
'Unless we stop stripping left and right by default for all special tokens, the expected ids obtained here will not match the original ones. Wait for https://github.com/huggingface/transformers/pull/23909 to be merged' )
def SCREAMING_SNAKE_CASE__ ( self : Any ) -> List[Any]:
"""simple docstring"""
UpperCamelCase_ : str = UMTaForConditionalGeneration.from_pretrained('google/umt5-small' , return_dict=snake_case ).to(snake_case )
UpperCamelCase_ : int = AutoTokenizer.from_pretrained('google/umt5-small' , use_fast=snake_case , legacy=snake_case )
UpperCamelCase_ : Dict = [
'Bonjour monsieur <extra_id_0> bien <extra_id_1>.',
'No se como puedo <extra_id_0>.',
'This is the reason why we <extra_id_0> them.',
'The <extra_id_0> walks in <extra_id_1>, seats',
'A <extra_id_0> walks into a bar and orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.',
]
UpperCamelCase_ : Dict = tokenizer(snake_case , return_tensors='pt' , padding=snake_case ).input_ids
# fmt: off
UpperCamelCase_ : List[str] = torch.tensor(
[
[ 3_8_5_3_0, 2_1_0_7_0_3, 2_5_6_2_9_9, 1_4_1_0, 2_5_6_2_9_8, 2_7_4, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0],
[ 8_2_6, 3_2_1, 6_7_1, 2_5_9_2_2, 2_5_6_2_9_9, 2_7_4, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0],
[ 1_4_6_0, 3_3_9, 3_1_2, 1_9_0_1_4, 1_0_6_2_0, 7_5_8, 2_5_6_2_9_9, 2_3_5_5,2_7_4, 1, 0, 0, 0, 0, 0, 0,0, 0],
[ 5_1_7, 2_5_6_2_9_9, 1_4_8_6_9, 2_8_1, 3_0_1, 2_5_6_2_9_8, 2_7_5, 1_1_9_9_8_3,1, 0, 0, 0, 0, 0, 0, 0,0, 0],
[ 3_2_0, 2_5_6_2_9_9, 1_4_8_6_9, 2_8_1, 2_2_3_4, 2_8_9, 2_2_7_5, 3_3_3,6_1_3_9_1, 2_8_9, 2_5_6_2_9_8, 5_4_3, 2_5_6_2_9_7, 1_6_8_7_1_4, 3_2_9, 2_5_6_2_9_6,2_7_4, 1],
] )
# fmt: on
torch.testing.assert_allclose(snake_case , snake_case )
UpperCamelCase_ : int = model.generate(input_ids.to(snake_case ) )
UpperCamelCase_ : List[Any] = [
'<pad><extra_id_0> et<extra_id_1> [eod] <extra_id_2><extra_id_55>.. [eod] 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 <extra_id_56>ajšietosto<extra_id_56>lleux<extra_id_19><extra_id_6>ajšie</s>',
'<pad><extra_id_0>.<extra_id_1>.,<0x0A>...spech <0x0A><extra_id_20> <extra_id_21></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>',
'<pad><extra_id_0> are not going to be a part of the world. We are not going to be a part of<extra_id_1> and<extra_id_2><0x0A><extra_id_48>.<extra_id_48></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>',
'<pad><extra_id_0> door<extra_id_1>, the door<extra_id_2> 피해[/</s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>',
'<pad><extra_id_0>nyone who<extra_id_1> drink<extra_id_2> a<extra_id_3> alcohol<extra_id_4> A<extra_id_5> A. This<extra_id_6> I<extra_id_7><extra_id_52><extra_id_53></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>',
]
UpperCamelCase_ : Dict = tokenizer.batch_decode(snake_case )
self.assertEqual(snake_case , snake_case )
| 50 | 1 |
'''simple docstring'''
import copy
import random
from transformers import CLIPTokenizer
class __UpperCAmelCase ( A__ ):
'''simple docstring'''
def __init__(self : Union[str, Any] , *_lowerCAmelCase : int , **_lowerCAmelCase : Any ):
super().__init__(*_lowerCAmelCase , **_lowerCAmelCase )
A = {}
def A (self : Union[str, Any] , _lowerCAmelCase : List[Any] , *_lowerCAmelCase : Union[str, Any] , **_lowerCAmelCase : int ):
A = super().add_tokens(_lowerCAmelCase , *_lowerCAmelCase , **_lowerCAmelCase )
if num_added_tokens == 0:
raise ValueError(
F"""The tokenizer already contains the token {placeholder_token}. Please pass a different"""
""" `placeholder_token` that is not already in the tokenizer.""" )
def A (self : Tuple , _lowerCAmelCase : Dict , *_lowerCAmelCase : Optional[int] , _lowerCAmelCase : Optional[int]=1 , **_lowerCAmelCase : List[str] ):
A = []
if num_vec_per_token == 1:
self.try_adding_tokens(_lowerCAmelCase , *_lowerCAmelCase , **_lowerCAmelCase )
output.append(_lowerCAmelCase )
else:
A = []
for i in range(_lowerCAmelCase ):
A = placeholder_token + F"""_{i}"""
self.try_adding_tokens(_lowerCAmelCase , *_lowerCAmelCase , **_lowerCAmelCase )
output.append(_lowerCAmelCase )
# handle cases where there is a new placeholder token that contains the current placeholder token but is larger
for token in self.token_map:
if token in placeholder_token:
raise ValueError(
F"""The tokenizer already has placeholder token {token} that can get confused with"""
F""" {placeholder_token}keep placeholder tokens independent""" )
A = output
def A (self : Tuple , _lowerCAmelCase : str , _lowerCAmelCase : Optional[int]=False , _lowerCAmelCase : Any=1.0 ):
if isinstance(_lowerCAmelCase , _lowerCAmelCase ):
A = []
for i in range(len(_lowerCAmelCase ) ):
output.append(self.replace_placeholder_tokens_in_text(text[i] , vector_shuffle=_lowerCAmelCase ) )
return output
for placeholder_token in self.token_map:
if placeholder_token in text:
A = self.token_map[placeholder_token]
A = tokens[: 1 + int(len(_lowerCAmelCase ) * prop_tokens_to_load )]
if vector_shuffle:
A = copy.copy(_lowerCAmelCase )
random.shuffle(_lowerCAmelCase )
A = text.replace(_lowerCAmelCase , """ """.join(_lowerCAmelCase ) )
return text
def __call__(self : Tuple , _lowerCAmelCase : Optional[Any] , *_lowerCAmelCase : List[Any] , _lowerCAmelCase : Union[str, Any]=False , _lowerCAmelCase : Any=1.0 , **_lowerCAmelCase : Dict ):
return super().__call__(
self.replace_placeholder_tokens_in_text(
_lowerCAmelCase , vector_shuffle=_lowerCAmelCase , prop_tokens_to_load=_lowerCAmelCase ) , *_lowerCAmelCase , **_lowerCAmelCase , )
def A (self : Optional[int] , _lowerCAmelCase : Optional[Any] , *_lowerCAmelCase : Dict , _lowerCAmelCase : Optional[Any]=False , _lowerCAmelCase : Any=1.0 , **_lowerCAmelCase : Optional[int] ):
return super().encode(
self.replace_placeholder_tokens_in_text(
_lowerCAmelCase , vector_shuffle=_lowerCAmelCase , prop_tokens_to_load=_lowerCAmelCase ) , *_lowerCAmelCase , **_lowerCAmelCase , )
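# Usage sketch, hedged: in the original diffusers community example this class is
# MultiTokenCLIPTokenizer, and the method names above have been mangled by the
# renaming pass (several are all called `A`). Against the original API, the flow
# is roughly:
#
#   tokenizer = MultiTokenCLIPTokenizer.from_pretrained(
#       "runwayml/stable-diffusion-v1-5", subfolder="tokenizer"
#   )
#   tokenizer.add_placeholder_tokens("<cat-toy>", num_vec_per_token=4)
#   ids = tokenizer("a photo of <cat-toy>", vector_shuffle=True).input_ids
#
# Each placeholder expands to <cat-toy>_0 ... <cat-toy>_3 before encoding.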
| 258 |
'''simple docstring'''
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from transformers import CLIPImageProcessor, CLIPVisionModel
from ...models import PriorTransformer
from ...pipelines import DiffusionPipeline
from ...schedulers import HeunDiscreteScheduler
from ...utils import (
BaseOutput,
is_accelerate_available,
logging,
randn_tensor,
replace_example_docstring,
)
from .renderer import ShapERenderer
_lowerCamelCase : Union[str, Any] = logging.get_logger(__name__) # pylint: disable=invalid-name
_lowerCamelCase : str = '\n Examples:\n ```py\n >>> from PIL import Image\n >>> import torch\n >>> from diffusers import DiffusionPipeline\n >>> from diffusers.utils import export_to_gif, load_image\n\n >>> device = torch.device("cuda" if torch.cuda.is_available() else "cpu")\n\n >>> repo = "openai/shap-e-img2img"\n >>> pipe = DiffusionPipeline.from_pretrained(repo, torch_dtype=torch.float16)\n >>> pipe = pipe.to(device)\n\n >>> guidance_scale = 3.0\n >>> image_url = "https://hf.co/datasets/diffusers/docs-images/resolve/main/shap-e/corgi.png"\n >>> image = load_image(image_url).convert("RGB")\n\n >>> images = pipe(\n ... image,\n ... guidance_scale=guidance_scale,\n ... num_inference_steps=64,\n ... frame_size=256,\n ... ).images\n\n >>> gif_path = export_to_gif(images[0], "corgi_3d.gif")\n ```\n'
@dataclass
class __UpperCAmelCase ( A__ ):
'''simple docstring'''
__lowerCAmelCase = 42
class __UpperCAmelCase ( A__ ):
'''simple docstring'''
def __init__(self : str , _lowerCAmelCase : PriorTransformer , _lowerCAmelCase : CLIPVisionModel , _lowerCAmelCase : CLIPImageProcessor , _lowerCAmelCase : HeunDiscreteScheduler , _lowerCAmelCase : ShapERenderer , ):
super().__init__()
self.register_modules(
prior=_lowerCAmelCase , image_encoder=_lowerCAmelCase , image_processor=_lowerCAmelCase , scheduler=_lowerCAmelCase , renderer=_lowerCAmelCase , )
def A (self : Dict , _lowerCAmelCase : Tuple , _lowerCAmelCase : Any , _lowerCAmelCase : Tuple , _lowerCAmelCase : Any , _lowerCAmelCase : Tuple , _lowerCAmelCase : Optional[Any] ):
if latents is None:
A = randn_tensor(_lowerCAmelCase , generator=_lowerCAmelCase , device=_lowerCAmelCase , dtype=_lowerCAmelCase )
else:
if latents.shape != shape:
raise ValueError(F"""Unexpected latents shape, got {latents.shape}, expected {shape}""" )
A = latents.to(_lowerCAmelCase )
A = latents * scheduler.init_noise_sigma
return latents
def A (self : Union[str, Any] , _lowerCAmelCase : List[Any]=0 ):
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError("""Please install accelerate via `pip install accelerate`""" )
A = torch.device(F"""cuda:{gpu_id}""" )
A = [self.image_encoder, self.prior]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(_lowerCAmelCase , _lowerCAmelCase )
@property
def A (self : Optional[Any] ):
if self.device != torch.device("""meta""" ) or not hasattr(self.image_encoder , """_hf_hook""" ):
return self.device
for module in self.image_encoder.modules():
if (
hasattr(_lowerCAmelCase , """_hf_hook""" )
and hasattr(module._hf_hook , """execution_device""" )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
def A (self : int , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Any , _lowerCAmelCase : Optional[int] , ):
if isinstance(_lowerCAmelCase , _lowerCAmelCase ) and isinstance(image[0] , torch.Tensor ):
A = torch.cat(_lowerCAmelCase , axis=0 ) if image[0].ndim == 4 else torch.stack(_lowerCAmelCase , axis=0 )
if not isinstance(_lowerCAmelCase , torch.Tensor ):
A = self.image_processor(_lowerCAmelCase , return_tensors="""pt""" ).pixel_values[0].unsqueeze(0 )
A = image.to(dtype=self.image_encoder.dtype , device=_lowerCAmelCase )
A = self.image_encoder(_lowerCAmelCase )["""last_hidden_state"""]
A = image_embeds[:, 1:, :].contiguous() # batch_size, dim, 256
A = image_embeds.repeat_interleave(_lowerCAmelCase , dim=0 )
if do_classifier_free_guidance:
A = torch.zeros_like(_lowerCAmelCase )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
A = torch.cat([negative_image_embeds, image_embeds] )
return image_embeds
@torch.no_grad()
@replace_example_docstring(_lowerCAmelCase )
def __call__(self : List[Any] , _lowerCAmelCase : Union[PIL.Image.Image, List[PIL.Image.Image]] , _lowerCAmelCase : int = 1 , _lowerCAmelCase : int = 25 , _lowerCAmelCase : Optional[Union[torch.Generator, List[torch.Generator]]] = None , _lowerCAmelCase : Optional[torch.FloatTensor] = None , _lowerCAmelCase : float = 4.0 , _lowerCAmelCase : int = 64 , _lowerCAmelCase : Optional[str] = "pil" , _lowerCAmelCase : bool = True , ):
if isinstance(_lowerCAmelCase , PIL.Image.Image ):
A = 1
elif isinstance(_lowerCAmelCase , torch.Tensor ):
A = image.shape[0]
elif isinstance(_lowerCAmelCase , _lowerCAmelCase ) and isinstance(image[0] , (torch.Tensor, PIL.Image.Image) ):
A = len(_lowerCAmelCase )
else:
raise ValueError(
F"""`image` has to be of type `PIL.Image.Image`, `torch.Tensor`, `List[PIL.Image.Image]` or `List[torch.Tensor]` but is {type(_lowerCAmelCase )}""" )
A = self._execution_device
A = batch_size * num_images_per_prompt
A = guidance_scale > 1.0
A = self._encode_image(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
# prior
self.scheduler.set_timesteps(_lowerCAmelCase , device=_lowerCAmelCase )
A = self.scheduler.timesteps
A = self.prior.config.num_embeddings
A = self.prior.config.embedding_dim
A = self.prepare_latents(
(batch_size, num_embeddings * embedding_dim) , image_embeds.dtype , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , self.scheduler , )
# YiYi notes: for testing only to match ldm, we can directly create a latents with desired shape: batch_size, num_embeddings, embedding_dim
A = latents.reshape(latents.shape[0] , _lowerCAmelCase , _lowerCAmelCase )
for i, t in enumerate(self.progress_bar(_lowerCAmelCase ) ):
# expand the latents if we are doing classifier free guidance
A = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
A = self.scheduler.scale_model_input(_lowerCAmelCase , _lowerCAmelCase )
A = self.prior(
_lowerCAmelCase , timestep=_lowerCAmelCase , proj_embedding=_lowerCAmelCase , ).predicted_image_embedding
# remove the variance
A , A = noise_pred.split(
scaled_model_input.shape[2] , dim=2 ) # batch_size, num_embeddings, embedding_dim
            if do_classifier_free_guidance:
A , A = noise_pred.chunk(2 )
A = noise_pred_uncond + guidance_scale * (noise_pred - noise_pred_uncond)
A = self.scheduler.step(
_lowerCAmelCase , timestep=_lowerCAmelCase , sample=_lowerCAmelCase , ).prev_sample
if output_type == "latent":
return ShapEPipelineOutput(images=_lowerCAmelCase )
A = []
for i, latent in enumerate(_lowerCAmelCase ):
A = self.renderer.decode(
latent[None, :] , _lowerCAmelCase , size=_lowerCAmelCase , ray_batch_size=4096 , n_coarse_samples=64 , n_fine_samples=128 , )
images.append(_lowerCAmelCase )
A = torch.stack(_lowerCAmelCase )
if output_type not in ["np", "pil"]:
raise ValueError(F"""Only the output types `pil` and `np` are supported not output_type={output_type}""" )
A = images.cpu().numpy()
if output_type == "pil":
A = [self.numpy_to_pil(_lowerCAmelCase ) for image in images]
# Offload last model to CPU
if hasattr(self , """final_offload_hook""" ) and self.final_offload_hook is not None:
self.final_offload_hook.offload()
if not return_dict:
return (images,)
return ShapEPipelineOutput(images=_lowerCAmelCase )
| 258 | 1 |
"""simple docstring"""
import numpy as np
from PIL import Image
def UpperCamelCase_ (lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = np.array(lowerCAmelCase_ )
if arr.shape[0] != arr.shape[1]:
raise ValueError("The input array is not a square matrix" )
__SCREAMING_SNAKE_CASE = 0
__SCREAMING_SNAKE_CASE = 0
__SCREAMING_SNAKE_CASE = 0
__SCREAMING_SNAKE_CASE = 0
# compute the shape of the output matrix
__SCREAMING_SNAKE_CASE = (arr.shape[0] - size) // stride + 1
# initialize the output matrix with zeros of shape maxpool_shape
__SCREAMING_SNAKE_CASE = np.zeros((maxpool_shape, maxpool_shape) )
while i < arr.shape[0]:
if i + size > arr.shape[0]:
# if the end of the matrix is reached, break
break
while j < arr.shape[1]:
# if the end of the matrix is reached, break
if j + size > arr.shape[1]:
break
# compute the maximum of the pooling matrix
__SCREAMING_SNAKE_CASE = np.max(arr[i : i + size, j : j + size] )
# shift the pooling matrix by stride of column pixels
j += stride
mat_j += 1
# shift the pooling matrix by stride of row pixels
i += stride
mat_i += 1
# reset the column index to 0
__SCREAMING_SNAKE_CASE = 0
__SCREAMING_SNAKE_CASE = 0
return updated_arr
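# A quick worked example, assuming the original names are restored (the function
# above is maxpooling(arr, size, stride) and the mangled assignments write to
# i, j, mat_i, mat_j and updated_arr[mat_i][mat_j]): a 2x2 window with stride 2
# keeps the largest value of each non-overlapping block.
#
# >>> maxpooling([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12], [13, 14, 15, 16]], 2, 2)
# array([[ 6.,  8.],
#        [14., 16.]])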
def UpperCamelCase_ (lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = np.array(lowerCAmelCase_ )
if arr.shape[0] != arr.shape[1]:
raise ValueError("The input array is not a square matrix" )
__SCREAMING_SNAKE_CASE = 0
__SCREAMING_SNAKE_CASE = 0
__SCREAMING_SNAKE_CASE = 0
__SCREAMING_SNAKE_CASE = 0
# compute the shape of the output matrix
__SCREAMING_SNAKE_CASE = (arr.shape[0] - size) // stride + 1
# initialize the output matrix with zeros of shape avgpool_shape
__SCREAMING_SNAKE_CASE = np.zeros((avgpool_shape, avgpool_shape) )
while i < arr.shape[0]:
# if the end of the matrix is reached, break
if i + size > arr.shape[0]:
break
while j < arr.shape[1]:
# if the end of the matrix is reached, break
if j + size > arr.shape[1]:
break
# compute the average of the pooling matrix
__SCREAMING_SNAKE_CASE = int(np.average(arr[i : i + size, j : j + size] ) )
# shift the pooling matrix by stride of column pixels
j += stride
mat_j += 1
# shift the pooling matrix by stride of row pixels
i += stride
mat_i += 1
# reset the column index to 0
__SCREAMING_SNAKE_CASE = 0
__SCREAMING_SNAKE_CASE = 0
return updated_arr
# Main Function
if __name__ == "__main__":
from doctest import testmod
testmod(name='''avgpooling''', verbose=True)
# Loading the image
a__ : Optional[int] = Image.open('''path_to_image''')
# Converting the image to numpy array and maxpooling, displaying the result
# Ensure that the image is a square matrix
Image.fromarray(maxpooling(np.array(image), size=3, stride=2)).show()
# Converting the image to numpy array and averagepooling, displaying the result
# Ensure that the image is a square matrix
Image.fromarray(avgpooling(np.array(image), size=3, stride=2)).show()
| 358 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import _LazyModule
a__ : Optional[int] = {'''tokenization_byt5''': ['''ByT5Tokenizer''']}
if TYPE_CHECKING:
from .tokenization_byta import ByTaTokenizer
else:
import sys
a__ : Any = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 195 | 0 |
from functools import lru_cache
def lowerCamelCase__ ( _a):
SCREAMING_SNAKE_CASE : str = 2
SCREAMING_SNAKE_CASE : Optional[Any] = set()
while i * i <= n:
if n % i:
i += 1
else:
n //= i
            factors.add(i)
if n > 1:
        factors.add(n)
return factors
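# Worked examples, assuming the mangled assignments above are restored to
# i = 2 and factors = set() (trial division, collecting each distinct prime once):
#
# >>> sorted(unique_prime_factors(100))   # 100 = 2^2 * 5^2
# [2, 5]
# >>> sorted(unique_prime_factors(644))   # 644 = 2^2 * 7 * 23
# [2, 7, 23]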
@lru_cache
def lowerCamelCase__ ( _a):
return len(unique_prime_factors(_a))
def lowerCamelCase__ ( _a):
return len(set(_a)) in (0, 1)
def lowerCamelCase__ ( _a):
SCREAMING_SNAKE_CASE : Any = 2
while True:
# Increment each value of a generated range
SCREAMING_SNAKE_CASE : Union[str, Any] = [base + i for i in range(_a)]
# Run elements through out unique_prime_factors function
# Append our target number to the end.
        SCREAMING_SNAKE_CASE : List[str] = [upf_len(x) for x in group]
checker.append(_a)
# If all numbers in the list are equal, return the group variable.
        if equality(checker):
return group
# Increment our base variable by 1
base += 1
def lowerCamelCase__ ( _a = 4):
SCREAMING_SNAKE_CASE : Tuple = run(_a)
    return results[0] if len(results) else None
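# With the renamed assignments restored, solution() looks for the first run of
# n consecutive integers that each have exactly n distinct prime factors
# (Project Euler 47): checker holds the distinct-factor count of every group
# member plus the target n, and equality() accepts only an all-equal list.
# For the default n=4 the first such run starts at 134043, so solution() is
# expected to return 134043.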
if __name__ == "__main__":
print(solution()) | 76 |
from __future__ import annotations
def UpperCamelCase_( _snake_case : int ):
"""simple docstring"""
__a =str(_snake_case )
    return len(__a ) == 9 and set(__a ) == set('123456789' )
def UpperCamelCase_( ):
"""simple docstring"""
for base_num in range(9999 , 4999 , -1 ):
__a =100002 * base_num
        if is_9_pandigital(__a ):
            return __a
for base_num in range(333 , 99 , -1 ):
__a =1002003 * base_num
        if is_9_pandigital(__a ):
            return __a
return None
if __name__ == "__main__":
print(f'''{solution() = }''')
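# The two ranges above cover the only shapes a 9-digit pandigital concatenated
# product can take (Project Euler 38): a 4-digit base b yields b*100002, i.e. b
# followed by 2*b, and a 3-digit base yields b*1002003, i.e. b, 2*b, 3*b.
# Searching downward returns the largest candidate first; solution() is expected
# to return 932718654 (9327 followed by 2 * 9327 = 18654).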
| 218 | 0 |
"""simple docstring"""
import secrets
from random import shuffle
from string import ascii_letters, ascii_lowercase, ascii_uppercase, digits, punctuation
def A ( snake_case :int = 8 ) -> str:
__UpperCamelCase = ascii_letters + digits + punctuation
return "".join(secrets.choice(snake_case ) for _ in range(snake_case ) )
def A ( snake_case :str , snake_case :int ) -> str:
# Password Generator = full boot with random_number, random_letters, and
# random_character FUNCTIONS
# Put your code here...
i -= len(snake_case )
__UpperCamelCase = i // 3
__UpperCamelCase = i % 3
# chars = chars_incl + random_letters(ascii_letters, i / 3 + remainder) +
# random_number(digits, i / 3) + random_characters(punctuation, i / 3)
__UpperCamelCase = (
chars_incl
+ random(snake_case , quotient + remainder )
+ random(snake_case , snake_case )
+ random(snake_case , snake_case )
)
__UpperCamelCase = list(snake_case )
shuffle(snake_case )
return "".join(snake_case )
# random is a generalised function for letters, characters and numbers
def A ( snake_case :str , snake_case :int ) -> str:
return "".join(secrets.choice(snake_case ) for _ in range(snake_case ) )
def A ( snake_case :Tuple , snake_case :Optional[Any] ) -> Any:
pass # Put your code here...
def A ( snake_case :Any , snake_case :Any ) -> Any:
pass # Put your code here...
def A ( snake_case :str , snake_case :Optional[int] ) -> Optional[int]:
pass # Put your code here...
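# The three stubs above are left as exercises in the original file. Under the
# obvious reading (draw n random characters from a given pool), each reduces to
# the same one-liner; the helper below is a hypothetical standalone sketch, not
# a drop-in for the stubs:
def pick_random_chars(pool: str, n: int) -> str:
    return "".join(secrets.choice(pool) for _ in range(n))
# e.g. pick_random_chars(digits, 4) for numbers, pick_random_chars(ascii_letters, 4)
# for letters, and pick_random_chars(punctuation, 4) for special characters.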
def A ( snake_case :str , snake_case :int = 8 ) -> bool:
if len(snake_case ) < min_length:
# Your Password must be at least 8 characters long
return False
__UpperCamelCase = any(char in ascii_uppercase for char in password )
__UpperCamelCase = any(char in ascii_lowercase for char in password )
__UpperCamelCase = any(char in digits for char in password )
__UpperCamelCase = any(char in punctuation for char in password )
return upper and lower and num and spec_char
# Passwords should contain UPPERCASE, lowercase,
# numbers, and special characters
def A ( ) -> Any:
__UpperCamelCase = int(input('Please indicate the max length of your password: ' ).strip() )
__UpperCamelCase = input(
'Please indicate the characters that must be in your password: ' ).strip()
print('Password generated:' , password_generator(snake_case ) )
print(
'Alternative Password generated:' , alternative_password_generator(snake_case , snake_case ) , )
print('[If you are thinking of using this passsword, You better save it.]' )
if __name__ == "__main__":
main()
| 263 |
"""simple docstring"""
def A ( snake_case :list[list[int]] , snake_case :int , snake_case :int , snake_case :list[int] ) -> bool:
# 1. Validate that path exists between current and next vertices
if graph[path[curr_ind - 1]][next_ver] == 0:
return False
# 2. Validate that next vertex is not already in path
return not any(vertex == next_ver for vertex in path )
def A ( snake_case :list[list[int]] , snake_case :list[int] , snake_case :int ) -> bool:
# Base Case
if curr_ind == len(snake_case ):
# return whether path exists between current and starting vertices
return graph[path[curr_ind - 1]][path[0]] == 1
# Recursive Step
for next_ver in range(0 , len(snake_case ) ):
if valid_connection(snake_case , snake_case , snake_case , snake_case ):
# Insert current vertex into path as next transition
__UpperCamelCase = next_ver
# Validate created path
if util_hamilton_cycle(snake_case , snake_case , curr_ind + 1 ):
return True
# Backtrack
__UpperCamelCase = -1
return False
def A ( snake_case :list[list[int]] , snake_case :int = 0 ) -> list[int]:
__UpperCamelCase = [-1] * (len(snake_case ) + 1)
# initialize start and end of path with starting index
__UpperCamelCase = __UpperCamelCase = start_index
# evaluate and if we find answer return path either return empty array
return path if util_hamilton_cycle(snake_case , snake_case , 1 ) else []
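# A worked example, assuming the mangled names are restored (the entry point is
# hamilton_cycle(graph, start_index=0) and the first and last path slots are both
# set to start_index). The 5-vertex graph below admits the cycle 0-1-2-4-3-0:
#
# >>> graph = [[0, 1, 0, 1, 0],
# ...          [1, 0, 1, 1, 1],
# ...          [0, 1, 0, 0, 1],
# ...          [1, 1, 0, 0, 1],
# ...          [0, 1, 1, 1, 0]]
# >>> hamilton_cycle(graph)
# [0, 1, 2, 4, 3, 0]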
| 263 | 1 |
from collections import deque
from math import floor
from random import random
from time import time
class a__ :
"""simple docstring"""
def __init__( self ) -> Dict:
'''simple docstring'''
A__ = {}
def UpperCamelCase ( self , lowercase , lowercase , lowercase=1 ) -> Tuple:
'''simple docstring'''
if self.graph.get(lowercase ):
if self.graph[u].count([w, v] ) == 0:
self.graph[u].append([w, v] )
else:
A__ = [[w, v]]
if not self.graph.get(lowercase ):
A__ = []
def UpperCamelCase ( self ) -> Optional[Any]:
'''simple docstring'''
return list(self.graph )
def UpperCamelCase ( self , lowercase , lowercase ) -> int:
'''simple docstring'''
if self.graph.get(lowercase ):
for _ in self.graph[u]:
if _[1] == v:
self.graph[u].remove(lowercase )
def UpperCamelCase ( self , lowercase=-2 , lowercase=-1 ) -> Any:
'''simple docstring'''
if s == d:
return []
A__ = []
A__ = []
if s == -2:
A__ = list(self.graph )[0]
stack.append(lowercase )
visited.append(lowercase )
A__ = s
while True:
# check if there is any non isolated nodes
if len(self.graph[s] ) != 0:
A__ = s
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
if node[1] == d:
visited.append(lowercase )
return visited
else:
stack.append(node[1] )
visited.append(node[1] )
A__ = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
if len(lowercase ) != 0:
A__ = stack[len(lowercase ) - 1]
else:
A__ = ss
            # check if we have reached the starting point
if len(lowercase ) == 0:
return visited
def UpperCamelCase ( self , lowercase=-1 ) -> Optional[Any]:
'''simple docstring'''
if c == -1:
A__ = floor(random() * 10000 ) + 10
for i in range(lowercase ):
# every vertex has max 100 edges
for _ in range(floor(random() * 102 ) + 1 ):
A__ = floor(random() * c ) + 1
if n != i:
self.add_pair(lowercase , lowercase , 1 )
def UpperCamelCase ( self , lowercase=-2 ) -> Any:
'''simple docstring'''
A__ = deque()
A__ = []
if s == -2:
A__ = list(self.graph )[0]
d.append(lowercase )
visited.append(lowercase )
while d:
A__ = d.popleft()
if len(self.graph[s] ) != 0:
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
d.append(node[1] )
visited.append(node[1] )
return visited
def UpperCamelCase ( self , lowercase ) -> Tuple:
'''simple docstring'''
A__ = 0
for x in self.graph:
for y in self.graph[x]:
if y[1] == u:
count += 1
return count
def UpperCamelCase ( self , lowercase ) -> int:
'''simple docstring'''
return len(self.graph[u] )
def UpperCamelCase ( self , lowercase=-2 ) -> str:
'''simple docstring'''
A__ = []
A__ = []
if s == -2:
A__ = list(self.graph )[0]
stack.append(lowercase )
visited.append(lowercase )
A__ = s
A__ = []
while True:
# check if there is any non isolated nodes
if len(self.graph[s] ) != 0:
A__ = s
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
A__ = node[1]
break
# check if all the children are visited
if s == ss:
sorted_nodes.append(stack.pop() )
if len(lowercase ) != 0:
A__ = stack[len(lowercase ) - 1]
else:
A__ = ss
# check if se have reached the starting point
if len(lowercase ) == 0:
return sorted_nodes
def UpperCamelCase ( self ) -> List[Any]:
'''simple docstring'''
A__ = []
A__ = []
A__ = list(self.graph )[0]
stack.append(lowercase )
visited.append(lowercase )
A__ = -2
A__ = []
A__ = s
A__ = False
A__ = set()
while True:
# check if there is any non isolated nodes
if len(self.graph[s] ) != 0:
A__ = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
A__ = len(lowercase ) - 1
while len_stack >= 0:
if stack[len_stack] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
anticipating_nodes.add(stack[len_stack] )
len_stack -= 1
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
A__ = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
A__ = True
if len(lowercase ) != 0:
A__ = stack[len(lowercase ) - 1]
else:
A__ = False
indirect_parents.append(lowercase )
A__ = s
A__ = ss
            # check if we have reached the starting point
if len(lowercase ) == 0:
return list(lowercase )
def UpperCamelCase ( self ) -> List[str]:
'''simple docstring'''
A__ = []
A__ = []
A__ = list(self.graph )[0]
stack.append(lowercase )
visited.append(lowercase )
A__ = -2
A__ = []
A__ = s
A__ = False
A__ = set()
while True:
# check if there is any non isolated nodes
if len(self.graph[s] ) != 0:
A__ = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
A__ = len(lowercase ) - 1
while len_stack_minus_one >= 0:
if stack[len_stack_minus_one] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
return True
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
A__ = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
A__ = True
if len(lowercase ) != 0:
A__ = stack[len(lowercase ) - 1]
else:
A__ = False
indirect_parents.append(lowercase )
A__ = s
A__ = ss
            # check if we have reached the starting point
if len(lowercase ) == 0:
return False
def UpperCamelCase ( self , lowercase=-2 , lowercase=-1 ) -> Any:
'''simple docstring'''
A__ = time()
self.dfs(lowercase , lowercase )
A__ = time()
return end - begin
def UpperCamelCase ( self , lowercase=-2 ) -> int:
'''simple docstring'''
A__ = time()
self.bfs(lowercase )
A__ = time()
return end - begin
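# A short usage sketch for the directed graph above, assuming the mangled method
# names map back to the originals (add_pair, dfs, bfs, in_degree, out_degree):
#
# g = Graph()
# g.add_pair(0, 1)
# g.add_pair(0, 2)
# g.add_pair(1, 2)
# g.dfs(0)        # [0, 1, 2] -- depth-first order from vertex 0
# g.bfs(0)        # [0, 1, 2] -- breadth-first order from vertex 0
# g.in_degree(2)  # 2 -- edges arrive from both 0 and 1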
class a__ :
"""simple docstring"""
def __init__( self ) -> int:
'''simple docstring'''
A__ = {}
def UpperCamelCase ( self , lowercase , lowercase , lowercase=1 ) -> Union[str, Any]:
'''simple docstring'''
if self.graph.get(lowercase ):
# if there already is a edge
if self.graph[u].count([w, v] ) == 0:
self.graph[u].append([w, v] )
else:
# if u does not exist
A__ = [[w, v]]
# add the other way
if self.graph.get(lowercase ):
# if there already is a edge
if self.graph[v].count([w, u] ) == 0:
self.graph[v].append([w, u] )
else:
# if u does not exist
A__ = [[w, u]]
def UpperCamelCase ( self , lowercase , lowercase ) -> Union[str, Any]:
'''simple docstring'''
if self.graph.get(lowercase ):
for _ in self.graph[u]:
if _[1] == v:
self.graph[u].remove(lowercase )
# the other way round
if self.graph.get(lowercase ):
for _ in self.graph[v]:
if _[1] == u:
self.graph[v].remove(lowercase )
def UpperCamelCase ( self , lowercase=-2 , lowercase=-1 ) -> List[str]:
'''simple docstring'''
if s == d:
return []
A__ = []
A__ = []
if s == -2:
A__ = list(self.graph )[0]
stack.append(lowercase )
visited.append(lowercase )
A__ = s
while True:
# check if there is any non isolated nodes
if len(self.graph[s] ) != 0:
A__ = s
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
if node[1] == d:
visited.append(lowercase )
return visited
else:
stack.append(node[1] )
visited.append(node[1] )
A__ = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
if len(lowercase ) != 0:
A__ = stack[len(lowercase ) - 1]
else:
A__ = ss
            # check if we have reached the starting point
if len(lowercase ) == 0:
return visited
def UpperCamelCase ( self , lowercase=-1 ) -> str:
'''simple docstring'''
if c == -1:
A__ = floor(random() * 10000 ) + 10
for i in range(lowercase ):
# every vertex has max 100 edges
for _ in range(floor(random() * 102 ) + 1 ):
A__ = floor(random() * c ) + 1
if n != i:
self.add_pair(lowercase , lowercase , 1 )
def UpperCamelCase ( self , lowercase=-2 ) -> Dict:
'''simple docstring'''
A__ = deque()
A__ = []
if s == -2:
A__ = list(self.graph )[0]
d.append(lowercase )
visited.append(lowercase )
while d:
A__ = d.popleft()
if len(self.graph[s] ) != 0:
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
d.append(node[1] )
visited.append(node[1] )
return visited
def UpperCamelCase ( self , lowercase ) -> Tuple:
'''simple docstring'''
return len(self.graph[u] )
def UpperCamelCase ( self ) -> Dict:
'''simple docstring'''
A__ = []
A__ = []
A__ = list(self.graph )[0]
stack.append(lowercase )
visited.append(lowercase )
A__ = -2
A__ = []
A__ = s
A__ = False
A__ = set()
while True:
# check if there is any non isolated nodes
if len(self.graph[s] ) != 0:
A__ = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
A__ = len(lowercase ) - 1
while len_stack >= 0:
if stack[len_stack] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
anticipating_nodes.add(stack[len_stack] )
len_stack -= 1
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
A__ = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
A__ = True
if len(lowercase ) != 0:
A__ = stack[len(lowercase ) - 1]
else:
A__ = False
indirect_parents.append(lowercase )
A__ = s
A__ = ss
            # check if we have reached the starting point
if len(lowercase ) == 0:
return list(lowercase )
def UpperCamelCase ( self ) -> int:
'''simple docstring'''
A__ = []
A__ = []
A__ = list(self.graph )[0]
stack.append(lowercase )
visited.append(lowercase )
A__ = -2
A__ = []
A__ = s
A__ = False
A__ = set()
while True:
# check if there is any non isolated nodes
if len(self.graph[s] ) != 0:
A__ = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
A__ = len(lowercase ) - 1
while len_stack_minus_one >= 0:
if stack[len_stack_minus_one] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
return True
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
A__ = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
A__ = True
if len(lowercase ) != 0:
A__ = stack[len(lowercase ) - 1]
else:
A__ = False
indirect_parents.append(lowercase )
A__ = s
A__ = ss
            # check if we have reached the starting point
if len(lowercase ) == 0:
return False
def UpperCamelCase ( self ) -> List[str]:
'''simple docstring'''
return list(self.graph )
def UpperCamelCase ( self , lowercase=-2 , lowercase=-1 ) -> Optional[Any]:
'''simple docstring'''
A__ = time()
self.dfs(lowercase , lowercase )
A__ = time()
return end - begin
def UpperCamelCase ( self , lowercase=-2 ) -> List[Any]:
'''simple docstring'''
A__ = time()
self.bfs(lowercase )
A__ = time()
return end - begin
| 68 |
from typing import Dict
from transformers import EvalPrediction, HfArgumentParser, TrainingArguments, is_torch_available
from transformers.testing_utils import (
TestCasePlus,
execute_subprocess_async,
get_torch_dist_unique_port,
require_torch_multi_gpu,
require_torch_neuroncore,
)
from transformers.training_args import ParallelMode
from transformers.utils import logging
__a :int = logging.get_logger(__name__)
if is_torch_available():
import torch
from torch import nn
from torch.utils.data import Dataset
from transformers import Trainer
class _a ( snake_case_ ):
"""simple docstring"""
def __init__( self : Tuple , UpperCAmelCase : int = 101 ):
A_ = length
def __len__( self : int ):
return self.length
def __getitem__( self : Optional[int] , UpperCAmelCase : Optional[int] ):
return i
class _a :
"""simple docstring"""
def __call__( self : Any , UpperCAmelCase : Optional[Any] ):
return {"input_ids": torch.tensor(UpperCAmelCase ), "labels": torch.tensor(UpperCAmelCase )}
class _a ( nn.Module ):
"""simple docstring"""
def __init__( self : int ):
super().__init__()
# Add some (unused) params otherwise DDP will complain.
A_ = nn.Linear(120 , 80 )
def __A ( self : Tuple , UpperCAmelCase : Dict , UpperCAmelCase : Tuple=None ):
if labels is not None:
return torch.tensor(0.0 , device=input_ids.device ), input_ids
else:
return input_ids
class _a ( snake_case_ ):
"""simple docstring"""
@require_torch_neuroncore
def __A ( self : List[str] ):
A_ = f'''--nproc_per_node=2
--master_port={get_torch_dist_unique_port()}
{self.test_file_dir}/test_trainer_distributed.py
'''.split()
A_ = self.get_auto_remove_tmp_dir()
A_ = f'''--output_dir {output_dir}'''.split()
A_ = ["torchrun"] + distributed_args + args
execute_subprocess_async(UpperCAmelCase , env=self.get_env() )
# successful return here == success - any errors would have caused an error in the sub-call
class _a ( snake_case_ ):
"""simple docstring"""
@require_torch_multi_gpu
def __A ( self : List[str] ):
A_ = f'''--nproc_per_node={torch.cuda.device_count()}
--master_port={get_torch_dist_unique_port()}
{self.test_file_dir}/test_trainer_distributed.py
'''.split()
A_ = self.get_auto_remove_tmp_dir()
A_ = f'''--output_dir {output_dir}'''.split()
A_ = ["torchrun"] + distributed_args + args
execute_subprocess_async(UpperCAmelCase , env=self.get_env() )
# successful return here == success - any errors would have caused an error in the sub-call
if __name__ == "__main__":
# The script below is meant to be run under torch.distributed, on a machine with multiple GPUs:
#
# PYTHONPATH="src" python -m torch.distributed.run --nproc_per_node 2 --output_dir output_dir ./tests/test_trainer_distributed.py
__a :Union[str, Any] = HfArgumentParser((TrainingArguments,))
__a :Tuple = parser.parse_args_into_dataclasses()[0]
logger.warning(
F"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, "
F"distributed training: {training_args.parallel_mode != ParallelMode.NOT_DISTRIBUTED}"
)
# Essentially, what we want to verify in the distributed case is that we get all samples back,
# in the right order. (this is crucial for prediction for instance)
for dataset_length in [101, 40, 7]:
__a :int = DummyDataset(dataset_length)
def __snake_case ( __UpperCamelCase : EvalPrediction ):
"""simple docstring"""
A_ = list(range(len(__UpperCamelCase ) ) )
A_ = p.predictions.tolist() == sequential and p.label_ids.tolist() == sequential
if not success and training_args.local_rank == 0:
logger.warning(
"Predictions and/or labels do not match expected results:\n - predictions: "
f'''{p.predictions.tolist()}\n - labels: {p.label_ids.tolist()}\n - expected: {sequential}''' )
return {"success": success}
__a :str = Trainer(
model=DummyModel(),
args=training_args,
data_collator=DummyDataCollator(),
eval_dataset=dataset,
compute_metrics=compute_metrics,
)
__a :str = trainer.evaluate()
logger.info(metrics)
if metrics["eval_success"] is not True:
logger.error(metrics)
exit(1)
__a :str = trainer.predict(dataset)
logger.info(p.metrics)
if p.metrics["test_success"] is not True:
logger.error(p.metrics)
exit(1)
__a :Optional[int] = 2
__a :List[Any] = trainer.evaluate()
logger.info(metrics)
if metrics["eval_success"] is not True:
logger.error(metrics)
exit(1)
__a :str = trainer.predict(dataset)
logger.info(p.metrics)
if p.metrics["test_success"] is not True:
logger.error(p.metrics)
exit(1)
__a :Union[str, Any] = None | 312 | 0 |
import numpy as np
from transformers import Pipeline
def SCREAMING_SNAKE_CASE__ ( __a ):
    snake_case_ : str = np.max(lowerCAmelCase__ , axis=-1 , keepdims=True )
    snake_case_ : str = np.exp(outputs - maxes )
    return shifted_exp / shifted_exp.sum(axis=-1 , keepdims=True )
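# Quick numeric check, assuming the mangled assignment names above are restored
# (maxes, shifted_exp): subtracting the row-wise max before exponentiating is
# the standard numerical-stability trick and does not change the result.
#
# >>> softmax(np.array([[1.0, 2.0, 3.0]])).round(3)
# array([[0.09 , 0.245, 0.665]])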
class SCREAMING_SNAKE_CASE_ ( __lowercase ):
def UpperCAmelCase_ ( self : Union[str, Any] , **_A : List[str] ) -> Optional[Any]:
"""simple docstring"""
snake_case_ : Any = {}
if "second_text" in kwargs:
snake_case_ : Optional[int] = kwargs['''second_text''']
return preprocess_kwargs, {}, {}
def UpperCAmelCase_ ( self : Optional[int] , _A : List[Any] , _A : Dict=None ) -> Union[str, Any]:
"""simple docstring"""
return self.tokenizer(snake_case_ , text_pair=snake_case_ , return_tensors=self.framework )
def UpperCAmelCase_ ( self : List[str] , _A : int ) -> int:
"""simple docstring"""
return self.model(**snake_case_ )
def UpperCAmelCase_ ( self : int , _A : Dict ) -> int:
"""simple docstring"""
snake_case_ : Dict = model_outputs.logits[0].numpy()
snake_case_ : Tuple = softmax(snake_case_ )
snake_case_ : int = np.argmax(snake_case_ )
snake_case_ : int = self.model.config.idalabel[best_class]
snake_case_ : Optional[int] = probabilities[best_class].item()
snake_case_ : str = logits.tolist()
return {"label": label, "score": score, "logits": logits}
| 351 |
# this script reports modified .py files under the desired list of top-level sub-dirs passed as a list of arguments, e.g.:
# python ./utils/get_modified_files.py utils src tests examples
#
# it uses git to find the forking point and which files were modified - i.e. files not under git won't be considered
# since the output of this script is fed into Makefile commands it doesn't print a newline after the results
import re
import subprocess
import sys
_SCREAMING_SNAKE_CASE = subprocess.check_output("""git merge-base main HEAD""".split()).decode("""utf-8""")
_SCREAMING_SNAKE_CASE = (
subprocess.check_output(F'''git diff --diff-filter=d --name-only {fork_point_sha}'''.split()).decode("""utf-8""").split()
)
_SCREAMING_SNAKE_CASE = """|""".join(sys.argv[1:])
_SCREAMING_SNAKE_CASE = re.compile(RF'''^({joined_dirs}).*?\.py$''')
_SCREAMING_SNAKE_CASE = [x for x in modified_files if regex.match(x)]
print(""" """.join(relevant_modified_files), end="""""")
| 88 | 0 |
from datetime import datetime as dt
import os
from github import Github
_UpperCAmelCase : Dict = [
"""good first issue""",
"""good second issue""",
"""good difficult issue""",
"""feature request""",
"""new model""",
"""wip""",
]
def SCREAMING_SNAKE_CASE ( ) -> int:
lowerCamelCase__ : Optional[Any] = Github(os.environ['GITHUB_TOKEN'] )
lowerCamelCase__ : Union[str, Any] = g.get_repo('huggingface/transformers' )
lowerCamelCase__ : int = repo.get_issues(state='open' )
for issue in open_issues:
lowerCamelCase__ : str = sorted([comment for comment in issue.get_comments()] , key=lambda _UpperCAmelCase : i.created_at , reverse=_UpperCAmelCase )
lowerCamelCase__ : int = comments[0] if len(_UpperCAmelCase ) > 0 else None
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and (dt.utcnow() - issue.updated_at).days > 7
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# print(f"Would close issue {issue.number} since it has been 7 days of inactivity since bot mention.")
issue.edit(state='closed' )
elif (
(dt.utcnow() - issue.updated_at).days > 23
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# print(f"Would add stale comment to {issue.number}")
issue.create_comment(
'This issue has been automatically marked as stale because it has not had '
'recent activity. If you think this still needs to be addressed '
'please comment on this thread.\n\nPlease note that issues that do not follow the '
'[contributing guidelines](https://github.com/huggingface/transformers/blob/main/CONTRIBUTING.md) '
'are likely to be ignored.' )
if __name__ == "__main__":
main()
| 50 |
import argparse
import torch
from torch import nn
from transformers import MaMaaaConfig, MaMaaaForConditionalGeneration
def SCREAMING_SNAKE_CASE ( _UpperCAmelCase ) -> str:
lowerCamelCase__ : Optional[int] = [
'encoder.version',
'decoder.version',
'model.encoder.version',
'model.decoder.version',
'decoder.output_projection.weight',
'_float_tensor',
'encoder.embed_positions._float_tensor',
'decoder.embed_positions._float_tensor',
]
for k in ignore_keys:
state_dict.pop(_UpperCAmelCase , _UpperCAmelCase )
def SCREAMING_SNAKE_CASE ( _UpperCAmelCase ) -> List[Any]:
lowerCamelCase__ , lowerCamelCase__ : List[str] = emb.weight.shape
lowerCamelCase__ : Tuple = nn.Linear(_UpperCAmelCase , _UpperCAmelCase , bias=_UpperCAmelCase )
lowerCamelCase__ : Dict = emb.weight.data
return lin_layer
def convert_fairseq_mamaaa_checkpoint_from_disk( checkpoint_path ) -> MaMaaaForConditionalGeneration:
    mam_aaa = torch.load(checkpoint_path , map_location='cpu' )
    args = mam_aaa['args'] or mam_aaa['cfg']['model']
    state_dict = mam_aaa['model']
    remove_ignore_keys_(state_dict )
    vocab_size = state_dict['encoder.embed_tokens.weight'].shape[0]
    config = MaMaaaConfig(
        vocab_size=vocab_size , max_position_embeddings=1024 , encoder_layers=args.encoder_layers , decoder_layers=args.decoder_layers , encoder_attention_heads=args.encoder_attention_heads , decoder_attention_heads=args.decoder_attention_heads , encoder_ffn_dim=args.encoder_ffn_embed_dim , decoder_ffn_dim=args.decoder_ffn_embed_dim , d_model=args.encoder_embed_dim , encoder_layerdrop=args.encoder_layerdrop , decoder_layerdrop=args.decoder_layerdrop , dropout=args.dropout , attention_dropout=args.attention_dropout , activation_dropout=args.activation_dropout , activation_function='relu' , )
    state_dict['shared.weight'] = state_dict['decoder.embed_tokens.weight']
    model = MaMaaaForConditionalGeneration(config )
    model.model.load_state_dict(state_dict , strict=False )
    model.lm_head = make_linear_from_emb(model.model.shared )
    return model
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument("""fairseq_path""", type=str, help="""path to a model.pt on local filesystem.""")
parser.add_argument("""pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
    args = parser.parse_args()
    model = convert_fairseq_mamaaa_checkpoint_from_disk(args.fairseq_path)
model.save_pretrained(args.pytorch_dump_folder_path)
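# Sketch of reloading the converted checkpoint afterwards (folder path is whatever was passed above):
#   from transformers import MaMaaaForConditionalGeneration
#   reloaded = MaMaaaForConditionalGeneration.from_pretrained(args.pytorch_dump_folder_path)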
| 50 | 1 |
import math
def malus_law( initial_intensity : float , angle : float ):
'''simple docstring'''
if initial_intensity < 0:
raise ValueError('The value of intensity cannot be negative' )
# handling of negative values of initial intensity
if angle < 0 or angle > 3_60:
raise ValueError('In Malus Law, the angle is in the range 0-360 degrees' )
# handling of values out of allowed range
    return initial_intensity * (math.cos(math.radians(angle ) ) ** 2)
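# Worked example (illustrative values): at angle = 60 degrees, cos(60°)**2 = 0.25, so
# malus_law(100.0, 60) returns approximately 25.0.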
if __name__ == "__main__":
import doctest
doctest.testmod(name="""malus_law""")
| 192 | import copy
from typing import Dict, List, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
_UpperCAmelCase = {
"""facebook/mask2former-swin-small-coco-instance""": (
"""https://huggingface.co/facebook/mask2former-swin-small-coco-instance/blob/main/config.json"""
)
# See all Mask2Former models at https://huggingface.co/models?filter=mask2former
}
logger = logging.get_logger(__name__)
class Mask2FormerConfig( PretrainedConfig ):
    '''simple docstring'''
    model_type = '''mask2former'''
    backbones_supported = ['''swin''']
    attribute_map = {'''hidden_size''': '''hidden_dim'''}
def __init__( self , lowercase = None , lowercase = 2_5_6 , lowercase = 2_5_6 , lowercase = 2_5_6 , lowercase = 1_0_2_4 , lowercase = "relu" , lowercase = 6 , lowercase = 1_0 , lowercase = 8 , lowercase = 0.0 , lowercase = 2_0_4_8 , lowercase = False , lowercase = False , lowercase = 4 , lowercase = 2_5_5 , lowercase = 1_0_0 , lowercase = 0.1 , lowercase = 2.0 , lowercase = 5.0 , lowercase = 5.0 , lowercase = 1_2_5_4_4 , lowercase = 3.0 , lowercase = 0.75 , lowercase = 0.02 , lowercase = 1.0 , lowercase = True , lowercase = [4, 8, 1_6, 3_2] , lowercase = None , **lowercase , ):
"""simple docstring"""
if backbone_config is None:
logger.info('`backbone_config` is `None`. Initializing the config with the default `Swin` backbone.' )
A_ : List[str] = CONFIG_MAPPING['swin'](
image_size=2_2_4 , in_channels=3 , patch_size=4 , embed_dim=9_6 , depths=[2, 2, 1_8, 2] , num_heads=[3, 6, 1_2, 2_4] , window_size=7 , drop_path_rate=0.3 , use_absolute_embeddings=lowercase , out_features=['stage1', 'stage2', 'stage3', 'stage4'] , )
if isinstance(lowercase , lowercase ):
A_ : str = backbone_config.pop('model_type' )
A_ : List[str] = CONFIG_MAPPING[backbone_model_type]
A_ : Tuple = config_class.from_dict(lowercase )
# verify that the backbone is supported
if backbone_config.model_type not in self.backbones_supported:
logger.warning_once(
F'''Backbone {backbone_config.model_type} is not a supported model and may not be compatible with Mask2Former. '''
F'''Supported model types: {','.join(self.backbones_supported )}''' )
A_ : List[Any] = backbone_config
A_ : Optional[Any] = feature_size
A_ : int = mask_feature_size
A_ : Tuple = hidden_dim
A_ : Dict = encoder_feedforward_dim
A_ : int = activation_function
A_ : str = encoder_layers
A_ : Tuple = decoder_layers
A_ : Tuple = num_attention_heads
A_ : str = dropout
A_ : List[str] = dim_feedforward
A_ : List[str] = pre_norm
A_ : Tuple = enforce_input_projection
A_ : Dict = common_stride
A_ : Union[str, Any] = ignore_value
A_ : List[Any] = num_queries
A_ : List[Any] = no_object_weight
A_ : int = class_weight
A_ : int = mask_weight
A_ : Optional[Any] = dice_weight
A_ : int = train_num_points
A_ : Optional[int] = oversample_ratio
A_ : Tuple = importance_sample_ratio
A_ : Union[str, Any] = init_std
A_ : List[Any] = init_xavier_std
A_ : Optional[Any] = use_auxiliary_loss
A_ : Dict = feature_strides
A_ : List[Any] = output_auxiliary_logits
A_ : Optional[int] = decoder_layers
super().__init__(**lowercase )
@classmethod
def lowerCAmelCase_ ( cls , lowercase , **lowercase ):
"""simple docstring"""
return cls(
backbone_config=lowercase , **lowercase , )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
        output = copy.deepcopy(self.__dict__ )
        output['backbone_config'] = self.backbone_config.to_dict()
        output['model_type'] = self.__class__.model_type
return output
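    # Design note: the serialization method above (named `lowerCAmelCase_` here; `to_dict` in the
    # transformers API) deep-copies __dict__ and flattens the nested backbone config into a plain
    # dict, so a composed config survives a JSON save/load round trip.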
| 192 | 1 |
'''simple docstring'''
import math
from typing import Optional
import numpy as np
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A =logging.get_logger(__name__)
A ={
'facebook/encodec_24khz': 'https://huggingface.co/facebook/encodec_24khz/resolve/main/config.json',
'facebook/encodec_48khz': 'https://huggingface.co/facebook/encodec_48khz/resolve/main/config.json',
}
class EncodecConfig( PretrainedConfig ):
    model_type = """encodec"""
    def __init__( self : Any , target_bandwidths : List[Any]=[1.5, 3.0, 6.0, 12.0, 24.0] , sampling_rate : List[Any]=24_000 , audio_channels : Optional[int]=1 , normalize : int=False , chunk_length_s : Any=None , overlap : Any=None , hidden_size : Optional[Any]=128 , num_filters : Union[str, Any]=32 , num_residual_layers : List[str]=1 , upsampling_ratios : Tuple=[8, 5, 4, 2] , norm_type : List[str]="weight_norm" , kernel_size : Tuple=7 , last_kernel_size : Optional[Any]=7 , residual_kernel_size : List[Any]=3 , dilation_growth_rate : List[Any]=2 , use_causal_conv : Optional[Any]=True , pad_mode : Optional[int]="reflect" , compress : Union[str, Any]=2 , num_lstm_layers : Optional[Any]=2 , trim_right_ratio : Optional[Any]=1.0 , codebook_size : Optional[int]=1_024 , codebook_dim : Optional[int]=None , use_conv_shortcut : Optional[int]=True , **kwargs : Any , ):
        '''simple docstring'''
        self.target_bandwidths = target_bandwidths
        self.sampling_rate = sampling_rate
        self.audio_channels = audio_channels
        self.normalize = normalize
        self.chunk_length_s = chunk_length_s
        self.overlap = overlap
        self.hidden_size = hidden_size
        self.num_filters = num_filters
        self.num_residual_layers = num_residual_layers
        self.upsampling_ratios = upsampling_ratios
        self.norm_type = norm_type
        self.kernel_size = kernel_size
        self.last_kernel_size = last_kernel_size
        self.residual_kernel_size = residual_kernel_size
        self.dilation_growth_rate = dilation_growth_rate
        self.use_causal_conv = use_causal_conv
        self.pad_mode = pad_mode
        self.compress = compress
        self.num_lstm_layers = num_lstm_layers
        self.trim_right_ratio = trim_right_ratio
        self.codebook_size = codebook_size
        self.codebook_dim = codebook_dim if codebook_dim is not None else hidden_size
        self.use_conv_shortcut = use_conv_shortcut
        if self.norm_type not in ["weight_norm", "time_group_norm"]:
            raise ValueError(
                f"self.norm_type must be one of `\"weight_norm\"`, `\"time_group_norm\"`), got {self.norm_type}" )
        super().__init__(**kwargs )
@property
def A ( self : List[Any] ):
'''simple docstring'''
if self.chunk_length_s is None:
return None
else:
return int(self.chunk_length_s * self.sampling_rate )
@property
def A ( self : List[Any] ):
'''simple docstring'''
if self.chunk_length_s is None or self.overlap is None:
return None
else:
return max(1 , int((1.0 - self.overlap) * self.chunk_length ) )
@property
def A ( self : Optional[int] ):
'''simple docstring'''
        hop_length = np.prod(self.upsampling_ratios )
return math.ceil(self.sampling_rate / hop_length )
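        # Example with the defaults above: upsampling_ratios = [8, 5, 4, 2] gives hop_length = 320,
        # so frame_rate = ceil(24_000 / 320) = 75 frames per second.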
@property
def A ( self : List[Any] ):
'''simple docstring'''
return int(1_000 * self.target_bandwidths[-1] // (self.frame_rate * 10) )
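        # Example with the defaults above: target_bandwidths[-1] = 24.0 and frame_rate = 75, so this
        # property evaluates to int(24_000.0 // 750) = 32 quantizer codebooks at the top bandwidth.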
| 34 |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pathlib import Path
import torch
from ...utils import is_npu_available, is_xpu_available
from .config_args import ClusterConfig, default_json_config_file
from .config_utils import SubcommandHelpFormatter
description = '''Create a default config file for Accelerate with only a few flags set.'''
def write_basic_config( mixed_precision="no" , save_location = default_json_config_file , use_xpu = False ):
    path = Path(save_location )
    path.parent.mkdir(parents=True , exist_ok=True )
    if path.exists():
        print(
            F'''Configuration already exists at {save_location}, will not override. Run `accelerate config` manually or pass a different `save_location`.''' )
        return False
    mixed_precision = mixed_precision.lower()
    if mixed_precision not in ["no", "fp16", "bf16", "fp8"]:
        raise ValueError(
            F'''`mixed_precision` should be one of \'no\', \'fp16\', \'bf16\', or \'fp8\'. Received {mixed_precision}''' )
    config = {
        'compute_environment': 'LOCAL_MACHINE',
        'mixed_precision': mixed_precision,
    }
    if torch.cuda.is_available():
        num_gpus = torch.cuda.device_count()
        config['num_processes'] = num_gpus
        config['use_cpu'] = False
        if num_gpus > 1:
            config['distributed_type'] = 'MULTI_GPU'
        else:
            config['distributed_type'] = 'NO'
    elif is_xpu_available() and use_xpu:
        num_xpus = torch.xpu.device_count()
        config['num_processes'] = num_xpus
        config['use_cpu'] = False
        if num_xpus > 1:
            config['distributed_type'] = 'MULTI_XPU'
        else:
            config['distributed_type'] = 'NO'
    elif is_npu_available():
        num_npus = torch.npu.device_count()
        config['num_processes'] = num_npus
        config['use_cpu'] = False
        if num_npus > 1:
            config['distributed_type'] = 'MULTI_NPU'
        else:
            config['distributed_type'] = 'NO'
    else:
        num_gpus = 0
        config['use_cpu'] = True
        config['num_processes'] = 1
        config['distributed_type'] = 'NO'
    config = ClusterConfig(**config )
    config.to_json_file(path )
    return path
def default_command_parser( parser , parents ):
    parser = parser.add_parser('default' , parents=parents , help=description , formatter_class=SubcommandHelpFormatter )
    parser.add_argument(
        '--config_file' , default=default_json_config_file , help=(
            'The path to use to store the config file. Will default to a file named default_config.yaml in the cache '
            'location, which is the content of the environment `HF_HOME` suffixed with \'accelerate\', or if you don\'t have '
            'such an environment variable, your cache directory (\'~/.cache\' or the content of `XDG_CACHE_HOME`) suffixed '
            'with \'huggingface\'.'
        ) , dest='save_location' , )
    parser.add_argument(
        '--mixed_precision' , choices=['no', 'fp16', 'bf16', 'fp8'] , type=str , help='Whether or not to use mixed precision training. '
        'Choose between FP16 and BF16 (bfloat16) training. '
        'BF16 training is only supported on Nvidia Ampere GPUs and PyTorch 1.10 or later.' , default='no' , )
    parser.set_defaults(func=default_config_command )
    return parser
def default_config_command( args ):
    config_file = write_basic_config(args.mixed_precision , args.save_location )
    if config_file:
        print(F'''accelerate configuration saved at {config_file}''' )
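# Minimal usage sketch (module path assumed from accelerate's source layout; verify before relying on it):
#   from accelerate.commands.config.default import write_basic_config
#   write_basic_config(mixed_precision='fp16')  # writes the default config file and returns its path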
| 195 | 0 |
"""simple docstring"""
import os
import sys
import warnings
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen
from ..table import array_cast
from ..utils.file_utils import is_local_path
from ..utils.py_utils import first_non_null_value, no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
import PIL.Image
from .features import FeatureType
_IMAGE_COMPRESSION_FORMATS : Optional[List[str]] = None
_NATIVE_BYTEORDER = '<' if sys.byteorder == 'little' else '>'
# Origin: https://github.com/python-pillow/Pillow/blob/698951e19e19972aeed56df686868f1329981c12/src/PIL/Image.py#L3126 minus "|i1" which values are not preserved correctly when saving and loading an image
_VALID_IMAGE_ARRAY_DTPYES = [
np.dtype('|b1'),
np.dtype('|u1'),
np.dtype('<u2'),
np.dtype('>u2'),
np.dtype('<i2'),
np.dtype('>i2'),
np.dtype('<u4'),
np.dtype('>u4'),
np.dtype('<i4'),
np.dtype('>i4'),
np.dtype('<f4'),
np.dtype('>f4'),
np.dtype('<f8'),
np.dtype('>f8'),
]
@dataclass
class __UpperCamelCase :
    decode: bool = True
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "PIL.Image.Image"
    pa_type: ClassVar[Any] = pa.struct({"bytes": pa.binary(), "path": pa.string()} )
    _type: str = field(default="Image" , init=False , repr=False )
def __call__(self : str):
return self.pa_type
    def SCREAMING_SNAKE_CASE__ (self : Tuple , value : Union[str, bytes, dict, np.ndarray, "PIL.Image.Image"]):
        if config.PIL_AVAILABLE:
            import PIL.Image
        else:
            raise ImportError("To support encoding images, please install 'Pillow'.")
        if isinstance(value , list):
            value = np.array(value)
        if isinstance(value , str):
            return {"path": value, "bytes": None}
        elif isinstance(value , bytes):
            return {"path": None, "bytes": value}
        elif isinstance(value , np.ndarray):
            # convert the image array to PNG/TIFF bytes
            return encode_np_array(value)
        elif isinstance(value , PIL.Image.Image):
            # convert the PIL image to bytes (default format is PNG/TIFF)
            return encode_pil_image(value)
        elif value.get("path") is not None and os.path.isfile(value["path"]):
            # we set "bytes": None to not duplicate the data if they're already available locally
            return {"bytes": None, "path": value.get("path")}
        elif value.get("bytes") is not None or value.get("path") is not None:
            # store the image bytes, and path is used to infer the image format using the file extension
            return {"bytes": value.get("bytes"), "path": value.get("path")}
        else:
            raise ValueError(
                F"""An image sample should have one of 'path' or 'bytes' but they are missing or None in {value}.""")
def SCREAMING_SNAKE_CASE__ (self : int , __SCREAMING_SNAKE_CASE : dict , __SCREAMING_SNAKE_CASE : Union[str, Any]=None):
if not self.decode:
raise RuntimeError("Decoding is disabled for this feature. Please use Image(decode=True) instead.")
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError("To support decoding images, please install 'Pillow'.")
if token_per_repo_id is None:
            token_per_repo_id = {}
        path , bytes_ = value["path"], value["bytes"]
        if bytes_ is None:
            if path is None:
                raise ValueError(F"""An image should have one of 'path' or 'bytes' but both are None in {value}.""")
            else:
                if is_local_path(path):
                    image = PIL.Image.open(path)
                else:
                    source_url = path.split("::")[-1]
                    try:
                        repo_id = string_to_dict(source_url , config.HUB_DATASETS_URL)["repo_id"]
                        use_auth_token = token_per_repo_id.get(repo_id)
                    except ValueError:
                        use_auth_token = None
                    with xopen(path , "rb" , use_auth_token=use_auth_token) as f:
                        bytes_ = BytesIO(f.read())
                    image = PIL.Image.open(bytes_)
        else:
            image = PIL.Image.open(BytesIO(bytes_))
        image.load() # to avoid "Too many open files" errors
        return image
def SCREAMING_SNAKE_CASE__ (self : Optional[Any]):
from .features import Value
return (
self
if self.decode
else {
"bytes": Value("binary"),
"path": Value("string"),
}
)
def SCREAMING_SNAKE_CASE__ (self : Optional[int] , __SCREAMING_SNAKE_CASE : Union[pa.StringArray, pa.StructArray, pa.ListArray]):
if pa.types.is_string(storage.type):
A = pa.array([None] * len(__SCREAMING_SNAKE_CASE) , type=pa.binary())
A = pa.StructArray.from_arrays([bytes_array, storage] , ["bytes", "path"] , mask=storage.is_null())
elif pa.types.is_binary(storage.type):
A = pa.array([None] * len(__SCREAMING_SNAKE_CASE) , type=pa.string())
A = pa.StructArray.from_arrays([storage, path_array] , ["bytes", "path"] , mask=storage.is_null())
elif pa.types.is_struct(storage.type):
if storage.type.get_field_index("bytes") >= 0:
A = storage.field("bytes")
else:
A = pa.array([None] * len(__SCREAMING_SNAKE_CASE) , type=pa.binary())
if storage.type.get_field_index("path") >= 0:
A = storage.field("path")
else:
A = pa.array([None] * len(__SCREAMING_SNAKE_CASE) , type=pa.string())
A = pa.StructArray.from_arrays([bytes_array, path_array] , ["bytes", "path"] , mask=storage.is_null())
elif pa.types.is_list(storage.type):
A = pa.array(
[encode_np_array(np.array(__SCREAMING_SNAKE_CASE))["bytes"] if arr is not None else None for arr in storage.to_pylist()] , type=pa.binary() , )
A = pa.array([None] * len(__SCREAMING_SNAKE_CASE) , type=pa.string())
A = pa.StructArray.from_arrays(
[bytes_array, path_array] , ["bytes", "path"] , mask=bytes_array.is_null())
return array_cast(__SCREAMING_SNAKE_CASE , self.pa_type)
def SCREAMING_SNAKE_CASE__ (self : Any , __SCREAMING_SNAKE_CASE : pa.StructArray):
@no_op_if_value_is_null
def path_to_bytes(__SCREAMING_SNAKE_CASE : str):
with xopen(__SCREAMING_SNAKE_CASE , "rb") as f:
A = f.read()
return bytes_
A = pa.array(
[
(path_to_bytes(x["path"]) if x["bytes"] is None else x["bytes"]) if x is not None else None
for x in storage.to_pylist()
] , type=pa.binary() , )
A = pa.array(
[os.path.basename(__SCREAMING_SNAKE_CASE) if path is not None else None for path in storage.field("path").to_pylist()] , type=pa.string() , )
A = pa.StructArray.from_arrays([bytes_array, path_array] , ["bytes", "path"] , mask=bytes_array.is_null())
return array_cast(__SCREAMING_SNAKE_CASE , self.pa_type)
def list_image_compression_formats( ):
    """simple docstring"""
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError("To support encoding images, please install 'Pillow'." )
    global _IMAGE_COMPRESSION_FORMATS
    if _IMAGE_COMPRESSION_FORMATS is None:
        PIL.Image.init()
        _IMAGE_COMPRESSION_FORMATS = list(set(PIL.Image.OPEN.keys() ) & set(PIL.Image.SAVE.keys() ) )
    return _IMAGE_COMPRESSION_FORMATS
def image_to_bytes( image ):
    """simple docstring"""
    buffer = BytesIO()
    if image.format in list_image_compression_formats():
        format = image.format
    else:
        format = "PNG" if image.mode in ["1", "L", "LA", "RGB", "RGBA"] else "TIFF"
    image.save(buffer , format=format )
    return buffer.getvalue()
def encode_pil_image( image ):
    """simple docstring"""
    if hasattr(image , "filename" ) and image.filename != "":
        return {"path": image.filename, "bytes": None}
    else:
        return {"path": None, "bytes": image_to_bytes(image )}
def encode_np_array( array ):
    """simple docstring"""
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError("To support encoding images, please install 'Pillow'." )
    dtype = array.dtype
    dtype_byteorder = dtype.byteorder if dtype.byteorder != "=" else _NATIVE_BYTEORDER
    dtype_kind = dtype.kind
    dtype_itemsize = dtype.itemsize
    dest_dtype = None
    # Multi-channel array case (only np.dtype("|u1") is allowed)
    if array.shape[2:]:
        dest_dtype = np.dtype("|u1" )
        if dtype_kind not in ["u", "i"]:
            raise TypeError(
                F"""Unsupported array dtype {dtype} for image encoding. Only {dest_dtype} is supported for multi-channel arrays.""" )
        if dtype is not dest_dtype:
            warnings.warn(F"""Downcasting array dtype {dtype} to {dest_dtype} to be compatible with 'Pillow'""" )
    # Exact match
    elif dtype in _VALID_IMAGE_ARRAY_DTPYES:
        dest_dtype = dtype
    else: # Downcast the type within the kind (np.can_cast(from_type, to_type, casting="same_kind") doesn't behave as expected, so do it manually)
        while dtype_itemsize >= 1:
            dest_dtype_str = dtype_byteorder + dtype_kind + str(dtype_itemsize )
            if np.dtype(dest_dtype_str ) in _VALID_IMAGE_ARRAY_DTPYES:
                dest_dtype = np.dtype(dest_dtype_str )
                warnings.warn(F"""Downcasting array dtype {dtype} to {dest_dtype} to be compatible with 'Pillow'""" )
                break
            else:
                dtype_itemsize //= 2
        if dest_dtype is None:
            raise TypeError(
                F"""Cannot convert dtype {dtype} to a valid image dtype. Valid image dtypes: {_VALID_IMAGE_ARRAY_DTPYES}""" )
    image = PIL.Image.fromarray(array.astype(dest_dtype ) )
    return {"path": None, "bytes": image_to_bytes(image )}
def objects_to_list_of_image_dicts( objs ):
    """simple docstring"""
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError("To support encoding images, please install 'Pillow'." )
    if objs:
        _ , obj = first_non_null_value(objs )
        if isinstance(obj , str ):
            return [{"path": obj, "bytes": None} if obj is not None else None for obj in objs]
        if isinstance(obj , np.ndarray ):
            obj_to_image_dict_func = no_op_if_value_is_null(encode_np_array )
            return [obj_to_image_dict_func(obj ) for obj in objs]
        elif isinstance(obj , PIL.Image.Image ):
            obj_to_image_dict_func = no_op_if_value_is_null(encode_pil_image )
            return [obj_to_image_dict_func(obj ) for obj in objs]
        else:
            return objs
    else:
        return objs
| 57 |
"""simple docstring"""
import argparse
import json
import os
import torch
from transformers import LukeConfig, LukeModel, LukeTokenizer, RobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def convert_luke_checkpoint( checkpoint_path , metadata_path , entity_vocab_path , pytorch_dump_folder_path , model_size ):
"""simple docstring"""
# Load configuration defined in the metadata file
with open(lowercase__ ) as metadata_file:
A = json.load(lowercase__ )
A = LukeConfig(use_entity_aware_attention=lowercase__ , **metadata["model_config"] )
# Load in the weights from the checkpoint_path
A = torch.load(lowercase__ , map_location="cpu" )
# Load the entity vocab file
A = load_entity_vocab(lowercase__ )
A = RobertaTokenizer.from_pretrained(metadata["model_config"]["bert_model_name"] )
# Add special tokens to the token vocabulary for downstream tasks
A = AddedToken("<ent>" , lstrip=lowercase__ , rstrip=lowercase__ )
A = AddedToken("<ent2>" , lstrip=lowercase__ , rstrip=lowercase__ )
tokenizer.add_special_tokens({"additional_special_tokens": [entity_token_a, entity_token_a]} )
config.vocab_size += 2
print(F"""Saving tokenizer to {pytorch_dump_folder_path}""" )
tokenizer.save_pretrained(lowercase__ )
with open(os.path.join(lowercase__ , LukeTokenizer.vocab_files_names["entity_vocab_file"] ) , "w" ) as f:
json.dump(lowercase__ , lowercase__ )
A = LukeTokenizer.from_pretrained(lowercase__ )
# Initialize the embeddings of the special tokens
A = state_dict["embeddings.word_embeddings.weight"]
A = word_emb[tokenizer.convert_tokens_to_ids(["@"] )[0]].unsqueeze(0 )
A = word_emb[tokenizer.convert_tokens_to_ids(["#"] )[0]].unsqueeze(0 )
A = torch.cat([word_emb, ent_emb, enta_emb] )
# Initialize the query layers of the entity-aware self-attention mechanism
for layer_index in range(config.num_hidden_layers ):
for matrix_name in ["query.weight", "query.bias"]:
A = F"""encoder.layer.{layer_index}.attention.self."""
A = state_dict[prefix + matrix_name]
A = state_dict[prefix + matrix_name]
A = state_dict[prefix + matrix_name]
# Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
A = state_dict["entity_embeddings.entity_embeddings.weight"]
A = entity_emb[entity_vocab["[MASK]"]]
A = LukeModel(config=lowercase__ ).eval()
A , A = model.load_state_dict(lowercase__ , strict=lowercase__ )
if not (len(lowercase__ ) == 1 and missing_keys[0] == "embeddings.position_ids"):
raise ValueError(F"""Missing keys {", ".join(lowercase__ )}. Expected only missing embeddings.position_ids""" )
if not (all(key.startswith("entity_predictions" ) or key.startswith("lm_head" ) for key in unexpected_keys )):
raise ValueError(
"Unexpected keys"
F""" {", ".join([key for key in unexpected_keys if not (key.startswith("entity_predictions" ) or key.startswith("lm_head" ))] )}""" )
# Check outputs
A = LukeTokenizer.from_pretrained(lowercase__ , task="entity_classification" )
A = (
"Top seed Ana Ivanovic said on Thursday she could hardly believe her luck as a fortuitous netcord helped the"
" new world number one avoid a humiliating second- round exit at Wimbledon ."
)
A = (39, 42)
A = tokenizer(lowercase__ , entity_spans=[span] , add_prefix_space=lowercase__ , return_tensors="pt" )
A = model(**lowercase__ )
# Verify word hidden states
if model_size == "large":
A = torch.Size((1, 42, 1_024) )
A = torch.tensor(
[[0.01_33, 0.08_65, 0.00_95], [0.30_93, -0.25_76, -0.74_18], [-0.17_20, -0.21_17, -0.28_69]] )
else: # base
A = torch.Size((1, 42, 768) )
A = torch.tensor([[0.00_37, 0.13_68, -0.00_91], [0.10_99, 0.33_29, -0.10_95], [0.07_65, 0.53_35, 0.11_79]] )
if not (outputs.last_hidden_state.shape == expected_shape):
raise ValueError(
F"""Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}""" )
if not torch.allclose(outputs.last_hidden_state[0, :3, :3] , lowercase__ , atol=1e-4 ):
raise ValueError
# Verify entity hidden states
if model_size == "large":
A = torch.Size((1, 1, 1_024) )
A = torch.tensor([[0.04_66, -0.01_06, -0.01_79]] )
else: # base
A = torch.Size((1, 1, 768) )
A = torch.tensor([[0.14_57, 0.10_44, 0.01_74]] )
    if not (outputs.entity_last_hidden_state.shape == expected_shape):
raise ValueError(
F"""Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is"""
F""" {expected_shape}""" )
if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3] , lowercase__ , atol=1e-4 ):
raise ValueError
# Finally, save our PyTorch model and tokenizer
print("Saving PyTorch model to {}".format(lowercase__ ) )
model.save_pretrained(lowercase__ )
def load_entity_vocab( entity_vocab_path ):
    """simple docstring"""
    entity_vocab = {}
    with open(entity_vocab_path , "r" , encoding="utf-8" ) as f:
        for index, line in enumerate(f ):
            title , _ = line.rstrip().split("\t" )
            entity_vocab[title] = index
    return entity_vocab
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument('--checkpoint_path', type=str, help='Path to a pytorch_model.bin file.')
parser.add_argument(
'--metadata_path', default=None, type=str, help='Path to a metadata.json file, defining the configuration.'
)
parser.add_argument(
'--entity_vocab_path',
default=None,
type=str,
help='Path to an entity_vocab.tsv file, containing the entity vocabulary.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to where to dump the output PyTorch model.'
)
parser.add_argument(
'--model_size', default='base', type=str, choices=['base', 'large'], help='Size of the model to be converted.'
)
    args = parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
)
| 57 | 1 |
"""simple docstring"""
from arguments import InitializationArguments
from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer, HfArgumentParser
# Configuration
parser = HfArgumentParser(InitializationArguments)
args = parser.parse_args()
# Load codeparrot tokenizer trained for Python code tokenization
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name)
# Config: "scale_attn_by_layer_idx" and "reorder_and_upcast_attn" are Mistral stability tweaks
config_kwargs = {
'vocab_size': len(tokenizer),
'scale_attn_by_inverse_layer_idx': True,
'reorder_and_upcast_attn': True,
}
# Load model config (GPT-2 large in this case)
config = AutoConfig.from_pretrained(args.config_name, **config_kwargs)
# Initialize new model with config
model = AutoModelForCausalLM.from_config(config)
# Save model to the hub
model.save_pretrained(args.model_name, push_to_hub=args.push_to_hub)
| 263 |
"""simple docstring"""
import unittest
from datasets import load_dataset
from transformers.pipelines import pipeline
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_torch, slow
@is_pipeline_test
@require_torch
class _UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@require_torch
def __lowerCAmelCase ( self ) -> Any:
        audio_classifier = pipeline(
            task='''zero-shot-audio-classification''' , model='''hf-internal-testing/tiny-clap-htsat-unfused''' )
        dataset = load_dataset('''ashraq/esc50''' )
        audio = dataset['''train''']['''audio'''][-1]['''array''']
        output = audio_classifier(audio , candidate_labels=['''Sound of a dog''', '''Sound of vacuum cleaner'''] )
        self.assertEqual(
            nested_simplify(output ) , [{'''score''': 0.501, '''label''': '''Sound of a dog'''}, {'''score''': 0.499, '''label''': '''Sound of vacuum cleaner'''}] , )
@unittest.skip('''No models are available in TF''' )
def __lowerCAmelCase ( self ) -> Union[str, Any]:
pass
@slow
@require_torch
def __lowerCAmelCase ( self ) -> str:
        audio_classifier = pipeline(
            task='''zero-shot-audio-classification''' , model='''laion/clap-htsat-unfused''' , )
        # This is an audio of a dog
        dataset = load_dataset('''ashraq/esc50''' )
        audio = dataset['''train''']['''audio'''][-1]['''array''']
        output = audio_classifier(audio , candidate_labels=['''Sound of a dog''', '''Sound of vacuum cleaner'''] )
        self.assertEqual(
            nested_simplify(output ) , [
                {'''score''': 0.999, '''label''': '''Sound of a dog'''},
                {'''score''': 0.001, '''label''': '''Sound of vacuum cleaner'''},
            ] , )
        output = audio_classifier([audio] * 5 , candidate_labels=['''Sound of a dog''', '''Sound of vacuum cleaner'''] )
        self.assertEqual(
            nested_simplify(output ) , [
                [
                    {'''score''': 0.999, '''label''': '''Sound of a dog'''},
                    {'''score''': 0.001, '''label''': '''Sound of vacuum cleaner'''},
                ],
            ]
            * 5 , )
        output = audio_classifier(
            [audio] * 5 , candidate_labels=['''Sound of a dog''', '''Sound of vacuum cleaner'''] , batch_size=5 )
        self.assertEqual(
            nested_simplify(output ) , [
                [
                    {'''score''': 0.999, '''label''': '''Sound of a dog'''},
                    {'''score''': 0.001, '''label''': '''Sound of vacuum cleaner'''},
                ],
            ]
            * 5 , )
@unittest.skip('''No models are available in TF''' )
def __lowerCAmelCase ( self ) -> int:
pass
| 263 | 1 |
import unittest
from transformers import LiltConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
)
from transformers.models.lilt.modeling_lilt import LILT_PRETRAINED_MODEL_ARCHIVE_LIST
class LiltModelTester :
"""simple docstring"""
def __init__( self , lowerCAmelCase_ , lowerCAmelCase_=13 , lowerCAmelCase_=7 , lowerCAmelCase_=True , lowerCAmelCase_=True , lowerCAmelCase_=True , lowerCAmelCase_=True , lowerCAmelCase_=99 , lowerCAmelCase_=24 , lowerCAmelCase_=2 , lowerCAmelCase_=6 , lowerCAmelCase_=37 , lowerCAmelCase_="gelu" , lowerCAmelCase_=0.1 , lowerCAmelCase_=0.1 , lowerCAmelCase_=5_12 , lowerCAmelCase_=16 , lowerCAmelCase_=2 , lowerCAmelCase_=0.02 , lowerCAmelCase_=3 , lowerCAmelCase_=None , lowerCAmelCase_=10_00 , ) -> Any:
_A = parent
_A = batch_size
_A = seq_length
_A = is_training
_A = use_input_mask
_A = use_token_type_ids
_A = use_labels
_A = vocab_size
_A = hidden_size
_A = num_hidden_layers
_A = num_attention_heads
_A = intermediate_size
_A = hidden_act
_A = hidden_dropout_prob
_A = attention_probs_dropout_prob
_A = max_position_embeddings
_A = type_vocab_size
_A = type_sequence_label_size
_A = initializer_range
_A = num_labels
_A = scope
_A = range_bbox
def UpperCAmelCase ( self ) -> Optional[Any]:
_A = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_A = ids_tensor([self.batch_size, self.seq_length, 4] , self.range_bbox )
# Ensure that bbox is legal
for i in range(bbox.shape[0] ):
for j in range(bbox.shape[1] ):
if bbox[i, j, 3] < bbox[i, j, 1]:
_A = bbox[i, j, 3]
_A = bbox[i, j, 1]
_A = t
if bbox[i, j, 2] < bbox[i, j, 0]:
_A = bbox[i, j, 2]
_A = bbox[i, j, 0]
_A = t
_A = None
if self.use_input_mask:
_A = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
_A = None
if self.use_token_type_ids:
_A = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_A = None
_A = None
if self.use_labels:
_A = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_A = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_A = self.get_config()
return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
def UpperCAmelCase ( self ) -> Any:
return LiltConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , ) -> Optional[int]:
_A = LiltModel(config=lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
_A = model(lowerCAmelCase_ , bbox=lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ )
_A = model(lowerCAmelCase_ , bbox=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ )
_A = model(lowerCAmelCase_ , bbox=lowerCAmelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , ) -> List[Any]:
_A = self.num_labels
_A = LiltForTokenClassification(config=lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
_A = model(
lowerCAmelCase_ , bbox=lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , labels=lowerCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , ) -> int:
_A = LiltForQuestionAnswering(config=lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
_A = model(
lowerCAmelCase_ , bbox=lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , start_positions=lowerCAmelCase_ , end_positions=lowerCAmelCase_ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def UpperCAmelCase ( self ) -> List[Any]:
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = config_and_inputs
        inputs_dict = {
            """input_ids""": input_ids,
            """bbox""": bbox,
            """token_type_ids""": token_type_ids,
            """attention_mask""": input_mask,
        }
        return config, inputs_dict
@require_torch
class LiltModelTest( ModelTesterMixin , GenerationTesterMixin , PipelineTesterMixin , unittest.TestCase ):
"""simple docstring"""
lowerCamelCase :Union[str, Any] = (
(
LiltModel,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltForQuestionAnswering,
)
if is_torch_available()
else ()
)
lowerCamelCase :Dict = (
{
'''feature-extraction''': LiltModel,
'''question-answering''': LiltForQuestionAnswering,
'''text-classification''': LiltForSequenceClassification,
'''token-classification''': LiltForTokenClassification,
'''zero-shot''': LiltForSequenceClassification,
}
if is_torch_available()
else {}
)
lowerCamelCase :Tuple = False
lowerCamelCase :Optional[Any] = False
def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> Tuple:
return True
def UpperCAmelCase ( self ) -> List[str]:
        self.model_tester = LiltModelTester(self )
        self.config_tester = ConfigTester(self , config_class=LiltConfig , hidden_size=37 )
def UpperCAmelCase ( self ) -> Union[str, Any]:
self.config_tester.run_common_tests()
    def UpperCAmelCase ( self ) -> Any:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def UpperCAmelCase ( self ) -> Dict:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs )
    def UpperCAmelCase ( self ) -> Union[str, Any]:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs )
    def UpperCAmelCase ( self ) -> List[str]:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs )
    @slow
    def UpperCAmelCase ( self ) -> Optional[int]:
        for model_name in LILT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = LiltModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
@require_torch
@slow
class LiltModelIntegrationTest( unittest.TestCase ):
"""simple docstring"""
    def UpperCAmelCase ( self ) -> Any:
        model = LiltModel.from_pretrained("""SCUT-DLVCLab/lilt-roberta-en-base""" ).to(torch_device )
        input_ids = torch.tensor([[1, 2]] , device=torch_device )
        bbox = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]] , device=torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(input_ids=input_ids , bbox=bbox )
        expected_shape = torch.Size([1, 2, 7_68] )
        expected_slice = torch.tensor(
            [[-0.0653, 0.0950, -0.0061], [-0.0545, 0.0926, -0.0324]] , device=torch_device , )
        self.assertEqual(outputs.last_hidden_state.shape , expected_shape )
        self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :, :3] , expected_slice , atol=1E-3 ) )
| 369 | # DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin, SchedulerOutput
@dataclass
class SdeVeOutput( BaseOutput ):
    """simple docstring"""
    prev_sample : torch.FloatTensor
    prev_sample_mean : torch.FloatTensor
class ScoreSdeVeScheduler( SchedulerMixin , ConfigMixin ):
    """simple docstring"""
    order : int = 1
@register_to_config
    def __init__( self , num_train_timesteps = 20_00 , snr = 0.15 , sigma_min = 0.01 , sigma_max = 1348.0 , sampling_eps = 1E-5 , correct_steps = 1 , ) -> Tuple:
        # standard deviation of the initial noise distribution
        self.init_noise_sigma = sigma_max
        # setable values
        self.timesteps = None
        self.set_sigmas(num_train_timesteps , sigma_min , sigma_max , sampling_eps )
    def scale_model_input( self , sample , timestep = None ) -> torch.FloatTensor:
        return sample
    def set_timesteps( self , num_inference_steps , sampling_eps = None , device = None ) -> Tuple:
        sampling_eps = sampling_eps if sampling_eps is not None else self.config.sampling_eps
        self.timesteps = torch.linspace(1 , sampling_eps , num_inference_steps , device=device )
    def set_sigmas( self , num_inference_steps , sigma_min = None , sigma_max = None , sampling_eps = None ) -> Any:
        sigma_min = sigma_min if sigma_min is not None else self.config.sigma_min
        sigma_max = sigma_max if sigma_max is not None else self.config.sigma_max
        sampling_eps = sampling_eps if sampling_eps is not None else self.config.sampling_eps
        if self.timesteps is None:
            self.set_timesteps(num_inference_steps , sampling_eps )
        self.sigmas = sigma_min * (sigma_max / sigma_min) ** (self.timesteps / sampling_eps)
        self.discrete_sigmas = torch.exp(torch.linspace(math.log(sigma_min ) , math.log(sigma_max ) , num_inference_steps ) )
        self.sigmas = torch.tensor([sigma_min * (sigma_max / sigma_min) ** t for t in self.timesteps] )
    def get_adjacent_sigma( self , timesteps , t ) -> torch.FloatTensor:
        return torch.where(
            timesteps == 0 , torch.zeros_like(t.to(timesteps.device ) ) , self.discrete_sigmas[timesteps - 1].to(timesteps.device ) , )
    def step_pred( self , model_output , timestep , sample , generator = None , return_dict = True , ) -> Union[SdeVeOutput, Tuple]:
        if self.timesteps is None:
            raise ValueError(
                """`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler""" )
        timestep = timestep * torch.ones(
            sample.shape[0] , device=sample.device ) # torch.repeat_interleave(timestep, sample.shape[0])
        timesteps = (timestep * (len(self.timesteps ) - 1)).long()
        # mps requires indices to be in the same device, so we use cpu as is the default with cuda
        timesteps = timesteps.to(self.discrete_sigmas.device )
        sigma = self.discrete_sigmas[timesteps].to(sample.device )
        adjacent_sigma = self.get_adjacent_sigma(timesteps , timestep ).to(sample.device )
        drift = torch.zeros_like(sample )
        diffusion = (sigma**2 - adjacent_sigma**2) ** 0.5
        # equation 6 in the paper: the model_output modeled by the network is grad_x log pt(x)
        # also equation 47 shows the analog from SDE models to ancestral sampling methods
        diffusion = diffusion.flatten()
        while len(diffusion.shape ) < len(sample.shape ):
            diffusion = diffusion.unsqueeze(-1 )
        drift = drift - diffusion**2 * model_output
        # equation 6: sample noise for the diffusion term of
        noise = randn_tensor(
            sample.shape , layout=sample.layout , generator=generator , device=sample.device , dtype=sample.dtype )
        prev_sample_mean = sample - drift # subtract because `dt` is a small negative timestep
        # TODO is the variable diffusion the correct scaling term for the noise?
        prev_sample = prev_sample_mean + diffusion * noise # add impact of diffusion field g
        if not return_dict:
            return (prev_sample, prev_sample_mean)
        return SdeVeOutput(prev_sample=prev_sample , prev_sample_mean=prev_sample_mean )
    def step_correct( self , model_output , sample , generator = None , return_dict = True , ) -> Union[SchedulerOutput, Tuple]:
        if self.timesteps is None:
            raise ValueError(
                """`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler""" )
        # For small batch sizes, the paper "suggest replacing norm(z) with sqrt(d), where d is the dim. of z"
        # sample noise for correction
        noise = randn_tensor(sample.shape , layout=sample.layout , generator=generator ).to(sample.device )
        # compute step size from the model_output, the noise, and the snr
        grad_norm = torch.norm(model_output.reshape(model_output.shape[0] , -1 ) , dim=-1 ).mean()
        noise_norm = torch.norm(noise.reshape(noise.shape[0] , -1 ) , dim=-1 ).mean()
        step_size = (self.config.snr * noise_norm / grad_norm) ** 2 * 2
        step_size = step_size * torch.ones(sample.shape[0] ).to(sample.device )
        # self.repeat_scalar(step_size, sample.shape[0])
        # compute corrected sample: model_output term and noise term
        step_size = step_size.flatten()
        while len(step_size.shape ) < len(sample.shape ):
            step_size = step_size.unsqueeze(-1 )
        prev_sample_mean = sample + step_size * model_output
        prev_sample = prev_sample_mean + ((step_size * 2) ** 0.5) * noise
        if not return_dict:
            return (prev_sample,)
        return SchedulerOutput(prev_sample=prev_sample )
    def add_noise( self , original_samples , noise , timesteps , ) -> torch.FloatTensor:
        # Make sure sigmas and timesteps have the same device and dtype as original_samples
        timesteps = timesteps.to(original_samples.device )
        sigmas = self.discrete_sigmas.to(original_samples.device )[timesteps]
        noise = (
            noise * sigmas[:, None, None, None]
            if noise is not None
            else torch.randn_like(original_samples ) * sigmas[:, None, None, None]
        )
        noisy_samples = noise + original_samples
        return noisy_samples
def __len__( self ) -> List[str]:
return self.config.num_train_timesteps
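    # Design note: step_pred is the (reverse-SDE) predictor and step_correct the annealed Langevin
    # corrector; alternating them gives the predictor-corrector sampler of Song et al. 2021, where
    # the corrector step size is 2 * (snr * ||z|| / ||score||)^2.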
| 81 | 0 |
# DISCLAIMER: This file is strongly influenced by https://github.com/ermongroup/ddim
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax
import jax.numpy as jnp
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils_flax import (
CommonSchedulerState,
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
add_noise_common,
get_velocity_common,
)
@flax.struct.dataclass
class DDPMSchedulerState :
    common : CommonSchedulerState
    # setable values
    init_noise_sigma : jnp.ndarray
    timesteps : jnp.ndarray
    num_inference_steps : Optional[int] = None
@classmethod
    def create( cls , common : CommonSchedulerState , init_noise_sigma : jnp.ndarray , timesteps : jnp.ndarray ) -> Optional[int]:
        return cls(common=common , init_noise_sigma=init_noise_sigma , timesteps=timesteps )
@dataclass
class FlaxDDPMSchedulerOutput( FlaxSchedulerOutput ):
    state : DDPMSchedulerState
class FlaxDDPMScheduler( FlaxSchedulerMixin , ConfigMixin ):
    _compatibles = [e.name for e in FlaxKarrasDiffusionSchedulers]
    dtype : jnp.dtype
@property
def __UpperCamelCase ( self : Union[str, Any] ) -> List[Any]:
return True
@register_to_config
    def __init__( self : Optional[Any] , num_train_timesteps : int = 10_00 , beta_start : float = 0.00_01 , beta_end : float = 0.02 , beta_schedule : str = "linear" , trained_betas : Optional[jnp.ndarray] = None , variance_type : str = "fixed_small" , clip_sample : bool = True , prediction_type : str = "epsilon" , dtype : jnp.dtype = jnp.float32 , ) -> List[str]:
        self.dtype = dtype
    def create_state( self : int , common : Optional[CommonSchedulerState] = None ) -> DDPMSchedulerState:
        if common is None:
            common = CommonSchedulerState.create(self )
        # standard deviation of the initial noise distribution
        init_noise_sigma = jnp.array(1.0 , dtype=self.dtype )
        timesteps = jnp.arange(0 , self.config.num_train_timesteps ).round()[::-1]
        return DDPMSchedulerState.create(
def __UpperCamelCase ( self : Optional[int] , lowercase_ : DDPMSchedulerState , lowercase_ : jnp.ndarray , lowercase_ : Optional[int] = None ) -> jnp.ndarray:
return sample
def __UpperCamelCase ( self : int , lowercase_ : DDPMSchedulerState , lowercase_ : int , lowercase_ : Tuple = () ) -> DDPMSchedulerState:
lowercase__ : int = self.config.num_train_timesteps // num_inference_steps
# creates integer timesteps by multiplying by ratio
# rounding to avoid issues when num_inference_step is power of 3
lowercase__ : List[str] = (jnp.arange(0 , lowercase_ ) * step_ratio).round()[::-1]
return state.replace(
num_inference_steps=lowercase_ , timesteps=lowercase_ , )
def __UpperCamelCase ( self : Optional[Any] , lowercase_ : DDPMSchedulerState , lowercase_ : Optional[Any] , lowercase_ : Any=None , lowercase_ : Tuple=None ) -> Optional[int]:
lowercase__ : Dict = state.common.alphas_cumprod[t]
lowercase__ : List[Any] = jnp.where(t > 0 , state.common.alphas_cumprod[t - 1] , jnp.array(1.0 , dtype=self.dtype ) )
# For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
# and sample from it to get previous sample
# x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
lowercase__ : List[str] = (1 - alpha_prod_t_prev) / (1 - alpha_prod_t) * state.common.betas[t]
if variance_type is None:
lowercase__ : Dict = self.config.variance_type
# hacks - were probably added for training stability
if variance_type == "fixed_small":
lowercase__ : List[Any] = jnp.clip(lowercase_ , a_min=1E-20 )
# for rl-diffuser https://arxiv.org/abs/2205.09991
elif variance_type == "fixed_small_log":
lowercase__ : Union[str, Any] = jnp.log(jnp.clip(lowercase_ , a_min=1E-20 ) )
elif variance_type == "fixed_large":
lowercase__ : Optional[int] = state.common.betas[t]
elif variance_type == "fixed_large_log":
# Glide max_log
lowercase__ : List[str] = jnp.log(state.common.betas[t] )
elif variance_type == "learned":
return predicted_variance
elif variance_type == "learned_range":
lowercase__ : Tuple = variance
lowercase__ : Union[str, Any] = state.common.betas[t]
lowercase__ : int = (predicted_variance + 1) / 2
lowercase__ : List[str] = frac * max_log + (1 - frac) * min_log
return variance
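    # Note on "fixed_small": clipping the posterior variance at 1e-20 before any log keeps t == 0
    # safe, where alpha_prod_t_prev == 1 makes the unclipped variance exactly zero.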
def __UpperCamelCase ( self : Any , lowercase_ : DDPMSchedulerState , lowercase_ : jnp.ndarray , lowercase_ : int , lowercase_ : jnp.ndarray , lowercase_ : Optional[jax.random.KeyArray] = None , lowercase_ : bool = True , ) -> Union[FlaxDDPMSchedulerOutput, Tuple]:
lowercase__ : str = timestep
if key is None:
lowercase__ : Any = jax.random.PRNGKey(0 )
if model_output.shape[1] == sample.shape[1] * 2 and self.config.variance_type in ["learned", "learned_range"]:
lowercase__ , lowercase__ : Union[str, Any] = jnp.split(lowercase_ , sample.shape[1] , axis=1 )
else:
lowercase__ : Any = None
# 1. compute alphas, betas
lowercase__ : str = state.common.alphas_cumprod[t]
lowercase__ : Optional[Any] = jnp.where(t > 0 , state.common.alphas_cumprod[t - 1] , jnp.array(1.0 , dtype=self.dtype ) )
lowercase__ : Optional[int] = 1 - alpha_prod_t
lowercase__ : List[str] = 1 - alpha_prod_t_prev
# 2. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
if self.config.prediction_type == "epsilon":
lowercase__ : Any = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
elif self.config.prediction_type == "sample":
lowercase__ : Any = model_output
elif self.config.prediction_type == "v_prediction":
lowercase__ : List[str] = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
else:
raise ValueError(
F'''prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample` '''
" for the FlaxDDPMScheduler." )
# 3. Clip "predicted x_0"
if self.config.clip_sample:
lowercase__ : str = jnp.clip(lowercase_ , -1 , 1 )
# 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
lowercase__ : List[str] = (alpha_prod_t_prev ** 0.5 * state.common.betas[t]) / beta_prod_t
lowercase__ : List[Any] = state.common.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t
# 5. Compute predicted previous sample µ_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
lowercase__ : Any = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
# 6. Add noise
def random_variance():
lowercase__ : Optional[int] = jax.random.split(lowercase_ , num=1 )
lowercase__ : List[str] = jax.random.normal(lowercase_ , shape=model_output.shape , dtype=self.dtype )
return (self._get_variance(lowercase_ , lowercase_ , predicted_variance=lowercase_ ) ** 0.5) * noise
lowercase__ : int = jnp.where(t > 0 , random_variance() , jnp.zeros(model_output.shape , dtype=self.dtype ) )
lowercase__ : Any = pred_prev_sample + variance
if not return_dict:
return (pred_prev_sample, state)
return FlaxDDPMSchedulerOutput(prev_sample=lowercase_ , state=lowercase_ )
def __UpperCamelCase ( self : List[Any] , lowercase_ : DDPMSchedulerState , lowercase_ : jnp.ndarray , lowercase_ : jnp.ndarray , lowercase_ : jnp.ndarray , ) -> jnp.ndarray:
return add_noise_common(state.common , lowercase_ , lowercase_ , lowercase_ )
def __UpperCamelCase ( self : str , lowercase_ : DDPMSchedulerState , lowercase_ : jnp.ndarray , lowercase_ : jnp.ndarray , lowercase_ : jnp.ndarray , ) -> jnp.ndarray:
return get_velocity_common(state.common , lowercase_ , lowercase_ , lowercase_ )
def __len__( self : Any ) -> Optional[Any]:
return self.config.num_train_timesteps
| 87 |
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_mctct': ['MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MCTCTConfig'],
'feature_extraction_mctct': ['MCTCTFeatureExtractor'],
'processing_mctct': ['MCTCTProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_mctct'] = [
'MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST',
'MCTCTForCTC',
'MCTCTModel',
'MCTCTPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_mctct import MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP, MCTCTConfig
from .feature_extraction_mctct import MCTCTFeatureExtractor
from .processing_mctct import MCTCTProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mctct import MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST, MCTCTForCTC, MCTCTModel, MCTCTPreTrainedModel
else:
import sys
__lowerCAmelCase : List[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 88 | 0 |
import argparse
import collections
import numpy as np
import torch
from flax import traverse_util
from tax import checkpoints
from transformers import MTaConfig, UMTaEncoderModel, UMTaForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
def tax_relpos_bias_lookup( params , i , prefix ):
    return params[f"{prefix}/{prefix}/relpos_bias/rel_embedding"][:, i, :]
def tax_attention_lookup( params , i , prefix , layer_name="attention" ):
    k_tmp = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/key/kernel"][:, i, :, :] )
    k = k_tmp.reshape(k_tmp.shape[0] , k_tmp.shape[1] * k_tmp.shape[2] )
    o_tmp = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/out/kernel"][:, i, :, :] )
    o = o_tmp.reshape(o_tmp.shape[0] * o_tmp.shape[1] , o_tmp.shape[2] )
    q_tmp = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/query/kernel"][:, i, :, :] )
    q = q_tmp.reshape(q_tmp.shape[0] , q_tmp.shape[1] * q_tmp.shape[2] )
    v_tmp = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/value/kernel"][:, i, :, :] )
    v = v_tmp.reshape(v_tmp.shape[0] , v_tmp.shape[1] * v_tmp.shape[2] )
    return k, o, q, v
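# The reshapes above fold the (num_heads, head_dim) axes of the T5X attention kernels into one
# hidden axis so they line up with the flat nn.Linear weight matrices on the PyTorch side.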
def tax_mlp_lookup( params , i , prefix , split_mlp_wi=False ):
    if split_mlp_wi:
        wi_0 = params[f"{prefix}/{prefix}/mlp/wi_0/kernel"][:, i, :]
        wi_1 = params[f"{prefix}/{prefix}/mlp/wi_1/kernel"][:, i, :]
        wi = (wi_0, wi_1)
    else:
        wi = params[f"{prefix}/{prefix}/mlp/wi/kernel"][:, i, :]
    wo = params[f"{prefix}/{prefix}/mlp/wo/kernel"][:, i, :]
    return wi, wo
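# v1.1-style checkpoints use a gated activation, hence the two input projections (wi_0, wi_1)
# returned as a tuple when split_mlp_wi is set (see the wi_0 key check in the converter below).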
def tax_layer_norm_lookup( params , i , prefix , layer_name ):
    return params[f"{prefix}/{prefix}/{layer_name}/scale"][:, i]
def convert_tax_to_pytorch( variables : dict , *, num_layers : int , is_encoder_only : bool , scalable_attention : bool = False ):
    old = traverse_util.flatten_dict(variables["target"] )
    old = {"/".join(k ): v for k, v in old.items()}
    # v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi
    split_mlp_wi = "encoder/encoder/mlp/wi_0/kernel" in old
    print("Split MLP:" , split_mlp_wi )
    new = collections.OrderedDict()
    # Shared embeddings.
    new["shared.weight"] = old["token_embedder/embedding"]
# Encoder.
for i in range(_lowerCAmelCase ):
# Block i, layer 0 (Self Attention).
A_ : Optional[Any] = tax_layer_norm_lookup(_lowerCAmelCase , _lowerCAmelCase , "encoder" , "pre_attention_layer_norm" )
A_ , A_ , A_ , A_ : int = tax_attention_lookup(_lowerCAmelCase , _lowerCAmelCase , "encoder" , "attention" )
A_ : int = layer_norm
A_ : Optional[int] = k.T
A_ : Tuple = o.T
A_ : List[str] = q.T
A_ : Any = v.T
# Block i, layer 1 (MLP).
A_ : int = tax_layer_norm_lookup(_lowerCAmelCase , _lowerCAmelCase , "encoder" , "pre_mlp_layer_norm" )
A_ , A_ : Tuple = tax_mlp_lookup(_lowerCAmelCase , _lowerCAmelCase , "encoder" , _lowerCAmelCase )
A_ : Optional[Any] = layer_norm
if split_mlp_wi:
A_ : Union[str, Any] = wi[0].T
A_ : Optional[Any] = wi[1].T
else:
A_ : Tuple = wi.T
A_ : str = wo.T
if scalable_attention:
# convert the rel_embedding of each layer
A_ : Any = tax_relpos_bias_lookup(
_lowerCAmelCase , _lowerCAmelCase , "encoder" ).T
A_ : Any = old["encoder/encoder_norm/scale"]
if not scalable_attention:
A_ : Optional[Any] = tax_relpos_bias_lookup(
_lowerCAmelCase , 0 , "encoder" ).T
A_ : List[Any] = tax_relpos_bias_lookup(
_lowerCAmelCase , 0 , "decoder" ).T
if not is_encoder_only:
# Decoder.
for i in range(_lowerCAmelCase ):
# Block i, layer 0 (Self Attention).
A_ : List[str] = tax_layer_norm_lookup(_lowerCAmelCase , _lowerCAmelCase , "decoder" , "pre_self_attention_layer_norm" )
A_ , A_ , A_ , A_ : Union[str, Any] = tax_attention_lookup(_lowerCAmelCase , _lowerCAmelCase , "decoder" , "self_attention" )
A_ : Any = layer_norm
A_ : Optional[Any] = k.T
A_ : Any = o.T
A_ : Union[str, Any] = q.T
A_ : str = v.T
# Block i, layer 1 (Cross Attention).
A_ : str = tax_layer_norm_lookup(_lowerCAmelCase , _lowerCAmelCase , "decoder" , "pre_cross_attention_layer_norm" )
A_ , A_ , A_ , A_ : Union[str, Any] = tax_attention_lookup(_lowerCAmelCase , _lowerCAmelCase , "decoder" , "encoder_decoder_attention" )
A_ : Dict = layer_norm
A_ : List[Any] = k.T
A_ : int = o.T
A_ : int = q.T
A_ : Optional[int] = v.T
# Block i, layer 2 (MLP).
A_ : Dict = tax_layer_norm_lookup(_lowerCAmelCase , _lowerCAmelCase , "decoder" , "pre_mlp_layer_norm" )
A_ , A_ : Union[str, Any] = tax_mlp_lookup(_lowerCAmelCase , _lowerCAmelCase , "decoder" , _lowerCAmelCase )
A_ : str = layer_norm
if split_mlp_wi:
A_ : List[str] = wi[0].T
A_ : int = wi[1].T
else:
A_ : str = wi.T
A_ : int = wo.T
if scalable_attention:
# convert the rel_embedding of each layer
A_ : Optional[int] = tax_relpos_bias_lookup(_lowerCAmelCase , _lowerCAmelCase , "decoder" ).T
A_ : Optional[Any] = old["decoder/decoder_norm/scale"]
# LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead)
if "decoder/logits_dense/kernel" in old:
A_ : List[Any] = old["decoder/logits_dense/kernel"].T
return new
def make_state_dict(converted_params, is_encoder_only: bool):
    """Prepares a state dict for the PyTorch model."""
    state_dict = collections.OrderedDict([(k, torch.from_numpy(v.copy())) for (k, v) in converted_params.items()])

    # Add what is missing.
    if "encoder.embed_tokens.weight" not in state_dict:
        state_dict["encoder.embed_tokens.weight"] = state_dict["shared.weight"]

    if not is_encoder_only:
        if "decoder.embed_tokens.weight" not in state_dict:
            state_dict["decoder.embed_tokens.weight"] = state_dict["shared.weight"]
        if "lm_head.weight" not in state_dict:  # For old 1.0 models.
            print("Using shared word embeddings as lm_head.")
            state_dict["lm_head.weight"] = state_dict["shared.weight"]

    return state_dict
def load_t5x_weights_in_t5(model, config, t5x_checkpoint_path, is_encoder_only, scalable_attention):
    """Replaces the params in model with the T5X converted params."""
    variables = checkpoints.load_t5x_checkpoint(t5x_checkpoint_path)
    converted = convert_t5x_to_pytorch(
        variables, num_layers=config.num_layers, is_encoder_only=is_encoder_only, scalable_attention=scalable_attention
    )
    state_dict = make_state_dict(converted, is_encoder_only)
    model.load_state_dict(state_dict, strict=True)


def convert_t5x_checkpoint_to_pytorch(
    t5x_checkpoint_path, config_file, pytorch_dump_path, is_encoder_only: bool = False, scalable_attention: bool = False
):
    """Loads the config and model, converts the T5X checkpoint, and saves a PyTorch checkpoint."""
    # Initialise PyTorch model
    config = MT5Config.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    # Non-v1.1 checkpoints could also use T5Model, but this works for all.
    # The v1.0 checkpoints will simply have an LM head that is the word embeddings.
    if is_encoder_only:
        model = UMT5EncoderModel(config)
    else:
        model = UMT5ForConditionalGeneration(config)

    # Load weights from the T5X checkpoint
    load_t5x_weights_in_t5(model, config, t5x_checkpoint_path, is_encoder_only, scalable_attention)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)

    # Verify that we can load the checkpoint.
    model.from_pretrained(pytorch_dump_path)
    print("Done")
if __name__ == "__main__":
_lowerCAmelCase : Optional[Any] = argparse.ArgumentParser(description='''Converts a native T5X checkpoint into a PyTorch checkpoint.''')
# Required parameters
parser.add_argument(
'''--t5x_checkpoint_path''', default=None, type=str, required=True, help='''Path to the T5X checkpoint.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help='''The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.''',
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--is_encoder_only''', action='''store_true''', help='''Check if the model is encoder-decoder model''', default=False
)
parser.add_argument(
'''--scalable_attention''',
action='''store_true''',
help='''Whether the model uses scaled attention (umt5 model)''',
default=False,
)
_lowerCAmelCase : str = parser.parse_args()
convert_tax_checkpoint_to_pytorch(
args.tax_checkpoint_path,
args.config_file,
args.pytorch_dump_path,
args.is_encoder_only,
args.scalable_attention,
)
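# Example invocation (a hedged sketch, not part of the original script; the paths below are
# placeholders you would substitute with real files):
#
#   python convert_umt5_checkpoint_to_pytorch.py \
#       --t5x_checkpoint_path /path/to/t5x/checkpoint_1000000 \
#       --config_file /path/to/config.json \
#       --pytorch_dump_path /path/to/output \
#       --scalable_attention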
| 70 |
# XXX: we want transformers master here - in the absence of conftest manipulating sys.path:
# hack it in for now:
import sys
from pathlib import Path


git_repo_path = Path(__file__).resolve().parents[3] / "src"
sys.path.insert(1, str(git_repo_path))
import dataclasses # noqa
import io # noqa
import itertools # noqa
import json # noqa
import os # noqa
import unittest # noqa
from copy import deepcopy # noqa
from parameterized import parameterized # noqa
from transformers import TrainingArguments, is_torch_available # noqa
from transformers.deepspeed import is_deepspeed_available # noqa
from transformers.file_utils import WEIGHTS_NAME # noqa
from transformers.testing_utils import ( # noqa
CaptureLogger,
ExtendSysPath,
TestCasePlus,
execute_subprocess_async,
get_gpu_count,
mockenv_context,
require_deepspeed,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
from transformers.trainer_utils import set_seed # noqa
set_seed(42)
models = {"base": "patrickvonplaten/wav2vec2_tiny_random", "robust": "patrickvonplaten/wav2vec2_tiny_random_robust"}

ZERO2 = "zero2"
ZERO3 = "zero3"
stages = [ZERO2, ZERO3]


def custom_name_func(func, param_num, param):
    # customize the test name generator function as we want both params to appear in the sub-test
    # name, as by default it shows only the first param
    param_based_name = parameterized.to_safe_name("_".join(str(x) for x in param.args))
    return f"{func.__name__}_{param_based_name}"


# Cartesian-product of zero stages with models to test
params = list(itertools.product(stages, models.keys()))
@slow
@require_deepspeed
@require_torch_gpu
class TestDeepSpeedWav2Vec2(TestCasePlus):
    @parameterized.expand(params, name_func=custom_name_func)
    def test_fp32_non_distributed(self, stage, model):
        self.run_and_check(stage=stage, model=model, distributed=False, fp16=False)

    @require_torch_multi_gpu
    @parameterized.expand(params, name_func=custom_name_func)
    def test_fp32_distributed(self, stage, model):
        self.run_and_check(stage=stage, model=model, distributed=True, fp16=False)

    @parameterized.expand(params, name_func=custom_name_func)
    def test_fp16_non_distributed(self, stage, model):
        self.run_and_check(stage=stage, model=model, distributed=False, fp16=True)

    @require_torch_multi_gpu
    @parameterized.expand(params, name_func=custom_name_func)
    def test_fp16_distributed(self, stage, model):
        self.run_and_check(stage=stage, model=model, distributed=True, fp16=True)

    def do_checks(self, output_dir):
        # XXX: run_asr doesn't save any results, so for now all we check is that
        # the subprocess didn't fail
        pass

    def run_and_check(self, stage: str, model: str, eval_steps: int = 10, distributed: bool = True, quality_checks: bool = True, fp16: bool = True):
        model_name = models[model]

        output_dir = self.run_trainer(
            stage=stage, model_name=model_name, eval_steps=eval_steps, num_train_epochs=1, distributed=distributed, fp16=fp16
        )
        self.do_checks(output_dir)
        return output_dir

    def run_trainer(self, stage: str, model_name: str, eval_steps: int = 10, num_train_epochs: int = 1, distributed: bool = True, fp16: bool = True):
        output_dir = self.get_auto_remove_tmp_dir("./xxx", after=False)
        args = f"\n            --model_name_or_path {model_name}\n            --dataset_name hf-internal-testing/librispeech_asr_dummy\n            --dataset_config_name clean\n            --train_split_name validation\n            --validation_split_name validation\n            --output_dir {output_dir}\n            --num_train_epochs {str(num_train_epochs)}\n            --per_device_train_batch_size 2\n            --per_device_eval_batch_size 2\n            --evaluation_strategy steps\n            --learning_rate 5e-4\n            --warmup_steps 8\n            --orthography timit\n            --preprocessing_num_workers 1\n            --group_by_length\n            --freeze_feature_extractor\n            --report_to none\n            --save_steps 0\n            --eval_steps {eval_steps}\n        ".split()
        if fp16:
            args.extend(["--fp16"])

        # currently ds_config_wav2vec2_zero.json requires "zero_optimization.find_unused_parameters": true,
        # hence the separate config files
        ds_args = f"--deepspeed {self.test_file_dir_str}/ds_config_wav2vec2_{stage}.json".split()
        script = [f"{self.examples_dir_str}/research_projects/wav2vec2/run_asr.py"]
        launcher = self.get_launcher(distributed)

        cmd = launcher + script + args + ds_args
        # keep for quick debug
        # print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] + cmd)); die
        execute_subprocess_async(cmd, env=self.get_env())

        return output_dir

    def get_launcher(self, distributed=False):
        num_gpus = min(2, get_gpu_count()) if distributed else 1
        return f"deepspeed --num_nodes 1 --num_gpus {num_gpus}".split()
| 70 | 1 |
import uuid
from typing import Any, Dict, List, Optional, Union
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
logger = logging.get_logger(__name__)


class Conversation:
    '''simple docstring'''

    def __init__(self, text: str = None, conversation_id: uuid.UUID = None, past_user_inputs=None, generated_responses=None):
        if not conversation_id:
            conversation_id = uuid.uuid4()
        if past_user_inputs is None:
            past_user_inputs = []
        if generated_responses is None:
            generated_responses = []

        self.uuid: uuid.UUID = conversation_id
        self.past_user_inputs: List[str] = past_user_inputs
        self.generated_responses: List[str] = generated_responses
        self.new_user_input: Optional[str] = text

    def __eq__(self, other):
        if not isinstance(other, Conversation):
            return False
        if self.uuid == other.uuid:
            return True
        return (
            self.new_user_input == other.new_user_input
            and self.past_user_inputs == other.past_user_inputs
            and self.generated_responses == other.generated_responses
        )

    def add_user_input(self, text: str, overwrite: bool = False):
        if self.new_user_input:
            if overwrite:
                logger.warning(
                    f"User input added while unprocessed input was existing: \"{self.new_user_input}\" was overwritten "
                    f"with: \"{text}\"."
                )
                self.new_user_input = text
            else:
                logger.warning(
                    f"User input added while unprocessed input was existing: \"{self.new_user_input}\" new input "
                    f"ignored: \"{text}\". Set `overwrite` to True to overwrite unprocessed user input"
                )
        else:
            self.new_user_input = text

    def mark_processed(self):
        if self.new_user_input:
            self.past_user_inputs.append(self.new_user_input)
        self.new_user_input = None

    def append_response(self, response: str):
        self.generated_responses.append(response)

    def iter_texts(self):
        for user_input, generated_response in zip(self.past_user_inputs, self.generated_responses):
            yield True, user_input
            yield False, generated_response
        if self.new_user_input:
            yield True, self.new_user_input

    def __repr__(self):
        output = f"Conversation id: {self.uuid} \n"
        for is_user, text in self.iter_texts():
            name = "user" if is_user else "bot"
            output += f"{name} >> {text} \n"
        return output
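# A minimal usage sketch of the Conversation container above (the texts are illustrative):
#
#   conversation = Conversation("Going to the movies tonight - any suggestions?")
#   conversation.mark_processed()                     # move the pending input to past_user_inputs
#   conversation.append_response("The Big Lebowski")
#   conversation.add_user_input("Is it an action movie?")
#   print(conversation)  # "Conversation id: <uuid> \n user >> ... \n bot >> ... \n user >> ..."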
@add_end_docstrings(
    PIPELINE_INIT_ARGS,
    r'''
        min_length_for_response (`int`, *optional*, defaults to 32):
            The minimum length (in number of tokens) for a response.
        minimum_tokens (`int`, *optional*, defaults to 10):
            The minimum length of tokens to leave for a response.
    ''',
)
class ConversationalPipeline(Pipeline):
    '''simple docstring'''

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        if self.tokenizer.pad_token_id is None:
            self.tokenizer.pad_token = self.tokenizer.eos_token
    def _sanitize_parameters(self, min_length_for_response=None, minimum_tokens=None, clean_up_tokenization_spaces=None, **generate_kwargs):
        preprocess_params = {}
        forward_params = {}
        postprocess_params = {}

        if min_length_for_response is not None:
            preprocess_params["min_length_for_response"] = min_length_for_response
        if minimum_tokens is not None:
            forward_params["minimum_tokens"] = minimum_tokens

        if "max_length" in generate_kwargs:
            forward_params["max_length"] = generate_kwargs["max_length"]
            # self.max_length = generate_kwargs.get("max_length", self.model.config.max_length)
        if clean_up_tokenization_spaces is not None:
            postprocess_params["clean_up_tokenization_spaces"] = clean_up_tokenization_spaces

        if generate_kwargs:
            forward_params.update(generate_kwargs)
        return preprocess_params, forward_params, postprocess_params

    def __call__(self, conversations, num_workers=0, **kwargs):
        outputs = super().__call__(conversations, num_workers=num_workers, **kwargs)
        if isinstance(outputs, list) and len(outputs) == 1:
            return outputs[0]
        return outputs
    def preprocess(self, conversation: Conversation, min_length_for_response=32):
        if not isinstance(conversation, Conversation):
            raise ValueError("ConversationalPipeline, expects Conversation as inputs")
        if conversation.new_user_input is None:
            raise ValueError(
                f"Conversation with UUID {type(conversation.uuid)} does not contain new user input to process. "
                "Add user inputs with the conversation's `add_user_input` method"
            )
        if hasattr(self.tokenizer, "_build_conversation_input_ids"):
            input_ids = self.tokenizer._build_conversation_input_ids(conversation)
        else:
            # If the tokenizer cannot handle conversations, we default to only the old version
            input_ids = self._legacy_parse_and_tokenize(conversation)

        if self.framework == "pt":
            input_ids = torch.LongTensor([input_ids])
        elif self.framework == "tf":
            input_ids = tf.constant([input_ids])
        return {"input_ids": input_ids, "conversation": conversation}
    def _forward(self, model_inputs, minimum_tokens=10, **generate_kwargs):
        max_length = generate_kwargs.get("max_length", self.model.config.max_length)

        n = model_inputs["input_ids"].shape[1]
        if max_length - minimum_tokens < n:
            logger.warning(f"Conversation input is too long ({n}), trimming it to ({max_length} - {minimum_tokens})")
            trim = max_length - minimum_tokens
            model_inputs["input_ids"] = model_inputs["input_ids"][:, -trim:]
            if "attention_mask" in model_inputs:
                model_inputs["attention_mask"] = model_inputs["attention_mask"][:, -trim:]
        conversation = model_inputs.pop("conversation")
        generate_kwargs["max_length"] = max_length
        output_ids = self.model.generate(**model_inputs, **generate_kwargs)
        if self.model.config.is_encoder_decoder:
            start_position = 1
        else:
            start_position = n
        return {"output_ids": output_ids[:, start_position:], "conversation": conversation}
    def postprocess(self, model_outputs, clean_up_tokenization_spaces=True):
        output_ids = model_outputs["output_ids"]
        answer = self.tokenizer.decode(
            output_ids[0],
            skip_special_tokens=True,
            clean_up_tokenization_spaces=clean_up_tokenization_spaces,
        )
        conversation = model_outputs["conversation"]
        conversation.mark_processed()
        conversation.append_response(answer)
        return conversation

    def _legacy_parse_and_tokenize(self, conversation: Conversation) -> List[int]:
        eos_token_id = self.tokenizer.eos_token_id
        input_ids = []
        for is_user, text in conversation.iter_texts():
            if eos_token_id is not None:
                input_ids.extend(self.tokenizer.encode(text, add_special_tokens=False) + [eos_token_id])
            else:
                input_ids.extend(self.tokenizer.encode(text, add_special_tokens=False))

        if len(input_ids) > self.tokenizer.model_max_length:
            input_ids = input_ids[-self.tokenizer.model_max_length :]
        return input_ids
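# Hedged end-to-end sketch; the checkpoint name is an assumption, any conversational model works:
#
#   from transformers import pipeline
#   chatbot = pipeline("conversational", model="microsoft/DialoGPT-medium")
#   conversation = Conversation("Hi, can you recommend a book?")
#   conversation = chatbot(conversation)
#   print(conversation.generated_responses[-1])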
| 192 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'alibaba-damo/mgp-str-base': 'https://huggingface.co/alibaba-damo/mgp-str-base/resolve/main/config.json',
}


class MgpstrConfig(PretrainedConfig):
    '''simple docstring'''

    model_type = 'mgp-str'
    def __init__(
        self,
        image_size=[32, 128],
        patch_size=4,
        num_channels=3,
        max_token_length=27,
        num_character_labels=38,
        num_bpe_labels=50257,
        num_wordpiece_labels=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        mlp_ratio=4.0,
        qkv_bias=True,
        distilled=False,
        layer_norm_eps=1e-5,
        drop_rate=0.0,
        attn_drop_rate=0.0,
        drop_path_rate=0.0,
        output_a3_attentions=False,
        initializer_range=0.02,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.max_token_length = max_token_length
        self.num_character_labels = num_character_labels
        self.num_bpe_labels = num_bpe_labels
        self.num_wordpiece_labels = num_wordpiece_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.mlp_ratio = mlp_ratio
        self.distilled = distilled
        self.layer_norm_eps = layer_norm_eps
        self.drop_rate = drop_rate
        self.qkv_bias = qkv_bias
        self.attn_drop_rate = attn_drop_rate
        self.drop_path_rate = drop_path_rate
        self.output_a3_attentions = output_a3_attentions
        self.initializer_range = initializer_range
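# Instantiation sketch (the override values are illustrative): the defaults reproduce the
# alibaba-damo/mgp-str-base architecture, and any keyword can be overridden.
#
#   config = MgpstrConfig(max_token_length=32, hidden_size=512)
#   print(config.model_type)  # "mgp-str"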
| 192 | 1 |
import logging
import os
import sys
from dataclasses import dataclass, field
from itertools import chain
from typing import Optional, Union
import datasets
import numpy as np
import torch
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import PaddingStrategy, check_min_version, send_example_telemetry
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('4.31.0')
logger = logging.getLogger(__name__)
@dataclass
class ModelArguments:
    '''simple docstring'''

    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    use_fast_tokenizer: bool = field(
        default=True,
        metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."},
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
@dataclass
class DataTrainingArguments:
    '''simple docstring'''

    train_file: Optional[str] = field(default=None, metadata={"help": "The input training data file (a text file)."})
    validation_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."},
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
    preprocessing_num_workers: Optional[int] = field(
        default=None,
        metadata={"help": "The number of processes to use for the preprocessing."},
    )
    max_seq_length: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. If passed, sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    pad_to_max_length: bool = field(
        default=False,
        metadata={
            "help": (
                "Whether to pad all samples to the maximum sentence length. "
                "If False, will pad the samples dynamically when batching to the maximum length in the batch. More "
                "efficient on GPU but very bad for TPU."
            )
        },
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )

    def __post_init__(self):
        if self.train_file is not None:
            extension = self.train_file.split(".")[-1]
            assert extension in ["csv", "json"], "`train_file` should be a csv or a json file."
        if self.validation_file is not None:
            extension = self.validation_file.split(".")[-1]
            assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file."
@dataclass
class DataCollatorForMultipleChoice:
    '''simple docstring'''

    tokenizer: PreTrainedTokenizerBase
    padding: Union[bool, str, PaddingStrategy] = True
    max_length: Optional[int] = None
    pad_to_multiple_of: Optional[int] = None

    def __call__(self, features):
        label_name = "label" if "label" in features[0].keys() else "labels"
        labels = [feature.pop(label_name) for feature in features]
        batch_size = len(features)
        num_choices = len(features[0]["input_ids"])
        flattened_features = [
            [{k: v[i] for k, v in feature.items()} for i in range(num_choices)] for feature in features
        ]
        flattened_features = list(chain(*flattened_features))

        batch = self.tokenizer.pad(
            flattened_features,
            padding=self.padding,
            max_length=self.max_length,
            pad_to_multiple_of=self.pad_to_multiple_of,
            return_tensors="pt",
        )

        # Un-flatten
        batch = {k: v.view(batch_size, num_choices, -1) for k, v in batch.items()}
        # Add back labels
        batch["labels"] = torch.tensor(labels, dtype=torch.int64)
        return batch
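# Shape sketch for the collator above (a hedged illustration; `tokenizer` and `features` are
# assumed to exist): two examples with four choices each are flattened to 8 sequences, padded
# to a common (8, seq_len), then viewed back to (batch_size, num_choices, seq_len):
#
#   collator = DataCollatorForMultipleChoice(tokenizer=tokenizer)
#   batch = collator(features)      # features: list of 2 dicts, each with 4-way "input_ids"
#   batch["input_ids"].shape        # torch.Size([2, 4, seq_len]); batch["labels"].shape == (2,)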
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.

    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("run_swag", model_args, data_args)

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )

    if training_args.should_log:
        # The default of training_args.log_level is passive, so we set log level at info here to have that default.
        transformers.utils.logging.set_verbosity_info()

    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    datasets.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()

    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, "
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
    logger.info(f"Training/evaluation parameters {training_args}")

    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to overcome."
            )
        elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )

    # Set seed before initializing model.
    set_seed(training_args.seed)

    # Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
    # or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
    # (the dataset will be downloaded automatically from the datasets Hub).

    # For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
    # 'text' is found. You can easily tweak this behavior (see below).

    # In distributed training, the load_dataset function guarantee that only one local process can concurrently
    # download the dataset.
    if data_args.train_file is not None or data_args.validation_file is not None:
        data_files = {}
        if data_args.train_file is not None:
            data_files["train"] = data_args.train_file
        if data_args.validation_file is not None:
            data_files["validation"] = data_args.validation_file
        extension = data_args.train_file.split(".")[-1]
        raw_datasets = load_dataset(
            extension,
            data_files=data_files,
            cache_dir=model_args.cache_dir,
            use_auth_token=True if model_args.use_auth_token else None,
        )
    else:
        # Downloading and loading the swag dataset from the hub.
        raw_datasets = load_dataset(
            "swag",
            "regular",
            cache_dir=model_args.cache_dir,
            use_auth_token=True if model_args.use_auth_token else None,
        )
    # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
    # https://huggingface.co/docs/datasets/loading_datasets.html.

    # Load pretrained model and tokenizer

    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        use_fast=model_args.use_fast_tokenizer,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    model = AutoModelForMultipleChoice.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool(".ckpt" in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )

    # When using your own dataset or a different dataset from swag, you will probably need to change this.
    ending_names = [f"ending{i}" for i in range(4)]
    context_name = "sent1"
    question_header_name = "sent2"

    if data_args.max_seq_length is None:
        max_seq_length = tokenizer.model_max_length
        if max_seq_length > 1024:
            logger.warning(
                "The chosen tokenizer supports a `model_max_length` that is longer than the default `block_size` value"
                " of 1024. If you would like to use a longer `block_size` up to `tokenizer.model_max_length` you can"
                " override this default with `--block_size xxx`."
            )
            max_seq_length = 1024
    else:
        if data_args.max_seq_length > tokenizer.model_max_length:
            logger.warning(
                f"The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the"
                f"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}."
            )
        max_seq_length = min(data_args.max_seq_length, tokenizer.model_max_length)
    # Preprocessing the datasets.
    def preprocess_function(examples):
        first_sentences = [[context] * 4 for context in examples[context_name]]
        question_headers = examples[question_header_name]
        second_sentences = [
            [f"{header} {examples[end][i]}" for end in ending_names] for i, header in enumerate(question_headers)
        ]

        # Flatten out
        first_sentences = list(chain(*first_sentences))
        second_sentences = list(chain(*second_sentences))

        # Tokenize
        tokenized_examples = tokenizer(
            first_sentences,
            second_sentences,
            truncation=True,
            max_length=max_seq_length,
            padding="max_length" if data_args.pad_to_max_length else False,
        )
        # Un-flatten
        return {k: [v[i : i + 4] for i in range(0, len(v), 4)] for k, v in tokenized_examples.items()}

    if training_args.do_train:
        if "train" not in raw_datasets:
            raise ValueError("--do_train requires a train dataset")
        train_dataset = raw_datasets["train"]
        if data_args.max_train_samples is not None:
            max_train_samples = min(len(train_dataset), data_args.max_train_samples)
            train_dataset = train_dataset.select(range(max_train_samples))
        with training_args.main_process_first(desc="train dataset map pre-processing"):
            train_dataset = train_dataset.map(
                preprocess_function,
                batched=True,
                num_proc=data_args.preprocessing_num_workers,
                load_from_cache_file=not data_args.overwrite_cache,
            )

    if training_args.do_eval:
        if "validation" not in raw_datasets:
            raise ValueError("--do_eval requires a validation dataset")
        eval_dataset = raw_datasets["validation"]
        if data_args.max_eval_samples is not None:
            max_eval_samples = min(len(eval_dataset), data_args.max_eval_samples)
            eval_dataset = eval_dataset.select(range(max_eval_samples))
        with training_args.main_process_first(desc="validation dataset map pre-processing"):
            eval_dataset = eval_dataset.map(
                preprocess_function,
                batched=True,
                num_proc=data_args.preprocessing_num_workers,
                load_from_cache_file=not data_args.overwrite_cache,
            )

    # Data collator
    data_collator = (
        default_data_collator
        if data_args.pad_to_max_length
        else DataCollatorForMultipleChoice(tokenizer=tokenizer, pad_to_multiple_of=8 if training_args.fp16 else None)
    )

    # Metric
    def compute_metrics(eval_predictions):
        predictions, label_ids = eval_predictions
        preds = np.argmax(predictions, axis=1)
        return {"accuracy": (preds == label_ids).astype(np.float32).mean().item()}
    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset if training_args.do_train else None,
        eval_dataset=eval_dataset if training_args.do_eval else None,
        tokenizer=tokenizer,
        data_collator=data_collator,
        compute_metrics=compute_metrics,
    )

    # Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()  # Saves the tokenizer too for easy upload
        metrics = train_result.metrics

        max_train_samples = (
            data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset)
        )
        metrics["train_samples"] = min(max_train_samples, len(train_dataset))

        trainer.log_metrics("train", metrics)
        trainer.save_metrics("train", metrics)
        trainer.save_state()

    # Evaluation
    if training_args.do_eval:
        logger.info("*** Evaluate ***")

        metrics = trainer.evaluate()
        max_eval_samples = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset)
        metrics["eval_samples"] = min(max_eval_samples, len(eval_dataset))

        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)

    kwargs = {
        "finetuned_from": model_args.model_name_or_path,
        "tasks": "multiple-choice",
        "dataset_tags": "swag",
        "dataset_args": "regular",
        "dataset": "SWAG",
        "language": "en",
    }

    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


if __name__ == "__main__":
    main()
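# Typical invocation (a sketch; the model name and output path are placeholders, not part of
# the original file):
#
#   python run_swag.py \
#       --model_name_or_path bert-base-uncased \
#       --do_train --do_eval \
#       --per_device_train_batch_size 16 \
#       --learning_rate 5e-5 --num_train_epochs 3 \
#       --output_dir /tmp/swag_out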
| 247 |
import random


def _partition(data: list, pivot) -> tuple:
    less, equal, greater = [], [], []
    for element in data:
        if element < pivot:
            less.append(element)
        elif element > pivot:
            greater.append(element)
        else:
            equal.append(element)
    return less, equal, greater


def quick_select(items: list, index: int):
    # index = len(items) // 2 when trying to find the median
    # (value of index when items is sorted)

    # invalid input
    if index >= len(items) or index < 0:
        return None

    pivot = items[random.randint(0, len(items) - 1)]
    count = 0
    smaller, equal, larger = _partition(items, pivot)
    count = len(equal)
    m = len(smaller)

    # index is the pivot
    if m <= index < m + count:
        return pivot
    # must be in smaller
    elif m > index:
        return quick_select(smaller, index)
    # must be in larger
    else:
        return quick_select(larger, index - (m + count))
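# Usage sketch (not in the original module): quick_select returns the 0-indexed k-th smallest
# element in expected O(n) time.
if __name__ == "__main__":
    data = [2, 8, 1, 5, 9, 3]
    print(quick_select(data, 0))               # 1 (minimum)
    print(quick_select(data, len(data) // 2))  # 5 (median of sorted [1, 2, 3, 5, 8, 9])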
| 247 | 1 |
"""simple docstring"""
import numpy as np
from PIL import Image
def maxpooling(arr: np.ndarray, size: int, stride: int) -> np.ndarray:
    '''simple docstring'''
    arr = np.array(arr)
    if arr.shape[0] != arr.shape[1]:
        raise ValueError("The input array is not a square matrix")
    i = 0
    j = 0
    mat_i = 0
    mat_j = 0

    # compute the shape of the output matrix
    maxpool_shape = (arr.shape[0] - size) // stride + 1
    # initialize the output matrix with zeros of shape maxpool_shape
    updated_arr = np.zeros((maxpool_shape, maxpool_shape))

    while i < arr.shape[0]:
        if i + size > arr.shape[0]:
            # if the end of the matrix is reached, break
            break
        while j < arr.shape[1]:
            # if the end of the matrix is reached, break
            if j + size > arr.shape[1]:
                break
            # compute the maximum of the pooling matrix
            updated_arr[mat_i][mat_j] = np.max(arr[i : i + size, j : j + size])
            # shift the pooling matrix by stride of column pixels
            j += stride
            mat_j += 1

        # shift the pooling matrix by stride of row pixels
        i += stride
        mat_i += 1
        # reset the column index to 0
        j = 0
        mat_j = 0

    return updated_arr
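# Worked example for maxpooling (illustrative, doctest-style): a 4x4 input with size=2 and
# stride=2 yields a 2x2 matrix of block maxima.
#
#   >>> arr = [[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12], [13, 14, 15, 16]]
#   >>> maxpooling(arr, size=2, stride=2)
#   array([[ 6.,  8.],
#          [14., 16.]])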
def avgpooling(arr: np.ndarray, size: int, stride: int) -> np.ndarray:
    '''simple docstring'''
    arr = np.array(arr)
    if arr.shape[0] != arr.shape[1]:
        raise ValueError("The input array is not a square matrix")
    i = 0
    j = 0
    mat_i = 0
    mat_j = 0

    # compute the shape of the output matrix
    avgpool_shape = (arr.shape[0] - size) // stride + 1
    # initialize the output matrix with zeros of shape avgpool_shape
    updated_arr = np.zeros((avgpool_shape, avgpool_shape))

    while i < arr.shape[0]:
        # if the end of the matrix is reached, break
        if i + size > arr.shape[0]:
            break
        while j < arr.shape[1]:
            # if the end of the matrix is reached, break
            if j + size > arr.shape[1]:
                break
            # compute the average of the pooling matrix
            updated_arr[mat_i][mat_j] = int(np.average(arr[i : i + size, j : j + size]))
            # shift the pooling matrix by stride of column pixels
            j += stride
            mat_j += 1

        # shift the pooling matrix by stride of row pixels
        i += stride
        mat_i += 1
        # reset the column index to 0
        j = 0
        mat_j = 0

    return updated_arr
# Main Function
if __name__ == "__main__":
from doctest import testmod
testmod(name="avgpooling", verbose=True)
    # Loading the image
    image = Image.open("path_to_image")
# Converting the image to numpy array and maxpooling, displaying the result
# Ensure that the image is a square matrix
Image.fromarray(maxpooling(np.array(image), size=3, stride=2)).show()
# Converting the image to numpy array and averagepooling, displaying the result
# Ensure that the image is a square matrix
Image.fromarray(avgpooling(np.array(image), size=3, stride=2)).show()
| 57 |
"""simple docstring"""
from diffusers.utils.testing_utils import require_onnxruntime
@require_onnxruntime
class _UpperCamelCase :
'''simple docstring'''
pass
| 57 | 1 |
from __future__ import annotations

import pandas as pd


def calculate_waitingtime(arrival_time: list[int], burst_time: list[int], no_of_processes: int) -> list[int]:
    '''simple docstring'''
    remaining_time = [0] * no_of_processes
    waiting_time = [0] * no_of_processes

    # Copy the burst time into remaining_time[]
    for i in range(no_of_processes):
        remaining_time[i] = burst_time[i]

    complete = 0
    increment_time = 0
    minm = 999999999
    short = 0
    check = False

    # Process until all processes are completed
    while complete != no_of_processes:
        for j in range(no_of_processes):
            if arrival_time[j] <= increment_time and remaining_time[j] > 0:
                if remaining_time[j] < minm:
                    minm = remaining_time[j]
                    short = j
                    check = True

        if not check:
            increment_time += 1
            continue
        remaining_time[short] -= 1

        minm = remaining_time[short]
        if minm == 0:
            minm = 999999999

        if remaining_time[short] == 0:
            complete += 1
            check = False

            # Find finish time of current process
            finish_time = increment_time + 1

            # Calculate waiting time
            finar = finish_time - arrival_time[short]
            waiting_time[short] = finar - burst_time[short]

            if waiting_time[short] < 0:
                waiting_time[short] = 0

        # Increment time
        increment_time += 1
    return waiting_time
def calculate_turnaroundtime(burst_time: list[int], no_of_processes: int, waiting_time: list[int]) -> list[int]:
    '''simple docstring'''
    turn_around_time = [0] * no_of_processes
    for i in range(no_of_processes):
        turn_around_time[i] = burst_time[i] + waiting_time[i]
    return turn_around_time


def calculate_average_times(waiting_time: list[int], turn_around_time: list[int], no_of_processes: int) -> None:
    '''simple docstring'''
    total_waiting_time = 0
    total_turn_around_time = 0
    for i in range(no_of_processes):
        total_waiting_time = total_waiting_time + waiting_time[i]
        total_turn_around_time = total_turn_around_time + turn_around_time[i]
    print(f'''Average waiting time = {total_waiting_time / no_of_processes:.5f}''')
    print('''Average turn around time =''', total_turn_around_time / no_of_processes)
if __name__ == "__main__":
print('Enter how many process you want to analyze')
__a = int(input())
__a = [0] * no_of_processes
__a = [0] * no_of_processes
__a = list(range(1, no_of_processes + 1))
for i in range(no_of_processes):
print('Enter the arrival time and burst time for process:--' + str(i + 1))
__a ,__a = map(int, input().split())
__a = calculate_waitingtime(arrival_time, burst_time, no_of_processes)
__a = burst_time
__a = no_of_processes
__a = waiting_time
__a = calculate_turnaroundtime(bt, n, wt)
calculate_average_times(waiting_time, turn_around_time, no_of_processes)
__a = pd.DataFrame(
list(zip(processes, burst_time, arrival_time, waiting_time, turn_around_time)),
columns=[
'Process',
'BurstTime',
'ArrivalTime',
'WaitingTime',
'TurnAroundTime',
],
)
# Printing the dataFrame
pd.set_option('display.max_rows', fcfs.shape[0] + 1)
print(fcfs) | 362 |
import tempfile
import torch
from diffusers import IPNDMScheduler
from .test_schedulers import SchedulerCommonTest
class IPNDMSchedulerTest(SchedulerCommonTest):
    """simple docstring"""

    scheduler_classes = (IPNDMScheduler,)
    forward_default_kwargs = (('num_inference_steps', 50),)

    def get_scheduler_config(self, **kwargs):
        config = {'num_train_timesteps': 1_000}
        config.update(**kwargs)
        return config
    def check_over_configs(self, time_step=0, **config):
        '''simple docstring'''
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop('num_inference_steps', None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals
            scheduler.ets = dummy_past_residuals[:]

            if time_step is None:
                time_step = scheduler.timesteps[len(scheduler.timesteps) // 2]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residuals
                new_scheduler.ets = dummy_past_residuals[:]

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def test_from_save_pretrained(self):
        '''simple docstring'''
        pass
    def check_over_forward(self, time_step=0, **forward_kwargs):
        '''simple docstring'''
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop('num_inference_steps', None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)

            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.ets = dummy_past_residuals[:]

            if time_step is None:
                time_step = scheduler.timesteps[len(scheduler.timesteps) // 2]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                # copy over dummy past residuals
                new_scheduler.set_timesteps(num_inference_steps)

                # copy over dummy past residual (must be after setting timesteps)
                new_scheduler.ets = dummy_past_residuals[:]

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
    def full_loop(self, **config):
        '''simple docstring'''
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)

        # IPNDM is a multi-step method, so the loop is run twice over the timesteps
        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        return sample
    def test_step_shape(self):
        '''simple docstring'''
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop('num_inference_steps', None)

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            sample = self.dummy_sample
            residual = 0.1 * sample

            if num_inference_steps is not None and hasattr(scheduler, 'set_timesteps'):
                scheduler.set_timesteps(num_inference_steps)
            elif num_inference_steps is not None and not hasattr(scheduler, 'set_timesteps'):
                scheduler.num_inference_steps = num_inference_steps

            # copy over dummy past residuals (must be done after set_timesteps)
            dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
            scheduler.ets = dummy_past_residuals[:]

            time_step_0 = scheduler.timesteps[5]
            time_step_1 = scheduler.timesteps[6]

            output_0 = scheduler.step(residual, time_step_0, sample, **kwargs).prev_sample
            output_1 = scheduler.step(residual, time_step_1, sample, **kwargs).prev_sample

            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)

            output_0 = scheduler.step(residual, time_step_0, sample, **kwargs).prev_sample
            output_1 = scheduler.step(residual, time_step_1, sample, **kwargs).prev_sample

            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)

    def test_timesteps(self):
        '''simple docstring'''
        for timesteps in [100, 1_000]:
            self.check_over_configs(num_train_timesteps=timesteps, time_step=None)

    def test_inference_steps(self):
        '''simple docstring'''
        for t, num_inference_steps in zip([1, 5, 10], [10, 50, 100]):
            self.check_over_forward(num_inference_steps=num_inference_steps, time_step=None)

    def test_full_loop_no_noise(self):
        '''simple docstring'''
        sample = self.full_loop()
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 2_540_529) < 10
| 235 | 0 |
import importlib
import json
import os
from collections import OrderedDict
from typing import Dict, Optional, Union
# Build the list of all image processors
from ...configuration_utils import PretrainedConfig
from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
from ...image_processing_utils import ImageProcessingMixin
from ...utils import CONFIG_NAME, IMAGE_PROCESSOR_NAME, get_file_from_repo, logging
from .auto_factory import _LazyAutoMapping
from .configuration_auto import (
CONFIG_MAPPING_NAMES,
AutoConfig,
model_type_to_module_name,
replace_list_option_in_docstrings,
)
logger = logging.get_logger(__name__)

IMAGE_PROCESSOR_MAPPING_NAMES = OrderedDict(
[
('''align''', '''EfficientNetImageProcessor'''),
('''beit''', '''BeitImageProcessor'''),
('''bit''', '''BitImageProcessor'''),
('''blip''', '''BlipImageProcessor'''),
('''blip-2''', '''BlipImageProcessor'''),
('''bridgetower''', '''BridgeTowerImageProcessor'''),
('''chinese_clip''', '''ChineseCLIPImageProcessor'''),
('''clip''', '''CLIPImageProcessor'''),
('''clipseg''', '''ViTImageProcessor'''),
('''conditional_detr''', '''ConditionalDetrImageProcessor'''),
('''convnext''', '''ConvNextImageProcessor'''),
('''convnextv2''', '''ConvNextImageProcessor'''),
('''cvt''', '''ConvNextImageProcessor'''),
('''data2vec-vision''', '''BeitImageProcessor'''),
('''deformable_detr''', '''DeformableDetrImageProcessor'''),
('''deit''', '''DeiTImageProcessor'''),
('''deta''', '''DetaImageProcessor'''),
('''detr''', '''DetrImageProcessor'''),
('''dinat''', '''ViTImageProcessor'''),
('''donut-swin''', '''DonutImageProcessor'''),
('''dpt''', '''DPTImageProcessor'''),
('''efficientformer''', '''EfficientFormerImageProcessor'''),
('''efficientnet''', '''EfficientNetImageProcessor'''),
('''flava''', '''FlavaImageProcessor'''),
('''focalnet''', '''BitImageProcessor'''),
('''git''', '''CLIPImageProcessor'''),
('''glpn''', '''GLPNImageProcessor'''),
('''groupvit''', '''CLIPImageProcessor'''),
('''imagegpt''', '''ImageGPTImageProcessor'''),
('''instructblip''', '''BlipImageProcessor'''),
('''layoutlmv2''', '''LayoutLMv2ImageProcessor'''),
('''layoutlmv3''', '''LayoutLMv3ImageProcessor'''),
('''levit''', '''LevitImageProcessor'''),
('''mask2former''', '''Mask2FormerImageProcessor'''),
('''maskformer''', '''MaskFormerImageProcessor'''),
('''mgp-str''', '''ViTImageProcessor'''),
('''mobilenet_v1''', '''MobileNetV1ImageProcessor'''),
('''mobilenet_v2''', '''MobileNetV2ImageProcessor'''),
('''mobilevit''', '''MobileViTImageProcessor'''),
('''mobilevitv2''', '''MobileViTImageProcessor'''),
('''nat''', '''ViTImageProcessor'''),
('''oneformer''', '''OneFormerImageProcessor'''),
('''owlvit''', '''OwlViTImageProcessor'''),
('''perceiver''', '''PerceiverImageProcessor'''),
('''pix2struct''', '''Pix2StructImageProcessor'''),
('''poolformer''', '''PoolFormerImageProcessor'''),
('''regnet''', '''ConvNextImageProcessor'''),
('''resnet''', '''ConvNextImageProcessor'''),
('''sam''', '''SamImageProcessor'''),
('''segformer''', '''SegformerImageProcessor'''),
('''swiftformer''', '''ViTImageProcessor'''),
('''swin''', '''ViTImageProcessor'''),
('''swin2sr''', '''Swin2SRImageProcessor'''),
('''swinv2''', '''ViTImageProcessor'''),
('''table-transformer''', '''DetrImageProcessor'''),
('''timesformer''', '''VideoMAEImageProcessor'''),
('''tvlt''', '''TvltImageProcessor'''),
('''upernet''', '''SegformerImageProcessor'''),
('''van''', '''ConvNextImageProcessor'''),
('''videomae''', '''VideoMAEImageProcessor'''),
('''vilt''', '''ViltImageProcessor'''),
('''vit''', '''ViTImageProcessor'''),
('''vit_hybrid''', '''ViTHybridImageProcessor'''),
('''vit_mae''', '''ViTImageProcessor'''),
('''vit_msn''', '''ViTImageProcessor'''),
('''xclip''', '''CLIPImageProcessor'''),
('''yolos''', '''YolosImageProcessor'''),
]
)
IMAGE_PROCESSOR_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, IMAGE_PROCESSOR_MAPPING_NAMES)
def image_processor_class_from_name(class_name: str):
    '''simple docstring'''
    for module_name, extractors in IMAGE_PROCESSOR_MAPPING_NAMES.items():
        if class_name in extractors:
            module_name = model_type_to_module_name(module_name)

            module = importlib.import_module(f'''.{module_name}''', '''transformers.models''')
            try:
                return getattr(module, class_name)
            except AttributeError:
                continue

    for _, extractor in IMAGE_PROCESSOR_MAPPING._extra_content.items():
        if getattr(extractor, '''__name__''', None) == class_name:
            return extractor

    # We did not find the class, but maybe it's because a dep is missing. In that case, the class will be in the main
    # init and we return the proper dummy to get an appropriate error message.
    main_module = importlib.import_module('''transformers''')
    if hasattr(main_module, class_name):
        return getattr(main_module, class_name)

    return None
def get_image_processor_config(
    pretrained_model_name_or_path,
    cache_dir=None,
    force_download: bool = False,
    resume_download: bool = False,
    proxies=None,
    use_auth_token=None,
    revision=None,
    local_files_only: bool = False,
    **kwargs,
):
    '''simple docstring'''
    resolved_config_file = get_file_from_repo(
        pretrained_model_name_or_path,
        IMAGE_PROCESSOR_NAME,
        cache_dir=cache_dir,
        force_download=force_download,
        resume_download=resume_download,
        proxies=proxies,
        use_auth_token=use_auth_token,
        revision=revision,
        local_files_only=local_files_only,
    )
    if resolved_config_file is None:
        logger.info(
            '''Could not locate the image processor configuration file, will try to use the model config instead.'''
        )
        return {}

    with open(resolved_config_file, encoding='''utf-8''') as reader:
        return json.load(reader)
class __snake_case :
def __init__( self ) -> Optional[Any]:
'''simple docstring'''
raise EnvironmentError(
'''AutoImageProcessor is designed to be instantiated '''
'''using the `AutoImageProcessor.from_pretrained(pretrained_model_name_or_path)` method.''' )
@classmethod
@replace_list_option_in_docstrings(__A )
def UpperCAmelCase__ ( cls , snake_case__ , **snake_case__ ) -> Dict:
'''simple docstring'''
UpperCAmelCase : str =kwargs.pop('''config''' , __A )
UpperCAmelCase : Optional[int] =kwargs.pop('''trust_remote_code''' , __A )
UpperCAmelCase : str =True
UpperCAmelCase , UpperCAmelCase : str =ImageProcessingMixin.get_image_processor_dict(__A , **__A )
UpperCAmelCase : Union[str, Any] =config_dict.get('''image_processor_type''' , __A )
UpperCAmelCase : Tuple =None
if "AutoImageProcessor" in config_dict.get('''auto_map''' , {} ):
UpperCAmelCase : Tuple =config_dict['''auto_map''']['''AutoImageProcessor''']
# If we still don't have the image processor class, check if we're loading from a previous feature extractor config
# and if so, infer the image processor class from there.
if image_processor_class is None and image_processor_auto_map is None:
UpperCAmelCase : List[str] =config_dict.pop('''feature_extractor_type''' , __A )
if feature_extractor_class is not None:
logger.warning(
'''Could not find image processor class in the image processor config or the model config. Loading'''
''' based on pattern matching with the model\'s feature extractor configuration.''' )
UpperCAmelCase : List[str] =feature_extractor_class.replace('''FeatureExtractor''' , '''ImageProcessor''' )
if "AutoFeatureExtractor" in config_dict.get('''auto_map''' , {} ):
UpperCAmelCase : List[str] =config_dict['''auto_map''']['''AutoFeatureExtractor''']
UpperCAmelCase : List[str] =feature_extractor_auto_map.replace('''FeatureExtractor''' , '''ImageProcessor''' )
logger.warning(
'''Could not find image processor auto map in the image processor config or the model config.'''
''' Loading based on pattern matching with the model\'s feature extractor configuration.''' )
# If we don't find the image processor class in the image processor config, let's try the model config.
if image_processor_class is None and image_processor_auto_map is None:
if not isinstance(__A , __A ):
UpperCAmelCase : Optional[Any] =AutoConfig.from_pretrained(__A , **__A )
            # It could be in `config.image_processor_type`
UpperCAmelCase : List[str] =getattr(__A , '''image_processor_type''' , __A )
if hasattr(__A , '''auto_map''' ) and "AutoImageProcessor" in config.auto_map:
UpperCAmelCase : Union[str, Any] =config.auto_map['''AutoImageProcessor''']
if image_processor_class is not None:
UpperCAmelCase : str =image_processor_class_from_name(__A )
UpperCAmelCase : Optional[int] =image_processor_auto_map is not None
UpperCAmelCase : Union[str, Any] =image_processor_class is not None or type(__A ) in IMAGE_PROCESSOR_MAPPING
UpperCAmelCase : Union[str, Any] =resolve_trust_remote_code(
__A , __A , __A , __A )
if has_remote_code and trust_remote_code:
UpperCAmelCase : str =get_class_from_dynamic_module(
__A , __A , **__A )
UpperCAmelCase : Union[str, Any] =kwargs.pop('''code_revision''' , __A )
if os.path.isdir(__A ):
image_processor_class.register_for_auto_class()
return image_processor_class.from_dict(__A , **__A )
elif image_processor_class is not None:
return image_processor_class.from_dict(__A , **__A )
# Last try: we use the IMAGE_PROCESSOR_MAPPING.
elif type(__A ) in IMAGE_PROCESSOR_MAPPING:
UpperCAmelCase : Optional[Any] =IMAGE_PROCESSOR_MAPPING[type(__A )]
return image_processor_class.from_dict(__A , **__A )
raise ValueError(
f'''Unrecognized image processor in {pretrained_model_name_or_path}. Should have a '''
f'''`image_processor_type` key in its {IMAGE_PROCESSOR_NAME} of {CONFIG_NAME}, or one of the following '''
f'''`model_type` keys in its {CONFIG_NAME}: {', '.join(c for c in IMAGE_PROCESSOR_MAPPING_NAMES.keys() )}''' )
@staticmethod
def UpperCAmelCase__ ( snake_case__ , snake_case__ ) -> Any:
'''simple docstring'''
IMAGE_PROCESSOR_MAPPING.register(__A , __A )
| 348 |
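# Illustrative usage (an addition, not part of the original snippet): how the
# resolution order implemented above plays out in practice. The checkpoint name
# is an assumption about what is available on the Hub.
from transformers import AutoImageProcessor

image_processor = AutoImageProcessor.from_pretrained("google/vit-base-patch16-224")
# from_pretrained reads `image_processor_type` from the processor config, falls
# back to the legacy `feature_extractor_type`, and finally matches the model
# type against IMAGE_PROCESSOR_MAPPING, the three branches coded above.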
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
lowerCamelCase_ : Optional[Any] = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase_ : str = ["""NllbTokenizer"""]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase_ : Dict = ["""NllbTokenizerFast"""]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb import NllbTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb_fast import NllbTokenizerFast
else:
import sys
lowerCamelCase_ : int = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__) | 81 | 0 |
from __future__ import annotations
def lowerCamelCase__ ( _lowercase , _lowercase ):
'''simple docstring'''
UpperCAmelCase_, UpperCAmelCase_ : Optional[int] = set(_lowercase ), [start]
while stack:
UpperCAmelCase_ : int = stack.pop()
explored.add(_lowercase )
# Differences from BFS:
# 1) pop last element instead of first one
# 2) add adjacent elements to stack without exploring them
for adj in reversed(graph[v] ):
if adj not in explored:
stack.append(_lowercase )
return explored
__a = {
'A': ['B', 'C', 'D'],
'B': ['A', 'D', 'E'],
'C': ['A', 'F'],
'D': ['B', 'D'],
'E': ['B', 'F'],
'F': ['C', 'E', 'G'],
'G': ['F'],
}
if __name__ == "__main__":
import doctest
doctest.testmod()
print(depth_first_search(G, 'A')) | 235 |
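# A hedged sketch (added) of the same iterative DFS with conventional names,
# since the snippet above uses placeholder identifiers; the traversal logic and
# the reversed() trick mirror the code above.
def dfs_sketch(graph: dict, start: str) -> set:
    explored, stack = set(), [start]
    while stack:
        v = stack.pop()  # LIFO pop gives depth-first order
        if v in explored:
            continue
        explored.add(v)
        # push unexplored neighbors; reversed() so they are visited in list order
        for adj in reversed(graph[v]):
            if adj not in explored:
                stack.append(adj)
    return explored

assert dfs_sketch({"A": ["B"], "B": ["A"]}, "A") == {"A", "B"}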
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_pegasus import PegasusTokenizer
else:
__a = None
__a = logging.get_logger(__name__)
__a = '▁'
__a = {'vocab_file': 'spiece.model', 'tokenizer_file': 'tokenizer.json'}
__a = {
'vocab_file': {'google/pegasus-xsum': 'https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model'},
'tokenizer_file': {
'google/pegasus-xsum': 'https://huggingface.co/google/pegasus-xsum/resolve/main/tokenizer.json'
},
}
__a = {
'google/pegasus-xsum': 512,
}
class __a( _a ):
"""simple docstring"""
lowerCAmelCase = VOCAB_FILES_NAMES
lowerCAmelCase = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase = PegasusTokenizer
lowerCAmelCase = ['''input_ids''', '''attention_mask''']
def __init__( self ,_SCREAMING_SNAKE_CASE=None ,_SCREAMING_SNAKE_CASE=None ,_SCREAMING_SNAKE_CASE="<pad>" ,_SCREAMING_SNAKE_CASE="</s>" ,_SCREAMING_SNAKE_CASE="<unk>" ,_SCREAMING_SNAKE_CASE="<mask_2>" ,_SCREAMING_SNAKE_CASE="<mask_1>" ,_SCREAMING_SNAKE_CASE=None ,_SCREAMING_SNAKE_CASE=103 ,**_SCREAMING_SNAKE_CASE ,) -> Optional[Any]:
UpperCAmelCase_ : Dict = offset
if additional_special_tokens is not None:
if not isinstance(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ):
raise TypeError(
f'''additional_special_tokens should be of type {type(_SCREAMING_SNAKE_CASE )}, but is'''
f''' {type(_SCREAMING_SNAKE_CASE )}''' )
UpperCAmelCase_ : str = (
([mask_token_sent] + additional_special_tokens)
if mask_token_sent not in additional_special_tokens and mask_token_sent is not None
else additional_special_tokens
)
            # fill additional tokens with ..., <unk_102> in case not all additional tokens are already taken
additional_special_tokens_extended += [
f'''<unk_{i}>''' for i in range(len(_SCREAMING_SNAKE_CASE ) ,self.offset - 1 )
]
if len(set(_SCREAMING_SNAKE_CASE ) ) != len(_SCREAMING_SNAKE_CASE ):
raise ValueError(
'''Please make sure that the provided additional_special_tokens do not contain an incorrectly'''
f''' shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}.''' )
UpperCAmelCase_ : int = additional_special_tokens_extended
else:
UpperCAmelCase_ : Any = [mask_token_sent] if mask_token_sent is not None else []
additional_special_tokens += [f'''<unk_{i}>''' for i in range(2 ,self.offset )]
super().__init__(
_SCREAMING_SNAKE_CASE ,tokenizer_file=_SCREAMING_SNAKE_CASE ,pad_token=_SCREAMING_SNAKE_CASE ,eos_token=_SCREAMING_SNAKE_CASE ,unk_token=_SCREAMING_SNAKE_CASE ,mask_token=_SCREAMING_SNAKE_CASE ,mask_token_sent=_SCREAMING_SNAKE_CASE ,offset=_SCREAMING_SNAKE_CASE ,additional_special_tokens=_SCREAMING_SNAKE_CASE ,**_SCREAMING_SNAKE_CASE ,)
UpperCAmelCase_ : str = vocab_file
UpperCAmelCase_ : Dict = False if not self.vocab_file else True
def a__ ( self ,_SCREAMING_SNAKE_CASE ) -> Any:
UpperCAmelCase_ : Dict = set(self.all_special_ids ) # call it once instead of inside list comp
all_special_ids.remove(self.unk_token_id ) # <unk> is only sometimes special
if all_special_ids != set(range(len(self.additional_special_tokens ) + 3 ) ):
raise ValueError(
'''There should be 3 special tokens: mask_token, pad_token, and eos_token +'''
f''' {len(self.additional_special_tokens )} additional_special_tokens, but got {all_special_ids}''' )
return [1 if x in all_special_ids else 0 for x in seq]
def a__ ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE = None ,_SCREAMING_SNAKE_CASE = False ) -> List[int]:
if already_has_special_tokens:
return self._special_token_mask(_SCREAMING_SNAKE_CASE )
elif token_ids_a is None:
return self._special_token_mask(_SCREAMING_SNAKE_CASE ) + [1]
else:
return self._special_token_mask(token_ids_a + token_ids_a ) + [1]
def a__ ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE=None ) -> List[int]:
if token_ids_a is None:
return token_ids_a + [self.eos_token_id]
# We don't expect to process pairs, but leave the pair logic for API consistency
return token_ids_a + token_ids_a + [self.eos_token_id]
def a__ ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE = None ) -> Tuple[str]:
if not self.can_save_slow_tokenizer:
raise ValueError(
'''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '''
'''tokenizer.''' )
if not os.path.isdir(_SCREAMING_SNAKE_CASE ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
UpperCAmelCase_ : Optional[int] = os.path.join(
_SCREAMING_SNAKE_CASE ,(filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_SCREAMING_SNAKE_CASE ):
copyfile(self.vocab_file ,_SCREAMING_SNAKE_CASE )
return (out_vocab_file,) | 235 | 1 |
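# Worked sketch (added comment, not part of the original file): the offset
# bookkeeping above. When additional_special_tokens is None, the else-branch
# builds [mask_token_sent] plus <unk_2>..<unk_102>, i.e. 1 + len(range(2, 103))
# = 1 + 101 = 102 extra special tokens for the default offset of 103. The exact
# id assigned to each token depends on the loaded tokenizer files; only the
# counts here follow directly from the code above.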
'''simple docstring'''
from scipy.stats import pearsonr
import datasets
A__ : int ='''
Pearson correlation coefficient and p-value for testing non-correlation.
The Pearson correlation coefficient measures the linear relationship between two datasets. The calculation of the p-value relies on the assumption that each dataset is normally distributed. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Correlations of -1 or +1 imply an exact linear relationship. Positive correlations imply that as x increases, so does y. Negative correlations imply that as x increases, y decreases.
The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets.
'''
A__ : List[str] ='''
Args:
predictions (`list` of `int`): Predicted class labels, as returned by a model.
references (`list` of `int`): Ground truth labels.
return_pvalue (`boolean`): If `True`, returns the p-value, along with the correlation coefficient. If `False`, returns only the correlation coefficient. Defaults to `False`.
Returns:
pearsonr (`float`): Pearson correlation coefficient. Minimum possible value is -1. Maximum possible value is 1. Values of 1 and -1 indicate exact linear positive and negative relationships, respectively. A value of 0 implies no correlation.
    p-value (`float`): P-value, which roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets. Minimum possible value is 0. Maximum possible value is 1. Higher values indicate higher probabilities.
Examples:
Example 1-A simple example using only predictions and references.
>>> pearsonr_metric = datasets.load_metric("pearsonr")
>>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5])
>>> print(round(results[\'pearsonr\'], 2))
-0.74
Example 2-The same as Example 1, but that also returns the `p-value`.
>>> pearsonr_metric = datasets.load_metric("pearsonr")
>>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5], return_pvalue=True)
>>> print(sorted(list(results.keys())))
[\'p-value\', \'pearsonr\']
>>> print(round(results[\'pearsonr\'], 2))
-0.74
>>> print(round(results[\'p-value\'], 2))
0.15
'''
A__ : List[str] ='''
@article{2020SciPy-NMeth,
author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and
Haberland, Matt and Reddy, Tyler and Cournapeau, David and
Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and
Bright, Jonathan and {van der Walt}, St{\'e}fan J. and
Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and
Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and
Kern, Robert and Larson, Eric and Carey, C J and
Polat, Ilhan and Feng, Yu and Moore, Eric W. and
{VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and
Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and
Harris, Charles R. and Archibald, Anne M. and
Ribeiro, Antonio H. and Pedregosa, Fabian and
{van Mulbregt}, Paul and {SciPy 1.0 Contributors}},
title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific
Computing in Python}},
journal = {Nature Methods},
year = {2020},
volume = {17},
pages = {261--272},
adsurl = {https://rdcu.be/b08Wh},
doi = {10.1038/s41592-019-0686-2},
}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class UpperCAmelCase ( datasets.Metric ):
def lowercase__ ( self : Optional[int] ) -> List[str]:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""float""" ),
"""references""": datasets.Value("""float""" ),
} ) , reference_urls=["""https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.pearsonr.html"""] , )
def lowercase__ ( self : List[str] , __snake_case : str , __snake_case : List[Any] , __snake_case : Any=False ) -> Optional[int]:
if return_pvalue:
_lowerCAmelCase = pearsonr(__snake_case , __snake_case )
return {"pearsonr": results[0], "p-value": results[1]}
else:
return {"pearsonr": float(pearsonr(__snake_case , __snake_case )[0] )}
| 70 |
'''simple docstring'''
def UpperCamelCase__ ( lowerCAmelCase , lowerCAmelCase ):
"""simple docstring"""
_lowerCAmelCase = [0 for i in range(r + 1 )]
# nc0 = 1
_lowerCAmelCase = 1
for i in range(1 , n + 1 ):
# to compute current row from previous row.
_lowerCAmelCase = min(lowerCAmelCase , lowerCAmelCase )
while j > 0:
c[j] += c[j - 1]
j -= 1
return c[r]
print(binomial_coefficient(n=10, r=5))
| 70 | 1 |
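# A hedged cross-check (added): the row update above computes C(n, r) in O(r)
# space via Pascal's rule c[j] += c[j-1]; math.comb verifies the result.
from math import comb  # Python 3.8+

def binomial_sketch(n: int, r: int) -> int:
    c = [0] * (r + 1)
    c[0] = 1  # C(i, 0) = 1 for every row
    for i in range(1, n + 1):
        j = min(i, r)
        while j > 0:
            c[j] += c[j - 1]  # C(i, j) = C(i-1, j) + C(i-1, j-1)
            j -= 1
    return c[r]

assert binomial_sketch(10, 5) == comb(10, 5) == 252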
import os
import pickle
import unittest
from transformers import AutoTokenizer
from transformers.models.bert.tokenization_bert import BertTokenizer
from transformers.models.bert_japanese.tokenization_bert_japanese import (
VOCAB_FILES_NAMES,
BertJapaneseTokenizer,
CharacterTokenizer,
JumanppTokenizer,
MecabTokenizer,
SudachiTokenizer,
WordpieceTokenizer,
)
from transformers.testing_utils import custom_tokenizers, require_jumanpp, require_sudachi
from ...test_tokenization_common import TokenizerTesterMixin
@custom_tokenizers
class a ( __lowerCamelCase , unittest.TestCase ):
__lowerCAmelCase : List[Any] = BertJapaneseTokenizer
__lowerCAmelCase : Any = False
__lowerCAmelCase : Optional[int] = True
def __lowerCamelCase ( self :List[Any] ):
super().setUp()
snake_case__ : Dict = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''こんにちは''',
'''こん''',
'''にちは''',
'''ばんは''',
'''##こん''',
'''##にちは''',
'''##ばんは''',
'''世界''',
'''##世界''',
'''、''',
'''##、''',
'''。''',
'''##。''',
]
snake_case__ : Union[str, Any] = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file ,'''w''' ,encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
def __lowerCamelCase ( self :Union[str, Any] ,__lowercase :List[str] ):
snake_case__ : Any = '''こんにちは、世界。 \nこんばんは、世界。'''
snake_case__ : Optional[int] = '''こんにちは 、 世界 。 こんばんは 、 世界 。'''
return input_text, output_text
def __lowerCamelCase ( self :List[str] ,__lowercase :Optional[Any] ):
snake_case__ , snake_case__ : Union[str, Any] = self.get_input_output_texts(__lowercase )
snake_case__ : List[Any] = tokenizer.encode(__lowercase ,add_special_tokens=__lowercase )
snake_case__ : List[str] = tokenizer.decode(__lowercase ,clean_up_tokenization_spaces=__lowercase )
return text, ids
def __lowerCamelCase ( self :Tuple ):
pass # TODO add if relevant
def __lowerCamelCase ( self :Dict ):
pass # TODO add if relevant
def __lowerCamelCase ( self :Union[str, Any] ):
pass # TODO add if relevant
def __lowerCamelCase ( self :int ):
snake_case__ : List[Any] = self.tokenizer_class(self.vocab_file )
snake_case__ : Any = tokenizer.tokenize('''こんにちは、世界。\nこんばんは、世界。''' )
self.assertListEqual(__lowercase ,['''こんにちは''', '''、''', '''世界''', '''。''', '''こん''', '''##ばんは''', '''、''', '''世界''', '''。'''] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(__lowercase ) ,[3, 1_2, 1_0, 1_4, 4, 9, 1_2, 1_0, 1_4] )
def __lowerCamelCase ( self :List[str] ):
snake_case__ : str = self.tokenizer_class(self.vocab_file ,word_tokenizer_type='''mecab''' )
self.assertIsNotNone(__lowercase )
snake_case__ : Union[str, Any] = '''こんにちは、世界。\nこんばんは、世界。'''
snake_case__ : Optional[int] = tokenizer.tokenize(__lowercase )
self.assertListEqual(__lowercase ,['''こんにちは''', '''、''', '''世界''', '''。''', '''こん''', '''##ばんは''', '''、''', '''世界''', '''。'''] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(__lowercase ) ,[3, 1_2, 1_0, 1_4, 4, 9, 1_2, 1_0, 1_4] )
snake_case__ : int = os.path.join(self.tmpdirname ,'''tokenizer.bin''' )
with open(__lowercase ,'''wb''' ) as handle:
pickle.dump(__lowercase ,__lowercase )
with open(__lowercase ,'''rb''' ) as handle:
snake_case__ : Union[str, Any] = pickle.load(__lowercase )
snake_case__ : Dict = tokenizer_new.tokenize(__lowercase )
self.assertListEqual(__lowercase ,__lowercase )
def __lowerCamelCase ( self :Any ):
snake_case__ : str = MecabTokenizer(mecab_dic='''ipadic''' )
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) ,['''アップルストア''', '''で''', '''iPhone''', '''8''', '''が''', '''発売''', '''さ''', '''れ''', '''た''', '''。'''] ,)
def __lowerCamelCase ( self :Any ):
try:
snake_case__ : List[Any] = MecabTokenizer(mecab_dic='''unidic_lite''' )
except ModuleNotFoundError:
return
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) ,['''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', '''が''', '''発売''', '''さ''', '''れ''', '''た''', '''。'''] ,)
def __lowerCamelCase ( self :Optional[Any] ):
try:
snake_case__ : Any = MecabTokenizer(mecab_dic='''unidic''' )
except ModuleNotFoundError:
return
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) ,['''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', '''が''', '''発売''', '''さ''', '''れ''', '''た''', '''。'''] ,)
def __lowerCamelCase ( self :Any ):
snake_case__ : Dict = MecabTokenizer(do_lower_case=__lowercase ,mecab_dic='''ipadic''' )
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) ,['''アップルストア''', '''で''', '''iphone''', '''8''', '''が''', '''発売''', '''さ''', '''れ''', '''た''', '''。'''] ,)
def __lowerCamelCase ( self :List[str] ):
try:
snake_case__ : Union[str, Any] = MecabTokenizer(
do_lower_case=__lowercase ,normalize_text=__lowercase ,mecab_option='''-d /usr/local/lib/mecab/dic/jumandic''' )
except RuntimeError:
# if dict doesn't exist in the system, previous code raises this error.
return
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) ,['''アップルストア''', '''で''', '''iPhone''', '''8''', '''が''', '''発売''', '''さ''', '''れた''', '''\u3000''', '''。'''] ,)
def __lowerCamelCase ( self :str ):
snake_case__ : List[str] = MecabTokenizer(normalize_text=__lowercase ,mecab_dic='''ipadic''' )
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) ,['''アップルストア''', '''で''', '''iPhone''', '''8''', '''が''', '''発売''', '''さ''', '''れ''', '''た''', ''' ''', '''。'''] ,)
@require_sudachi
def __lowerCamelCase ( self :Any ):
snake_case__ : Union[str, Any] = self.tokenizer_class(self.vocab_file ,word_tokenizer_type='''sudachi''' )
self.assertIsNotNone(__lowercase )
snake_case__ : Optional[int] = '''こんにちは、世界。\nこんばんは、世界。'''
snake_case__ : Dict = tokenizer.tokenize(__lowercase )
self.assertListEqual(__lowercase ,['''こんにちは''', '''、''', '''世界''', '''。''', '''こん''', '''##ばんは''', '''、''', '''世界''', '''。'''] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(__lowercase ) ,[3, 1_2, 1_0, 1_4, 4, 9, 1_2, 1_0, 1_4] )
snake_case__ : Optional[int] = os.path.join(self.tmpdirname ,'''tokenizer.bin''' )
with open(__lowercase ,'''wb''' ) as handle:
pickle.dump(__lowercase ,__lowercase )
with open(__lowercase ,'''rb''' ) as handle:
snake_case__ : Optional[int] = pickle.load(__lowercase )
snake_case__ : Dict = tokenizer_new.tokenize(__lowercase )
self.assertListEqual(__lowercase ,__lowercase )
@require_sudachi
def __lowerCamelCase ( self :List[Any] ):
snake_case__ : List[str] = SudachiTokenizer(sudachi_dict_type='''core''' )
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) ,[''' ''', '''\t''', '''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', ''' ''', '''が''', ''' ''', ''' ''', '''\n ''', '''発売''', '''さ''', '''れ''', '''た''', ''' ''', '''。''', ''' ''', ''' '''] ,)
@require_sudachi
def __lowerCamelCase ( self :Optional[int] ):
snake_case__ : Any = SudachiTokenizer(sudachi_dict_type='''core''' ,sudachi_split_mode='''A''' )
self.assertListEqual(tokenizer.tokenize('''外国人参政権''' ) ,['''外国''', '''人''', '''参政''', '''権'''] )
@require_sudachi
def __lowerCamelCase ( self :Union[str, Any] ):
snake_case__ : Any = SudachiTokenizer(sudachi_dict_type='''core''' ,sudachi_split_mode='''B''' )
self.assertListEqual(tokenizer.tokenize('''外国人参政権''' ) ,['''外国人''', '''参政権'''] )
@require_sudachi
def __lowerCamelCase ( self :Optional[Any] ):
snake_case__ : int = SudachiTokenizer(sudachi_dict_type='''core''' ,sudachi_split_mode='''C''' )
self.assertListEqual(tokenizer.tokenize('''外国人参政権''' ) ,['''外国人参政権'''] )
@require_sudachi
def __lowerCamelCase ( self :List[Any] ):
snake_case__ : List[Any] = SudachiTokenizer(do_lower_case=__lowercase ,sudachi_dict_type='''core''' )
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) ,[''' ''', '''\t''', '''アップル''', '''ストア''', '''で''', '''iphone''', '''8''', ''' ''', '''が''', ''' ''', ''' ''', '''\n ''', '''発売''', '''さ''', '''れ''', '''た''', ''' ''', '''。''', ''' ''', ''' '''] ,)
@require_sudachi
def __lowerCamelCase ( self :str ):
snake_case__ : str = SudachiTokenizer(normalize_text=__lowercase ,sudachi_dict_type='''core''' )
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) ,[''' ''', '''\t''', '''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', ''' ''', '''が''', ''' ''', ''' ''', '''\n ''', '''発売''', '''さ''', '''れ''', '''た''', '''\u3000''', '''。''', ''' ''', ''' '''] ,)
@require_sudachi
def __lowerCamelCase ( self :int ):
snake_case__ : List[str] = SudachiTokenizer(trim_whitespace=__lowercase ,sudachi_dict_type='''core''' )
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) ,['''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', '''が''', '''発売''', '''さ''', '''れ''', '''た''', '''。'''] ,)
@require_jumanpp
def __lowerCamelCase ( self :Optional[int] ):
snake_case__ : Optional[int] = self.tokenizer_class(self.vocab_file ,word_tokenizer_type='''jumanpp''' )
self.assertIsNotNone(__lowercase )
snake_case__ : Optional[int] = '''こんにちは、世界。\nこんばんは、世界。'''
snake_case__ : Tuple = tokenizer.tokenize(__lowercase )
self.assertListEqual(__lowercase ,['''こんにちは''', '''、''', '''世界''', '''。''', '''こん''', '''##ばんは''', '''、''', '''世界''', '''。'''] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(__lowercase ) ,[3, 1_2, 1_0, 1_4, 4, 9, 1_2, 1_0, 1_4] )
snake_case__ : List[Any] = os.path.join(self.tmpdirname ,'''tokenizer.bin''' )
with open(__lowercase ,'''wb''' ) as handle:
pickle.dump(__lowercase ,__lowercase )
with open(__lowercase ,'''rb''' ) as handle:
snake_case__ : Any = pickle.load(__lowercase )
snake_case__ : Any = tokenizer_new.tokenize(__lowercase )
self.assertListEqual(__lowercase ,__lowercase )
@require_jumanpp
def __lowerCamelCase ( self :Optional[int] ):
snake_case__ : Union[str, Any] = JumanppTokenizer()
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) ,['''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', '''\u3000''', '''が''', '''\u3000''', '''\u3000''', '''\u3000''', '''発売''', '''さ''', '''れた''', '''\u3000''', '''。'''] ,)
@require_jumanpp
def __lowerCamelCase ( self :Tuple ):
snake_case__ : List[str] = JumanppTokenizer(do_lower_case=__lowercase )
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) ,['''アップル''', '''ストア''', '''で''', '''iphone''', '''8''', '''\u3000''', '''が''', '''\u3000''', '''\u3000''', '''\u3000''', '''発売''', '''さ''', '''れた''', '''\u3000''', '''。'''] ,)
@require_jumanpp
def __lowerCamelCase ( self :List[str] ):
snake_case__ : Union[str, Any] = JumanppTokenizer(normalize_text=__lowercase )
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) ,['''ア''', '''ッ''', '''フ''', '''゚''', '''ル''', '''ストア''', '''で''', '''iPhone''', '''8''', '''\u3000''', '''が''', '''\u3000''', '''\u3000''', '''\u3000''', '''発売''', '''さ''', '''れた''', '''\u3000''', '''。'''] ,)
@require_jumanpp
def __lowerCamelCase ( self :Optional[int] ):
snake_case__ : Union[str, Any] = JumanppTokenizer(trim_whitespace=__lowercase )
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) ,['''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', '''が''', '''発売''', '''さ''', '''れた''', '''。'''] ,)
@require_jumanpp
def __lowerCamelCase ( self :int ):
snake_case__ : Tuple = JumanppTokenizer()
self.assertListEqual(
tokenizer.tokenize('''ありがとうございますm(_ _)m見つけるのが大変です。''' ) ,['''ありがとう''', '''ございます''', '''m(_ _)m''', '''見つける''', '''の''', '''が''', '''大変です''', '''。'''] ,)
def __lowerCamelCase ( self :List[Any] ):
snake_case__ : Optional[Any] = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''こんにちは''', '''こん''', '''にちは''', '''ばんは''', '''##こん''', '''##にちは''', '''##ばんは''']
snake_case__ : Tuple = {}
for i, token in enumerate(__lowercase ):
snake_case__ : List[Any] = i
snake_case__ : Any = WordpieceTokenizer(vocab=__lowercase ,unk_token='''[UNK]''' )
self.assertListEqual(tokenizer.tokenize('''''' ) ,[] )
self.assertListEqual(tokenizer.tokenize('''こんにちは''' ) ,['''こんにちは'''] )
self.assertListEqual(tokenizer.tokenize('''こんばんは''' ) ,['''こん''', '''##ばんは'''] )
self.assertListEqual(tokenizer.tokenize('''こんばんは こんばんにちは こんにちは''' ) ,['''こん''', '''##ばんは''', '''[UNK]''', '''こんにちは'''] )
def __lowerCamelCase ( self :List[str] ):
snake_case__ : Optional[int] = BertJapaneseTokenizer.from_pretrained('''nlp-waseda/roberta-base-japanese-with-auto-jumanpp''' )
snake_case__ : Union[str, Any] = tokenizer.subword_tokenizer
snake_case__ : Optional[int] = subword_tokenizer.tokenize('''国境 の 長い トンネル を 抜ける と 雪国 であった 。''' )
self.assertListEqual(__lowercase ,['''▁国境''', '''▁の''', '''▁長い''', '''▁トンネル''', '''▁を''', '''▁抜ける''', '''▁と''', '''▁雪''', '''国''', '''▁であった''', '''▁。'''] )
snake_case__ : List[Any] = subword_tokenizer.tokenize('''こんばんは こんばん にち は こんにちは''' )
self.assertListEqual(__lowercase ,['''▁こん''', '''ばん''', '''は''', '''▁こん''', '''ばん''', '''▁に''', '''ち''', '''▁は''', '''▁こんにちは'''] )
def __lowerCamelCase ( self :Any ):
snake_case__ : Dict = self.tokenizer_class.from_pretrained('''cl-tohoku/bert-base-japanese''' )
snake_case__ : str = tokenizer.encode('''ありがとう。''' ,add_special_tokens=__lowercase )
snake_case__ : List[Any] = tokenizer.encode('''どういたしまして。''' ,add_special_tokens=__lowercase )
snake_case__ : Any = tokenizer.build_inputs_with_special_tokens(__lowercase )
snake_case__ : List[Any] = tokenizer.build_inputs_with_special_tokens(__lowercase ,__lowercase )
# 2 is for "[CLS]", 3 is for "[SEP]"
assert encoded_sentence == [2] + text + [3]
assert encoded_pair == [2] + text + [3] + text_a + [3]
@custom_tokenizers
class a ( __lowerCamelCase , unittest.TestCase ):
__lowerCAmelCase : Optional[int] = BertJapaneseTokenizer
__lowerCAmelCase : Dict = False
def __lowerCamelCase ( self :Union[str, Any] ):
super().setUp()
snake_case__ : List[str] = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''こ''', '''ん''', '''に''', '''ち''', '''は''', '''ば''', '''世''', '''界''', '''、''', '''。''']
snake_case__ : str = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file ,'''w''' ,encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
def __lowerCamelCase ( self :str ,**__lowercase :int ):
return BertJapaneseTokenizer.from_pretrained(self.tmpdirname ,subword_tokenizer_type='''character''' ,**__lowercase )
def __lowerCamelCase ( self :List[str] ,__lowercase :Any ):
snake_case__ : Optional[int] = '''こんにちは、世界。 \nこんばんは、世界。'''
snake_case__ : List[Any] = '''こ ん に ち は 、 世 界 。 こ ん ば ん は 、 世 界 。'''
return input_text, output_text
def __lowerCamelCase ( self :Tuple ):
pass # TODO add if relevant
def __lowerCamelCase ( self :Optional[Any] ):
pass # TODO add if relevant
def __lowerCamelCase ( self :Union[str, Any] ):
pass # TODO add if relevant
def __lowerCamelCase ( self :List[str] ):
snake_case__ : int = self.tokenizer_class(self.vocab_file ,subword_tokenizer_type='''character''' )
snake_case__ : List[Any] = tokenizer.tokenize('''こんにちは、世界。 \nこんばんは、世界。''' )
self.assertListEqual(
__lowercase ,['''こ''', '''ん''', '''に''', '''ち''', '''は''', '''、''', '''世''', '''界''', '''。''', '''こ''', '''ん''', '''ば''', '''ん''', '''は''', '''、''', '''世''', '''界''', '''。'''] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(__lowercase ) ,[3, 4, 5, 6, 7, 1_1, 9, 1_0, 1_2, 3, 4, 8, 4, 7, 1_1, 9, 1_0, 1_2] )
def __lowerCamelCase ( self :Optional[int] ):
snake_case__ : int = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''こ''', '''ん''', '''に''', '''ち''', '''は''', '''ば''', '''世''', '''界''', '''、''', '''。''']
snake_case__ : List[str] = {}
for i, token in enumerate(__lowercase ):
snake_case__ : Dict = i
snake_case__ : Union[str, Any] = CharacterTokenizer(vocab=__lowercase ,unk_token='''[UNK]''' )
self.assertListEqual(tokenizer.tokenize('''''' ) ,[] )
self.assertListEqual(tokenizer.tokenize('''こんにちは''' ) ,['''こ''', '''ん''', '''に''', '''ち''', '''は'''] )
self.assertListEqual(tokenizer.tokenize('''こんにちほ''' ) ,['''こ''', '''ん''', '''に''', '''ち''', '''[UNK]'''] )
def __lowerCamelCase ( self :List[str] ):
snake_case__ : Dict = self.tokenizer_class.from_pretrained('''cl-tohoku/bert-base-japanese-char''' )
snake_case__ : Any = tokenizer.encode('''ありがとう。''' ,add_special_tokens=__lowercase )
snake_case__ : Optional[Any] = tokenizer.encode('''どういたしまして。''' ,add_special_tokens=__lowercase )
snake_case__ : Any = tokenizer.build_inputs_with_special_tokens(__lowercase )
snake_case__ : List[str] = tokenizer.build_inputs_with_special_tokens(__lowercase ,__lowercase )
# 2 is for "[CLS]", 3 is for "[SEP]"
assert encoded_sentence == [2] + text + [3]
assert encoded_pair == [2] + text + [3] + text_a + [3]
@custom_tokenizers
class a ( unittest.TestCase ):
def __lowerCamelCase ( self :int ):
snake_case__ : Any = '''cl-tohoku/bert-base-japanese'''
snake_case__ : Dict = AutoTokenizer.from_pretrained(__lowercase )
self.assertIsInstance(__lowercase ,__lowercase )
class a ( unittest.TestCase ):
def __lowerCamelCase ( self :List[Any] ):
snake_case__ : List[str] = '''cl-tohoku/bert-base-japanese'''
with self.assertLogs('''transformers''' ,level='''WARNING''' ) as cm:
BertTokenizer.from_pretrained(__lowercase )
self.assertTrue(
cm.records[0].message.startswith(
'''The tokenizer class you load from this checkpoint is not the same type as the class this function'''
''' is called from.''' ) )
snake_case__ : Union[str, Any] = '''bert-base-cased'''
with self.assertLogs('''transformers''' ,level='''WARNING''' ) as cm:
BertJapaneseTokenizer.from_pretrained(__lowercase )
self.assertTrue(
cm.records[0].message.startswith(
'''The tokenizer class you load from this checkpoint is not the same type as the class this function'''
''' is called from.''' ) )
| 44 |
A__ = 0 # The first color of the flag.
A__ = 1 # The second color of the flag.
A__ = 2 # The third color of the flag.
A__ = (red, white, blue)
def _lowerCAmelCase ( __lowerCAmelCase ) -> list:
"""simple docstring"""
if not sequence:
return []
if len(__lowerCAmelCase ) == 1:
return list(__lowerCAmelCase )
snake_case__ : List[Any] = 0
snake_case__ : str = len(__lowerCAmelCase ) - 1
snake_case__ : List[Any] = 0
while mid <= high:
if sequence[mid] == colors[0]:
snake_case__ , snake_case__ : List[Any] = sequence[mid], sequence[low]
low += 1
mid += 1
elif sequence[mid] == colors[1]:
mid += 1
elif sequence[mid] == colors[2]:
snake_case__ , snake_case__ : int = sequence[high], sequence[mid]
high -= 1
else:
snake_case__ : List[Any] = f"""The elements inside the sequence must contains only {colors} values"""
raise ValueError(__lowerCAmelCase )
return sequence
if __name__ == "__main__":
import doctest
doctest.testmod()
A__ = input('''Enter numbers separated by commas:\n''').strip()
A__ = [int(item.strip()) for item in user_input.split(''',''')]
print(f"""{dutch_national_flag_sort(unsorted)}""")
| 44 | 1 |
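# A hedged sketch (added) of the same three-way partition with conventional
# names, assuming the values are only 0, 1, 2. Invariant: everything left of
# `low` is 0, everything right of `high` is 2, and `mid` scans the unknown
# region in between, so the whole pass is O(n) with O(1) extra space.
def dutch_flag_sketch(seq: list) -> list:
    low, mid, high = 0, 0, len(seq) - 1
    while mid <= high:
        if seq[mid] == 0:
            seq[low], seq[mid] = seq[mid], seq[low]
            low += 1
            mid += 1
        elif seq[mid] == 1:
            mid += 1
        else:  # seq[mid] == 2
            seq[mid], seq[high] = seq[high], seq[mid]
            high -= 1
    return seq

assert dutch_flag_sketch([2, 0, 1, 2, 0]) == [0, 0, 1, 2, 2]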
"""simple docstring"""
import os
import zipfile
import requests
from get_ci_error_statistics import download_artifact, get_artifacts_links
def _SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_=7 ) -> List[Any]:
A__ = None
if token is not None:
A__ = {"Accept": "application/vnd.github+json", "Authorization": f"""Bearer {token}"""}
# The id of a workflow (not of a workflow run)
A__ = "636036"
A__ = f"""https://api.github.com/repos/huggingface/transformers/actions/workflows/{workflow_id}/runs"""
# On `main` branch + event being `schedule` + not returning PRs + only `num_runs` results
url += f"""?branch=main&event=schedule&exclude_pull_requests=true&per_page={num_runs}"""
A__ = requests.get(lowercase_ , headers=lowercase_ ).json()
return result["workflow_runs"]
def _SCREAMING_SNAKE_CASE ( lowercase_ ) -> Any:
A__ = get_daily_ci_runs(lowercase_ )
A__ = None
for workflow_run in workflow_runs:
if workflow_run["status"] == "completed":
A__ = workflow_run["id"]
break
return workflow_run_id
def _SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_ ) -> Union[str, Any]:
A__ = get_last_daily_ci_runs(lowercase_ )
if workflow_run_id is not None:
A__ = get_artifacts_links(worflow_run_id=lowercase_ , token=lowercase_ )
for artifact_name in artifact_names:
if artifact_name in artifacts_links:
A__ = artifacts_links[artifact_name]
download_artifact(
artifact_name=lowercase_ , artifact_url=lowercase_ , output_dir=lowercase_ , token=lowercase_ )
def _SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_ ) -> Union[str, Any]:
get_last_daily_ci_artifacts(lowercase_ , lowercase_ , lowercase_ )
A__ = {}
for artifact_name in artifact_names:
A__ = os.path.join(lowercase_ , f"""{artifact_name}.zip""" )
if os.path.isfile(lowercase_ ):
A__ = {}
with zipfile.ZipFile(lowercase_ ) as z:
for filename in z.namelist():
if not os.path.isdir(lowercase_ ):
# read the file
with z.open(lowercase_ ) as f:
A__ = f.read().decode("UTF-8" )
return results
| 247 |
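# Illustrative call sequence (added; names and argument values are assumptions
# based on the un-obfuscated transformers utility this snippet mirrors):
#
#     results = get_last_daily_ci_reports(
#         artifact_names=["ci_results"], output_dir="ci_artifacts", token=gh_token
#     )
#
# i.e. find the latest completed scheduled run on `main`, download each named
# artifact zip into output_dir, then read every file inside each zip into a
# nested {artifact_name: {filename: text}} dict, which is exactly the flow of
# the three functions above.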
"""simple docstring"""
import pandas as pd
from matplotlib import pyplot as plt
from sklearn.linear_model import LinearRegression
# Splitting the dataset into the Training set and Test set
from sklearn.model_selection import train_test_split
# Fitting Polynomial Regression to the dataset
from sklearn.preprocessing import PolynomialFeatures
# Importing the dataset
SCREAMING_SNAKE_CASE = pd.read_csv(
"https://s3.us-west-2.amazonaws.com/public.gamelab.fun/dataset/"
"position_salaries.csv"
)
SCREAMING_SNAKE_CASE = dataset.iloc[:, 1:2].values
SCREAMING_SNAKE_CASE = dataset.iloc[:, 2].values
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE = train_test_split(X, y, test_size=0.2, random_state=0)
SCREAMING_SNAKE_CASE = PolynomialFeatures(degree=4)
SCREAMING_SNAKE_CASE = poly_reg.fit_transform(X)
SCREAMING_SNAKE_CASE = LinearRegression()
pol_reg.fit(X_poly, y)
def _SCREAMING_SNAKE_CASE ( ) -> int:
plt.scatter(lowercase_ , lowercase_ , color="red" )
plt.plot(lowercase_ , pol_reg.predict(poly_reg.fit_transform(lowercase_ ) ) , color="blue" )
plt.title("Truth or Bluff (Linear Regression)" )
plt.xlabel("Position level" )
plt.ylabel("Salary" )
plt.show()
if __name__ == "__main__":
viz_polymonial()
    # Predicting a new result with Polynomial Regression
pol_reg.predict(poly_reg.fit_transform([[5.5]]))
# output should be 132148.43750003
| 247 | 1 |
from typing import List, Optional, Union
import torch
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
__UpperCAmelCase = logging.get_logger(__name__) # pylint: disable=invalid-name
__UpperCAmelCase = """
Examples:
```py
>>> from diffusers import KandinskyV22Pipeline, KandinskyV22PriorPipeline
>>> import torch
>>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(\"kandinsky-community/kandinsky-2-2-prior\")
>>> pipe_prior.to(\"cuda\")
>>> prompt = \"red cat, 4k photo\"
>>> out = pipe_prior(prompt)
>>> image_emb = out.image_embeds
>>> zero_image_emb = out.negative_image_embeds
>>> pipe = KandinskyV22Pipeline.from_pretrained(\"kandinsky-community/kandinsky-2-2-decoder\")
>>> pipe.to(\"cuda\")
>>> image = pipe(
... image_embeds=image_emb,
... negative_image_embeds=zero_image_emb,
... height=768,
... width=768,
... num_inference_steps=50,
... ).images
>>> image[0].save(\"cat.png\")
```
"""
def __UpperCamelCase ( lowercase__ : Optional[int] , lowercase__ : str , lowercase__ : List[Any]=8 ) -> List[str]:
'''simple docstring'''
lowerCAmelCase_ : Optional[Any] = height // scale_factor**2
if height % scale_factor**2 != 0:
new_height += 1
lowerCAmelCase_ : Optional[Any] = width // scale_factor**2
if width % scale_factor**2 != 0:
new_width += 1
return new_height * scale_factor, new_width * scale_factor
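# Worked example (added comment): with the movq scale_factor of 8, the helper
# above returns the *latent* spatial size, rounded up so it is a multiple of 8:
#   height = 768 -> 768 // 64 = 12 exactly      -> 12 * 8 = 96 latent pixels
#   height = 700 -> 700 // 64 = 10 remainder 60 -> 11 * 8 = 88 latent pixels
# After decoding, the pixel-space side is latent_size * scale_factor (96 * 8 = 768).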
class __a ( __UpperCamelCase ):
def __init__( self : List[str] , UpperCAmelCase : UNetaDConditionModel , UpperCAmelCase : DDPMScheduler , UpperCAmelCase : VQModel , ):
super().__init__()
self.register_modules(
unet=UpperCAmelCase , scheduler=UpperCAmelCase , movq=UpperCAmelCase , )
lowerCAmelCase_ : Dict = 2 ** (len(self.movq.config.block_out_channels ) - 1)
def A ( self : List[Any] , UpperCAmelCase : Optional[int] , UpperCAmelCase : Dict , UpperCAmelCase : Optional[int] , UpperCAmelCase : Tuple , UpperCAmelCase : List[Any] , UpperCAmelCase : Optional[Any] ):
if latents is None:
lowerCAmelCase_ : Union[str, Any] = randn_tensor(UpperCAmelCase , generator=UpperCAmelCase , device=UpperCAmelCase , dtype=UpperCAmelCase )
else:
if latents.shape != shape:
raise ValueError(F'Unexpected latents shape, got {latents.shape}, expected {shape}' )
lowerCAmelCase_ : Any = latents.to(UpperCAmelCase )
lowerCAmelCase_ : Union[str, Any] = latents * scheduler.init_noise_sigma
return latents
def A ( self : List[str] , UpperCAmelCase : int=0 ):
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError("""Please install accelerate via `pip install accelerate`""" )
lowerCAmelCase_ : Optional[int] = torch.device(F'cuda:{gpu_id}' )
lowerCAmelCase_ : Any = [
self.unet,
self.movq,
]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(UpperCAmelCase , UpperCAmelCase )
def A ( self : List[str] , UpperCAmelCase : Optional[Any]=0 ):
if is_accelerate_available() and is_accelerate_version(""">=""" , """0.17.0.dev0""" ):
from accelerate import cpu_offload_with_hook
else:
raise ImportError("""`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.""" )
lowerCAmelCase_ : str = torch.device(F'cuda:{gpu_id}' )
if self.device.type != "cpu":
self.to("""cpu""" , silence_dtype_warnings=UpperCAmelCase )
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
lowerCAmelCase_ : Dict = None
for cpu_offloaded_model in [self.unet, self.movq]:
lowerCAmelCase_ : Tuple = cpu_offload_with_hook(UpperCAmelCase , UpperCAmelCase , prev_module_hook=UpperCAmelCase )
# We'll offload the last model manually.
lowerCAmelCase_ : Optional[Any] = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def A ( self : str ):
if not hasattr(self.unet , """_hf_hook""" ):
return self.device
for module in self.unet.modules():
if (
hasattr(UpperCAmelCase , """_hf_hook""" )
and hasattr(module._hf_hook , """execution_device""" )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
@replace_example_docstring(UpperCAmelCase )
def __call__( self : str , UpperCAmelCase : Union[torch.FloatTensor, List[torch.FloatTensor]] , UpperCAmelCase : Union[torch.FloatTensor, List[torch.FloatTensor]] , UpperCAmelCase : int = 5_12 , UpperCAmelCase : int = 5_12 , UpperCAmelCase : int = 1_00 , UpperCAmelCase : float = 4.0 , UpperCAmelCase : int = 1 , UpperCAmelCase : Optional[Union[torch.Generator, List[torch.Generator]]] = None , UpperCAmelCase : Optional[torch.FloatTensor] = None , UpperCAmelCase : Optional[str] = "pil" , UpperCAmelCase : bool = True , ):
lowerCAmelCase_ : Union[str, Any] = self._execution_device
lowerCAmelCase_ : Union[str, Any] = guidance_scale > 1.0
if isinstance(UpperCAmelCase , UpperCAmelCase ):
lowerCAmelCase_ : List[Any] = torch.cat(UpperCAmelCase , dim=0 )
lowerCAmelCase_ : Dict = image_embeds.shape[0] * num_images_per_prompt
if isinstance(UpperCAmelCase , UpperCAmelCase ):
lowerCAmelCase_ : Any = torch.cat(UpperCAmelCase , dim=0 )
if do_classifier_free_guidance:
lowerCAmelCase_ : List[Any] = image_embeds.repeat_interleave(UpperCAmelCase , dim=0 )
lowerCAmelCase_ : Optional[int] = negative_image_embeds.repeat_interleave(UpperCAmelCase , dim=0 )
lowerCAmelCase_ : Dict = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=UpperCAmelCase )
self.scheduler.set_timesteps(UpperCAmelCase , device=UpperCAmelCase )
lowerCAmelCase_ : int = self.scheduler.timesteps
lowerCAmelCase_ : str = self.unet.config.in_channels
lowerCAmelCase_ : Dict = downscale_height_and_width(UpperCAmelCase , UpperCAmelCase , self.movq_scale_factor )
# create initial latent
lowerCAmelCase_ : List[Any] = self.prepare_latents(
(batch_size, num_channels_latents, height, width) , image_embeds.dtype , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , self.scheduler , )
for i, t in enumerate(self.progress_bar(UpperCAmelCase ) ):
# expand the latents if we are doing classifier free guidance
lowerCAmelCase_ : Any = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
lowerCAmelCase_ : List[str] = {'image_embeds': image_embeds}
lowerCAmelCase_ : Union[str, Any] = self.unet(
sample=UpperCAmelCase , timestep=UpperCAmelCase , encoder_hidden_states=UpperCAmelCase , added_cond_kwargs=UpperCAmelCase , return_dict=UpperCAmelCase , )[0]
if do_classifier_free_guidance:
lowerCAmelCase_ : int = noise_pred.split(latents.shape[1] , dim=1 )
lowerCAmelCase_ : int = noise_pred.chunk(2 )
lowerCAmelCase_ : int = variance_pred.chunk(2 )
lowerCAmelCase_ : List[Any] = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
lowerCAmelCase_ : Optional[int] = torch.cat([noise_pred, variance_pred_text] , dim=1 )
if not (
hasattr(self.scheduler.config , """variance_type""" )
and self.scheduler.config.variance_type in ["learned", "learned_range"]
):
lowerCAmelCase_ : Dict = noise_pred.split(latents.shape[1] , dim=1 )
# compute the previous noisy sample x_t -> x_t-1
lowerCAmelCase_ : Optional[Any] = self.scheduler.step(
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , generator=UpperCAmelCase , )[0]
# post-processing
lowerCAmelCase_ : List[str] = self.movq.decode(UpperCAmelCase , force_not_quantize=UpperCAmelCase )['sample']
if output_type not in ["pt", "np", "pil"]:
raise ValueError(F'Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}' )
if output_type in ["np", "pil"]:
lowerCAmelCase_ : Dict = image * 0.5 + 0.5
lowerCAmelCase_ : List[Any] = image.clamp(0 , 1 )
lowerCAmelCase_ : Optional[Any] = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
lowerCAmelCase_ : Any = self.numpy_to_pil(UpperCAmelCase )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=UpperCAmelCase )
| 350 |
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
__UpperCAmelCase = logging.get_logger(__name__)
__UpperCAmelCase = {
'EleutherAI/gpt-j-6B': 'https://huggingface.co/EleutherAI/gpt-j-6B/resolve/main/config.json',
# See all GPT-J models at https://huggingface.co/models?filter=gpt_j
}
class __a ( __UpperCamelCase ):
__snake_case : Union[str, Any] = """gptj"""
__snake_case : int = {
"""max_position_embeddings""": """n_positions""",
"""hidden_size""": """n_embd""",
"""num_attention_heads""": """n_head""",
"""num_hidden_layers""": """n_layer""",
}
def __init__( self : List[str] , UpperCAmelCase : Optional[int]=5_04_00 , UpperCAmelCase : Optional[int]=20_48 , UpperCAmelCase : str=40_96 , UpperCAmelCase : Any=28 , UpperCAmelCase : Dict=16 , UpperCAmelCase : List[str]=64 , UpperCAmelCase : int=None , UpperCAmelCase : Union[str, Any]="gelu_new" , UpperCAmelCase : Tuple=0.0 , UpperCAmelCase : Dict=0.0 , UpperCAmelCase : str=0.0 , UpperCAmelCase : Optional[Any]=1e-5 , UpperCAmelCase : List[Any]=0.02 , UpperCAmelCase : Optional[int]=True , UpperCAmelCase : Dict=5_02_56 , UpperCAmelCase : int=5_02_56 , UpperCAmelCase : Tuple=False , **UpperCAmelCase : Any , ):
lowerCAmelCase_ : Tuple = vocab_size
lowerCAmelCase_ : Union[str, Any] = n_positions
lowerCAmelCase_ : Union[str, Any] = n_embd
lowerCAmelCase_ : List[Any] = n_layer
lowerCAmelCase_ : List[Any] = n_head
lowerCAmelCase_ : Tuple = n_inner
lowerCAmelCase_ : Optional[Any] = rotary_dim
lowerCAmelCase_ : str = activation_function
lowerCAmelCase_ : str = resid_pdrop
lowerCAmelCase_ : List[Any] = embd_pdrop
lowerCAmelCase_ : Dict = attn_pdrop
lowerCAmelCase_ : Any = layer_norm_epsilon
lowerCAmelCase_ : Optional[int] = initializer_range
lowerCAmelCase_ : Optional[int] = use_cache
lowerCAmelCase_ : Optional[int] = bos_token_id
lowerCAmelCase_ : Any = eos_token_id
super().__init__(
bos_token_id=UpperCAmelCase , eos_token_id=UpperCAmelCase , tie_word_embeddings=UpperCAmelCase , **UpperCAmelCase )
class __a ( __UpperCamelCase ):
def __init__( self : Any , UpperCAmelCase : PretrainedConfig , UpperCAmelCase : str = "default" , UpperCAmelCase : List[PatchingSpec] = None , UpperCAmelCase : bool = False , ):
super().__init__(UpperCAmelCase , task=UpperCAmelCase , patching_specs=UpperCAmelCase , use_past=UpperCAmelCase )
if not getattr(self._config , """pad_token_id""" , UpperCAmelCase ):
# TODO: how to do that better?
lowerCAmelCase_ : List[Any] = 0
@property
def A ( self : List[Any] ):
lowerCAmelCase_ : Optional[int] = OrderedDict({"""input_ids""": {0: """batch""", 1: """sequence"""}} )
if self.use_past:
self.fill_with_past_key_values_(UpperCAmelCase , direction="""inputs""" )
lowerCAmelCase_ : Any = {0: """batch""", 1: """past_sequence + sequence"""}
else:
lowerCAmelCase_ : List[Any] = {0: """batch""", 1: """sequence"""}
return common_inputs
@property
def A ( self : Union[str, Any] ):
return self._config.n_layer
@property
def A ( self : Optional[Any] ):
return self._config.n_head
def A ( self : Optional[Any] , UpperCAmelCase : PreTrainedTokenizer , UpperCAmelCase : int = -1 , UpperCAmelCase : int = -1 , UpperCAmelCase : bool = False , UpperCAmelCase : Optional[TensorType] = None , ):
lowerCAmelCase_ : Optional[Any] = super(UpperCAmelCase , self ).generate_dummy_inputs(
UpperCAmelCase , batch_size=UpperCAmelCase , seq_length=UpperCAmelCase , is_pair=UpperCAmelCase , framework=UpperCAmelCase )
        # We need to order the inputs in the way they appear in the forward()
lowerCAmelCase_ : List[Any] = OrderedDict({"""input_ids""": common_inputs["""input_ids"""]} )
# Need to add the past_keys
if self.use_past:
if not is_torch_available():
raise ValueError("""Cannot generate dummy past_keys inputs without PyTorch installed.""" )
else:
import torch
lowerCAmelCase_ , lowerCAmelCase_ : int = common_inputs["""input_ids"""].shape
# Not using the same length for past_key_values
lowerCAmelCase_ : Optional[Any] = seqlen + 2
lowerCAmelCase_ : Optional[int] = (
batch,
self.num_attention_heads,
past_key_values_length,
self._config.hidden_size // self.num_attention_heads,
)
lowerCAmelCase_ : Optional[int] = [
(torch.zeros(UpperCAmelCase ), torch.zeros(UpperCAmelCase )) for _ in range(self.num_layers )
]
lowerCAmelCase_ : Dict = common_inputs["""attention_mask"""]
if self.use_past:
lowerCAmelCase_ : Union[str, Any] = ordered_inputs["""attention_mask"""].dtype
lowerCAmelCase_ : str = torch.cat(
[ordered_inputs["""attention_mask"""], torch.ones(UpperCAmelCase , UpperCAmelCase , dtype=UpperCAmelCase )] , dim=1 )
return ordered_inputs
@property
def A ( self : Optional[int] ):
return 13
| 28 | 0 |
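# Worked shape example (added comment; uses the config defaults above, which is
# an assumption about any particular checkpoint): n_embd=4096 and n_head=16 give
# a head dimension of 4096 // 16 = 256, so each of the n_layer=28 layers gets a
# (key, value) pair of dummy tensors shaped (batch, 16, seqlen + 2, 256),
# matching past_key_values_length = seqlen + 2 in generate_dummy_inputs above;
# the attention mask is then extended by the same past length along dim 1.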
"""simple docstring"""
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
'''microsoft/unispeech-large-1500h-cv''': (
'''https://huggingface.co/microsoft/unispeech-large-1500h-cv/resolve/main/config.json'''
),
# See all UniSpeech models at https://huggingface.co/models?filter=unispeech
}
class __snake_case ( _lowercase):
snake_case__ : List[str] = "unispeech"
def __init__( self : List[str] , __lowerCAmelCase : List[Any]=3_2 , __lowerCAmelCase : str=7_6_8 , __lowerCAmelCase : int=1_2 , __lowerCAmelCase : int=1_2 , __lowerCAmelCase : int=3_0_7_2 , __lowerCAmelCase : Tuple="gelu" , __lowerCAmelCase : Dict=0.1 , __lowerCAmelCase : Dict=0.1 , __lowerCAmelCase : str=0.1 , __lowerCAmelCase : Tuple=0.0 , __lowerCAmelCase : Optional[int]=0.0 , __lowerCAmelCase : List[Any]=0.1 , __lowerCAmelCase : Optional[int]=0.1 , __lowerCAmelCase : Tuple=0.02 , __lowerCAmelCase : Dict=1E-5 , __lowerCAmelCase : Optional[int]="group" , __lowerCAmelCase : Dict="gelu" , __lowerCAmelCase : int=(5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2) , __lowerCAmelCase : Optional[int]=(5, 2, 2, 2, 2, 2, 2) , __lowerCAmelCase : Union[str, Any]=(1_0, 3, 3, 3, 3, 2, 2) , __lowerCAmelCase : List[Any]=False , __lowerCAmelCase : List[str]=1_2_8 , __lowerCAmelCase : Any=1_6 , __lowerCAmelCase : Optional[int]=False , __lowerCAmelCase : Optional[Any]=True , __lowerCAmelCase : Union[str, Any]=0.05 , __lowerCAmelCase : Union[str, Any]=1_0 , __lowerCAmelCase : List[Any]=2 , __lowerCAmelCase : Dict=0.0 , __lowerCAmelCase : Optional[int]=1_0 , __lowerCAmelCase : Dict=0 , __lowerCAmelCase : List[str]=3_2_0 , __lowerCAmelCase : List[Any]=2 , __lowerCAmelCase : Dict=0.1 , __lowerCAmelCase : Tuple=1_0_0 , __lowerCAmelCase : Dict=2_5_6 , __lowerCAmelCase : str=2_5_6 , __lowerCAmelCase : List[Any]=0.1 , __lowerCAmelCase : Dict="mean" , __lowerCAmelCase : Union[str, Any]=False , __lowerCAmelCase : Dict=False , __lowerCAmelCase : Optional[Any]=2_5_6 , __lowerCAmelCase : Dict=8_0 , __lowerCAmelCase : int=0 , __lowerCAmelCase : Optional[int]=1 , __lowerCAmelCase : Dict=2 , __lowerCAmelCase : Any=0.5 , **__lowerCAmelCase : Optional[Any] , ):
"""simple docstring"""
super().__init__(**__lowerCAmelCase , pad_token_id=__lowerCAmelCase , bos_token_id=__lowerCAmelCase , eos_token_id=__lowerCAmelCase )
_lowerCamelCase : Dict = hidden_size
_lowerCamelCase : Any = feat_extract_norm
_lowerCamelCase : List[Any] = feat_extract_activation
_lowerCamelCase : Any = list(__lowerCAmelCase )
_lowerCamelCase : Tuple = list(__lowerCAmelCase )
_lowerCamelCase : int = list(__lowerCAmelCase )
_lowerCamelCase : List[str] = conv_bias
_lowerCamelCase : List[str] = num_conv_pos_embeddings
_lowerCamelCase : Tuple = num_conv_pos_embedding_groups
_lowerCamelCase : List[str] = len(self.conv_dim )
_lowerCamelCase : Tuple = num_hidden_layers
_lowerCamelCase : List[Any] = intermediate_size
_lowerCamelCase : Dict = hidden_act
_lowerCamelCase : Union[str, Any] = num_attention_heads
_lowerCamelCase : Tuple = hidden_dropout
_lowerCamelCase : List[Any] = attention_dropout
_lowerCamelCase : Optional[int] = activation_dropout
_lowerCamelCase : Optional[Any] = feat_proj_dropout
_lowerCamelCase : Optional[int] = final_dropout
_lowerCamelCase : Any = layerdrop
_lowerCamelCase : Any = layer_norm_eps
_lowerCamelCase : List[Any] = initializer_range
_lowerCamelCase : List[str] = num_ctc_classes
_lowerCamelCase : List[Any] = vocab_size
_lowerCamelCase : Optional[Any] = do_stable_layer_norm
_lowerCamelCase : Tuple = use_weighted_layer_sum
_lowerCamelCase : List[Any] = classifier_proj_size
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
'''Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='''
''' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='''
f''' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,'''
f''' `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
_lowerCamelCase : Any = apply_spec_augment
_lowerCamelCase : Dict = mask_time_prob
_lowerCamelCase : List[str] = mask_time_length
_lowerCamelCase : Optional[Any] = mask_time_min_masks
_lowerCamelCase : List[str] = mask_feature_prob
_lowerCamelCase : int = mask_feature_length
_lowerCamelCase : Dict = mask_feature_min_masks
# parameters for pretraining with codevector quantized representations
_lowerCamelCase : Optional[Any] = num_codevectors_per_group
_lowerCamelCase : int = num_codevector_groups
_lowerCamelCase : List[Any] = contrastive_logits_temperature
_lowerCamelCase : List[str] = feat_quantizer_dropout
_lowerCamelCase : Dict = num_negatives
_lowerCamelCase : Optional[int] = codevector_dim
_lowerCamelCase : List[Any] = proj_codevector_dim
_lowerCamelCase : List[Any] = diversity_loss_weight
# ctc loss
_lowerCamelCase : Union[str, Any] = ctc_loss_reduction
_lowerCamelCase : Any = ctc_zero_infinity
# pretraining loss
_lowerCamelCase : str = replace_prob
@property
def SCREAMING_SNAKE_CASE ( self : int ):
"""simple docstring"""
return functools.reduce(operator.mul , self.conv_stride , 1 )
| 72 |
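# Worked example (added comment): the property above multiplies the conv strides,
# so the default conv_stride = (5, 2, 2, 2, 2, 2, 2) yields 5 * 2**6 = 320, i.e.
# one encoder frame per 320 input samples, 20 ms of audio at a 16 kHz sampling
# rate (the sampling rate is an assumption, not stated in this config).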
import re
import jax.numpy as jnp
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.random import PRNGKey
from ..utils import logging
a__ = logging.get_logger(__name__)
def __UpperCAmelCase ( __a : Dict ) -> Tuple:
"""simple docstring"""
_a : Any = R'''\w+[.]\d+'''
_a : Union[str, Any] = re.findall(__a ,__a )
for pat in pats:
_a : int = key.replace(__a ,'''_'''.join(pat.split('''.''' ) ) )
return key
def __UpperCAmelCase ( __a : List[str] ,__a : Union[str, Any] ,__a : Optional[int] ) -> Tuple:
"""simple docstring"""
_a : Dict = pt_tuple_key[:-1] + ('''scale''',)
if (
any('''norm''' in str_ for str_ in pt_tuple_key )
and (pt_tuple_key[-1] == "bias")
and (pt_tuple_key[:-1] + ("bias",) not in random_flax_state_dict)
and (pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict)
):
_a : Dict = pt_tuple_key[:-1] + ('''scale''',)
return renamed_pt_tuple_key, pt_tensor
elif pt_tuple_key[-1] in ["weight", "gamma"] and pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict:
_a : Optional[int] = pt_tuple_key[:-1] + ('''scale''',)
return renamed_pt_tuple_key, pt_tensor
# embedding
if pt_tuple_key[-1] == "weight" and pt_tuple_key[:-1] + ("embedding",) in random_flax_state_dict:
_a : Union[str, Any] = pt_tuple_key[:-1] + ('''embedding''',)
return renamed_pt_tuple_key, pt_tensor
# conv layer
_a : Tuple = pt_tuple_key[:-1] + ('''kernel''',)
if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4:
_a : List[str] = pt_tensor.transpose(2 ,3 ,1 ,0 )
return renamed_pt_tuple_key, pt_tensor
# linear layer
_a : Dict = pt_tuple_key[:-1] + ('''kernel''',)
if pt_tuple_key[-1] == "weight":
_a : Union[str, Any] = pt_tensor.T
return renamed_pt_tuple_key, pt_tensor
# old PyTorch layer norm weight
_a : Dict = pt_tuple_key[:-1] + ('''weight''',)
if pt_tuple_key[-1] == "gamma":
return renamed_pt_tuple_key, pt_tensor
# old PyTorch layer norm bias
_a : Union[str, Any] = pt_tuple_key[:-1] + ('''bias''',)
if pt_tuple_key[-1] == "beta":
return renamed_pt_tuple_key, pt_tensor
return pt_tuple_key, pt_tensor
def __UpperCAmelCase ( __a : Dict ,__a : str ,__a : str=42 ) -> Optional[int]:
"""simple docstring"""
    # Step 1: Convert the PyTorch state dict tensors to numpy arrays
    _a : Tuple = {k: v.numpy() for k, v in pt_state_dict.items()}
# Step 2: Since the model is stateless, get random Flax params
_a : List[Any] = flax_model.init_weights(PRNGKey(__a ) )
_a : Optional[int] = flatten_dict(__a )
_a : Tuple = {}
    # Need to change some parameter names to match Flax names
for pt_key, pt_tensor in pt_state_dict.items():
_a : List[str] = rename_key(__a )
_a : Optional[Any] = tuple(renamed_pt_key.split('''.''' ) )
# Correctly rename weight parameters
_a , _a : List[str] = rename_key_and_reshape_tensor(__a ,__a ,__a )
if flax_key in random_flax_state_dict:
if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
raise ValueError(
F"""PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape """
F"""{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.""" )
# also add unexpected weight so that warning is thrown
_a : Dict = jnp.asarray(__a )
return unflatten_dict(__a )
| 235 | 0 |
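# A hedged sketch (added) of the two reshape rules above with concrete shapes;
# numpy stands in for jax.numpy to keep the check dependency-light.
import numpy as np

# Conv weights: PyTorch stores (out_ch, in_ch, kH, kW); Flax expects (kH, kW, in_ch, out_ch).
pt_conv = np.zeros((64, 3, 7, 7))
assert pt_conv.transpose(2, 3, 1, 0).shape == (7, 7, 3, 64)

# Linear weights: PyTorch stores (out_features, in_features); Flax kernels are the transpose.
pt_linear = np.zeros((1024, 768))
assert pt_linear.T.shape == (768, 1024)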
'''simple docstring'''
import logging
from transformers import PretrainedConfig
logger = logging.getLogger(__name__)
BERTABS_FINETUNED_CONFIG_ARCHIVE_MAP = {
    'bertabs-finetuned-cnndm': 'https://huggingface.co/remi/bertabs-finetuned-cnndm-extractive-abstractive-summarization/resolve/main/config.json',
}
class BertAbsConfig ( PretrainedConfig ):
    model_type = """bertabs"""
    def __init__( self , vocab_size=30522 , max_pos=512 , enc_layers=6 , enc_hidden_size=512 , enc_heads=8 , enc_ff_size=512 , enc_dropout=0.2 , dec_layers=6 , dec_hidden_size=768 , dec_heads=8 , dec_ff_size=2048 , dec_dropout=0.2 , **kwargs , ):
        super().__init__(**kwargs )
        self.vocab_size = vocab_size
        self.max_pos = max_pos
        self.enc_layers = enc_layers
        self.enc_hidden_size = enc_hidden_size
        self.enc_heads = enc_heads
        self.enc_ff_size = enc_ff_size
        self.enc_dropout = enc_dropout
        self.dec_layers = dec_layers
        self.dec_hidden_size = dec_hidden_size
        self.dec_heads = dec_heads
        self.dec_ff_size = dec_ff_size
        self.dec_dropout = dec_dropout
| 183 |
'''simple docstring'''
import os
from typing import List, Optional, Union
from ...tokenization_utils import PreTrainedTokenizer
from ...tokenization_utils_base import AddedToken
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'vocab.txt'}
PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {
        'facebook/esm2_t6_8M_UR50D': 'https://huggingface.co/facebook/esm2_t6_8M_UR50D/resolve/main/vocab.txt',
        'facebook/esm2_t12_35M_UR50D': 'https://huggingface.co/facebook/esm2_t12_35M_UR50D/resolve/main/vocab.txt',
    },
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'facebook/esm2_t6_8M_UR50D': 1_0_2_4,
    'facebook/esm2_t12_35M_UR50D': 1_0_2_4,
}
def load_vocab_file ( vocab_file )-> list:
    with open(vocab_file , """r""" ) as f:
        lines = f.read().splitlines()
    return [l.strip() for l in lines]
class EsmTokenizer ( PreTrainedTokenizer ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["""input_ids""", """attention_mask"""]
def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE="<unk>" , _SCREAMING_SNAKE_CASE="<cls>" , _SCREAMING_SNAKE_CASE="<pad>" , _SCREAMING_SNAKE_CASE="<mask>" , _SCREAMING_SNAKE_CASE="<eos>" , **_SCREAMING_SNAKE_CASE , ) -> Any:
"""simple docstring"""
super().__init__(**_SCREAMING_SNAKE_CASE )
UpperCamelCase = load_vocab_file(_SCREAMING_SNAKE_CASE )
UpperCamelCase = dict(enumerate(self.all_tokens ) )
UpperCamelCase = {tok: ind for ind, tok in enumerate(self.all_tokens )}
UpperCamelCase = unk_token
UpperCamelCase = cls_token
UpperCamelCase = pad_token
UpperCamelCase = mask_token
UpperCamelCase = eos_token
UpperCamelCase = self.all_tokens
self._create_trie(self.unique_no_split_tokens )
    def _convert_id_to_token ( self , index ) -> str:
        return self._id_to_token.get(index , self.unk_token )
    def _convert_token_to_id ( self , token ) -> int:
        return self._token_to_id.get(token , self._token_to_id.get(self.unk_token ) )
    def _tokenize ( self , text , **kwargs ) -> List[str]:
        return text.split()
    def get_vocab_size ( self , with_added_tokens=False ) -> int:
        return len(self._id_to_token )
    def get_vocab ( self ) -> dict:
        return {token: i for i, token in enumerate(self.all_tokens )}
    def token_to_id ( self , token ) -> int:
        return self._token_to_id.get(token , self._token_to_id.get(self.unk_token ) )
    def id_to_token ( self , index ) -> str:
        return self._id_to_token.get(index , self.unk_token )
    def build_inputs_with_special_tokens ( self , token_ids_a , token_ids_b = None ) -> List[int]:
        cls = [self.cls_token_id]
        sep = [self.eos_token_id]  # No sep token in ESM vocabulary
        if token_ids_b is None:
            if self.eos_token_id is None:
                return cls + token_ids_a
            else:
                return cls + token_ids_a + sep
        elif self.eos_token_id is None:
            raise ValueError("""Cannot tokenize multiple sequences when EOS token is not set!""" )
        return cls + token_ids_a + sep + token_ids_b + sep  # Multiple inputs always have an EOS token
    def get_special_tokens_mask ( self , token_ids_a , token_ids_b = None , already_has_special_tokens = False ) -> List[int]:
        if already_has_special_tokens:
            if token_ids_b is not None:
                raise ValueError(
                    """You should not supply a second sequence if the provided sequence of """
                    """ids is already formatted with special tokens for the model.""" )
            return [1 if token in self.all_special_ids else 0 for token in token_ids_a]
        mask = [1] + ([0] * len(token_ids_a )) + [1]
        if token_ids_b is not None:
            mask += [0] * len(token_ids_b ) + [1]
        return mask
    def save_vocabulary ( self , save_directory , filename_prefix = None ):
        vocab_file = os.path.join(save_directory , (filename_prefix + """-""" if filename_prefix else """""") + """vocab.txt""" )
        with open(vocab_file , """w""" ) as f:
            f.write("""\n""".join(self.all_tokens ) )
        return (vocab_file,)
    @property
    def vocab_size ( self ) -> int:
        return self.get_vocab_size(with_added_tokens=False )
    def _add_tokens ( self , new_tokens , special_tokens = False ) -> int:
        return super()._add_tokens(new_tokens , special_tokens=special_tokens )
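# Hedged usage sketch (checkpoint names come from the vocab map above): the tokenizer
# splits on whitespace per `_tokenize`, wraps a sequence as [cls] + tokens + [eos], and
# `get_special_tokens_mask` marks exactly those two added positions with 1.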
| 183 | 1 |
from collections import defaultdict
def check_anagrams ( first_str : str , second_str : str ) -> bool:
    """Check whether two strings are anagrams of each other."""
    first_str = first_str.lower().strip()
    second_str = second_str.lower().strip()
    # Remove whitespace
    first_str = first_str.replace(''' ''' , '''''' )
    second_str = second_str.replace(''' ''' , '''''' )
    # Strings of different lengths are not anagrams
    if len(first_str ) != len(second_str ):
        return False
    # Default values for count should be 0
    count : defaultdict[str, int] = defaultdict(int )
    # For each position, increment the count for the character from the first
    # string and decrement it for the character from the second string
    for i in range(len(first_str ) ):
        count[first_str[i]] += 1
        count[second_str[i]] -= 1
    return all(_count == 0 for _count in count.values() )
if __name__ == "__main__":
    from doctest import testmod
    testmod()
    input_a = input('''Enter the first string ''').strip()
    input_b = input('''Enter the second string ''').strip()
    status = check_anagrams(input_a, input_b)
    print(f'''{input_a} and {input_b} are {"" if status else "not "}anagrams.''')
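    # Quick sanity checks of the helper above:
    assert check_anagrams("Listen", "Silent")
    assert not check_anagrams("rat", "car")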
| 235 |
def equation ( x : float ) -> float:
    """The function whose root we bracket: f(x) = 10 - x^2."""
    return 10 - x * x
def bisection ( a : float , b : float ) -> float:
    """Find a root of `equation` in [a, b] by repeated interval halving."""
    # Bolzano's theorem: a sign change is required for a root to lie between a and b
    if equation(a ) * equation(b ) >= 0:
        raise ValueError('''Wrong space!''' )
    c = a
    while (b - a) >= 0.01:
        # Find middle point
        c = (a + b) / 2
        # Check if middle point is root
        if equation(c ) == 0.0:
            break
        # Decide the side to repeat the steps
        if equation(c ) * equation(a ) < 0:
            b = c
        else:
            a = c
    return c
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    print(bisection(-2, 5))
    print(bisection(0, 6))
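    # Sanity check (illustrative): the positive root of 10 - x*x is sqrt(10) ≈ 3.1623,
    # and the loop above stops once the bracketing interval is narrower than 0.01.
    import math
    assert abs(bisection(0, 6) - math.sqrt(10)) < 0.01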
| 235 | 1 |
"""simple docstring"""
def solution ( n : int = 1_0_0_0 ) -> int:
    '''Sum all natural numbers below `n` that are multiples of 3 or 5.'''
    return sum(e for e in range(3 , n ) if e % 3 == 0 or e % 5 == 0 )
if __name__ == "__main__":
print(F"{solution() = }")
| 368 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_chinese_clip import ChineseCLIPImageProcessor
logger = logging.get_logger(__name__)
class ChineseCLIPFeatureExtractor ( ChineseCLIPImageProcessor ):
    """simple docstring"""
def __init__( self : Optional[Any] , *UpperCamelCase : str , **UpperCamelCase : List[str] ):
'''simple docstring'''
        warnings.warn(
            """The class ChineseCLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers."""
            """ Please use ChineseCLIPImageProcessor instead.""" , FutureWarning , )
super().__init__(*UpperCamelCase , **UpperCamelCase )
| 320 | 0 |
"""simple docstring"""
import collections
import os
from typing import List, Optional, Tuple
from transformers.utils import is_jieba_available, requires_backends
if is_jieba_available():
import jieba
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'vocab.txt'}
PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {
        'openbmb/cpm-ant-10b': 'https://huggingface.co/openbmb/cpm-ant-10b/blob/main/vocab.txt',
    },
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'openbmb/cpm-ant-10b': 1_024,
}
def load_vocab ( vocab_file ) -> collections.OrderedDict:
    vocab = collections.OrderedDict()
    with open(vocab_file , """r""" , encoding="""utf-8""" ) as reader:
        tokens = reader.readlines()
    for index, token in enumerate(tokens ):
        token = token.rstrip("""\n""" )
        vocab[token] = index
    return vocab
class WordpieceTokenizer ( object ):
    def __init__( self , vocab , unk_token="<unk>" , max_input_chars_per_word=200 ):
        self.vocab = vocab
        self.unk_token = unk_token
        self.max_input_chars_per_word = max_input_chars_per_word
    def tokenize ( self , token ):
        chars = list(token )
        if len(chars ) > self.max_input_chars_per_word:
            return [self.unk_token]
        start = 0
        sub_tokens = []
        while start < len(chars ):
            # greedily look for the longest substring starting at `start` that is in the vocab
            end = len(chars )
            cur_substr = None
            while start < end:
                substr = """""".join(chars[start:end] )
                if substr in self.vocab:
                    cur_substr = substr
                    break
                end -= 1
            if cur_substr is None:
                sub_tokens.append(self.unk_token )
                start += 1
            else:
                sub_tokens.append(cur_substr )
                start = end
        return sub_tokens
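# Toy check of the greedy longest-match loop above (vocabulary is made up): "abc" is the
# longest prefix present in the vocab, then "d" follows.
assert WordpieceTokenizer(vocab={"ab", "abc", "d"} ).tokenize("abcd" ) == ["abc", "d"]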
class CpmAntTokenizer ( PreTrainedTokenizer ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    add_prefix_space = False
def __init__( self , a__ , a__="<d>" , a__="</d>" , a__="<s>" , a__="</s>" , a__="<pad>" , a__="<unk>" , a__="</n>" , a__="</_>" , a__="left" , **a__ , ):
requires_backends(self , ["""jieba"""] )
super().__init__(
bod_token=a__ , eod_token=a__ , bos_token=a__ , eos_token=a__ , pad_token=a__ , unk_token=a__ , line_token=a__ , space_token=a__ , padding_side=a__ , **a__ , )
_lowerCAmelCase : Union[str, Any] = bod_token
_lowerCAmelCase : List[Any] = eod_token
_lowerCAmelCase : List[str] = load_vocab(a__ )
_lowerCAmelCase : Tuple = self.encoder[space_token]
_lowerCAmelCase : Optional[Any] = self.encoder[line_token]
del self.encoder[space_token]
del self.encoder[line_token]
_lowerCAmelCase : Any = collections.OrderedDict(sorted(self.encoder.items() , key=lambda a__ : x[1] ) )
_lowerCAmelCase : int = {v: k for k, v in self.encoder.items()}
_lowerCAmelCase : Optional[int] = WordpieceTokenizer(vocab=self.encoder , unk_token=self.unk_token )
    @property
    def bod_token_id ( self ):
        return self.encoder[self.bod_token]
    @property
    def eod_token_id ( self ):
        return self.encoder[self.eod_token]
    @property
    def newline_id ( self ):
        return self.encoder["\n"]
    @property
    def vocab_size ( self ):
        return len(self.encoder )
    def get_vocab ( self ):
        return dict(self.encoder , **self.added_tokens_encoder )
    def _tokenize ( self , text ):
        output_tokens = []
        for x in jieba.cut(text , cut_all=False ):
            output_tokens.extend(self.wordpiece_tokenizer.tokenize(x ) )
        return output_tokens
    def _decode ( self , token_ids , **kwargs ):
        token_ids = [i for i in token_ids if i >= 0]
        token_ids = [
            x for x in token_ids if x != self.pad_token_id and x != self.eos_token_id and x != self.bos_token_id
        ]
        return super()._decode(token_ids , **kwargs )
    def check ( self , token ):
        return token in self.encoder
    def convert_tokens_to_string ( self , tokens ):
        return "".join(tokens )
    def _convert_token_to_id ( self , token ):
        return self.encoder.get(token , self.encoder.get(self.unk_token ) )
    def _convert_id_to_token ( self , index ):
        return self.decoder.get(index , self.unk_token )
    def save_vocabulary ( self , save_directory , filename_prefix = None ):
        if os.path.isdir(save_directory ):
            vocab_file = os.path.join(
                save_directory , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
        else:
            vocab_file = (filename_prefix + """-""" if filename_prefix else """""") + save_directory
        index = 0
        if " " in self.encoder:
            self.encoder["""</_>"""] = self.encoder[""" """]
            del self.encoder[" "]
        if "\n" in self.encoder:
            self.encoder["""</n>"""] = self.encoder["""\n"""]
            del self.encoder["\n"]
        self.encoder = collections.OrderedDict(sorted(self.encoder.items() , key=lambda x : x[1] ) )
        with open(vocab_file , """w""" , encoding="""utf-8""" ) as writer:
            for token, token_index in self.encoder.items():
                if index != token_index:
                    logger.warning(
                        F"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."
                        """ Please check that the vocabulary is not corrupted!""" )
                    index = token_index
                writer.write(token + """\n""" )
                index += 1
        return (vocab_file,)
    def build_inputs_with_special_tokens ( self , token_ids_a , token_ids_b = None ):
        if token_ids_b is None:
            return [self.bos_token_id] + token_ids_a
        return [self.bos_token_id] + token_ids_a + [self.bos_token_id] + token_ids_b
    def get_special_tokens_mask ( self , token_ids_a , token_ids_b = None , already_has_special_tokens = False ):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_a , token_ids_1=token_ids_b , already_has_special_tokens=already_has_special_tokens )
        if token_ids_b is not None:
            return [1] + ([0] * len(token_ids_a )) + [1] + ([0] * len(token_ids_b ))
        return [1] + ([0] * len(token_ids_a ))
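# Hedged usage sketch (checkpoint name comes from the vocab map above; requires `jieba`):
# tokenizer = CpmAntTokenizer.from_pretrained("openbmb/cpm-ant-10b")
# tokenizer.tokenize("今天天气真好!")   # jieba segments first, then wordpiece matches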
 | 44 |
"""simple docstring"""
import pickle
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, XGLMTokenizer, XGLMTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir('fixtures/test_sentencepiece.model')
@require_sentencepiece
@require_tokenizers
class XGLMTokenizationTest ( TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = XGLMTokenizer
    rust_tokenizer_class = XGLMTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp ( self ):
        super().setUp()
        # We have a SentencePiece fixture for testing
        tokenizer = XGLMTokenizer(SAMPLE_VOCAB , keep_accents=True )
        tokenizer.save_pretrained(self.tmpdirname )
    def test_convert_token_and_id ( self ):
        token = """<pad>"""
        token_id = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token ) , token_id )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id ) , token )
    def test_get_vocab ( self ):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys() )
        self.assertEqual(vocab_keys[0] , """<s>""" )
        self.assertEqual(vocab_keys[1] , """<pad>""" )
        self.assertEqual(len(vocab_keys ) , 1008 )
    def test_vocab_size ( self ):
        self.assertEqual(self.get_tokenizer().vocab_size , 1008 )
    def test_full_tokenizer ( self ):
        tokenizer = XGLMTokenizer(SAMPLE_VOCAB , keep_accents=True )
        tokens = tokenizer.tokenize("""This is a test""" )
        self.assertListEqual(tokens , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
        tokens = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
        self.assertListEqual(
            tokens , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens )
        self.assertListEqual(
            ids , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
        back_tokens = tokenizer.convert_ids_to_tokens(ids )
        self.assertListEqual(
            back_tokens , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""<unk>""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""<unk>""",
""".""",
] , )
    @cached_property
    def big_tokenizer ( self ):
        return XGLMTokenizer.from_pretrained("""facebook/xglm-564M""" )
    def test_picklable_without_disk ( self ):
        with tempfile.NamedTemporaryFile() as f:
            shutil.copyfile(SAMPLE_VOCAB , f.name )
            tokenizer = XGLMTokenizer(f.name , keep_accents=True )
            pickled_tokenizer = pickle.dumps(tokenizer )
        pickle.loads(pickled_tokenizer )
    def test_rust_and_python_full_tokenizers ( self ):
        if not self.test_rust_tokenizer:
            return
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()
        sequence = """I was born in 92000, and this is falsé."""
        tokens = tokenizer.tokenize(sequence )
        rust_tokens = rust_tokenizer.tokenize(sequence )
        self.assertListEqual(tokens , rust_tokens )
        ids = tokenizer.encode(sequence , add_special_tokens=False )
        rust_ids = rust_tokenizer.encode(sequence , add_special_tokens=False )
        self.assertListEqual(ids , rust_ids )
        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence )
        rust_ids = rust_tokenizer.encode(sequence )
        self.assertListEqual(ids , rust_ids )
    @slow
    def test_tokenization_base_easy_symbols ( self ):
        symbols = """Hello World!"""
        original_tokenizer_encodings = [2, 31227, 4447, 35]
        self.assertListEqual(original_tokenizer_encodings , self.big_tokenizer.encode(symbols ) )
    @slow
    def test_tokenization_base_hard_symbols ( self ):
        symbols = (
            """This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"""
            """ add words that should not exsist and be tokenized to unk, such as saoneuhaoesuth"""
        )
        # fmt: off
        original_tokenizer_encodings = [2, 1018, 67, 11, 1988, 2617, 5631, 278, 11, 3407, 48, 71630, 28085, 4, 3234, 157, 13, 6, 5, 6, 4, 3526, 768, 15, 659, 57, 298, 3983, 864, 129, 21, 6, 5, 13675, 377, 652, 7580, 10341, 155, 2817, 422, 1666, 7, 1674, 53, 113, 202277, 17892, 33, 60, 87, 4, 3234, 157, 61, 2667, 52376, 19, 88, 23, 735]
        # fmt: on
        self.assertListEqual(original_tokenizer_encodings , self.big_tokenizer.encode(symbols ) )
    @slow
    def test_tokenizer_integration ( self ):
        # fmt: off
        expected_encoding = {
"""input_ids""": [[2, 108825, 1163, 15, 88010, 473, 15898, 157, 13672, 1857, 312, 8, 238021, 1163, 53, 13672, 1857, 312, 8, 53283, 182396, 8, 18566, 16, 36733, 4101, 8, 230, 244017, 122553, 7, 15, 132597, 4, 293, 12511, 7610, 4, 3414, 132597, 9, 4, 32361, 362, 4, 734, 28512, 32569, 18, 4, 32361, 26096, 14982, 73, 18715, 21433, 235261, 15, 492, 12427, 16, 53, 18715, 21433, 65454, 15, 23659, 563, 16, 278, 597, 2843, 595, 7931, 182396, 64186, 22, 886, 595, 132981, 53, 25540, 3449, 43982, 39901, 5951, 878, 330, 4, 27694, 80269, 312, 53, 6517, 11780, 611, 20408, 5], [2, 6, 132597, 67, 42897, 33, 592, 8, 163729, 25540, 361, 136997, 109514, 173230, 7, 501, 60, 102913, 196, 5631, 235, 63243, 473, 6, 231757, 74, 5277, 7905, 53, 3095, 37317, 22, 454, 183874, 5], [2, 268, 31298, 46530, 6, 132935, 43831, 7, 597, 32, 24, 3688, 9865, 5]],
"""attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]
} # noqa: E501
        # fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding , model_name="""facebook/xglm-564M""" , padding=False , )
| 44 | 1 |
"""simple docstring"""
import argparse
from transformers import TaConfig, TaForConditionalGeneration, load_tf_weights_in_ta
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch ( tf_checkpoint_path , config_file , pytorch_dump_path ):
    '''simple docstring'''
    # Initialise PyTorch model from the JSON config
    config = TaConfig.from_json_file(config_file )
    print(f"""Building PyTorch model from configuration: {config}""" )
    model = TaForConditionalGeneration(config )
    # Load weights from tf checkpoint
    load_tf_weights_in_ta(model , config , tf_checkpoint_path )
    # Save pytorch-model
    print(f"""Save PyTorch model to {pytorch_dump_path}""" )
    model.save_pretrained(pytorch_dump_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained T5 model. \nThis specifies the model architecture.'''
),
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
| 368 |
"""simple docstring"""
import argparse
import glob
import importlib.util
import os
import re
import black
from doc_builder.style_doc import style_docstrings_in_code
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
DIFFUSERS_PATH = '''src/diffusers'''
REPO_PATH = '''.'''
# This is to make sure the diffusers module imported is the one in the repo.
spec = importlib.util.spec_from_file_location(
    '''diffusers''',
    os.path.join(DIFFUSERS_PATH, '''__init__.py'''),
    submodule_search_locations=[DIFFUSERS_PATH],
)
diffusers_module = spec.loader.load_module()
def _should_continue ( line : str , indent : str ):
    '''simple docstring'''
    return line.startswith(indent ) or len(line ) <= 1 or re.search(r"^\s*\)(\s*->.*:|:)\s*$" , line ) is not None
def find_code_in_diffusers ( object_name : str ):
    '''simple docstring'''
    parts = object_name.split("." )
    i = 0
    # First let's find the module where our object lives.
    module = parts[i]
    while i < len(parts ) and not os.path.isfile(os.path.join(DIFFUSERS_PATH , f"""{module}.py""" ) ):
        i += 1
        if i < len(parts ):
            module = os.path.join(module , parts[i] )
    if i >= len(parts ):
        raise ValueError(f"""`object_name` should begin with the name of a module of diffusers but got {object_name}.""" )
    with open(os.path.join(DIFFUSERS_PATH , f"""{module}.py""" ) , "r" , encoding="utf-8" , newline="\n" ) as f:
        lines = f.readlines()
    # Now let's find the class / func in the code!
    indent = ""
    line_index = 0
    for name in parts[i + 1 :]:
        while (
            line_index < len(lines ) and re.search(rf"""^{indent}(class|def)\s+{name}(\(|\:)""" , lines[line_index] ) is None
        ):
            line_index += 1
        indent += "    "
        line_index += 1
    if line_index >= len(lines ):
        raise ValueError(f""" {object_name} does not match any function or class in {module}.""" )
    # We found the beginning of the class / func, now let's find the end (when the indent diminishes).
    start_index = line_index
    while line_index < len(lines ) and _should_continue(lines[line_index] , indent ):
        line_index += 1
    # Clean up empty lines at the end (if any).
    while len(lines[line_index - 1] ) <= 1:
        line_index -= 1
    code_lines = lines[start_index:line_index]
    return "".join(code_lines )
_re_copy_warning = re.compile(r'''^(\s*)#\s*Copied from\s+diffusers\.(\S+\.\S+)\s*($|\S.*$)''')
_re_replace_pattern = re.compile(r'''^\s*(\S+)->(\S+)(\s+.*|$)''')
_re_fill_pattern = re.compile(r'''<FILL\s+[^>]*>''')
def get_indent ( code : str ):
    '''simple docstring'''
    lines = code.split("\n" )
    idx = 0
    while idx < len(lines ) and len(lines[idx] ) == 0:
        idx += 1
    if idx < len(lines ):
        return re.search(r"^(\s*)\S" , lines[idx] ).groups()[0]
    return ""
def blackify ( code : str ):
    '''simple docstring'''
    has_indent = len(get_indent(code ) ) > 0
    if has_indent:
        code = f"""class Bla:\n{code}"""
    mode = black.Mode(target_versions={black.TargetVersion.PY37} , line_length=1_1_9 , preview=True )
    result = black.format_str(code , mode=mode )
    result , _ = style_docstrings_in_code(result )
    return result[len("class Bla:\n" ) :] if has_indent else result
def is_copy_consistent ( filename , overwrite=False ):
    '''simple docstring'''
    with open(filename , "r" , encoding="utf-8" , newline="\n" ) as f:
        lines = f.readlines()
    diffs = []
    line_index = 0
    # Not a for loop cause `lines` is going to change (if `overwrite=True`).
    while line_index < len(lines ):
        search = _re_copy_warning.search(lines[line_index] )
        if search is None:
            line_index += 1
            continue
        # There is some copied code here, let's retrieve the original.
        indent , object_name , replace_pattern = search.groups()
        theoretical_code = find_code_in_diffusers(object_name )
        theoretical_indent = get_indent(theoretical_code )
        start_index = line_index + 1 if indent == theoretical_indent else line_index + 2
        indent = theoretical_indent
        line_index = start_index
        # Loop to check the observed code, stop when indentation diminishes or if we see a End copy comment.
        should_continue = True
        while line_index < len(lines ) and should_continue:
            line_index += 1
            if line_index >= len(lines ):
                break
            line = lines[line_index]
            should_continue = _should_continue(line , indent ) and re.search(f"""^{indent}# End copy""" , line ) is None
        # Clean up empty lines at the end (if any).
        while len(lines[line_index - 1] ) <= 1:
            line_index -= 1
        observed_code_lines = lines[start_index:line_index]
        observed_code = "".join(observed_code_lines )
        # Remove any nested `Copied from` comments to avoid circular copies
        theoretical_code_lines = [line for line in theoretical_code.split("\n" ) if _re_copy_warning.search(line ) is None]
        theoretical_code = "\n".join(theoretical_code_lines )
        # Before comparing, use the `replace_pattern` on the original code.
        if len(replace_pattern ) > 0:
            patterns = replace_pattern.replace("with" , "" ).split("," )
            patterns = [_re_replace_pattern.search(p ) for p in patterns]
            for pattern in patterns:
                if pattern is None:
                    continue
                obja , objb , option = pattern.groups()
                theoretical_code = re.sub(obja , objb , theoretical_code )
                if option.strip() == "all-casing":
                    theoretical_code = re.sub(obja.lower() , objb.lower() , theoretical_code )
                    theoretical_code = re.sub(obja.upper() , objb.upper() , theoretical_code )
            # Blackify after replacement. To be able to do that, we need the header (class or function definition)
            # from the previous line
            theoretical_code = blackify(lines[start_index - 1] + theoretical_code )
            theoretical_code = theoretical_code[len(lines[start_index - 1] ) :]
        # Test for a diff and act accordingly.
        if observed_code != theoretical_code:
            diffs.append([object_name, start_index] )
            if overwrite:
                lines = lines[:start_index] + [theoretical_code] + lines[line_index:]
                line_index = start_index + 1
    if overwrite and len(diffs ) > 0:
        # Warn the user a file has been modified.
        print(f"""Detected changes, rewriting {filename}.""" )
        with open(filename , "w" , encoding="utf-8" , newline="\n" ) as f:
            f.writelines(lines )
    return diffs
def check_copies ( overwrite : bool = False ):
    '''simple docstring'''
    all_files = glob.glob(os.path.join(DIFFUSERS_PATH , "**/*.py" ) , recursive=True )
    diffs = []
    for filename in all_files:
        new_diffs = is_copy_consistent(filename , overwrite )
        diffs += [f"""- {filename}: copy does not match {d[0]} at line {d[1]}""" for d in new_diffs]
    if not overwrite and len(diffs ) > 0:
        diff = "\n".join(diffs )
        raise Exception(
            "Found the following copy inconsistencies:\n"
            + diff
            + "\nRun `make fix-copies` or `python utils/check_copies.py --fix_and_overwrite` to fix them." )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('''--fix_and_overwrite''', action='''store_true''', help='''Whether to fix inconsistencies.''')
    args = parser.parse_args()
check_copies(args.fix_and_overwrite)
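    # Typical invocations, mirroring the argparse setup above (run from the repo root):
    #   python utils/check_copies.py                      # report copy inconsistencies
    #   python utils/check_copies.py --fix_and_overwrite  # rewrite stale copies in place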
| 133 | 0 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'kssteven/ibert-roberta-base': 'https://huggingface.co/kssteven/ibert-roberta-base/resolve/main/config.json',
    'kssteven/ibert-roberta-large': 'https://huggingface.co/kssteven/ibert-roberta-large/resolve/main/config.json',
    'kssteven/ibert-roberta-large-mnli': (
        'https://huggingface.co/kssteven/ibert-roberta-large-mnli/resolve/main/config.json'
    ),
}
class IBertConfig ( PretrainedConfig ):
    model_type = "ibert"
    def __init__( self , vocab_size=30522 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=2 , initializer_range=0.0_2 , layer_norm_eps=1e-12 , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , position_embedding_type="absolute" , quant_mode=False , force_dequant="none" , **kwargs , ):
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.quant_mode = quant_mode
        self.force_dequant = force_dequant
class IBertOnnxConfig ( OnnxConfig ):
    @property
    def inputs ( self ):
        if self.task == "multiple-choice":
            dynamic_axis = {0: """batch""", 1: """choice""", 2: """sequence"""}
        else:
            dynamic_axis = {0: """batch""", 1: """sequence"""}
        return OrderedDict(
            [
                ("""input_ids""", dynamic_axis),
                ("""attention_mask""", dynamic_axis),
            ] )
| 44 |
'''simple docstring'''
import numpy as np
import torch
from torch.nn import CrossEntropyLoss
from transformers import AutoModelForCausalLM, AutoTokenizer
import datasets
from datasets import logging
_lowerCamelCase : Union[str, Any] = "\\n\n"
_lowerCamelCase : List[str] = "\nPerplexity (PPL) is one of the most common metrics for evaluating language models.\nIt is defined as the exponentiated average negative log-likelihood of a sequence.\n\nFor more information, see https://huggingface.co/docs/transformers/perplexity\n"
_lowerCamelCase : Dict = "\nArgs:\n model_id (str): model used for calculating Perplexity\n NOTE: Perplexity can only be calculated for causal language models.\n This includes models such as gpt2, causal variations of bert,\n causal versions of t5, and more (the full list can be found\n in the AutoModelForCausalLM documentation here:\n https://huggingface.co/docs/transformers/master/en/model_doc/auto#transformers.AutoModelForCausalLM )\n\n input_texts (list of str): input text, each separate text snippet\n is one list entry.\n batch_size (int): the batch size to run texts through the model. Defaults to 16.\n add_start_token (bool): whether to add the start token to the texts,\n so the perplexity can include the probability of the first word. Defaults to True.\n device (str): device to run on, defaults to 'cuda' when available\nReturns:\n perplexity: dictionary containing the perplexity scores for the texts\n in the input list, as well as the mean perplexity. If one of the input texts is\n longer than the max input length of the model, then it is truncated to the\n max length for the perplexity computation.\nExamples:\n Example 1:\n >>> perplexity = datasets.load_metric(\"perplexity\")\n >>> input_texts = [\"lorem ipsum\", \"Happy Birthday!\", \"Bienvenue\"]\n >>> results = perplexity.compute(model_id='gpt2',\n ... add_start_token=False,\n ... input_texts=input_texts) # doctest:+ELLIPSIS\n >>> print(list(results.keys()))\n ['perplexities', 'mean_perplexity']\n >>> print(round(results[\"mean_perplexity\"], 2))\n 78.22\n >>> print(round(results[\"perplexities\"][0], 2))\n 11.11\n\n Example 2:\n >>> perplexity = datasets.load_metric(\"perplexity\")\n >>> input_texts = datasets.load_dataset(\"wikitext\",\n ... \"wikitext-2-raw-v1\",\n ... split=\"test\")[\"text\"][:50] # doctest:+ELLIPSIS\n [...]\n >>> input_texts = [s for s in input_texts if s!='']\n >>> results = perplexity.compute(model_id='gpt2',\n ... input_texts=input_texts) # doctest:+ELLIPSIS\n >>> print(list(results.keys()))\n ['perplexities', 'mean_perplexity']\n >>> print(round(results[\"mean_perplexity\"], 2))\n 60.35\n >>> print(round(results[\"perplexities\"][0], 2))\n 81.12\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Perplexity ( datasets.Metric ):
"""simple docstring"""
    def _info ( self ):
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'input_texts': datasets.Value('string' ),
} ) , reference_urls=['https://huggingface.co/docs/transformers/perplexity'] , )
    def _compute ( self , input_texts , model_id , batch_size : int = 16 , add_start_token : bool = True , device=None ):
        """simple docstring"""
        if device is not None:
            assert device in ["gpu", "cpu", "cuda"], "device should be either gpu or cpu."
            if device == "gpu":
                device = 'cuda'
        else:
            device = 'cuda' if torch.cuda.is_available() else 'cpu'
        model = AutoModelForCausalLM.from_pretrained(model_id )
        model = model.to(device )
        tokenizer = AutoTokenizer.from_pretrained(model_id )
        # if batch_size > 1 (which generally leads to padding being required), and
        # if there is not an already assigned pad_token, assign an existing
        # special token to also be the padding token
        if tokenizer.pad_token is None and batch_size > 1:
            existing_special_tokens = list(tokenizer.special_tokens_map_extended.values() )
            # check that the model already has at least one special token defined
            assert (
                len(existing_special_tokens ) > 0
            ), "If batch_size > 1, model must have at least one special token to use for padding. Please use a different model or set batch_size=1."
            # assign one of the special tokens to also be the pad token
            tokenizer.add_special_tokens({'pad_token': existing_special_tokens[0]} )
        if add_start_token:
            # leave room for <BOS> token to be added:
            assert (
                tokenizer.bos_token is not None
            ), "Input model must already have a BOS token if using add_start_token=True. Please use a different model, or set add_start_token=False"
            max_tokenized_len = model.config.max_length - 1
        else:
            max_tokenized_len = model.config.max_length
        encodings = tokenizer(
            input_texts , add_special_tokens=False , padding=True , truncation=True , max_length=max_tokenized_len , return_tensors='pt' , return_attention_mask=True , ).to(device )
        encoded_texts = encodings['input_ids']
        attn_masks = encodings['attention_mask']
        # check that each input is long enough:
        if add_start_token:
            assert torch.all(torch.ge(attn_masks.sum(1 ) , 1 ) ), "Each input text must be at least one token long."
        else:
            assert torch.all(
                torch.ge(attn_masks.sum(1 ) , 2 ) ), "When add_start_token=False, each input text must be at least two tokens long. Run with add_start_token=True if inputting strings of only one token, and remove all empty input strings."
        ppls = []
        loss_fct = CrossEntropyLoss(reduction='none' )
        for start_index in logging.tqdm(range(0 , len(encoded_texts ) , batch_size ) ):
            end_index = min(start_index + batch_size , len(encoded_texts ) )
            encoded_batch = encoded_texts[start_index:end_index]
            attn_mask = attn_masks[start_index:end_index]
            if add_start_token:
                bos_tokens_tensor = torch.tensor([[tokenizer.bos_token_id]] * encoded_batch.size(dim=0 ) ).to(device )
                encoded_batch = torch.cat([bos_tokens_tensor, encoded_batch] , dim=1 )
                attn_mask = torch.cat(
                    [torch.ones(bos_tokens_tensor.size() , dtype=torch.int64 ).to(device ), attn_mask] , dim=1 )
            labels = encoded_batch
            with torch.no_grad():
                out_logits = model(encoded_batch , attention_mask=attn_mask ).logits
            shift_logits = out_logits[..., :-1, :].contiguous()
            shift_labels = labels[..., 1:].contiguous()
            shift_attention_mask_batch = attn_mask[..., 1:].contiguous()
            perplexity_batch = torch.exp(
                (loss_fct(shift_logits.transpose(1 , 2 ) , shift_labels ) * shift_attention_mask_batch).sum(1 )
                / shift_attention_mask_batch.sum(1 ) )
            ppls += perplexity_batch.tolist()
        return {"perplexities": ppls, "mean_perplexity": np.mean(ppls )}
| 28 | 0 |
def binary_or ( a : int , b : int ) -> str:
    """Return the bitwise OR of two non-negative integers as a binary string."""
    if a < 0 or b < 0:
        raise ValueError("""the value of both inputs must be positive""" )
    a_binary = str(bin(a ) )[2:]  # remove the leading "0b"
    b_binary = str(bin(b ) )[2:]
    max_len = max(len(a_binary ) , len(b_binary ) )
    return "0b" + "".join(
        str(int("""1""" in (char_a, char_b) ) )
        for char_a, char_b in zip(a_binary.zfill(max_len ) , b_binary.zfill(max_len ) ) )
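# Worked example: 0b101 (5) OR 0b011 (3) gives 0b111.
assert binary_or(5, 3) == "0b111"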
if __name__ == "__main__":
import doctest
    doctest.testmod()
 | 352 |
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class ChineseCLIPProcessor ( ProcessorMixin ):
    attributes = ['image_processor', 'tokenizer']
    image_processor_class = 'ChineseCLIPImageProcessor'
    tokenizer_class = ('BertTokenizer', 'BertTokenizerFast')
    def __init__( self , image_processor=None , tokenizer=None , **kwargs ):
        '''simple docstring'''
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                """The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"""
                """ instead.""" , FutureWarning , )
            feature_extractor = kwargs.pop("""feature_extractor""" )
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("""You need to specify an `image_processor`.""" )
        if tokenizer is None:
            raise ValueError("""You need to specify a `tokenizer`.""" )
        super().__init__(image_processor , tokenizer )
        self.current_processor = self.image_processor
    def __call__( self , text=None , images=None , return_tensors=None , **kwargs ):
        '''simple docstring'''
        if text is None and images is None:
            raise ValueError("""You have to specify either text or images. Both cannot be none.""" )
        if text is not None:
            encoding = self.tokenizer(text , return_tensors=return_tensors , **kwargs )
        if images is not None:
            image_features = self.image_processor(images , return_tensors=return_tensors , **kwargs )
        if text is not None and images is not None:
            encoding["""pixel_values"""] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features ) , tensor_type=return_tensors )
    def batch_decode ( self , *args , **kwargs ):
        '''simple docstring'''
        return self.tokenizer.batch_decode(*args , **kwargs )
    def decode ( self , *args , **kwargs ):
        '''simple docstring'''
        return self.tokenizer.decode(*args , **kwargs )
    @property
    def model_input_names ( self ):
        '''simple docstring'''
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
    @property
    def feature_extractor_class ( self ):
        '''simple docstring'''
        warnings.warn(
            """`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.""" , FutureWarning , )
        return self.image_processor_class
 | 232 | 0 |
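# Hedged usage sketch for the processor class above (checkpoint name is illustrative):
# processor = ChineseCLIPProcessor.from_pretrained("OFA-Sys/chinese-clip-vit-base-patch16")
# inputs = processor(text=["一张猫的照片"], images=image, return_tensors="pt")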
"""simple docstring"""
from math import pow, sqrt
def validate(*values: float) -> bool:
    result = len(values) > 0 and all(value > 0.0 for value in values)
    return result
def effusion_ratio(molar_mass_a: float, molar_mass_b: float) -> float | ValueError:
    # Graham's law: rate_a / rate_b = sqrt(molar_mass_b / molar_mass_a)
    return (
        round(sqrt(molar_mass_b / molar_mass_a), 6)
        if validate(molar_mass_a, molar_mass_b)
        else ValueError('Input Error: Molar mass values must be greater than 0.')
    )
def first_effusion_rate(effusion_rate: float, molar_mass_a: float, molar_mass_b: float) -> float | ValueError:
    return (
        round(effusion_rate * sqrt(molar_mass_b / molar_mass_a), 6)
        if validate(effusion_rate, molar_mass_a, molar_mass_b)
        else ValueError(
            'Input Error: Molar mass and effusion rate values must be greater than 0.')
    )
def second_effusion_rate(effusion_rate: float, molar_mass_a: float, molar_mass_b: float) -> float | ValueError:
    return (
        round(effusion_rate / sqrt(molar_mass_b / molar_mass_a), 6)
        if validate(effusion_rate, molar_mass_a, molar_mass_b)
        else ValueError(
            'Input Error: Molar mass and effusion rate values must be greater than 0.')
    )
def first_molar_mass(molar_mass: float, effusion_rate_a: float, effusion_rate_b: float) -> float | ValueError:
    return (
        round(molar_mass / pow(effusion_rate_a / effusion_rate_b, 2), 6)
        if validate(molar_mass, effusion_rate_a, effusion_rate_b)
        else ValueError(
            'Input Error: Molar mass and effusion rate values must be greater than 0.')
    )
def second_molar_mass(molar_mass: float, effusion_rate_a: float, effusion_rate_b: float) -> float | ValueError:
    return (
        round(pow(effusion_rate_a / effusion_rate_b, 2) / molar_mass, 6)
        if validate(molar_mass, effusion_rate_a, effusion_rate_b)
        else ValueError(
            'Input Error: Molar mass and effusion rate values must be greater than 0.')
    )
)
| 183 |
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class LayoutLMvaImageProcessingTester ( unittest.TestCase ):
    def __init__( self , parent , batch_size=7 , num_channels=3 , image_size=18 , min_resolution=30 , max_resolution=400 , do_resize=True , size=None , apply_ocr=True , ):
        size = size if size is not None else {'height': 18, 'width': 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.apply_ocr = apply_ocr
    def prepare_image_processor_dict ( self ):
        return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class LayoutLMvaImageProcessingTest ( ImageProcessingSavingTestMixin , unittest.TestCase ):
    image_processing_class = LayoutLMvaImageProcessor if is_pytesseract_available() else None
    def setUp ( self ):
        self.image_processor_tester = LayoutLMvaImageProcessingTester(self )
    @property
    def image_processor_dict ( self ):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties ( self ):
        image_processing = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(image_processing , 'do_resize' ) )
        self.assertTrue(hasattr(image_processing , 'size' ) )
        self.assertTrue(hasattr(image_processing , 'apply_ocr' ) )
    def test_image_processor_from_dict_with_kwargs ( self ):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict )
        self.assertEqual(image_processor.size , {'height': 18, 'width': 18} )
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
        self.assertEqual(image_processor.size , {'height': 42, 'width': 42} )
    def test_batch_feature ( self ):
        pass
    def test_call_pil ( self ):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False )
        for image in image_inputs:
            self.assertIsInstance(image , Image.Image )
        # Test not batched input
        encoding = image_processing(image_inputs[0] , return_tensors='pt' )
        self.assertEqual(
            encoding.pixel_values.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['height'],
                self.image_processor_tester.size['width'],
            ) , )
        self.assertIsInstance(encoding.words , list )
        self.assertIsInstance(encoding.boxes , list )
        # Test batched
        encoded_images = image_processing(image_inputs , return_tensors='pt' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['height'],
                self.image_processor_tester.size['width'],
            ) , )
    def test_call_numpy ( self ):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False , numpify=True )
        for image in image_inputs:
            self.assertIsInstance(image , np.ndarray )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['height'],
                self.image_processor_tester.size['width'],
            ) , )
        # Test batched
        encoded_images = image_processing(image_inputs , return_tensors='pt' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['height'],
                self.image_processor_tester.size['width'],
            ) , )
    def test_call_pytorch ( self ):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False , torchify=True )
        for image in image_inputs:
            self.assertIsInstance(image , torch.Tensor )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['height'],
                self.image_processor_tester.size['width'],
            ) , )
        # Test batched
        encoded_images = image_processing(image_inputs , return_tensors='pt' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['height'],
                self.image_processor_tester.size['width'],
            ) , )
    def test_layoutlmv3_integration_test ( self ):
        # with apply_OCR = True
        image_processing = LayoutLMvaImageProcessor()
        from datasets import load_dataset
        ds = load_dataset('hf-internal-testing/fixtures_docvqa' , split='test' )
        image = Image.open(ds[0]['file'] ).convert('RGB' )
        encoding = image_processing(image , return_tensors='pt' )
        self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) )
        self.assertEqual(len(encoding.words ) , len(encoding.boxes ) )
# fmt: off
# the words and boxes were obtained with Tesseract 4.1.1
lowerCamelCase_ = [['11:14', 'to', '11:39', 'a.m', '11:39', 'to', '11:44', 'a.m.', '11:44', 'a.m.', 'to', '12:25', 'p.m.', '12:25', 'to', '12:58', 'p.m.', '12:58', 'to', '4:00', 'p.m.', '2:00', 'to', '5:00', 'p.m.', 'Coffee', 'Break', 'Coffee', 'will', 'be', 'served', 'for', 'men', 'and', 'women', 'in', 'the', 'lobby', 'adjacent', 'to', 'exhibit', 'area.', 'Please', 'move', 'into', 'exhibit', 'area.', '(Exhibits', 'Open)', 'TRRF', 'GENERAL', 'SESSION', '(PART', '|)', 'Presiding:', 'Lee', 'A.', 'Waller', 'TRRF', 'Vice', 'President', '“Introductory', 'Remarks”', 'Lee', 'A.', 'Waller,', 'TRRF', 'Vice', 'Presi-', 'dent', 'Individual', 'Interviews', 'with', 'TRRF', 'Public', 'Board', 'Members', 'and', 'Sci-', 'entific', 'Advisory', 'Council', 'Mem-', 'bers', 'Conducted', 'by', 'TRRF', 'Treasurer', 'Philip', 'G.', 'Kuehn', 'to', 'get', 'answers', 'which', 'the', 'public', 'refrigerated', 'warehousing', 'industry', 'is', 'looking', 'for.', 'Plus', 'questions', 'from', 'the', 'floor.', 'Dr.', 'Emil', 'M.', 'Mrak,', 'University', 'of', 'Cal-', 'ifornia,', 'Chairman,', 'TRRF', 'Board;', 'Sam', 'R.', 'Cecil,', 'University', 'of', 'Georgia', 'College', 'of', 'Agriculture;', 'Dr.', 'Stanley', 'Charm,', 'Tufts', 'University', 'School', 'of', 'Medicine;', 'Dr.', 'Robert', 'H.', 'Cotton,', 'ITT', 'Continental', 'Baking', 'Company;', 'Dr.', 'Owen', 'Fennema,', 'University', 'of', 'Wis-', 'consin;', 'Dr.', 'Robert', 'E.', 'Hardenburg,', 'USDA.', 'Questions', 'and', 'Answers', 'Exhibits', 'Open', 'Capt.', 'Jack', 'Stoney', 'Room', 'TRRF', 'Scientific', 'Advisory', 'Council', 'Meeting', 'Ballroom', 'Foyer']] # noqa: E231
lowerCamelCase_ = [[[141, 57, 214, 69], [228, 58, 252, 69], [141, 75, 216, 88], [230, 79, 280, 88], [142, 260, 218, 273], [230, 261, 255, 273], [143, 279, 218, 290], [231, 282, 290, 291], [143, 342, 218, 354], [231, 345, 289, 355], [202, 362, 227, 373], [143, 379, 220, 392], [231, 382, 291, 394], [144, 714, 220, 726], [231, 715, 256, 726], [144, 732, 220, 745], [232, 736, 291, 747], [144, 769, 218, 782], [231, 770, 256, 782], [141, 788, 202, 801], [215, 791, 274, 804], [143, 826, 204, 838], [215, 826, 240, 838], [142, 844, 202, 857], [215, 847, 274, 859], [334, 57, 427, 69], [440, 57, 522, 69], [369, 75, 461, 88], [469, 75, 516, 88], [528, 76, 562, 88], [570, 76, 667, 88], [675, 75, 711, 87], [721, 79, 778, 88], [789, 75, 840, 88], [369, 97, 470, 107], [484, 94, 507, 106], [518, 94, 562, 107], [576, 94, 655, 110], [668, 94, 792, 109], [804, 95, 829, 107], [369, 113, 465, 125], [477, 116, 547, 125], [562, 113, 658, 125], [671, 116, 748, 125], [761, 113, 811, 125], [369, 131, 465, 143], [477, 133, 548, 143], [563, 130, 698, 145], [710, 130, 802, 146], [336, 171, 412, 183], [423, 171, 572, 183], [582, 170, 716, 184], [728, 171, 817, 187], [829, 171, 844, 186], [338, 197, 482, 212], [507, 196, 557, 209], [569, 196, 595, 208], [610, 196, 702, 209], [505, 214, 583, 226], [595, 214, 656, 227], [670, 215, 807, 227], [335, 259, 543, 274], [556, 259, 708, 272], [372, 279, 422, 291], [435, 279, 460, 291], [474, 279, 574, 292], [587, 278, 664, 291], [676, 278, 738, 291], [751, 279, 834, 291], [372, 298, 434, 310], [335, 341, 483, 354], [497, 341, 655, 354], [667, 341, 728, 354], [740, 341, 825, 354], [335, 360, 430, 372], [442, 360, 534, 372], [545, 359, 687, 372], [697, 360, 754, 372], [765, 360, 823, 373], [334, 378, 428, 391], [440, 378, 577, 394], [590, 378, 705, 391], [720, 378, 801, 391], [334, 397, 400, 409], [370, 416, 529, 429], [544, 416, 576, 432], [587, 416, 665, 428], [677, 416, 814, 429], [372, 435, 452, 450], [465, 434, 495, 447], [511, 434, 600, 447], [611, 436, 637, 447], [649, 436, 694, 451], [705, 438, 824, 447], [369, 453, 452, 466], [464, 454, 509, 466], [522, 453, 611, 469], [625, 453, 792, 469], [370, 472, 556, 488], [570, 472, 684, 487], [697, 472, 718, 485], [732, 472, 835, 488], [369, 490, 411, 503], [425, 490, 484, 503], [496, 490, 635, 506], [645, 490, 707, 503], [718, 491, 761, 503], [771, 490, 840, 503], [336, 510, 374, 521], [388, 510, 447, 522], [460, 510, 489, 521], [503, 510, 580, 522], [592, 509, 736, 525], [745, 509, 770, 522], [781, 509, 840, 522], [338, 528, 434, 541], [448, 528, 596, 541], [609, 527, 687, 540], [700, 528, 792, 541], [336, 546, 397, 559], [407, 546, 431, 559], [443, 546, 525, 560], [537, 546, 680, 562], [688, 546, 714, 559], [722, 546, 837, 562], [336, 565, 449, 581], [461, 565, 485, 577], [497, 565, 665, 581], [681, 565, 718, 577], [732, 565, 837, 580], [337, 584, 438, 597], [452, 583, 521, 596], [535, 584, 677, 599], [690, 583, 787, 596], [801, 583, 825, 596], [338, 602, 478, 615], [492, 602, 530, 614], [543, 602, 638, 615], [650, 602, 676, 614], [688, 602, 788, 615], [802, 602, 843, 614], [337, 621, 502, 633], [516, 621, 615, 637], [629, 621, 774, 636], [789, 621, 827, 633], [337, 639, 418, 652], [432, 640, 571, 653], [587, 639, 731, 655], [743, 639, 769, 652], [780, 639, 841, 652], [338, 658, 440, 673], [455, 658, 491, 670], [508, 658, 602, 671], [616, 658, 638, 670], [654, 658, 835, 674], [337, 677, 429, 689], [337, 714, 482, 726], [495, 714, 548, 726], [561, 714, 683, 726], [338, 770, 461, 782], [474, 769, 554, 785], [489, 788, 562, 803], 
[576, 788, 643, 801], [656, 787, 751, 804], [764, 788, 844, 801], [334, 825, 421, 838], [430, 824, 574, 838], [584, 824, 723, 841], [335, 844, 450, 857], [464, 843, 583, 860], [628, 862, 755, 875], [769, 861, 848, 878]]] # noqa: E231
# fmt: on
self.assertListEqual(encoding.words , __SCREAMING_SNAKE_CASE )
self.assertListEqual(encoding.boxes , __SCREAMING_SNAKE_CASE )
        # with apply_OCR = False
        image_processing = LayoutLMvaImageProcessor(apply_ocr=False )
        encoding = image_processing(image , return_tensors='pt' )
        self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) )
| 183 | 1 |
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin, SchedulerOutput
@dataclass
class SdeVeOutput ( BaseOutput ):
    prev_sample: torch.FloatTensor
    prev_sample_mean: torch.FloatTensor
class ScoreSdeVeScheduler ( SchedulerMixin , ConfigMixin ):
    order = 1
@register_to_config
    def __init__( self , num_train_timesteps : int = 2000 , snr : float = 0.15 , sigma_min : float = 0.01 , sigma_max : float = 1348.0 , sampling_eps : float = 1e-5 , correct_steps : int = 1 , ) -> None:
        # standard deviation of the initial noise distribution
        self.init_noise_sigma = sigma_max
        # setable values
        self.timesteps = None
        self.set_sigmas(num_train_timesteps , sigma_min , sigma_max , sampling_eps )
def UpperCamelCase_ ( self : str , A__ : Optional[Any] , A__ : Dict = None ) -> torch.FloatTensor:
return sample
def UpperCamelCase_ ( self : int , A__ : str , A__ : List[str] = None , A__ : Union[str, Any] = None ) -> Any:
_snake_case = sampling_eps if sampling_eps is not None else self.config.sampling_eps
_snake_case = torch.linspace(1 , _a , _a , device=_a )
def UpperCamelCase_ ( self : List[Any] , A__ : Optional[int] , A__ : int = None , A__ : str = None , A__ : Optional[int] = None ) -> Optional[int]:
_snake_case = sigma_min if sigma_min is not None else self.config.sigma_min
_snake_case = sigma_max if sigma_max is not None else self.config.sigma_max
_snake_case = sampling_eps if sampling_eps is not None else self.config.sampling_eps
if self.timesteps is None:
self.set_timesteps(_a , _a )
_snake_case = sigma_min * (sigma_max / sigma_min) ** (self.timesteps / sampling_eps)
_snake_case = torch.exp(torch.linspace(math.log(_a ) , math.log(_a ) , _a ) )
_snake_case = torch.tensor([sigma_min * (sigma_max / sigma_min) ** t for t in self.timesteps] )
def UpperCamelCase_ ( self : List[Any] , A__ : Tuple , A__ : Union[str, Any] ) -> int:
return torch.where(
timesteps == 0 , torch.zeros_like(t.to(timesteps.device ) ) , self.discrete_sigmas[timesteps - 1].to(timesteps.device ) , )
def UpperCamelCase_ ( self : str , A__ : int , A__ : Union[str, Any] , A__ : Dict , A__ : Dict = None , A__ : Union[str, Any] = True , ) -> Union[SdeVeOutput, Tuple]:
if self.timesteps is None:
raise ValueError(
'''`self.timesteps` is not set, you need to run \'set_timesteps\' after creating the scheduler''' )
_snake_case = timestep * torch.ones(
sample.shape[0] , device=sample.device ) # torch.repeat_interleave(timestep, sample.shape[0])
_snake_case = (timestep * (len(self.timesteps ) - 1)).long()
# mps requires indices to be in the same device, so we use cpu as is the default with cuda
_snake_case = timesteps.to(self.discrete_sigmas.device )
_snake_case = self.discrete_sigmas[timesteps].to(sample.device )
_snake_case = self.get_adjacent_sigma(_a , _a ).to(sample.device )
_snake_case = torch.zeros_like(_a )
_snake_case = (sigma**2 - adjacent_sigma**2) ** 0.5
# equation 6 in the paper: the model_output modeled by the network is grad_x log pt(x)
# also equation 47 shows the analog from SDE models to ancestral sampling methods
_snake_case = diffusion.flatten()
while len(diffusion.shape ) < len(sample.shape ):
_snake_case = diffusion.unsqueeze(-1 )
_snake_case = drift - diffusion**2 * model_output
# equation 6: sample noise for the diffusion term of
_snake_case = randn_tensor(
sample.shape , layout=sample.layout , generator=_a , device=sample.device , dtype=sample.dtype )
_snake_case = sample - drift # subtract because `dt` is a small negative timestep
# TODO is the variable diffusion the correct scaling term for the noise?
_snake_case = prev_sample_mean + diffusion * noise # add impact of diffusion field g
if not return_dict:
return (prev_sample, prev_sample_mean)
return SdeVeOutput(prev_sample=_a , prev_sample_mean=_a )
def UpperCamelCase_ ( self : List[Any] , A__ : List[str] , A__ : int , A__ : str = None , A__ : Optional[Any] = True , ) -> Union[SchedulerOutput, Tuple]:
if self.timesteps is None:
raise ValueError(
'''`self.timesteps` is not set, you need to run \'set_timesteps\' after creating the scheduler''' )
# For small batch sizes, the paper "suggest replacing norm(z) with sqrt(d), where d is the dim. of z"
# sample noise for correction
_snake_case = randn_tensor(sample.shape , layout=sample.layout , generator=_a ).to(sample.device )
# compute step size from the model_output, the noise, and the snr
_snake_case = torch.norm(model_output.reshape(model_output.shape[0] , -1 ) , dim=-1 ).mean()
_snake_case = torch.norm(noise.reshape(noise.shape[0] , -1 ) , dim=-1 ).mean()
_snake_case = (self.config.snr * noise_norm / grad_norm) ** 2 * 2
_snake_case = step_size * torch.ones(sample.shape[0] ).to(sample.device )
# self.repeat_scalar(step_size, sample.shape[0])
# compute corrected sample: model_output term and noise term
_snake_case = step_size.flatten()
while len(step_size.shape ) < len(sample.shape ):
_snake_case = step_size.unsqueeze(-1 )
_snake_case = sample + step_size * model_output
_snake_case = prev_sample_mean + ((step_size * 2) ** 0.5) * noise
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=_a )
def UpperCamelCase_ ( self : Any , A__ : Optional[int] , A__ : Optional[int] , A__ : Optional[Any] , ) -> torch.FloatTensor:
# Make sure sigmas and timesteps have the same device and dtype as original_samples
_snake_case = timesteps.to(original_samples.device )
_snake_case = self.discrete_sigmas.to(original_samples.device )[timesteps]
_snake_case = (
noise * sigmas[:, None, None, None]
if noise is not None
else torch.randn_like(_a ) * sigmas[:, None, None, None]
)
_snake_case = noise + original_samples
return noisy_samples
def __len__( self : Optional[Any] ) -> Union[str, Any]:
return self.config.num_train_timesteps
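# A minimal sketch (an assumption, not part of the original file) of the
# predictor-corrector loop that drives this scheduler. `DummyScore` stands in
# for a real score network; diffusers' actual pipeline is ScoreSdeVePipeline.
if __name__ == "__main__":

    class DummyScore:
        def __call__(self, sample, sigma):
            return -sample  # placeholder; a real model predicts grad_x log p_t(x)

    model = DummyScore()
    scheduler = ScoreSdeVeScheduler(num_train_timesteps=10)
    scheduler.set_timesteps(num_inference_steps=10)
    scheduler.set_sigmas(num_inference_steps=10)

    sample = torch.randn(1, 3, 32, 32) * scheduler.init_noise_sigma
    for i, t in enumerate(scheduler.timesteps):
        sigma_t = scheduler.sigmas[i] * torch.ones(sample.shape[0])

        # corrector (Langevin MCMC) steps, then one predictor (reverse-SDE) step
        for _ in range(scheduler.config.correct_steps):
            sample = scheduler.step_correct(model(sample, sigma_t), sample).prev_sample

        output = scheduler.step_pred(model(sample, sigma_t), t, sample)
        sample = output.prev_sample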
| 350 |
B64_CHARSET = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"


def base64_encode(data: bytes) -> bytes:
    """Encodes `data` according to RFC 4648."""
    if not isinstance(data, bytes):
        msg = f"a bytes-like object is required, not '{data.__class__.__name__}'"
        raise TypeError(msg)

    binary_stream = "".join(bin(byte)[2:].zfill(8) for byte in data)

    padding_needed = len(binary_stream) % 6 != 0

    if padding_needed:
        # The padding that will be added later
        padding = b"=" * ((6 - len(binary_stream) % 6) // 2)

        # Append binary_stream with arbitrary binary digits (0's by default) to make its
        # length a multiple of 6.
        binary_stream += "0" * (6 - len(binary_stream) % 6)
    else:
        padding = b""

    # Encode every 6 binary digits to their corresponding Base64 character
    return (
        "".join(
            B64_CHARSET[int(binary_stream[index : index + 6], 2)]
            for index in range(0, len(binary_stream), 6)
        ).encode()
        + padding
    )


def base64_decode(encoded_data: str) -> bytes:
    """Decodes `encoded_data` according to RFC 4648."""
    if not isinstance(encoded_data, bytes) and not isinstance(encoded_data, str):
        msg = (
            "argument should be a bytes-like object or ASCII string, "
            f"not '{encoded_data.__class__.__name__}'"
        )
        raise TypeError(msg)

    # In case encoded_data is a bytes-like object, make sure it contains only
    # ASCII characters so we convert it to a string object
    if isinstance(encoded_data, bytes):
        try:
            encoded_data = encoded_data.decode("utf-8")
        except UnicodeDecodeError:
            raise ValueError("base64 encoded data should only contain ASCII characters")

    padding = encoded_data.count("=")

    # Check if the encoded string contains non base64 characters
    if padding:
        assert all(
            char in B64_CHARSET for char in encoded_data[:-padding]
        ), "Invalid base64 character(s) found."
    else:
        assert all(
            char in B64_CHARSET for char in encoded_data
        ), "Invalid base64 character(s) found."

    # Check the padding
    assert len(encoded_data) % 4 == 0 and padding < 3, "Incorrect padding"

    if padding:
        # Remove padding if there is one
        encoded_data = encoded_data[:-padding]

        binary_stream = "".join(
            bin(B64_CHARSET.index(char))[2:].zfill(6) for char in encoded_data
        )[: -padding * 2]
    else:
        binary_stream = "".join(
            bin(B64_CHARSET.index(char))[2:].zfill(6) for char in encoded_data
        )

    decoded_data = [
        int(binary_stream[index : index + 8], 2)
        for index in range(0, len(binary_stream), 8)
    ]

    return bytes(decoded_data)
if __name__ == "__main__":
import doctest
doctest.testmod()
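    # A quick round-trip check against the standard library for the two
    # functions above (uses only stdlib base64; a sketch appended for illustration):
    import base64

    payload = b"Hello, World!"
    assert base64_encode(payload) == base64.b64encode(payload)
    assert base64_decode(base64_encode(payload)) == payload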
| 278 | 0 |
"""simple docstring"""
import os
import unittest
from transformers.models.bartpho.tokenization_bartpho import VOCAB_FILES_NAMES, BartphoTokenizer
from transformers.testing_utils import get_tests_dir
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece_bpe.model")


class BartphoTokenizerTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BartphoTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        vocab = ["▁This", "▁is", "▁a", "▁t", "est"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.monolingual_vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["monolingual_vocab_file"])
        with open(self.monolingual_vocab_file, "w", encoding="utf-8") as fp:
            for token in vocab_tokens:
                fp.write(f"{token} {vocab_tokens[token]}\n")

        tokenizer = BartphoTokenizer(SAMPLE_VOCAB, self.monolingual_vocab_file, **self.special_tokens_map)
        tokenizer.save_pretrained(self.tmpdirname)

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return BartphoTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "This is a là test"
        output_text = "This is a<unk><unk> test"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = BartphoTokenizer(SAMPLE_VOCAB, self.monolingual_vocab_file, **self.special_tokens_map)
        text = "This is a là test"
        bpe_tokens = "▁This ▁is ▁a ▁l à ▁t est".split()
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [4, 5, 6, 3, 3, 7, 8, 3]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
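# The two-file BartPho setup exercised above can be reproduced outside the test
# harness; a minimal sketch (the file paths are placeholders, BartphoTokenizer
# is the real transformers class):
if __name__ == "__main__":
    from transformers import BartphoTokenizer

    # BartPho needs a SentencePiece model plus a plain-text monolingual vocab
    # with one "<token> <id>" pair per line -- the format written by setUp above.
    tokenizer = BartphoTokenizer("sentencepiece.bpe.model", "dict.txt")  # placeholder files
    print(tokenizer.tokenize("This is a là test"))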
| 54 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
    "configuration_bloom": ["BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP", "BloomConfig", "BloomOnnxConfig"],
}
try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_bloom_fast"] = ["BloomTokenizerFast"]
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_bloom"] = [
        "BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BloomForCausalLM",
        "BloomModel",
        "BloomPreTrainedModel",
        "BloomForSequenceClassification",
        "BloomForTokenClassification",
        "BloomForQuestionAnswering",
    ]

if TYPE_CHECKING:
    from .configuration_bloom import BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP, BloomConfig, BloomOnnxConfig

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_bloom_fast import BloomTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_bloom import (
            BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST,
            BloomForCausalLM,
            BloomForQuestionAnswering,
            BloomForSequenceClassification,
            BloomForTokenClassification,
            BloomModel,
            BloomPreTrainedModel,
        )
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 320 | 0 |
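# The Bloom __init__.py above uses transformers' lazy-import pattern: heavy
# submodules are only imported when an attribute is first touched. A
# self-contained sketch of the same idea (module/attribute names illustrative,
# not part of any library):
import importlib
import types


class LazyModule(types.ModuleType):
    """Defer importing submodules until one of their attributes is accessed."""

    def __init__(self, name, import_structure):
        super().__init__(name)
        # attribute name -> submodule that defines it
        self._attr_to_module = {attr: mod for mod, attrs in import_structure.items() for attr in attrs}

    def __getattr__(self, attr):
        module_name = self._attr_to_module.get(attr)
        if module_name is None:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        module = importlib.import_module(f"{self.__name__}.{module_name}")
        value = getattr(module, attr)
        setattr(self, attr, value)  # cache for subsequent lookups
        return value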
from dataclasses import asdict, dataclass
from typing import Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
# TODO Update this
ESM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""facebook/esm-1b""": """https://huggingface.co/facebook/esm-1b/resolve/main/config.json""",
# See all ESM models at https://huggingface.co/models?filter=esm
}
class EsmConfig(PretrainedConfig):
    model_type = "esm"

    def __init__(
        self,
        vocab_size=None,
        mask_token_id=None,
        pad_token_id=None,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1026,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        position_embedding_type="absolute",
        use_cache=True,
        emb_layer_norm_before=None,
        token_dropout=False,
        is_folding_model=False,
        esmfold_config=None,
        vocab_list=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, mask_token_id=mask_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.emb_layer_norm_before = emb_layer_norm_before
        self.token_dropout = token_dropout
        self.is_folding_model = is_folding_model
        if is_folding_model:
            if esmfold_config is None:
                logger.info("No esmfold_config supplied for folding model, using default values.")
                esmfold_config = EsmFoldConfig()
            elif isinstance(esmfold_config, dict):
                esmfold_config = EsmFoldConfig(**esmfold_config)
            self.esmfold_config = esmfold_config
            if vocab_list is None:
                logger.warning("No vocab_list supplied for folding model, assuming the ESM-2 vocabulary!")
                self.vocab_list = get_default_vocab_list()
            else:
                self.vocab_list = vocab_list
        else:
            self.esmfold_config = None
            self.vocab_list = None
        if self.esmfold_config is not None and getattr(self.esmfold_config, "use_esm_attn_map", False):
            raise ValueError("The HuggingFace port of ESMFold does not support use_esm_attn_map at this time!")

    def to_dict(self):
        output = super().to_dict()
        if isinstance(self.esmfold_config, EsmFoldConfig):
            output["esmfold_config"] = self.esmfold_config.to_dict()
        return output


@dataclass
class EsmFoldConfig:
    esm_type: str = None
    fp16_esm: bool = True
    use_esm_attn_map: bool = False
    esm_ablate_pairwise: bool = False
    esm_ablate_sequence: bool = False
    esm_input_dropout: float = 0

    embed_aa: bool = True
    bypass_lm: bool = False

    lddt_head_hid_dim: int = 128
    trunk: "TrunkConfig" = None

    def __post_init__(self):
        if self.trunk is None:
            self.trunk = TrunkConfig()
        elif isinstance(self.trunk, dict):
            self.trunk = TrunkConfig(**self.trunk)

    def to_dict(self):
        output = asdict(self)
        output["trunk"] = self.trunk.to_dict()
        return output


@dataclass
class TrunkConfig:
    num_blocks: int = 48
    sequence_state_dim: int = 1024
    pairwise_state_dim: int = 128
    sequence_head_width: int = 32
    pairwise_head_width: int = 32
    position_bins: int = 32
    dropout: float = 0
    layer_drop: float = 0
    cpu_grad_checkpoint: bool = False
    max_recycles: int = 4
    chunk_size: Optional[int] = 128
    structure_module: "StructureModuleConfig" = None

    def __post_init__(self):
        if self.structure_module is None:
            self.structure_module = StructureModuleConfig()
        elif isinstance(self.structure_module, dict):
            self.structure_module = StructureModuleConfig(**self.structure_module)

        if self.max_recycles <= 0:
            raise ValueError(f"`max_recycles` should be positive, got {self.max_recycles}.")
        if self.sequence_state_dim % self.sequence_head_width != 0:
            raise ValueError(
                "`sequence_state_dim` should be a round multiple of `sequence_head_width`, got"
                f" {self.sequence_state_dim} and {self.sequence_head_width}.")
        if self.pairwise_state_dim % self.pairwise_head_width != 0:
            raise ValueError(
                "`pairwise_state_dim` should be a round multiple of `pairwise_head_width`, got"
                f" {self.pairwise_state_dim} and {self.pairwise_head_width}.")

        sequence_num_heads = self.sequence_state_dim // self.sequence_head_width
        pairwise_num_heads = self.pairwise_state_dim // self.pairwise_head_width

        if self.sequence_state_dim != sequence_num_heads * self.sequence_head_width:
            raise ValueError(
                "`sequence_state_dim` should be equal to `sequence_num_heads * sequence_head_width, got"
                f" {self.sequence_state_dim} != {sequence_num_heads} * {self.sequence_head_width}.")
        if self.pairwise_state_dim != pairwise_num_heads * self.pairwise_head_width:
            raise ValueError(
                "`pairwise_state_dim` should be equal to `pairwise_num_heads * pairwise_head_width, got"
                f" {self.pairwise_state_dim} != {pairwise_num_heads} * {self.pairwise_head_width}.")
        if self.pairwise_state_dim % 2 != 0:
            raise ValueError(f"`pairwise_state_dim` should be even, got {self.pairwise_state_dim}.")

        if self.dropout >= 0.4:
            raise ValueError(f"`dropout` should not be greater than 0.4, got {self.dropout}.")

    def to_dict(self):
        output = asdict(self)
        output["structure_module"] = self.structure_module.to_dict()
        return output


@dataclass
class StructureModuleConfig:
    sequence_dim: int = 384
    pairwise_dim: int = 128
    ipa_dim: int = 16
    resnet_dim: int = 128
    num_heads_ipa: int = 12
    num_qk_points: int = 4
    num_v_points: int = 8
    dropout_rate: float = 0.1
    num_blocks: int = 8
    num_transition_layers: int = 1
    num_resnet_blocks: int = 2
    num_angles: int = 7
    trans_scale_factor: int = 10
    epsilon: float = 1e-8
    inf: float = 1e5

    def to_dict(self):
        return asdict(self)


def get_default_vocab_list():
return (
"<cls>",
"<pad>",
"<eos>",
"<unk>",
"L",
"A",
"G",
"V",
"S",
"E",
"R",
"T",
"I",
"D",
"P",
"K",
"Q",
"N",
"F",
"Y",
"M",
"H",
"W",
"C",
"X",
"B",
"U",
"Z",
"O",
".",
"-",
"<null_1>",
"<mask>",
)
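

# A minimal sketch of instantiating the config above (the hyperparameter
# values are illustrative, not a real checkpoint's):
if __name__ == "__main__":
    config = EsmConfig(vocab_size=33, hidden_size=320, num_hidden_layers=6, num_attention_heads=20)
    print(config.model_type)                 # "esm"
    print(config.to_dict()["hidden_size"])   # 320

    # Folding models nest EsmFoldConfig -> TrunkConfig -> StructureModuleConfig;
    # plain dicts are promoted to dataclasses in __post_init__.
    fold_config = EsmConfig(vocab_size=33, is_folding_model=True, esmfold_config={"trunk": {"num_blocks": 4}})
    print(fold_config.esmfold_config.trunk.num_blocks)  # 4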
| 369 |
import sys
N = (
"""73167176531330624919225119674426574742355349194934"""
"""96983520312774506326239578318016984801869478851843"""
"""85861560789112949495459501737958331952853208805511"""
"""12540698747158523863050715693290963295227443043557"""
"""66896648950445244523161731856403098711121722383113"""
"""62229893423380308135336276614282806444486645238749"""
"""30358907296290491560440772390713810515859307960866"""
"""70172427121883998797908792274921901699720888093776"""
"""65727333001053367881220235421809751254540594752243"""
"""52584907711670556013604839586446706324415722155397"""
"""53697817977846174064955149290862569321978468622482"""
"""83972241375657056057490261407972968652414535100474"""
"""82166370484403199890008895243450658541227588666881"""
"""16427171479924442928230863465674813919123162824586"""
"""17866458359124566529476545682848912883142607690042"""
"""24219022671055626321111109370544217506941658960408"""
"""07198403850962455444362981230987879927244284909188"""
"""84580156166097919133875499200524063689912560717606"""
"""05886116467109405077541002256983155200055935729725"""
"""71636269561882670428252483600823257530420752963450"""
)
def solution(n: str = N) -> int:
    """Return the greatest product of thirteen adjacent digits in `n`."""
    largest_product = -sys.maxsize - 1
    for i in range(len(n) - 12):
        product = 1
        for j in range(13):
            product *= int(n[i + j])
        if product > largest_product:
            largest_product = product
    return largest_product
if __name__ == "__main__":
print(f"{solution() = }")
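# A pruned variant (a sketch, not part of the original solution): any window
# containing '0' multiplies to zero, so only zero-free runs need scanning.
from functools import reduce


def solution_pruned(n: str = N, span: int = 13) -> int:
    best = 0
    for run in n.split("0"):
        for i in range(len(run) - span + 1):
            best = max(best, reduce(lambda acc, d: acc * int(d), run[i : i + span], 1))
    return best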
| 286 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_torch_available,
is_vision_available,
)
_import_structure = {'configuration_beit': ['BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'BeitConfig', 'BeitOnnxConfig']}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_beit"] = ["BeitFeatureExtractor"]
    _import_structure["image_processing_beit"] = ["BeitImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_beit"] = [
        "BEIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BeitForImageClassification",
        "BeitForMaskedImageModeling",
        "BeitForSemanticSegmentation",
        "BeitModel",
        "BeitPreTrainedModel",
    ]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_beit"] = [
        "FlaxBeitForImageClassification",
        "FlaxBeitForMaskedImageModeling",
        "FlaxBeitModel",
        "FlaxBeitPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_beit import BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, BeitConfig, BeitOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_beit import BeitFeatureExtractor
from .image_processing_beit import BeitImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_beit import (
BEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
BeitForImageClassification,
BeitForMaskedImageModeling,
BeitForSemanticSegmentation,
BeitModel,
BeitPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_beit import (
FlaxBeitForImageClassification,
FlaxBeitForMaskedImageModeling,
FlaxBeitModel,
FlaxBeitPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 320 |
from typing import Any
import numpy as np
def is_hermitian(matrix: np.ndarray) -> bool:
    """Checks whether a matrix equals its own conjugate transpose."""
    return np.array_equal(matrix, matrix.conjugate().T)


def rayleigh_quotient(a: np.ndarray, v: np.ndarray) -> Any:
    """Returns the Rayleigh quotient of a Hermitian matrix `a` and vector `v`."""
    v_star = v.conjugate().T
    v_star_dot = v_star.dot(a)
    assert isinstance(v_star_dot, np.ndarray)
    return (v_star_dot.dot(v)) / (v_star.dot(v))


def tests() -> None:
    a = np.array([[2, 2 + 1j, 4], [2 - 1j, 3, 1j], [4, -1j, 1]])
    v = np.array([[1], [2], [3]])
    assert is_hermitian(a), f"{a} is not hermitian."
    print(rayleigh_quotient(a, v))

    a = np.array([[1, 2, 4], [2, 3, -1], [4, -1, 1]])
    assert is_hermitian(a), f"{a} is not hermitian."
    assert rayleigh_quotient(a, v) == float(3)
if __name__ == "__main__":
import doctest
doctest.testmod()
tests()
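    # For a Hermitian matrix, the Rayleigh quotient of an eigenvector recovers
    # its eigenvalue -- an extra sanity check (a sketch appended for illustration):
    a = np.array([[2.0, 1.0], [1.0, 2.0]])  # real symmetric, hence Hermitian
    eigenvalues, eigenvectors = np.linalg.eigh(a)
    v = eigenvectors[:, [0]]  # eigenvector for the smallest eigenvalue
    assert np.isclose(rayleigh_quotient(a, v).item(), eigenvalues[0])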
| 133 | 0 |
import argparse
import json
import os
import time
import zipfile
from get_ci_error_statistics import download_artifact, get_artifacts_links
from transformers import logging
logger = logging.get_logger(__name__)
def extract_warnings_from_single_artifact(artifact_path, targets):
    """Extract warnings from a downloaded artifact (in .zip format)."""
    selected_warnings = set()
    buffer = []

    def parse_line(fp):
        for line in fp:
            if isinstance(line, bytes):
                line = line.decode("UTF-8")
            if "warnings summary (final)" in line:
                continue
            # This means we are outside the body of a warning
            elif not line.startswith(" "):
                # process a single warning and move it to `selected_warnings`.
                if len(buffer) > 0:
                    warning = "\n".join(buffer)
                    # Only keep the warnings specified in `targets`
                    if any(f": {x}: " in warning for x in targets):
                        selected_warnings.add(warning)
                    buffer.clear()
                continue
            else:
                line = line.strip()
                buffer.append(line)

    if from_gh:
        for filename in os.listdir(artifact_path):
            file_path = os.path.join(artifact_path, filename)
            if not os.path.isdir(file_path):
                # read the file
                if filename != "warnings.txt":
                    continue
                with open(file_path) as fp:
                    parse_line(fp)
    else:
        try:
            with zipfile.ZipFile(artifact_path) as z:
                for filename in z.namelist():
                    if not os.path.isdir(filename):
                        # read the file
                        if filename != "warnings.txt":
                            continue
                        with z.open(filename) as fp:
                            parse_line(fp)
        except Exception:
            logger.warning(
                f"{artifact_path} is either an invalid zip file or something else wrong. This file is skipped.")

    return selected_warnings


def extract_warnings(artifact_dir, targets):
    """Extract warnings from all artifact files in `artifact_dir`."""
    selected_warnings = set()
    paths = [os.path.join(artifact_dir, p) for p in os.listdir(artifact_dir) if (p.endswith(".zip") or from_gh)]
    for p in paths:
        selected_warnings.update(extract_warnings_from_single_artifact(p, targets))

    return selected_warnings
if __name__ == "__main__":
    def list_str(values):
        return values.split(",")
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument('--workflow_run_id', type=str, required=True, help='A GitHub Actions workflow run id.')
parser.add_argument(
'--output_dir',
type=str,
required=True,
help='Where to store the downloaded artifacts and other result files.',
)
parser.add_argument('--token', default=None, type=str, help='A token that has actions:read permission.')
# optional parameters
parser.add_argument(
'--targets',
default='DeprecationWarning,UserWarning,FutureWarning',
type=list_str,
help='Comma-separated list of target warning(s) which we want to extract.',
)
parser.add_argument(
'--from_gh',
action='store_true',
help='If running from a GitHub action workflow and collecting warnings from its artifacts.',
)
    args = parser.parse_args()
    from_gh = args.from_gh
if from_gh:
# The artifacts have to be downloaded using `actions/download-artifact@v3`
pass
else:
os.makedirs(args.output_dir, exist_ok=True)
# get download links
        artifacts = get_artifacts_links(args.workflow_run_id, token=args.token)
with open(os.path.join(args.output_dir, 'artifacts.json'), 'w', encoding='UTF-8') as fp:
json.dump(artifacts, fp, ensure_ascii=False, indent=4)
# download artifacts
for idx, (name, url) in enumerate(artifacts.items()):
print(name)
print(url)
print('=' * 80)
download_artifact(name, url, args.output_dir, args.token)
# Be gentle to GitHub
time.sleep(1)
# extract warnings from artifacts
    selected_warnings = extract_warnings(args.output_dir, args.targets)
    selected_warnings = sorted(selected_warnings)
    with open(os.path.join(args.output_dir, "selected_warnings.json"), "w", encoding="UTF-8") as fp:
        json.dump(selected_warnings, fp, ensure_ascii=False, indent=4)
| 356 |
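# The warning-extraction script above keys on pytest's warnings-summary layout:
# a non-indented line (the test id) closes the previous warning block. A tiny
# self-contained demo of that parsing rule (the sample lines are made up):
sample_lines = [
    "tests/test_a.py::test_old\n",
    "  /repo/src/a.py:10: DeprecationWarning: old_api is deprecated\n",
    "tests/test_b.py::test_new\n",
    "  /repo/src/b.py:22: UserWarning: something odd\n",
    "-- end --\n",
]

targets = ["DeprecationWarning"]
selected, buffer = set(), []
for line in sample_lines:
    if not line.startswith(" "):  # non-indented line closes the previous warning
        if buffer:
            warning = "\n".join(buffer)
            if any(f": {x}: " in warning for x in targets):
                selected.add(warning)
            buffer.clear()
    else:
        buffer.append(line.strip())

print(selected)  # only the DeprecationWarning survives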
from __future__ import annotations
import unittest
from transformers import DebertaVaConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFDebertaVaForMaskedLM,
TFDebertaVaForQuestionAnswering,
TFDebertaVaForSequenceClassification,
TFDebertaVaForTokenClassification,
TFDebertaVaModel,
)
class TFDebertaVaModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        relative_attention=False,
        position_biased_input=True,
        pos_att_type="None",
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.relative_attention = relative_attention
        self.position_biased_input = position_biased_input
        self.pos_att_type = pos_att_type
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)

        config = DebertaVaConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, relative_attention=self.relative_attention, position_biased_input=self.position_biased_input, initializer_range=self.initializer_range, return_dict=True, )

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def create_and_check_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFDebertaVaModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        inputs = [input_ids, input_mask]
        result = model(inputs)
        result = model(input_ids)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFDebertaVaForMaskedLM(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_sequence_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = TFDebertaVaForSequenceClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = TFDebertaVaForTokenClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_question_answering(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFDebertaVaForQuestionAnswering(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict


@require_tf
class TFDebertaVaModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFDebertaVaModel,
            TFDebertaVaForMaskedLM,
            TFDebertaVaForQuestionAnswering,
            TFDebertaVaForSequenceClassification,
            TFDebertaVaForTokenClassification,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFDebertaVaModel,
            "fill-mask": TFDebertaVaForMaskedLM,
            "question-answering": TFDebertaVaForQuestionAnswering,
            "text-classification": TFDebertaVaForSequenceClassification,
            "token-classification": TFDebertaVaForTokenClassification,
            "zero-shot": TFDebertaVaForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFDebertaVaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DebertaVaConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        model = TFDebertaVaModel.from_pretrained("kamalkraj/deberta-v2-xlarge")
        self.assertIsNotNone(model)


@require_tf
class TFDebertaVaModelIntegrationTest(unittest.TestCase):
    @unittest.skip(reason="Model not available yet")
    def test_inference_masked_lm(self):
        pass

    @slow
    def test_inference_no_head(self):
        model = TFDebertaVaModel.from_pretrained("kamalkraj/deberta-v2-xlarge")
        input_ids = tf.constant([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]])
        attention_mask = tf.constant([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        output = model(input_ids, attention_mask=attention_mask)[0]

        expected_slice = tf.constant(
            [[[0.2356, 0.1948, 0.0369], [-0.1063, 0.3586, -0.5152], [-0.6399, -0.0259, -0.2525]]])
        tf.debugging.assert_near(output[:, 1:4, 1:4], expected_slice, atol=1e-4)
| 151 | 0 |
"""simple docstring"""
from __future__ import annotations
def lowercase ( A_ )-> None:
'''simple docstring'''
create_state_space_tree(A_ , [] , 0 , [0 for i in range(len(A_ ) )] )
def lowercase ( A_ , A_ , A_ , A_ , )-> None:
'''simple docstring'''
if index == len(A_ ):
print(A_ )
return
for i in range(len(A_ ) ):
if not index_used[i]:
current_sequence.append(sequence[i] )
a : Dict = True
create_state_space_tree(A_ , A_ , index + 1 , A_ )
current_sequence.pop()
a : int = False
__lowercase = [3, 1, 2, 4]
generate_all_permutations(sequence)
__lowercase = ["A", "B", "C"]
generate_all_permutations(sequence_a)
| 40 |
import PIL.Image
import PIL.ImageOps
from packaging import version
from PIL import Image
if version.parse(version.parse(PIL.__version__).base_version) >= version.parse('9.1.0'):
    PIL_INTERPOLATION = {
'linear': PIL.Image.Resampling.BILINEAR,
'bilinear': PIL.Image.Resampling.BILINEAR,
'bicubic': PIL.Image.Resampling.BICUBIC,
'lanczos': PIL.Image.Resampling.LANCZOS,
'nearest': PIL.Image.Resampling.NEAREST,
}
else:
    PIL_INTERPOLATION = {
'linear': PIL.Image.LINEAR,
'bilinear': PIL.Image.BILINEAR,
'bicubic': PIL.Image.BICUBIC,
'lanczos': PIL.Image.LANCZOS,
'nearest': PIL.Image.NEAREST,
}
def pt_to_pil(images):
    """Convert a torch image batch in [-1, 1] to a list of PIL images."""
    images = (images / 2 + 0.5).clamp(0, 1)
    images = images.cpu().permute(0, 2, 3, 1).float().numpy()
    images = numpy_to_pil(images)
    return images


def numpy_to_pil(images):
    """Convert a numpy image or a batch of images to a list of PIL images."""
    if images.ndim == 3:
        images = images[None, ...]
    images = (images * 255).round().astype("uint8")
    if images.shape[-1] == 1:
        # special case for grayscale (single channel) images
        pil_images = [Image.fromarray(image.squeeze(), mode="L") for image in images]
    else:
        pil_images = [Image.fromarray(image) for image in images]

    return pil_images
| 232 | 0 |
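# A quick usage sketch for the tensor-to-PIL helpers above (the tensor is
# random data in the convention of [-1, 1] images; the output path is a placeholder):
import torch

images = torch.rand(2, 3, 64, 64) * 2 - 1  # fake batch in [-1, 1]
pil_images = pt_to_pil(images)
pil_images[0].save("sample.png")           # placeholder output path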
import unittest
from typing import Any, List

from transformers import AlbertTokenizer, AlbertTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/spiece.model")


@require_sentencepiece
@require_tokenizers
class AlbertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = AlbertTokenizer
    rust_tokenizer_class = AlbertTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    test_sentencepiece_ignore_case = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = AlbertTokenizer(SAMPLE_VOCAB)
        tokenizer.save_pretrained(self.tmpdirname)

    def get_input_output_texts(self, tokenizer):
        input_text = "this is a test"
        output_text = "this is a test"
        return input_text, output_text

    def test_convert_token_and_id(self):
        token = "<pad>"
        token_id = 0

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<pad>")
        self.assertEqual(vocab_keys[1], "<unk>")
        self.assertEqual(vocab_keys[-1], "▁eloquent")
        self.assertEqual(len(vocab_keys), 30_000)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 30_000)

    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

    def test_full_tokenizer(self):
        tokenizer = AlbertTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁this", "▁is", "▁a", "▁test"])

        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [48, 25, 21, 12_89])

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens, ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "é", "."])
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(ids, [31, 23, 3_86, 19, 5_61, 30_50, 15, 17, 48, 25, 82_56, 18, 1, 9])

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens, ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "."], )

    def test_sequence_builders(self):
        tokenizer = AlbertTokenizer(SAMPLE_VOCAB)

        text = tokenizer.encode("sequence builders")
        text_a = tokenizer.encode("multi-sequence build")

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_a)

        assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
        assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [
            tokenizer.sep_token_id
        ]

    @slow
    def test_tokenizer_integration(self):
# fmt: off
__UpperCAmelCase : List[Any] = {'''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''input_ids''': [[2, 2_19_70, 13, 5, 60_92, 1_67, 28, 71_03, 21_53, 6_73, 8, 70_28, 1_20_51, 18, 17, 71_03, 21_53, 6_73, 8, 35_15, 1_86_84, 8, 44_61, 6, 19_27, 2_97, 8, 1_20_60, 26_07, 18, 13, 5, 44_61, 15, 1_05_38, 38, 8, 1_35, 15, 8_22, 58, 15, 9_93, 1_03_63, 15, 14_60, 80_05, 44_61, 15, 9_93, 2_55, 23_28, 9, 9, 9, 6, 26, 11_12, 8_16, 32_60, 13, 5, 1_03, 23_77, 6, 17, 11_12, 8_16, 27_82, 13, 5, 1_03, 1_06_41, 6, 29, 84, 25_12, 24_30, 7_82, 1_86_84, 27_61, 19, 8_08, 24_30, 25_56, 17, 8_55, 14_80, 94_77, 40_91, 1_28, 1_17_12, 15, 71_03, 21_53, 6_73, 17, 2_48_83, 99_90, 9, 3], [2, 1_15_02, 25, 10_06, 20, 7_82, 8, 1_18_09, 8_55, 17_32, 1_93_93, 1_86_67, 37, 3_67, 2_10_18, 69, 18_54, 34, 1_18_60, 1_91_24, 27, 1_56, 2_25, 17, 1_93, 41_41, 19, 65, 91_24, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [2, 14, 22_31, 8_86, 23_85, 1_76_59, 84, 14, 1_67_92, 19_52, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''token_type_ids''': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
            expected_encoding=__UpperCAmelCase, model_name="albert-base-v2", revision="6b6560eaf5ff2e250b00c50f380c5389a9c2d82e", )
| 361 |
import json
import os
from functools import lru_cache
from typing import TYPE_CHECKING, List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "merges_file": "merges.txt",
    "tokenizer_config_file": "tokenizer_config.json",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json"},
    "merges_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt"},
    "tokenizer_config_file": {
        "facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json"
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"facebook/blenderbot-3B": 128}


@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def bytes_to_unicode():
    bs = (
        list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))


def get_pairs(word):
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs


class BlenderbotTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(self, vocab_file, merges_file, errors="replace", bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", add_prefix_space=False, **kwargs, ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token

        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            errors=errors, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, cls_token=cls_token, pad_token=pad_token, mask_token=mask_token, add_prefix_space=add_prefix_space, **kwargs, )

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            bpe_merges = merges_handle.read().split("\n")[1:-1]
        bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
        self.cache = {}
        self.add_prefix_space = add_prefix_space

        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(R"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+")

    @property
    # Copied from transformers.models.roberta.tokenization_roberta.RobertaTokenizer.vocab_size with Roberta->Blenderbot, RoBERTa->Blenderbot
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = " ".join(word)
        self.cache[token] = word
        return word

    def _tokenize(self, text):
        bpe_tokens = []
        for token in re.findall(self.pat, text):
            token = "".join(
                self.byte_encoder[b] for b in token.encode("utf-8")
            )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" "))
        return bpe_tokens

    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index)

    def convert_tokens_to_string(self, tokens):
        text = "".join(tokens)
        text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors)
        return text

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"])

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!")
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1

        return vocab_file, merge_file

    def get_special_tokens_mask(self, token_ids_a: List[int], token_ids_a_pair: Optional[List[int]] = None, already_has_special_tokens: bool = False) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_a, token_ids_1=token_ids_a_pair, already_has_special_tokens=True)

        if token_ids_a_pair is None:
            return [1] + ([0] * len(token_ids_a)) + [1]
        return [1] + ([0] * len(token_ids_a)) + [1, 1] + ([0] * len(token_ids_a_pair)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_a: List[int], token_ids_a_pair: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_a_pair is None:
            return len(cls + token_ids_a + sep) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_a_pair + sep) * [0]

    def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
        add_prefix_space = kwargs.pop("add_prefix_space", self.add_prefix_space)
        if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
            text = " " + text
        return (text, kwargs)

    def build_inputs_with_special_tokens(self, token_ids_a: List[int], token_ids_a_pair: Optional[List[int]] = None) -> List[int]:
        return token_ids_a + [self.eos_token_id]

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        inputs = []
        for is_user, text in conversation.iter_texts():
            if is_user:
                # We need to space prefix as it's being done within blenderbot
                inputs.append(" " + text)
            else:
                # Generated responses should contain them already.
                inputs.append(text)

        full_string = " ".join(inputs)
        input_ids = self.encode(full_string)
        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
            logger.warning(f"Trimmed input from conversation as it was longer than {self.model_max_length} tokens.")
        return input_ids
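

# A small demonstration (not part of the tokenizer file) of the two module-level
# helpers above. get_pairs() enumerates adjacent symbol pairs; bpe() repeatedly
# merges the pair with the lowest rank in self.bpe_ranks until none remain.
if __name__ == "__main__":
    word = tuple("hugs")
    print(get_pairs(word))  # {('h', 'u'), ('u', 'g'), ('g', 's')}

    # bytes_to_unicode() maps every byte to a printable unicode character so the
    # BPE vocabulary never has to contain raw whitespace/control bytes:
    byte_encoder = bytes_to_unicode()
    print("".join(byte_encoder[b] for b in " hi".encode("utf-8")))  # 'Ġhi'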
| 342 | 0 |
def solution(limit: int = 1_0_0_0_0_0_0) -> int:
    """Count reduced proper fractions with denominator <= `limit`
    (the sum of Euler's totient over 2..limit)."""
    primes = set(range(3, limit, 2))
    primes.add(2)
    for p in range(3, limit, 2):
        if p not in primes:
            continue
        primes.difference_update(set(range(p * p, limit, p)))

    phi = [float(n) for n in range(limit + 1)]

    for p in primes:
        for n in range(p, limit + 1, p):
            phi[n] *= 1 - 1 / p

    return int(sum(phi[2:]))
if __name__ == "__main__":
print(f'''{solution() = }''')
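# Sanity check from the Project Euler 72 statement: for a limit of 8 there are
# exactly 21 reduced proper fractions, so solution(8) == 21
# (sum of phi(n) for n = 2..8 is 1 + 2 + 2 + 4 + 2 + 6 + 4 = 21).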
| 15 |
def max_product_subarray(numbers: list) -> int:
    """Return the maximum product over all contiguous subarrays of `numbers`."""
    if not numbers:
        return 0

    if not isinstance(numbers, (list, tuple)) or not all(
        isinstance(number, int) for number in numbers
    ):
        raise ValueError("numbers must be an iterable of integers")

    max_till_now = min_till_now = max_prod = numbers[0]

    for i in range(1, len(numbers)):
        # update the maximum and minimum subarray products
        number = numbers[i]
        if number < 0:
            max_till_now, min_till_now = min_till_now, max_till_now
        max_till_now = max(number, max_till_now * number)
        min_till_now = min(number, min_till_now * number)

        # update the maximum product found till now
        max_prod = max(max_prod, max_till_now)

    return max_prod
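# Minimal checks for the helper above (test vectors chosen here, not taken
# from any existing suite):
if __name__ == "__main__":
    assert max_product_subarray([2, 3, -2, 4]) == 6  # best subarray is [2, 3]
    assert max_product_subarray([-2, 0, -1]) == 0    # zero splits the array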
| 278 | 0 |
import unittest
from transformers import (
MODEL_FOR_OBJECT_DETECTION_MAPPING,
AutoFeatureExtractor,
AutoModelForObjectDetection,
ObjectDetectionPipeline,
is_vision_available,
pipeline,
)
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_pytesseract,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
    class Image:
        """Stand-in so references below still resolve when PIL is unavailable."""

        @staticmethod
        def open(*args, **kwargs):
            pass
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class ObjectDetectionPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_OBJECT_DETECTION_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        object_detector = ObjectDetectionPipeline(model=model, image_processor=processor)
        return object_detector, ["./tests/fixtures/tests_samples/COCO/000000039769.png"]
    def run_pipeline_test(self, object_detector, examples):
        outputs = object_detector("./tests/fixtures/tests_samples/COCO/000000039769.png", threshold=0.0)

        self.assertGreater(len(outputs), 0)
        for detected_object in outputs:
            self.assertEqual(
                detected_object,
                {
                    "score": ANY(float),
                    "label": ANY(str),
                    "box": {"xmin": ANY(int), "ymin": ANY(int), "xmax": ANY(int), "ymax": ANY(int)},
                },
            )

        import datasets

        dataset = datasets.load_dataset("hf-internal-testing/fixtures_image_utils", "image", split="test")

        batch = [
            Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png"),
            "http://images.cocodataset.org/val2017/000000039769.jpg",
            # RGBA
            dataset[0]["file"],
            # LA
            dataset[1]["file"],
            # L
            dataset[2]["file"],
        ]
        batch_outputs = object_detector(batch, threshold=0.0)

        self.assertEqual(len(batch), len(batch_outputs))
        for outputs in batch_outputs:
            self.assertGreater(len(outputs), 0)
            for detected_object in outputs:
                self.assertEqual(
                    detected_object,
                    {
                        "score": ANY(float),
                        "label": ANY(str),
                        "box": {"xmin": ANY(int), "ymin": ANY(int), "xmax": ANY(int), "ymax": ANY(int)},
                    },
                )
@require_tf
@unittest.skip("""Object detection not implemented in TF""" )
    def test_small_model_tf(self):
        pass
@require_torch
    def test_small_model_pt(self):
        model_id = "hf-internal-testing/tiny-detr-mobilenetsv3"

        model = AutoModelForObjectDetection.from_pretrained(model_id)
        feature_extractor = AutoFeatureExtractor.from_pretrained(model_id)
        object_detector = ObjectDetectionPipeline(model=model, feature_extractor=feature_extractor)

        outputs = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg", threshold=0.0)

        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
                {"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
            ],
        )

        outputs = object_detector(
            [
                "http://images.cocodataset.org/val2017/000000039769.jpg",
                "http://images.cocodataset.org/val2017/000000039769.jpg",
            ],
            threshold=0.0,
        )

        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
                    {"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
                    {"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
                ],
                [
                    {"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
                    {"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
                ],
            ],
        )
@require_torch
@slow
    def test_large_model_pt(self):
        model_id = "facebook/detr-resnet-50"

        model = AutoModelForObjectDetection.from_pretrained(model_id)
        feature_extractor = AutoFeatureExtractor.from_pretrained(model_id)
        object_detector = ObjectDetectionPipeline(model=model, feature_extractor=feature_extractor)

        outputs = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg")
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
                {"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
                {"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
                {"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
                {"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
            ],
        )

        outputs = object_detector(
            [
                "http://images.cocodataset.org/val2017/000000039769.jpg",
                "http://images.cocodataset.org/val2017/000000039769.jpg",
            ]
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
                    {"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
                    {"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
                    {"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
                    {"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
                    {"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
                ],
                [
                    {"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
                    {"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
                    {"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
                    {"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
                    {"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
                ],
            ],
        )
@require_torch
@slow
    def test_integration_torch_object_detection(self):
        model_id = "facebook/detr-resnet-50"

        object_detector = pipeline("object-detection", model=model_id)

        outputs = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg")
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
                {"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
                {"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
                {"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
                {"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
            ],
        )

        outputs = object_detector(
            [
                "http://images.cocodataset.org/val2017/000000039769.jpg",
                "http://images.cocodataset.org/val2017/000000039769.jpg",
            ]
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
                    {"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
                    {"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
                    {"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
                    {"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
                    {"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
                ],
                [
                    {"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
                    {"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
                    {"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
                    {"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
                    {"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
                ],
            ],
        )
@require_torch
@slow
    def test_threshold(self):
        threshold = 0.9985
        model_id = "facebook/detr-resnet-50"

        object_detector = pipeline("object-detection", model=model_id)

        outputs = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg", threshold=threshold)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
                {"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
            ],
        )
@require_torch
@require_pytesseract
@slow
    def test_layoutlm(self):
        model_id = "Narsil/layoutlmv3-finetuned-funsd"
        threshold = 0.9993

        object_detector = pipeline("object-detection", model=model_id, threshold=threshold)
        outputs = object_detector(
            "https://huggingface.co/spaces/impira/docquery/resolve/2359223c1837a7587402bda0f2643382a6eefeab/invoice.png"
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9993, "label": "I-ANSWER", "box": {"xmin": 294, "ymin": 254, "xmax": 343, "ymax": 264}},
                {"score": 0.9993, "label": "I-ANSWER", "box": {"xmin": 294, "ymin": 254, "xmax": 343, "ymax": 264}},
            ],
        )
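# Outside the test harness, the same checkpoint can be driven directly through
# the high-level API (mirrors the slow tests above):
# detector = pipeline("object-detection", model="facebook/detr-resnet-50")
# detector("http://images.cocodataset.org/val2017/000000039769.jpg", threshold=0.9)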
| 371 |
import warnings
from ...utils import logging
from .image_processing_glpn import GLPNImageProcessor
logger = logging.get_logger(__name__)


class GLPNFeatureExtractor(GLPNImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class GLPNFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use GLPNImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
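# Migration sketch: prefer the processor class going forward (checkpoint name
# is an example of a public GLPN checkpoint, not taken from this file):
# from transformers import GLPNImageProcessor
# image_processor = GLPNImageProcessor.from_pretrained("vinvino02/glpn-kitti")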
| 139 | 0 |
"""simple docstring"""
from typing import Optional, Union
import torch
from torch import nn
from ...configuration_utils import ConfigMixin, register_to_config
from ...models.modeling_utils import ModelMixin
class StableUnCLIPImageNormalizer(ModelMixin, ConfigMixin):
    """Holds the mean and standard deviation of CLIP image embeddings, used to
    scale embeddings to unit variance (and back) during image noising."""

    @register_to_config
    def __init__(self, embedding_dim: int = 768):
        super().__init__()

        self.mean = nn.Parameter(torch.zeros(1, embedding_dim))
        self.std = nn.Parameter(torch.ones(1, embedding_dim))

    def to(
        self,
        torch_device: Optional[Union[str, torch.device]] = None,
        torch_dtype: Optional[torch.dtype] = None,
    ):
        self.mean = nn.Parameter(self.mean.to(torch_device).to(torch_dtype))
        self.std = nn.Parameter(self.std.to(torch_device).to(torch_dtype))
        return self

    def scale(self, embeds):
        embeds = (embeds - self.mean) * 1.0 / self.std
        return embeds

    def unscale(self, embeds):
        embeds = (embeds * self.std) + self.mean
        return embeds
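# Usage sketch for the normalizer above (shape assumption: a batch of CLIP
# image embeddings of width `embedding_dim`):
# normalizer = StableUnCLIPImageNormalizer(embedding_dim=768)
# scaled = normalizer.scale(torch.randn(4, 768))
# restored = normalizer.unscale(scaled)  # recovers the original embeddings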
| 335 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'facebook/xmod-base': 'https://huggingface.co/facebook/xmod-base/resolve/main/config.json',
'facebook/xmod-large-prenorm': 'https://huggingface.co/facebook/xmod-large-prenorm/resolve/main/config.json',
'facebook/xmod-base-13-125k': 'https://huggingface.co/facebook/xmod-base-13-125k/resolve/main/config.json',
'facebook/xmod-base-30-125k': 'https://huggingface.co/facebook/xmod-base-30-125k/resolve/main/config.json',
'facebook/xmod-base-30-195k': 'https://huggingface.co/facebook/xmod-base-30-195k/resolve/main/config.json',
'facebook/xmod-base-60-125k': 'https://huggingface.co/facebook/xmod-base-60-125k/resolve/main/config.json',
'facebook/xmod-base-60-265k': 'https://huggingface.co/facebook/xmod-base-60-265k/resolve/main/config.json',
'facebook/xmod-base-75-125k': 'https://huggingface.co/facebook/xmod-base-75-125k/resolve/main/config.json',
'facebook/xmod-base-75-269k': 'https://huggingface.co/facebook/xmod-base-75-269k/resolve/main/config.json',
}
class XmodConfig(PretrainedConfig):
    model_type = "xmod"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        pre_norm=False,
        adapter_reduction_factor=2,
        adapter_layer_norm=False,
        adapter_reuse_layer_norm=True,
        ln_before_adapter=True,
        languages=("en_XX",),
        default_language=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
        self.pre_norm = pre_norm
        self.adapter_reduction_factor = adapter_reduction_factor
        self.adapter_layer_norm = adapter_layer_norm
        self.adapter_reuse_layer_norm = adapter_reuse_layer_norm
        self.ln_before_adapter = ln_before_adapter
        self.languages = list(languages)
        self.default_language = default_language
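# Instantiation sketch (the defaults above mirror facebook/xmod-base):
# config = XmodConfig(languages=("en_XX", "de_DE"), default_language="en_XX")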
class XmodOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
| 286 | 0 |
"""simple docstring"""
import collections
import json
import os
import re
from typing import TYPE_CHECKING, List, Optional, Tuple
import numpy as np
from ...tokenization_utils_fast import PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "emoji_file": "emoji.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "abeja/gpt-neox-japanese-2.7b": "https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/vocab.txt",
    },
    "emoji_file": {
        "abeja/gpt-neox-japanese-2.7b": "https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/emoji.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "abeja/gpt-neox-japanese-2.7b": 2048,
}
def load_vocab_and_emoji(vocab_file, emoji_file):
    """Loads a vocabulary file and an emoji file into dictionaries."""
    with open(emoji_file, "r", encoding="utf-8") as f:
        emoji = json.loads(f.read())

    vocab = collections.OrderedDict()
    raw_vocab = collections.OrderedDict()
    ids_to_tokens = collections.OrderedDict()
    with open(vocab_file, "r", encoding="utf-8") as f:
        token = f.readlines()
    token = [[t.rstrip("\n")] if (t == "," or "," not in t) else t.rstrip("\n").split(",") for t in token]
    for idx, b in enumerate(token):
        ids_to_tokens[idx] = b
        raw_vocab[",".join(b)] = idx
        for wd in b:
            vocab[wd] = idx

    return vocab, raw_vocab, ids_to_tokens, emoji
class GPTNeoXJapaneseTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        emoji_file,
        unk_token="<|endoftext|>",
        pad_token="<|endoftext|>",
        bos_token="<|startoftext|>",
        eos_token="<|endoftext|>",
        do_clean_text=False,
        **kwargs,
    ):
        super().__init__(
            unk_token=unk_token,
            pad_token=pad_token,
            bos_token=bos_token,
            eos_token=eos_token,
            do_clean_text=do_clean_text,
            **kwargs,
        )
        if not os.path.isfile(vocab_file):
            raise ValueError(
                f"Can't find a vocabulary file at path '{vocab_file}'. To load the vocabulary from a Google pretrained"
                " model use `tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`"
            )
        if not os.path.isfile(emoji_file):
            raise ValueError(
                f"Can't find an emoji file at path '{emoji_file}'. To load the emoji information from a Google"
                " pretrained model use `tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`"
            )
        self.do_clean_text = do_clean_text
        self.vocab, self.raw_vocab, self.ids_to_tokens, self.emoji = load_vocab_and_emoji(vocab_file, emoji_file)
        self.subword_tokenizer = SubWordJapaneseTokenizer(
            vocab=self.vocab, ids_to_tokens=self.ids_to_tokens, emoji=self.emoji
        )
    @property
    def vocab_size(self):
        # self.vocab contains support for character fluctuation unique to Japanese, and has a large number of vocab
        return len(self.raw_vocab)

    def get_vocab(self):
        return dict(self.raw_vocab, **self.added_tokens_encoder)

    def _tokenize(self, text):
        return self.subword_tokenizer.tokenize(text, clean=self.do_clean_text)

    def _convert_token_to_id(self, token):
        return self.vocab.get(token, self.vocab.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.subword_tokenizer.convert_id_to_token(index)

    def convert_tokens_to_string(self, tokens):
        out_string = "".join(tokens).strip()
        return out_string

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text, add_special_tokens=False) + [self.eos_token_id])
        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        index = 0
        if os.path.isdir(save_directory):
            vocab_file = os.path.join(
                save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
            )
            emoji_file = os.path.join(
                save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["emoji_file"]
            )
        else:
            vocab_file = (
                (filename_prefix + "-" if filename_prefix else "") + save_directory + VOCAB_FILES_NAMES["vocab_file"]
            )
            emoji_file = (
                (filename_prefix + "-" if filename_prefix else "") + save_directory + VOCAB_FILES_NAMES["emoji_file"]
            )
        with open(vocab_file, "w", encoding="utf-8") as writer:
            for token_index, token in self.ids_to_tokens.items():
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."
                        " Please check that the vocabulary is not corrupted!"
                    )
                    index = token_index
                writer.write(",".join(token) + "\n")
                index += 1
        with open(emoji_file, "w", encoding="utf-8") as writer:
            json.dump(self.emoji, writer)
        return vocab_file, emoji_file
class SubWordJapaneseTokenizer(object):
    def __init__(self, vocab, ids_to_tokens, emoji):
        self.vocab = vocab  # same as swe
        self.ids_to_tokens = ids_to_tokens  # same as bpe
        self.emoji = emoji
        self.maxlen = np.max([len(w) for w in self.vocab.keys()])
        self.content_repatter1 = re.compile(r"(https?|ftp)(:\/\/[-_\.!~*\'()a-zA-Z0-9;\/?:\@&=\+$,%#]+)")
        self.content_repatter2 = re.compile(r"[A-Za-z0-9\._+]*@[\-_0-9A-Za-z]+(\.[A-Za-z]+)*")
        self.content_repatter3 = re.compile(r"[\(]{0,1}[0-9]{2,4}[\)\-\(]{0,1}[0-9]{2,4}[\)\-]{0,1}[0-9]{3,4}")
        self.content_repatter4 = re.compile(
            r"([12]\d{3}[/\-年])*(0?[1-9]|1[0-2])[/\-月]((0?[1-9]|[12][0-9]|3[01])日?)*(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*"
        )
        self.content_repatter5 = re.compile(
            r"(明治|大正|昭和|平成|令和|㍾|㍽|㍼|㍻|\u32ff)\d{1,2}年(0?[1-9]|1[0-2])月(0?[1-9]|[12][0-9]|3[01])日(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*"
        )
        self.content_repatter6 = re.compile(
            r"((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*億)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*万)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*千)*(0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*(千円|万円|千万円|円|千ドル|万ドル|千万ドル|ドル|千ユーロ|万ユーロ|千万ユーロ|ユーロ)+(\(税込\)|\(税抜\)|\+tax)*"
        )
        keisen = "─━│┃┄┅┆┇┈┉┊┋┌┍┎┏┐┑┒┓└┕┖┗┘┙┚┛├┝┞┟┠┡┢┣┤┥┦┧┨┩┪┫┬┭┮┯┰┱┲┳┴┵┶┷┸┹┺┻┼┽┾┿╀╁╂╃╄╅╆╇╈╉╊╋╌╍╎╏═║╒╓╔╕╖╗╘╙╚╛╜╝╞╟╠╡╢╣╤╥╦╧╨╩╪╫╬╭╮╯╰╱╲╳╴╵╶╷╸╹╺╻╼╽╾╿"
        blocks = "▀▁▂▃▄▅▆▇█▉▊▋▌▍▎▏▐░▒▓▔▕▖▗▘▙▚▛▜▝▞▟"
        self.content_trans1 = str.maketrans({k: "<BLOCK>" for k in keisen + blocks})

    def __len__(self):
        return len(self.ids_to_tokens)

    def clean_text(self, content):
        content = self.content_repatter1.sub("<URL>", content)
        content = self.content_repatter2.sub("<EMAIL>", content)
        content = self.content_repatter3.sub("<TEL>", content)
        content = self.content_repatter4.sub("<DATE>", content)
        content = self.content_repatter5.sub("<DATE>", content)
        content = self.content_repatter6.sub("<PRICE>", content)
        content = content.translate(self.content_trans1)
        while "<BLOCK><BLOCK>" in content:
            content = content.replace("<BLOCK><BLOCK>", "<BLOCK>")
        return content
    def tokenize(self, text, clean=False):
        text = text.replace(" ", "<SP>")
        text = text.replace("\u3000", "<SP>")  # full-width space
        text = text.replace("\r\n", "<BR>")
        text = text.replace("\n", "<BR>")
        text = text.replace("\r", "<BR>")
        text = text.replace("\t", "<TAB>")
        text = text.replace("—", "ー")
        text = text.replace("−", "ー")
        for k, v in self.emoji["emoji"].items():
            if k in text:
                text = text.replace(k, v)
        if clean:
            text = self.clean_text(text)

        def check_simbol(x):
            e = x.encode()
            if len(x) == 1 and len(e) == 2:
                c = (int(e[0]) << 8) + int(e[1])
                if (
                    (c >= 0xC2A1 and c <= 0xC2BF)
                    or (c >= 0xC780 and c <= 0xC783)
                    or (c >= 0xCAB9 and c <= 0xCBBF)
                    or (c >= 0xCC80 and c <= 0xCDA2)
                ):
                    return True
            return False

        def checku2e(x):
            e = x.encode()
            if len(x) == 1 and len(e) == 3:
                c = (int(e[0]) << 16) + (int(e[1]) << 8) + int(e[2])
                if c >= 0xE28080 and c <= 0xE2B07F:
                    return True
            return False

        pos = 0
        result = []
        while pos < len(text):
            end = min(len(text), pos + self.maxlen + 1) if text[pos] == "<" else pos + 3
            candidates = []  # (token_id, token, pos)
            for e in range(end, pos, -1):
                wd = text[pos:e]
                if wd in self.vocab:
                    if wd[0] == "<" and len(wd) > 2:
                        candidates = [(self.vocab[wd], wd, e)]
                        break
                    else:
                        candidates.append((self.vocab[wd], wd, e))
            if len(candidates) > 0:
                # the smallest token_id is adopted
                _, wd, e = sorted(candidates, key=lambda x: x[0])[0]
                result.append(wd)
                pos = e
            else:
                end = pos + 1
                wd = text[pos:end]
                if check_simbol(wd):
                    result.append("<KIGOU>")
                elif checku2e(wd):
                    result.append("<U2000U2BFF>")
                else:
                    for i in wd.encode("utf-8"):
                        result.append("<|byte%d|>" % i)
                pos = end
        return result
    def convert_id_to_token(self, index, breakline="\n"):
        words = []
        byte_tokens = []
        word = self.ids_to_tokens[index][0]
        if word[:6] == "<|byte" and word[-2:] == "|>":
            byte_tokens.append(int(word[6:-2]))
        else:
            if len(byte_tokens) > 0:
                words.append(bytearray(byte_tokens).decode("utf-8", errors="replace"))
                byte_tokens = []
            if word[:7] == "<|emoji" and word[-2:] == "|>":
                words.append(self.emoji["emoji_inv"][word])
            elif word == "<SP>":
                words.append(" ")
            elif word == "<BR>":
                words.append(breakline)
            elif word == "<TAB>":
                words.append("\t")
            elif word == "<BLOCK>":
                words.append("▀")
            elif word == "<KIGOU>":
                words.append("ǀ")
            elif word == "<U2000U2BFF>":
                words.append("‖")
            else:
                words.append(word)
        if len(byte_tokens) > 0:
            words.append(bytearray(byte_tokens).decode("utf-8", errors="replace"))
        text = "".join(words)
        return text
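# Round-trip sketch for the tokenizer above (checkpoint name taken from the
# pretrained map near the top of this file):
# tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained("abeja/gpt-neox-japanese-2.7b")
# ids = tokenizer("こんにちは、世界")["input_ids"]
# text = tokenizer.decode(ids)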
| 209 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
    "configuration_speech_to_text": ["SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP", "Speech2TextConfig"],
    "processing_speech_to_text": ["Speech2TextProcessor"],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_speech_to_text"] = ["Speech2TextTokenizer"]
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_speech_to_text"] = ["Speech2TextFeatureExtractor"]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_speech_to_text"] = [
        "TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFSpeech2TextForConditionalGeneration",
        "TFSpeech2TextModel",
        "TFSpeech2TextPreTrainedModel",
    ]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_speech_to_text"] = [
        "SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Speech2TextForConditionalGeneration",
        "Speech2TextModel",
        "Speech2TextPreTrainedModel",
    ]
if TYPE_CHECKING:
    from .configuration_speech_to_text import SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, Speech2TextConfig
    from .processing_speech_to_text import Speech2TextProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .tokenization_speech_to_text import Speech2TextTokenizer
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .feature_extraction_speech_to_text import Speech2TextFeatureExtractor
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_tf_speech_to_text import (
            TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFSpeech2TextForConditionalGeneration,
            TFSpeech2TextModel,
            TFSpeech2TextPreTrainedModel,
        )
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_speech_to_text import (
            SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Speech2TextForConditionalGeneration,
            Speech2TextModel,
            Speech2TextPreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
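# Note on the registration above: replacing the module in sys.modules with a
# _LazyModule means a statement such as
#     from transformers.models.speech_to_text import Speech2TextModel
# only imports the torch-backed submodule on first attribute access.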
| 209 | 1 |
import unittest
from transformers import MobileBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
MobileBertModel,
)
class MobileBertModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=64,
        embedding_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.embedding_size = embedding_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return MobileBertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            embedding_size=self.embedding_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )
    def create_and_check_mobilebert_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MobileBertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_mobilebert_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MobileBertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_mobilebert_for_next_sequence_prediction(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MobileBertForNextSentencePrediction(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, 2))

    def create_and_check_mobilebert_for_pretraining(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MobileBertForPreTraining(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=token_labels,
            next_sentence_label=sequence_labels,
        )
        self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertEqual(result.seq_relationship_logits.shape, (self.batch_size, 2))

    def create_and_check_mobilebert_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MobileBertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_mobilebert_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MobileBertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_mobilebert_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MobileBertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_mobilebert_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = MobileBertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class MobileBertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            MobileBertModel,
            MobileBertForMaskedLM,
            MobileBertForMultipleChoice,
            MobileBertForNextSentencePrediction,
            MobileBertForPreTraining,
            MobileBertForQuestionAnswering,
            MobileBertForSequenceClassification,
            MobileBertForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": MobileBertModel,
            "fill-mask": MobileBertForMaskedLM,
            "question-answering": MobileBertForQuestionAnswering,
            "text-classification": MobileBertForSequenceClassification,
            "token-classification": MobileBertForTokenClassification,
            "zero-shot": MobileBertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = True

    # special case for ForPreTraining model
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device
                )
                inputs_dict["next_sentence_label"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict
    def setUp(self):
        self.model_tester = MobileBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MobileBertConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_mobilebert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_multiple_choice(*config_and_inputs)

    def test_for_next_sequence_prediction(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_pretraining(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_token_classification(*config_and_inputs)
def _long_tensor(tok_lst):
    return torch.tensor(
        tok_lst,
        dtype=torch.long,
        device=torch_device,
    )


TOLERANCE = 1e-3
@require_torch
@require_sentencepiece
@require_tokenizers
class MobileBertModelIntegrationTests(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = MobileBertModel.from_pretrained("google/mobilebert-uncased").to(torch_device)
        input_ids = _long_tensor([[101, 7110, 1005, 1056, 2023, 11333, 17413, 1029, 102]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size((1, 9, 512))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [
                [
                    [-2.4736526e07, 8.2691656e04, 1.6521838e05],
                    [-5.7541704e-01, 3.9056022e00, 4.4011507e00],
                    [2.6047359e00, 1.5677652e00, -1.7324188e-01],
                ]
            ],
            device=torch_device,
        )

        # MobileBERT results range from 10e0 to 10e8. Even a 0.0000001% difference with a value of 10e8 results in a
        # ~1 difference, it's therefore not a good idea to measure using addition.
        # Here, we instead divide the expected result with the result in order to obtain ~1. We then check that the
        # result is held between bounds: 1 - TOLERANCE < expected_result / result < 1 + TOLERANCE
        lower_bound = torch.all((expected_slice / output[..., :3, :3]) >= 1 - TOLERANCE)
        upper_bound = torch.all((expected_slice / output[..., :3, :3]) <= 1 + TOLERANCE)

        self.assertTrue(lower_bound and upper_bound)
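# The slow integration test above only runs when slow tests are enabled, e.g.:
#   RUN_SLOW=1 python -m pytest tests/models/mobilebert/test_modeling_mobilebert.py
# (the test-file path is an assumption about the local checkout layout).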
| 250 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "google/reformer-crime-and-punishment": (
            "https://huggingface.co/google/reformer-crime-and-punishment/resolve/main/spiece.model"
        )
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/reformer-crime-and-punishment": 524288,
}
class ReformerTokenizer(PreTrainedTokenizer):
    """SentencePiece-based tokenizer for Reformer."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        eos_token="</s>",
        unk_token="<unk>",
        additional_special_tokens=[],
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            eos_token=eos_token,
            unk_token=unk_token,
            additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)
    @property
    def vocab_size(self):
        return self.sp_model.get_piece_size()

    def get_vocab(self) -> Dict[str, int]:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)
    def _tokenize(self, text: str) -> List[str]:
        """Take as input a string and return a list of strings (tokens) for words/sub-words."""
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        return self.sp_model.piece_to_id(token)

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        if index < self.sp_model.get_piece_size():
            token = self.sp_model.IdToPiece(index)
        return token

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (string) to a single string."""
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
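# Usage sketch for the tokenizer above (checkpoint name from the pretrained
# map at the top of this snippet):
# tokenizer = ReformerTokenizer.from_pretrained("google/reformer-crime-and-punishment")
# tokens = tokenizer.tokenize("A few words for testing")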
| 151 | 0 |
import re
import jax.numpy as jnp
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.random import PRNGKey
from ..utils import logging
logger = logging.get_logger(__name__)
def rename_key(key):
    regex = r"\w+[.]\d+"
    pats = re.findall(regex, key)
    for pat in pats:
        key = key.replace(pat, "_".join(pat.split(".")))
    return key
def rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict):
    """Rename PT weight names to corresponding Flax weight names and reshape tensor if necessary."""
    # conv norm or layer norm
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
    if (
        any("norm" in str_ for str_ in pt_tuple_key)
        and (pt_tuple_key[-1] == "bias")
        and (pt_tuple_key[:-1] + ("bias",) not in random_flax_state_dict)
        and (pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict)
    ):
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
        return renamed_pt_tuple_key, pt_tensor
    elif pt_tuple_key[-1] in ["weight", "gamma"] and pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
        return renamed_pt_tuple_key, pt_tensor

    # embedding
    if pt_tuple_key[-1] == "weight" and pt_tuple_key[:-1] + ("embedding",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("embedding",)
        return renamed_pt_tuple_key, pt_tensor

    # conv layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4:
        pt_tensor = pt_tensor.transpose(2, 3, 1, 0)
        return renamed_pt_tuple_key, pt_tensor

    # linear layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight":
        pt_tensor = pt_tensor.T
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm weight
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("weight",)
    if pt_tuple_key[-1] == "gamma":
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm bias
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("bias",)
    if pt_tuple_key[-1] == "beta":
        return renamed_pt_tuple_key, pt_tensor

    return pt_tuple_key, pt_tensor
def convert_pytorch_state_dict_to_flax(pt_state_dict, flax_model, init_key=42):
    # Step 1: Convert pytorch tensors to numpy
    pt_state_dict = {k: v.numpy() for k, v in pt_state_dict.items()}

    # Step 2: Since the model is stateless, get random Flax params
    random_flax_params = flax_model.init_weights(PRNGKey(init_key))

    random_flax_state_dict = flatten_dict(random_flax_params)
    flax_state_dict = {}

    # Need to change some parameters name to match Flax names
    for pt_key, pt_tensor in pt_state_dict.items():
        renamed_pt_key = rename_key(pt_key)
        pt_tuple_key = tuple(renamed_pt_key.split("."))

        # Correctly rename weight parameters
        flax_key, flax_tensor = rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict)

        if flax_key in random_flax_state_dict:
            if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
                raise ValueError(
                    f"PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape "
                    f"{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}."
                )

        # also add unexpected weight so that warning is thrown
        flax_state_dict[flax_key] = jnp.asarray(flax_tensor)

    return unflatten_dict(flax_state_dict)
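# Usage sketch (variable names are ours): given a loaded PyTorch component and
# its Flax counterpart, produce a Flax parameter tree:
# flax_params = convert_pytorch_state_dict_to_flax(pt_model.state_dict(), flax_model)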
| 351 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)

CONVNEXTV2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''facebook/convnextv2-tiny-1k-224''': '''https://huggingface.co/facebook/convnextv2-tiny-1k-224/resolve/main/config.json''',
}
class ConvNextV2Config(BackboneConfigMixin, PretrainedConfig):
    model_type = "convnextv2"

    def __init__(
        self,
        num_channels=3,
        patch_size=4,
        num_stages=4,
        hidden_sizes=None,
        depths=None,
        hidden_act="gelu",
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        drop_path_rate=0.0,
        image_size=224,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.num_channels = num_channels
        self.patch_size = patch_size
        self.num_stages = num_stages
        self.hidden_sizes = [96, 192, 384, 768] if hidden_sizes is None else hidden_sizes
        self.depths = [3, 3, 9, 3] if depths is None else depths
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.drop_path_rate = drop_path_rate
        self.image_size = image_size
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(self.depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
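# Instantiation sketch (the defaults above correspond to the tiny variant in
# the archive map at the top of this snippet):
# config = ConvNextV2Config(out_features=["stage2", "stage4"])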
| 184 | 0 |
"""simple docstring"""
from math import factorial
def binomial_distribution(successes: int, trials: int, prob: float) -> float:
    """Return the probability of exactly `successes` successes in `trials`
    independent Bernoulli trials, each with success probability `prob`."""
    if successes > trials:
        raise ValueError("successes must be lower or equal to trials")
    if trials < 0 or successes < 0:
        raise ValueError("the function is defined for non-negative integers")
    if not isinstance(successes, int) or not isinstance(trials, int):
        raise ValueError("the function is defined for non-negative integers")
    if not 0 < prob < 1:
        raise ValueError("prob has to be in range of 1 - 0")
    probability = (prob**successes) * ((1 - prob) ** (trials - successes))
    # Calculate the binomial coefficient: n! / k!(n-k)!
    coefficient = float(factorial(trials))
    coefficient /= factorial(successes) * factorial(trials - successes)
    return probability * coefficient
if __name__ == "__main__":
from doctest import testmod
testmod()
print("""Probability of 2 successes out of 4 trails""")
print("""with probability of 0.75 is:""", end=""" """)
print(binomial_distribution(2, 4, 0.7_5))
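# Expected output of the demo above: C(4, 2) * 0.75**2 * 0.25**2 == 0.2109375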
| 91 |
__magic_name__: str = [0, 2, 4, 6, 8]
__magic_name__: Optional[int] = [1, 3, 5, 7, 9]
def reversible_numbers(remaining_length: int, remainder: int, digits: list, length: int) -> int:
    """Count reversible numbers of total `length`, filling the digit buffer
    from the outside in and carrying `remainder` between digit pairs."""
    if remaining_length == 0:
        if digits[0] == 0 or digits[-1] == 0:
            return 0

        for i in range(length // 2 - 1, -1, -1):
            remainder += digits[i] + digits[length - i - 1]
            if remainder % 2 == 0:
                return 0
            remainder //= 10

        return 1

    if remaining_length == 1:
        if remainder % 2 == 0:
            return 0

        result = 0
        for digit in range(10):
            digits[length // 2] = digit
            result += reversible_numbers(
                0, (remainder + 2 * digit) // 10, digits, length
            )
        return result

    result = 0
    for digit1 in range(10):
        digits[(length + remaining_length) // 2 - 1] = digit1
        if (remainder + digit1) % 2 == 0:
            other_parity_digits = ODD_DIGITS
        else:
            other_parity_digits = EVEN_DIGITS
        for digit2 in other_parity_digits:
            digits[(length - remaining_length) // 2] = digit2
            result += reversible_numbers(
                remaining_length - 2, (remainder + digit1 + digit2) // 10, digits, length
            )
    return result


def solution(max_power: int = 9) -> int:
    result = 0
    for length in range(1, max_power + 1):
        result += reversible_numbers(length, 0, [0] * length, length)
    return result
if __name__ == "__main__":
print(F"""{solution() = }""")
| 342 | 0 |
import argparse
import torch
from transformers import (
UniSpeechSatConfig,
UniSpeechSatForAudioFrameClassification,
UniSpeechSatForSequenceClassification,
UniSpeechSatForXVector,
    Wav2Vec2FeatureExtractor,
logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def convert_classification(base_model_name: str, hf_config, downstream_dict: dict):
    model = UniSpeechSatForSequenceClassification.from_pretrained(base_model_name, config=hf_config)
    model.projector.weight.data = downstream_dict["projector.weight"]
    model.projector.bias.data = downstream_dict["projector.bias"]
    model.classifier.weight.data = downstream_dict["model.post_net.linear.weight"]
    model.classifier.bias.data = downstream_dict["model.post_net.linear.bias"]
    return model
def convert_diarization(base_model_name: str, hf_config, downstream_dict: dict):
    model = UniSpeechSatForAudioFrameClassification.from_pretrained(base_model_name, config=hf_config)
    model.classifier.weight.data = downstream_dict["model.linear.weight"]
    model.classifier.bias.data = downstream_dict["model.linear.bias"]
    return model
def convert_xvector(base_model_name: str, hf_config, downstream_dict: dict):
    model = UniSpeechSatForXVector.from_pretrained(base_model_name, config=hf_config)
    model.projector.weight.data = downstream_dict["connector.weight"]
    model.projector.bias.data = downstream_dict["connector.bias"]
    for i, kernel_size in enumerate(hf_config.tdnn_kernel):
        model.tdnn[i].kernel.weight.data = downstream_dict[
            f"""model.framelevel_feature_extractor.module.{i}.kernel.weight"""
        ]
        model.tdnn[i].kernel.bias.data = downstream_dict[f"""model.framelevel_feature_extractor.module.{i}.kernel.bias"""]
    model.feature_extractor.weight.data = downstream_dict["model.utterancelevel_feature_extractor.linear1.weight"]
    model.feature_extractor.bias.data = downstream_dict["model.utterancelevel_feature_extractor.linear1.bias"]
    model.classifier.weight.data = downstream_dict["model.utterancelevel_feature_extractor.linear2.weight"]
    model.classifier.bias.data = downstream_dict["model.utterancelevel_feature_extractor.linear2.bias"]
    model.objective.weight.data = downstream_dict["objective.W"]
    return model
@torch.no_grad()
def convert_s3prl_checkpoint(base_model_name: str, config_path: str, checkpoint_path: str, model_dump_path: str):
    checkpoint = torch.load(checkpoint_path, map_location="cpu")
    downstream_dict = checkpoint["Downstream"]
    hf_config = UniSpeechSatConfig.from_pretrained(config_path)
    hf_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(
        base_model_name, return_attention_mask=True, do_normalize=False)
    arch = hf_config.architectures[0]
    if arch.endswith("ForSequenceClassification"):
        hf_model = convert_classification(base_model_name, hf_config, downstream_dict)
    elif arch.endswith("ForAudioFrameClassification"):
        hf_model = convert_diarization(base_model_name, hf_config, downstream_dict)
    elif arch.endswith("ForXVector"):
        hf_model = convert_xvector(base_model_name, hf_config, downstream_dict)
    else:
        raise NotImplementedError(f"""S3PRL weights conversion is not supported for {arch}""")
    if hf_config.use_weighted_layer_sum:
        hf_model.layer_weights.data = checkpoint["Featurizer"]["weights"]
    hf_feature_extractor.save_pretrained(model_dump_path)
    hf_model.save_pretrained(model_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--base_model_name', default=None, type=str, help='Name of the huggingface pretrained base model.'
    )
    parser.add_argument('--config_path', default=None, type=str, help='Path to the huggingface classifier config.')
    parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to the s3prl checkpoint.')
    parser.add_argument('--model_dump_path', default=None, type=str, help='Path to the final converted model.')
    args = parser.parse_args()
    convert_s3prl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
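    # Example invocation (the script filename, paths and model name below are
    # illustrative, not part of the original source):
    #   python convert_unispeech_sat_s3prl_checkpoint.py \
    #       --base_model_name microsoft/unispeech-sat-base \
    #       --config_path ./config.json \
    #       --checkpoint_path ./s3prl_checkpoint.ckpt \
    #       --model_dump_path ./converted_model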
| 111 |
'''simple docstring'''
import argparse
import math
import os
from copy import deepcopy
import torch
from audio_diffusion.models import DiffusionAttnUnet1D
from diffusion import sampling
from torch import nn
from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNet1DModel
MODELS_MAP = {
'gwf-440k': {
'url': 'https://model-server.zqevans2.workers.dev/gwf-440k.ckpt',
'sample_rate': 4_80_00,
'sample_size': 6_55_36,
},
'jmann-small-190k': {
'url': 'https://model-server.zqevans2.workers.dev/jmann-small-190k.ckpt',
'sample_rate': 4_80_00,
'sample_size': 6_55_36,
},
'jmann-large-580k': {
'url': 'https://model-server.zqevans2.workers.dev/jmann-large-580k.ckpt',
'sample_rate': 4_80_00,
'sample_size': 13_10_72,
},
'maestro-uncond-150k': {
'url': 'https://model-server.zqevans2.workers.dev/maestro-uncond-150k.ckpt',
'sample_rate': 1_60_00,
'sample_size': 6_55_36,
},
'unlocked-uncond-250k': {
'url': 'https://model-server.zqevans2.workers.dev/unlocked-uncond-250k.ckpt',
'sample_rate': 1_60_00,
'sample_size': 6_55_36,
},
'honk-140k': {
'url': 'https://model-server.zqevans2.workers.dev/honk-140k.ckpt',
'sample_rate': 1_60_00,
'sample_size': 6_55_36,
},
}
def alpha_sigma_to_t(alpha, sigma):
    """Map clean-signal/noise scaling factors to a timestep in [0, 1]."""
    return torch.atan2(sigma, alpha) / math.pi * 2
def get_crash_schedule(t):
    sigma = torch.sin(t * math.pi / 2) ** 2
    alpha = (1 - sigma**2) ** 0.5
    return alpha_sigma_to_t(alpha, sigma)
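# Endpoint check of the mapping above (illustrative): a pure-signal point
# (alpha=1, sigma=0) maps to t=0 and pure noise (alpha=0, sigma=1) maps to
# t=1, since atan2(sigma, alpha) sweeps [0, pi/2].
assert float(alpha_sigma_to_t(torch.tensor(1.0), torch.tensor(0.0))) == 0.0
assert abs(float(alpha_sigma_to_t(torch.tensor(0.0), torch.tensor(1.0))) - 1.0) < 1e-6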
class Object:
    """Bare attribute container standing in for the original training args."""
    pass
class DiffusionUncond(nn.Module):
    def __init__(self, global_args):
        super().__init__()
        self.diffusion = DiffusionAttnUnet1D(global_args, n_attn_layers=4)
        self.diffusion_ema = deepcopy(self.diffusion)
        self.rng = torch.quasirandom.SobolEngine(1, scramble=True)
def download(model_name):
    url = MODELS_MAP[model_name]["url"]
    os.system(f"""wget {url} ./""")
    return f"""./{model_name}.ckpt"""
DOWN_NUM_TO_LAYER = {
'1': 'resnets.0',
'2': 'attentions.0',
'3': 'resnets.1',
'4': 'attentions.1',
'5': 'resnets.2',
'6': 'attentions.2',
}
UP_NUM_TO_LAYER = {
'8': 'resnets.0',
'9': 'attentions.0',
'10': 'resnets.1',
'11': 'attentions.1',
'12': 'resnets.2',
'13': 'attentions.2',
}
MID_NUM_TO_LAYER = {
'1': 'resnets.0',
'2': 'attentions.0',
'3': 'resnets.1',
'4': 'attentions.1',
'5': 'resnets.2',
'6': 'attentions.2',
'8': 'resnets.3',
'9': 'attentions.3',
'10': 'resnets.4',
'11': 'attentions.4',
'12': 'resnets.5',
'13': 'attentions.5',
}
DEPTH_0_TO_LAYER = {
'0': 'resnets.0',
'1': 'resnets.1',
'2': 'resnets.2',
'4': 'resnets.0',
'5': 'resnets.1',
'6': 'resnets.2',
}
RES_CONV_MAP = {
'skip': 'conv_skip',
'main.0': 'conv_1',
'main.1': 'group_norm_1',
'main.3': 'conv_2',
'main.4': 'group_norm_2',
}
ATTN_MAP = {
'norm': 'group_norm',
'qkv_proj': ['query', 'key', 'value'],
'out_proj': ['proj_attn'],
}
def convert_resconv_naming(name):
    if name.startswith("skip"):
        return name.replace("skip", RES_CONV_MAP["skip"])
    # name has to be of format main.{digit}
    if not name.startswith("main."):
        raise ValueError(f"""ResConvBlock error with {name}""")
    return name.replace(name[:6], RES_CONV_MAP[name[:6]])
def convert_attn_naming(name):
    for key, value in ATTN_MAP.items():
        if name.startswith(key) and not isinstance(value, list):
            return name.replace(key, value)
        elif name.startswith(key):
            return [name.replace(key, v) for v in value]
    raise ValueError(f"""Attn error with {name}""")
def rename(input_string, max_depth=13):
    string = input_string
    if string.split(".")[0] == "timestep_embed":
        return string.replace("timestep_embed", "time_proj")
    depth = 0
    if string.startswith("net.3."):
        depth += 1
        string = string[6:]
    elif string.startswith("net."):
        string = string[4:]
    while string.startswith("main.7."):
        depth += 1
        string = string[7:]
    if string.startswith("main."):
        string = string[5:]
    # mid block
    if string[:2].isdigit():
        layer_num = string[:2]
        string_left = string[2:]
    else:
        layer_num = string[0]
        string_left = string[1:]
    if depth == max_depth:
        new_layer = MID_NUM_TO_LAYER[layer_num]
        prefix = "mid_block"
    elif depth > 0 and int(layer_num) < 7:
        new_layer = DOWN_NUM_TO_LAYER[layer_num]
        prefix = f"""down_blocks.{depth}"""
    elif depth > 0 and int(layer_num) > 7:
        new_layer = UP_NUM_TO_LAYER[layer_num]
        prefix = f"""up_blocks.{max_depth - depth - 1}"""
    elif depth == 0:
        new_layer = DEPTH_0_TO_LAYER[layer_num]
        prefix = f"""up_blocks.{max_depth - 1}""" if int(layer_num) > 3 else "down_blocks.0"
    if not string_left.startswith("."):
        raise ValueError(f"""Naming error with {input_string} and string_left: {string_left}.""")
    string_left = string_left[1:]
    if "resnets" in new_layer:
        string_left = convert_resconv_naming(string_left)
    elif "attentions" in new_layer:
        new_string_left = convert_attn_naming(string_left)
        string_left = new_string_left
    if not isinstance(string_left, list):
        new_string = prefix + "." + new_layer + "." + string_left
    else:
        new_string = [prefix + "." + new_layer + "." + s for s in string_left]
    return new_string
def rename_orig_weights(state_dict):
    new_state_dict = {}
    for k, v in state_dict.items():
        if k.endswith("kernel"):
            # up- and downsample layers, don't have trainable weights
            continue
        new_k = rename(k)
        # check if we need to transform from Conv => Linear for attention
        if isinstance(new_k, list):
            new_state_dict = transform_conv_attns(new_state_dict, new_k, v)
        else:
            new_state_dict[new_k] = v
    return new_state_dict
def transform_conv_attns(new_state_dict, name, v):
    if len(name) == 1:
        if len(v.shape) == 3:
            # weight
            new_state_dict[name[0]] = v[:, :, 0]
        else:
            # bias
            new_state_dict[name[0]] = v
    else:
        # qkv matrices
        trippled_shape = v.shape[0]
        single_shape = trippled_shape // 3
        for i in range(3):
            if len(v.shape) == 3:
                new_state_dict[name[i]] = v[i * single_shape : (i + 1) * single_shape, :, 0]
            else:
                new_state_dict[name[i]] = v[i * single_shape : (i + 1) * single_shape]
    return new_state_dict
def main(args):
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    model_name = args.model_path.split("/")[-1].split(".")[0]
    if not os.path.isfile(args.model_path):
        assert (
            model_name == args.model_path
        ), f"""Make sure to provide one of the official model names {MODELS_MAP.keys()}"""
        args.model_path = download(model_name)
    sample_rate = MODELS_MAP[model_name]["sample_rate"]
    sample_size = MODELS_MAP[model_name]["sample_size"]
    config = Object()
    config.sample_size = sample_size
    config.sample_rate = sample_rate
    config.latent_dim = 0
    diffusers_model = UNet1DModel(sample_size=sample_size, sample_rate=sample_rate)
    diffusers_state_dict = diffusers_model.state_dict()
    orig_model = DiffusionUncond(config)
    orig_model.load_state_dict(torch.load(args.model_path, map_location=device)["state_dict"])
    diffusion = orig_model.diffusion_ema.eval()
    diffusion_state_dict = diffusion.state_dict()
    renamed_state_dict = rename_orig_weights(diffusion_state_dict)
    renamed_minus_diffusers = set(renamed_state_dict.keys()) - set(diffusers_state_dict.keys())
    diffusers_minus_renamed = set(diffusers_state_dict.keys()) - set(renamed_state_dict.keys())
    assert len(renamed_minus_diffusers) == 0, f"""Problem with {renamed_minus_diffusers}"""
    assert all(k.endswith("kernel") for k in list(diffusers_minus_renamed)), f"""Problem with {diffusers_minus_renamed}"""
    for key, value in renamed_state_dict.items():
        assert (
            diffusers_state_dict[key].squeeze().shape == value.squeeze().shape
        ), f"""Shape for {key} doesn't match. Diffusers: {diffusers_state_dict[key].shape} vs. {value.shape}"""
        if key == "time_proj.weight":
            value = value.squeeze()
        diffusers_state_dict[key] = value
    diffusers_model.load_state_dict(diffusers_state_dict)
    steps = 1_00
    seed = 33
    scheduler = IPNDMScheduler(num_train_timesteps=steps)
    generator = torch.manual_seed(seed)
    noise = torch.randn([1, 2, config.sample_size], generator=generator).to(device)
    t = torch.linspace(1, 0, steps + 1, device=device)[:-1]
    step_list = get_crash_schedule(t)
    pipe = DanceDiffusionPipeline(unet=diffusers_model, scheduler=scheduler)
    generator = torch.manual_seed(33)
    audio = pipe(num_inference_steps=steps, generator=generator).audios
    generated = sampling.iplms_sample(diffusion, noise, step_list, {})
    generated = generated.clamp(-1, 1)
    diff_sum = (generated - audio).abs().sum()
    diff_max = (generated - audio).abs().max()
    if args.save:
        pipe.save_pretrained(args.checkpoint_path)
    print("Diff sum", diff_sum)
    print("Diff max", diff_max)
    assert diff_max < 1e-3, f"""Diff max: {diff_max} is too much :-/"""
    print(f"""Conversion for {model_name} successful!""")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--model_path', default=None, type=str, required=True, help='Path to the model to convert.')
    parser.add_argument(
        '--save', default=True, type=bool, required=False, help='Whether to save the converted model or not.'
    )
    parser.add_argument('--checkpoint_path', default=None, type=str, required=True, help='Path to the output model.')
    args = parser.parse_args()
main(args)
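# Example of the key-renaming scheme above (assuming the fixed names): the
# top-level "timestep_embed.weight" maps straight to diffusers' "time_proj.weight",
# i.e. rename("timestep_embed.weight") == "time_proj.weight".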
| 111 | 1 |
"""simple docstring"""
import argparse
from collections import defaultdict
import yaml
__UpperCAmelCase = 'docs/source/en/_toctree.yml'
def _snake_case ( lowercase__ : Any ) -> Any:
'''simple docstring'''
lowerCAmelCase_ :Any = defaultdict(lowercase__ )
lowerCAmelCase_ :Union[str, Any] = []
lowerCAmelCase_ :int = []
for doc in doc_list:
if "local" in doc:
counts[doc["local"]] += 1
if doc["title"].lower() == "overview":
overview_doc.append({"""local""": doc["""local"""], """title""": doc["""title"""]} )
else:
new_doc_list.append(lowercase__ )
lowerCAmelCase_ :int = new_doc_list
lowerCAmelCase_ :str = [key for key, value in counts.items() if value > 1]
lowerCAmelCase_ :Tuple = []
for duplicate_key in duplicates:
lowerCAmelCase_ :Any = list({doc["""title"""] for doc in doc_list if doc["""local"""] == duplicate_key} )
if len(lowercase__ ) > 1:
raise ValueError(
f"""{duplicate_key} is present several times in the documentation table of content at """
"""`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the """
"""others.""" )
# Only add this once
new_doc.append({"""local""": duplicate_key, """title""": titles[0]} )
# Add none duplicate-keys
new_doc.extend([doc for doc in doc_list if """local""" not in counts or counts[doc["""local"""]] == 1] )
lowerCAmelCase_ :int = sorted(lowercase__ , key=lambda lowercase__ : s["title"].lower() )
# "overview" gets special treatment and is always first
if len(lowercase__ ) > 1:
raise ValueError("""{doc_list} has two 'overview' docs which is not allowed.""" )
overview_doc.extend(lowercase__ )
# Sort
return overview_doc
def _snake_case ( lowercase__ : Optional[Any]=False ) -> str:
'''simple docstring'''
with open(lowercase__ , encoding="""utf-8""" ) as f:
lowerCAmelCase_ :int = yaml.safe_load(f.read() )
# Get to the API doc
lowerCAmelCase_ :List[str] = 0
while content[api_idx]["title"] != "API":
api_idx += 1
lowerCAmelCase_ :List[str] = content[api_idx]["""sections"""]
# Then to the model doc
lowerCAmelCase_ :int = 0
while api_doc[scheduler_idx]["title"] != "Schedulers":
scheduler_idx += 1
lowerCAmelCase_ :Dict = api_doc[scheduler_idx]["""sections"""]
lowerCAmelCase_ :Optional[Any] = clean_doc_toc(lowercase__ )
lowerCAmelCase_ :str = False
if new_scheduler_doc != scheduler_doc:
lowerCAmelCase_ :Optional[int] = True
if overwrite:
lowerCAmelCase_ :Tuple = new_scheduler_doc
if diff:
if overwrite:
lowerCAmelCase_ :str = api_doc
with open(lowercase__ , """w""" , encoding="""utf-8""" ) as f:
f.write(yaml.dump(lowercase__ , allow_unicode=lowercase__ ) )
else:
raise ValueError(
"""The model doc part of the table of content is not properly sorted, run `make style` to fix this.""" )
def _snake_case ( lowercase__ : Any=False ) -> int:
'''simple docstring'''
with open(lowercase__ , encoding="""utf-8""" ) as f:
lowerCAmelCase_ :int = yaml.safe_load(f.read() )
# Get to the API doc
lowerCAmelCase_ :Optional[int] = 0
while content[api_idx]["title"] != "API":
api_idx += 1
lowerCAmelCase_ :Optional[int] = content[api_idx]["""sections"""]
# Then to the model doc
lowerCAmelCase_ :List[Any] = 0
while api_doc[pipeline_idx]["title"] != "Pipelines":
pipeline_idx += 1
lowerCAmelCase_ :Optional[int] = False
lowerCAmelCase_ :Any = api_doc[pipeline_idx]["""sections"""]
lowerCAmelCase_ :str = []
# sort sub pipeline docs
for pipeline_doc in pipeline_docs:
if "section" in pipeline_doc:
lowerCAmelCase_ :int = pipeline_doc["""section"""]
lowerCAmelCase_ :Tuple = clean_doc_toc(lowercase__ )
if overwrite:
lowerCAmelCase_ :List[str] = new_sub_pipeline_doc
new_pipeline_docs.append(lowercase__ )
# sort overall pipeline doc
lowerCAmelCase_ :Union[str, Any] = clean_doc_toc(lowercase__ )
if new_pipeline_docs != pipeline_docs:
lowerCAmelCase_ :Tuple = True
if overwrite:
lowerCAmelCase_ :Optional[Any] = new_pipeline_docs
if diff:
if overwrite:
lowerCAmelCase_ :Tuple = api_doc
with open(lowercase__ , """w""" , encoding="""utf-8""" ) as f:
f.write(yaml.dump(lowercase__ , allow_unicode=lowercase__ ) )
else:
raise ValueError(
"""The model doc part of the table of content is not properly sorted, run `make style` to fix this.""" )
if __name__ == "__main__":
__UpperCAmelCase = argparse.ArgumentParser()
parser.add_argument('--fix_and_overwrite', action='store_true', help='Whether to fix inconsistencies.')
__UpperCAmelCase = parser.parse_args()
check_scheduler_doc(args.fix_and_overwrite)
check_pipeline_doc(args.fix_and_overwrite)
| 84 |
'''simple docstring'''
import argparse
import math
import os
from copy import deepcopy
import torch
from audio_diffusion.models import DiffusionAttnUnet1D
from diffusion import sampling
from torch import nn
from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNet1DModel
MODELS_MAP = {
"gwf-440k": {
"url": "https://model-server.zqevans2.workers.dev/gwf-440k.ckpt",
"sample_rate": 4_80_00,
"sample_size": 6_55_36,
},
"jmann-small-190k": {
"url": "https://model-server.zqevans2.workers.dev/jmann-small-190k.ckpt",
"sample_rate": 4_80_00,
"sample_size": 6_55_36,
},
"jmann-large-580k": {
"url": "https://model-server.zqevans2.workers.dev/jmann-large-580k.ckpt",
"sample_rate": 4_80_00,
"sample_size": 13_10_72,
},
"maestro-uncond-150k": {
"url": "https://model-server.zqevans2.workers.dev/maestro-uncond-150k.ckpt",
"sample_rate": 1_60_00,
"sample_size": 6_55_36,
},
"unlocked-uncond-250k": {
"url": "https://model-server.zqevans2.workers.dev/unlocked-uncond-250k.ckpt",
"sample_rate": 1_60_00,
"sample_size": 6_55_36,
},
"honk-140k": {
"url": "https://model-server.zqevans2.workers.dev/honk-140k.ckpt",
"sample_rate": 1_60_00,
"sample_size": 6_55_36,
},
}
def alpha_sigma_to_t(alpha, sigma):
    """Map clean-signal/noise scaling factors to a timestep in [0, 1]."""
    return torch.atan2(sigma, alpha) / math.pi * 2
def get_crash_schedule(t):
    sigma = torch.sin(t * math.pi / 2) ** 2
    alpha = (1 - sigma**2) ** 0.5
    return alpha_sigma_to_t(alpha, sigma)
class Object:
    """Bare attribute container standing in for the original training args."""
    pass
class DiffusionUncond(nn.Module):
    def __init__(self, global_args):
        super().__init__()
        self.diffusion = DiffusionAttnUnet1D(global_args, n_attn_layers=4)
        self.diffusion_ema = deepcopy(self.diffusion)
        self.rng = torch.quasirandom.SobolEngine(1, scramble=True)
def download(model_name):
    url = MODELS_MAP[model_name]["url"]
    os.system(F'''wget {url} ./''')
    return F'''./{model_name}.ckpt'''
DOWN_NUM_TO_LAYER = {
"1": "resnets.0",
"2": "attentions.0",
"3": "resnets.1",
"4": "attentions.1",
"5": "resnets.2",
"6": "attentions.2",
}
UP_NUM_TO_LAYER = {
"8": "resnets.0",
"9": "attentions.0",
"10": "resnets.1",
"11": "attentions.1",
"12": "resnets.2",
"13": "attentions.2",
}
MID_NUM_TO_LAYER = {
"1": "resnets.0",
"2": "attentions.0",
"3": "resnets.1",
"4": "attentions.1",
"5": "resnets.2",
"6": "attentions.2",
"8": "resnets.3",
"9": "attentions.3",
"10": "resnets.4",
"11": "attentions.4",
"12": "resnets.5",
"13": "attentions.5",
}
DEPTH_0_TO_LAYER = {
"0": "resnets.0",
"1": "resnets.1",
"2": "resnets.2",
"4": "resnets.0",
"5": "resnets.1",
"6": "resnets.2",
}
RES_CONV_MAP = {
"skip": "conv_skip",
"main.0": "conv_1",
"main.1": "group_norm_1",
"main.3": "conv_2",
"main.4": "group_norm_2",
}
ATTN_MAP = {
"norm": "group_norm",
"qkv_proj": ["query", "key", "value"],
"out_proj": ["proj_attn"],
}
def convert_resconv_naming(name):
    if name.startswith("skip"):
        return name.replace("skip", RES_CONV_MAP["skip"])
    # name has to be of format main.{digit}
    if not name.startswith("main."):
        raise ValueError(F'''ResConvBlock error with {name}''')
    return name.replace(name[:6], RES_CONV_MAP[name[:6]])
def convert_attn_naming(name):
    for key, value in ATTN_MAP.items():
        if name.startswith(key) and not isinstance(value, list):
            return name.replace(key, value)
        elif name.startswith(key):
            return [name.replace(key, v) for v in value]
    raise ValueError(F'''Attn error with {name}''')
def rename(input_string, max_depth=13):
    string = input_string
    if string.split(".")[0] == "timestep_embed":
        return string.replace("timestep_embed", "time_proj")
    depth = 0
    if string.startswith("net.3."):
        depth += 1
        string = string[6:]
    elif string.startswith("net."):
        string = string[4:]
    while string.startswith("main.7."):
        depth += 1
        string = string[7:]
    if string.startswith("main."):
        string = string[5:]
    # mid block
    if string[:2].isdigit():
        layer_num = string[:2]
        string_left = string[2:]
    else:
        layer_num = string[0]
        string_left = string[1:]
    if depth == max_depth:
        new_layer = MID_NUM_TO_LAYER[layer_num]
        prefix = "mid_block"
    elif depth > 0 and int(layer_num) < 7:
        new_layer = DOWN_NUM_TO_LAYER[layer_num]
        prefix = F'''down_blocks.{depth}'''
    elif depth > 0 and int(layer_num) > 7:
        new_layer = UP_NUM_TO_LAYER[layer_num]
        prefix = F'''up_blocks.{max_depth - depth - 1}'''
    elif depth == 0:
        new_layer = DEPTH_0_TO_LAYER[layer_num]
        prefix = F'''up_blocks.{max_depth - 1}''' if int(layer_num) > 3 else "down_blocks.0"
    if not string_left.startswith("."):
        raise ValueError(F'''Naming error with {input_string} and string_left: {string_left}.''')
    string_left = string_left[1:]
    if "resnets" in new_layer:
        string_left = convert_resconv_naming(string_left)
    elif "attentions" in new_layer:
        new_string_left = convert_attn_naming(string_left)
        string_left = new_string_left
    if not isinstance(string_left, list):
        new_string = prefix + "." + new_layer + "." + string_left
    else:
        new_string = [prefix + "." + new_layer + "." + s for s in string_left]
    return new_string
def rename_orig_weights(state_dict):
    new_state_dict = {}
    for k, v in state_dict.items():
        if k.endswith("kernel"):
            # up- and downsample layers, don't have trainable weights
            continue
        new_k = rename(k)
        # check if we need to transform from Conv => Linear for attention
        if isinstance(new_k, list):
            new_state_dict = transform_conv_attns(new_state_dict, new_k, v)
        else:
            new_state_dict[new_k] = v
    return new_state_dict
def transform_conv_attns(new_state_dict, name, v):
    if len(name) == 1:
        if len(v.shape) == 3:
            # weight
            new_state_dict[name[0]] = v[:, :, 0]
        else:
            # bias
            new_state_dict[name[0]] = v
    else:
        # qkv matrices
        trippled_shape = v.shape[0]
        single_shape = trippled_shape // 3
        for i in range(3):
            if len(v.shape) == 3:
                new_state_dict[name[i]] = v[i * single_shape : (i + 1) * single_shape, :, 0]
            else:
                new_state_dict[name[i]] = v[i * single_shape : (i + 1) * single_shape]
    return new_state_dict
def main(args):
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    model_name = args.model_path.split("/")[-1].split(".")[0]
    if not os.path.isfile(args.model_path):
        assert (
            model_name == args.model_path
        ), F'''Make sure to provide one of the official model names {MODELS_MAP.keys()}'''
        args.model_path = download(model_name)
    sample_rate = MODELS_MAP[model_name]["sample_rate"]
    sample_size = MODELS_MAP[model_name]["sample_size"]
    config = Object()
    config.sample_size = sample_size
    config.sample_rate = sample_rate
    config.latent_dim = 0
    diffusers_model = UNet1DModel(sample_size=sample_size, sample_rate=sample_rate)
    diffusers_state_dict = diffusers_model.state_dict()
    orig_model = DiffusionUncond(config)
    orig_model.load_state_dict(torch.load(args.model_path, map_location=device)["state_dict"])
    diffusion = orig_model.diffusion_ema.eval()
    diffusion_state_dict = diffusion.state_dict()
    renamed_state_dict = rename_orig_weights(diffusion_state_dict)
    renamed_minus_diffusers = set(renamed_state_dict.keys()) - set(diffusers_state_dict.keys())
    diffusers_minus_renamed = set(diffusers_state_dict.keys()) - set(renamed_state_dict.keys())
    assert len(renamed_minus_diffusers) == 0, F'''Problem with {renamed_minus_diffusers}'''
    assert all(k.endswith("kernel") for k in list(diffusers_minus_renamed)), F'''Problem with {diffusers_minus_renamed}'''
    for key, value in renamed_state_dict.items():
        assert (
            diffusers_state_dict[key].squeeze().shape == value.squeeze().shape
        ), F'''Shape for {key} doesn\'t match. Diffusers: {diffusers_state_dict[key].shape} vs. {value.shape}'''
        if key == "time_proj.weight":
            value = value.squeeze()
        diffusers_state_dict[key] = value
    diffusers_model.load_state_dict(diffusers_state_dict)
    steps = 100
    seed = 33
    scheduler = IPNDMScheduler(num_train_timesteps=steps)
    generator = torch.manual_seed(seed)
    noise = torch.randn([1, 2, config.sample_size], generator=generator).to(device)
    t = torch.linspace(1, 0, steps + 1, device=device)[:-1]
    step_list = get_crash_schedule(t)
    pipe = DanceDiffusionPipeline(unet=diffusers_model, scheduler=scheduler)
    generator = torch.manual_seed(33)
    audio = pipe(num_inference_steps=steps, generator=generator).audios
    generated = sampling.iplms_sample(diffusion, noise, step_list, {})
    generated = generated.clamp(-1, 1)
    diff_sum = (generated - audio).abs().sum()
    diff_max = (generated - audio).abs().max()
    if args.save:
        pipe.save_pretrained(args.checkpoint_path)
    print("Diff sum", diff_sum)
    print("Diff max", diff_max)
    assert diff_max < 1e-3, F'''Diff max: {diff_max} is too much :-/'''
    print(F'''Conversion for {model_name} successful!''')
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_path", default=None, type=str, required=True, help="Path to the model to convert.")
    parser.add_argument(
        "--save", default=True, type=bool, required=False, help="Whether to save the converted model or not."
    )
    parser.add_argument("--checkpoint_path", default=None, type=str, required=True, help="Path to the output model.")
    args = parser.parse_args()
main(args)
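# Structural sanity check of MODELS_MAP (illustrative): every entry's download
# URL ends in "<model-name>.ckpt" and carries a sample rate and sample size.
for _name, _spec in MODELS_MAP.items():
    assert _spec["url"].endswith(f"{_name}.ckpt")
    assert {"url", "sample_rate", "sample_size"} <= set(_spec)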
| 139 | 0 |
'''simple docstring'''
import tensorflow as tf
from ...tf_utils import shape_list
class TFAdaptiveSoftmaxMask(tf.keras.layers.Layer):
    """Adaptive softmax head with clustered output layers (Transformer-XL style)."""
    def __init__(self, vocab_size, d_embed, d_proj, cutoffs, div_val=1, keep_order=False, **kwargs):
        super().__init__(**kwargs)
        self.vocab_size = vocab_size
        self.d_embed = d_embed
        self.d_proj = d_proj
        self.cutoffs = cutoffs + [vocab_size]
        self.cutoff_ends = [0] + self.cutoffs
        self.div_val = div_val
        self.shortlist_size = self.cutoffs[0]
        self.n_clusters = len(self.cutoffs) - 1
        self.head_size = self.shortlist_size + self.n_clusters
        self.keep_order = keep_order
        self.out_layers = []
        self.out_projs = []
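    # Cutoff bookkeeping, illustrated with hypothetical numbers: with
    # vocab_size=10 and cutoffs=[4, 8], the constructor above yields
    # cutoffs=[4, 8, 10], cutoff_ends=[0, 4, 8, 10], shortlist_size=4,
    # n_clusters=2 and head_size=6 (shortlist logits plus one logit per
    # tail cluster).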
    def build(self, input_shape):
        if self.n_clusters > 0:
            self.cluster_weight = self.add_weight(
                shape=(self.n_clusters, self.d_embed), initializer="""zeros""", trainable=True, name="""cluster_weight""")
            self.cluster_bias = self.add_weight(
                shape=(self.n_clusters,), initializer="""zeros""", trainable=True, name="""cluster_bias""")
        if self.div_val == 1:
            for i in range(len(self.cutoffs)):
                if self.d_proj != self.d_embed:
                    weight = self.add_weight(
                        shape=(self.d_embed, self.d_proj), initializer="""zeros""", trainable=True, name=f'out_projs_._{i}')
                    self.out_projs.append(weight)
                else:
                    self.out_projs.append(None)
                weight = self.add_weight(
                    shape=(self.vocab_size, self.d_embed), initializer="""zeros""", trainable=True, name=f'out_layers_._{i}_._weight')
                bias = self.add_weight(
                    shape=(self.vocab_size,), initializer="""zeros""", trainable=True, name=f'out_layers_._{i}_._bias')
                self.out_layers.append((weight, bias))
        else:
            for i in range(len(self.cutoffs)):
                l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                d_emb_i = self.d_embed // (self.div_val**i)
                proj = self.add_weight(
                    shape=(d_emb_i, self.d_proj), initializer="""zeros""", trainable=True, name=f'out_projs_._{i}')
                self.out_projs.append(proj)
                weight = self.add_weight(
                    shape=(r_idx - l_idx, d_emb_i), initializer="""zeros""", trainable=True, name=f'out_layers_._{i}_._weight')
                bias = self.add_weight(
                    shape=(r_idx - l_idx,), initializer="""zeros""", trainable=True, name=f'out_layers_._{i}_._bias')
                self.out_layers.append((weight, bias))
        super().build(input_shape)
    @staticmethod
    def _logit(x, W, b, proj=None):
        y = x
        if proj is not None:
            y = tf.einsum("""ibd,ed->ibe""", y, proj)
        return tf.einsum("""ibd,nd->ibn""", y, W) + b
    @staticmethod
    def _gather_logprob(logprob, target):
        lp_size = shape_list(logprob)
        r = tf.range(lp_size[0], dtype=target.dtype)
        idx = tf.stack([r, target], 1)
        return tf.gather_nd(logprob, idx)
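    # _gather_logprob selects, for each row b of `logprob` (shape [B, V]),
    # the log-probability of that row's target id: tf.gather_nd with indices
    # [[0, t_0], [1, t_1], ...] returns [logprob[0, t_0], logprob[1, t_1], ...].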
    def call(self, hidden, target, return_mean=True, training=False):
        head_logprob = 0
        if self.n_clusters == 0:
            output = self._logit(hidden, self.out_layers[0][0], self.out_layers[0][1], self.out_projs[0])
            if target is not None:
                loss = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=target, logits=output)
            out = tf.nn.log_softmax(output, axis=-1)
        else:
            hidden_sizes = shape_list(hidden)
            out = []
            loss = tf.zeros(hidden_sizes[:2])
            for i in range(len(self.cutoffs)):
                l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                if target is not None:
                    mask = (target >= l_idx) & (target < r_idx)
                    mask_idx = tf.where(mask)
                    cur_target = tf.boolean_mask(target, mask) - l_idx
                if self.div_val == 1:
                    cur_W = self.out_layers[0][0][l_idx:r_idx]
                    cur_b = self.out_layers[0][1][l_idx:r_idx]
                else:
                    cur_W = self.out_layers[i][0]
                    cur_b = self.out_layers[i][1]
                if i == 0:
                    cur_W = tf.concat([cur_W, self.cluster_weight], 0)
                    cur_b = tf.concat([cur_b, self.cluster_bias], 0)
                    head_logit = self._logit(hidden, cur_W, cur_b, self.out_projs[0])
                    head_logprob = tf.nn.log_softmax(head_logit)
                    out.append(head_logprob[..., : self.cutoffs[0]])
                    if target is not None:
                        cur_head_logprob = tf.boolean_mask(head_logprob, mask)
                        cur_logprob = self._gather_logprob(cur_head_logprob, cur_target)
                else:
                    tail_logit = self._logit(hidden, cur_W, cur_b, self.out_projs[i])
                    tail_logprob = tf.nn.log_softmax(tail_logit)
                    cluster_prob_idx = self.cutoffs[0] + i - 1  # No probability for the head cluster
                    logprob_i = head_logprob[..., cluster_prob_idx, None] + tail_logprob
                    out.append(logprob_i)
                    if target is not None:
                        cur_head_logprob = tf.boolean_mask(head_logprob, mask)
                        cur_tail_logprob = tf.boolean_mask(tail_logprob, mask)
                        cur_logprob = self._gather_logprob(cur_tail_logprob, cur_target)
                        cur_logprob += cur_head_logprob[:, self.cutoff_ends[1] + i - 1]
                if target is not None:
                    loss += tf.scatter_nd(mask_idx, -cur_logprob, shape_list(loss))
            out = tf.concat(out, axis=-1)
        if target is not None:
            if return_mean:
                loss = tf.reduce_mean(loss)
            # Add the training-time loss value to the layer using `self.add_loss()`.
            self.add_loss(loss)
            # Log the loss as a metric (we could log arbitrary metrics,
            # including different metrics for training and inference).
            self.add_metric(loss, name=self.name, aggregation="""mean""" if return_mean else """""")
        return out
| 187 |
'''simple docstring'''
from __future__ import annotations
import pandas as pd
def calculate_waitingtime(arrival_time: list[int], burst_time: list[int], no_of_processes: int) -> list[int]:
    """Compute per-process waiting times under shortest-remaining-time-first."""
    remaining_time = [0] * no_of_processes
    waiting_time = [0] * no_of_processes
    # Copy the burst time into remaining_time[]
    for i in range(no_of_processes):
        remaining_time[i] = burst_time[i]
    complete = 0
    increment_time = 0
    minm = 999_999_999
    short = 0
    check = False
    # Process until all processes are completed
    while complete != no_of_processes:
        for j in range(no_of_processes):
            if arrival_time[j] <= increment_time and remaining_time[j] > 0:
                if remaining_time[j] < minm:
                    minm = remaining_time[j]
                    short = j
                    check = True
        if not check:
            increment_time += 1
            continue
        remaining_time[short] -= 1
        minm = remaining_time[short]
        if minm == 0:
            minm = 999_999_999
        if remaining_time[short] == 0:
            complete += 1
            check = False
            # Find finish time of current process
            finish_time = increment_time + 1
            # Calculate waiting time
            finar = finish_time - arrival_time[short]
            waiting_time[short] = finar - burst_time[short]
            if waiting_time[short] < 0:
                waiting_time[short] = 0
        # Increment time
        increment_time += 1
    return waiting_time
def calculate_turnaroundtime(burst_time: list[int], no_of_processes: int, waiting_time: list[int]) -> list[int]:
    """Turnaround time is burst time plus waiting time."""
    turn_around_time = [0] * no_of_processes
    for i in range(no_of_processes):
        turn_around_time[i] = burst_time[i] + waiting_time[i]
    return turn_around_time
def calculate_average_times(waiting_time: list[int], turn_around_time: list[int], no_of_processes: int) -> None:
    """Print the mean waiting time and mean turnaround time."""
    total_waiting_time = 0
    total_turn_around_time = 0
    for i in range(no_of_processes):
        total_waiting_time = total_waiting_time + waiting_time[i]
        total_turn_around_time = total_turn_around_time + turn_around_time[i]
    print(f'Average waiting time = {total_waiting_time / no_of_processes:.5f}')
    print("""Average turn around time =""", total_turn_around_time / no_of_processes)
if __name__ == "__main__":
print('Enter how many process you want to analyze')
    no_of_processes = int(input())
    burst_time = [0] * no_of_processes
    arrival_time = [0] * no_of_processes
    processes = list(range(1, no_of_processes + 1))
    for i in range(no_of_processes):
        print('Enter the arrival time and burst time for process:--' + str(i + 1))
        arrival_time[i], burst_time[i] = map(int, input().split())
    waiting_time = calculate_waitingtime(arrival_time, burst_time, no_of_processes)
    bt = burst_time
    n = no_of_processes
    wt = waiting_time
    turn_around_time = calculate_turnaroundtime(bt, n, wt)
    calculate_average_times(waiting_time, turn_around_time, no_of_processes)
    fcfs = pd.DataFrame(
        list(zip(processes, burst_time, arrival_time, waiting_time, turn_around_time)),
        columns=[
            'Process',
            'BurstTime',
            'ArrivalTime',
            'WaitingTime',
            'TurnAroundTime',
        ],
    )
    # Printing the dataFrame
    pd.set_option('display.max_rows', fcfs.shape[0] + 1)
    print(fcfs)
| 187 | 1 |