| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| string (86 to 54.5k chars) | int64 (0 to 371) | string (87 to 49.2k chars) | int64 (0 to 349) | int64 (0 or 1) |
from __future__ import annotations


def make_matrix(row_size: int = 4) -> list[list[int]]:
    """Build a square matrix of size row_size filled with 1..row_size**2."""
    row_size = abs(row_size) or 4
    return [[1 + x + y * row_size for x in range(row_size)] for y in range(row_size)]


def rotate_90(matrix: list[list[int]]) -> list[list[int]]:
    """Rotate the matrix 90 degrees counterclockwise."""
    return reverse_row(transpose(matrix))
    # OR.. transpose(reverse_column(matrix))


def rotate_180(matrix: list[list[int]]) -> list[list[int]]:
    """Rotate the matrix 180 degrees."""
    return reverse_row(reverse_column(matrix))
    # OR.. reverse_column(reverse_row(matrix))


def rotate_270(matrix: list[list[int]]) -> list[list[int]]:
    """Rotate the matrix 270 degrees counterclockwise."""
    return reverse_column(transpose(matrix))
    # OR.. transpose(reverse_row(matrix))


def transpose(matrix: list[list[int]]) -> list[list[int]]:
    matrix[:] = [list(x) for x in zip(*matrix)]
    return matrix


def reverse_row(matrix: list[list[int]]) -> list[list[int]]:
    matrix[:] = matrix[::-1]
    return matrix


def reverse_column(matrix: list[list[int]]) -> list[list[int]]:
    matrix[:] = [x[::-1] for x in matrix]
    return matrix


def print_matrix(matrix: list[list[int]]) -> None:
    for row in matrix:
        print(*row)


if __name__ == "__main__":
    matrix = make_matrix()
    print("\norigin:\n")
    print_matrix(matrix)
    print("\nrotate 90 counterclockwise:\n")
    print_matrix(rotate_90(matrix))

    matrix = make_matrix()
    print("\norigin:\n")
    print_matrix(matrix)
    print("\nrotate 180:\n")
    print_matrix(rotate_180(matrix))

    matrix = make_matrix()
    print("\norigin:\n")
    print_matrix(matrix)
    print("\nrotate 270 counterclockwise:\n")
    print_matrix(rotate_270(matrix))
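
# A minimal non-mutating alternative (my addition, not part of the original
# algorithm file): the same rotations as pure one-liners built on zip().
def rotated_90(matrix: list[list[int]]) -> list[list[int]]:
    return [list(row) for row in zip(*matrix)][::-1]  # transpose, then reverse row order


def rotated_180(matrix: list[list[int]]) -> list[list[int]]:
    return [row[::-1] for row in matrix][::-1]  # reverse each row, then reverse row order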
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_megatron_bert''': ['''MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MegatronBertConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_megatron_bert"] = [
'''MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MegatronBertForCausalLM''',
'''MegatronBertForMaskedLM''',
'''MegatronBertForMultipleChoice''',
'''MegatronBertForNextSentencePrediction''',
'''MegatronBertForPreTraining''',
'''MegatronBertForQuestionAnswering''',
'''MegatronBertForSequenceClassification''',
'''MegatronBertForTokenClassification''',
'''MegatronBertModel''',
'''MegatronBertPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_megatron_bert import MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, MegatronBertConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_megatron_bert import (
MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
MegatronBertForCausalLM,
MegatronBertForMaskedLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
MegatronBertModel,
MegatronBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
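
# Sketch (my addition, not part of this file) of the idea behind _LazyModule:
# the module object in sys.modules is swapped for a proxy whose __getattr__
# imports the real submodule only on first attribute access, which keeps
# `import transformers` cheap even with hundreds of model files.
#
#   import importlib, types
#
#   class _Lazy(types.ModuleType):
#       def __init__(self, name, import_structure):
#           super().__init__(name)
#           self._import_structure = import_structure
#       def __getattr__(self, attr):
#           for submodule, names in self._import_structure.items():
#               if attr in names:
#                   mod = importlib.import_module(f".{submodule}", self.__name__)
#                   return getattr(mod, attr)
#           raise AttributeError(attr)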
"""Evaluate a 🤗 Transformers speech-recognition model (WER/CER) on a 🤗 Datasets split."""
import argparse
import re
from typing import Dict
import torch
from datasets import Audio, Dataset, load_dataset, load_metric
from transformers import AutoFeatureExtractor, pipeline
def log_results(result: Dataset, args: Dict) -> None:
    """Compute WER/CER for the given dataset result and log all outputs."""
    log_outputs = args.log_outputs
    dataset_id = "_".join(args.dataset.split("/") + [args.config, args.split])

    # load metric
    wer = load_metric("wer")
    cer = load_metric("cer")

    # compute metrics
    wer_result = wer.compute(references=result["target"], predictions=result["prediction"])
    cer_result = cer.compute(references=result["target"], predictions=result["prediction"])

    # print & log results
    result_str = f"WER: {wer_result}\nCER: {cer_result}"
    print(result_str)

    with open(f"{dataset_id}_eval_results.txt", "w") as f:
        f.write(result_str)

    # log all results in text file. Possibly interesting for analysis
    if log_outputs is not None:
        pred_file = f"log_{dataset_id}_predictions.txt"
        target_file = f"log_{dataset_id}_targets.txt"

        with open(pred_file, "w") as p, open(target_file, "w") as t:
            # mapping function to write output
            def write_to_file(batch, i):
                p.write(f"{i}" + "\n")
                p.write(batch["prediction"] + "\n")
                t.write(f"{i}" + "\n")
                t.write(batch["target"] + "\n")

            result.map(write_to_file, with_indices=True)
def normalize_text(text: str) -> str:
    """Normalize the target text so it matches the cleaning applied during training."""
    chars_to_ignore_regex = '[,?.!\-\;\:"“%‘”�—’…–]'  # noqa: W605 IMPORTANT: this should correspond to the chars that were ignored during training

    text = re.sub(chars_to_ignore_regex, "", text.lower())

    # In addition, we can normalize the target text, e.g. removing new lines characters etc...
    # note that order is important here!
    token_sequences_to_ignore = ["\n\n", "\n", "   ", "  "]

    for t in token_sequences_to_ignore:
        text = " ".join(text.split(t))

    return text
def main(args: argparse.Namespace) -> None:
    # load dataset
    dataset = load_dataset(args.dataset, args.config, split=args.split, use_auth_token=True)

    # for testing: only process the first two examples as a test
    # dataset = dataset.select(range(10))

    # load processor
    feature_extractor = AutoFeatureExtractor.from_pretrained(args.model_id)
    sampling_rate = feature_extractor.sampling_rate

    # resample audio
    dataset = dataset.cast_column("audio", Audio(sampling_rate=sampling_rate))

    # load eval pipeline
    if args.device is None:
        args.device = 0 if torch.cuda.is_available() else -1
    asr = pipeline("automatic-speech-recognition", model=args.model_id, device=args.device)

    # map function to decode audio
    def map_to_pred(batch):
        prediction = asr(
            batch["audio"]["array"], chunk_length_s=args.chunk_length_s, stride_length_s=args.stride_length_s
        )

        batch["prediction"] = prediction["text"]
        batch["target"] = normalize_text(batch["sentence"])
        return batch

    # run inference on all examples
    result = dataset.map(map_to_pred, remove_columns=dataset.column_names)

    # compute and log_results
    # do not change function below
    log_results(result, args)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"--model_id", type=str, required=True, help="Model identifier. Should be loadable with 🤗 Transformers"
)
parser.add_argument(
"--dataset",
type=str,
required=True,
help="Dataset name to evaluate the `model_id`. Should be loadable with 🤗 Datasets",
)
parser.add_argument(
"--config", type=str, required=True, help="Config of the dataset. *E.g.* `'en'` for Common Voice"
)
parser.add_argument("--split", type=str, required=True, help="Split of the dataset. *E.g.* `'test'`")
parser.add_argument(
"--chunk_length_s", type=float, default=None, help="Chunk length in seconds. Defaults to 5 seconds."
)
parser.add_argument(
"--stride_length_s", type=float, default=None, help="Stride of the audio chunks. Defaults to 1 second."
)
parser.add_argument(
"--log_outputs", action="store_true", help="If defined, write outputs to log file for analysis."
)
parser.add_argument(
"--device",
type=int,
default=None,
help="The device to run the pipeline on. -1 for CPU (default), 0 for the first GPU and so on.",
)
    args = parser.parse_args()
main(args)
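
# Example invocation (my illustration; the script name, model id, and dataset
# id below are placeholders, not values taken from this file):
#   python eval.py --model_id facebook/wav2vec2-base-960h \
#       --dataset mozilla-foundation/common_voice_7_0 --config en --split test --log_outputs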
"""Convert a WMT translation dataset from 🤗 Datasets into .source/.target text files."""
from pathlib import Path
import fire
from tqdm import tqdm
def download_wmt_dataset(src_lang="ro", tgt_lang="en", dataset="wmt16", save_dir=None) -> None:
    """Download a dataset using the datasets package and save it to src/tgt line-per-record files."""
    try:
        import datasets
    except (ModuleNotFoundError, ImportError):
        raise ImportError("run pip install datasets")
    pair = f"{src_lang}-{tgt_lang}"
    print(f"Converting {dataset}-{pair}")
    ds = datasets.load_dataset(dataset, pair)
    if save_dir is None:
        save_dir = f"{dataset}-{pair}"
    save_dir = Path(save_dir)
    save_dir.mkdir(exist_ok=True)

    for split in ds.keys():
        print(f"Splitting {split} with {ds[split].num_rows} records")

        # to save to val.source, val.target like summary datasets
        fn = "val" if split == "validation" else split

        src_path = save_dir.joinpath(f"{fn}.source")
        tgt_path = save_dir.joinpath(f"{fn}.target")
        src_fp = src_path.open("w+")
        tgt_fp = tgt_path.open("w+")

        # reader is the bottleneck so writing one record at a time doesn't slow things down
        for x in tqdm(ds[split]):
            ex = x["translation"]
            src_fp.write(ex[src_lang] + "\n")
            tgt_fp.write(ex[tgt_lang] + "\n")

    print(f"Saved {dataset} dataset to {save_dir}")
if __name__ == "__main__":
fire.Fire(download_wmt_dataset)
class SubArray:
    def __init__(self, arr):
        # convert the comma-separated string into a list of numbers
        self.array = arr.split(",")

    def solve_sub_array(self):
        # Kadane's algorithm: sum_value[i] is the best sum of a subarray ending
        # at index i, rear[i] is the best sum seen among the first i + 1 items.
        rear = [int(self.array[0])] * len(self.array)
        sum_value = [int(self.array[0])] * len(self.array)
        for i in range(1, len(self.array)):
            sum_value[i] = max(int(self.array[i]) + sum_value[i - 1], int(self.array[i]))
            rear[i] = max(sum_value[i], rear[i - 1])
        return rear[len(self.array) - 1]


if __name__ == "__main__":
    whole_array = input("please input some numbers:")
    array = SubArray(whole_array)
    re = array.solve_sub_array()
    print(("the result is:", re))
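
# A minimal sketch (my addition, not part of the original file) of the same
# maximum-subarray computation in idiomatic Python, assuming a non-empty list:
def max_subarray_sum(nums: list[int]) -> int:
    best = current = nums[0]
    for n in nums[1:]:
        current = max(current + n, n)  # extend the current run or start fresh at n
        best = max(best, current)
    return best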
ENERGY_CONVERSION = {
    "joule": 1.0,
    "kilojoule": 1_000,
    "megajoule": 1_000_000,
    "gigajoule": 1_000_000_000,
    "wattsecond": 1.0,
    "watthour": 3_600,
    "kilowatthour": 3_600_000,
    "newtonmeter": 1.0,
    "calorie_nutr": 4_186.8,
    "kilocalorie_nutr": 4_186_800.00,
    "electronvolt": 1.602_176_634e-19,
    "britishthermalunit_it": 1_055.055_85,
    "footpound": 1.355_818,
}


def energy_conversion(from_type: str, to_type: str, value: float) -> float:
    """Convert `value` between energy units by pivoting through joules."""
    if to_type not in ENERGY_CONVERSION or from_type not in ENERGY_CONVERSION:
        msg = (
            f"Incorrect 'from_type' or 'to_type' value: {from_type!r}, {to_type!r}\n"
            f"Valid values are: {', '.join(ENERGY_CONVERSION)}"
        )
        raise ValueError(msg)
    return value * ENERGY_CONVERSION[from_type] / ENERGY_CONVERSION[to_type]
if __name__ == "__main__":
import doctest
doctest.testmod()
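
# Example (my addition): 1 kilowatthour -> kilojoule goes through the joule
# pivot above, i.e. 1 * 3_600_000 / 1_000:
#   >>> energy_conversion("kilowatthour", "kilojoule", 1.0)
#   3600.0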
"""simple docstring"""
from sklearn.metrics import mean_squared_error
import datasets
_CITATION = '''\
@article{scikit-learn,
title={Scikit-learn: Machine Learning in {P}ython},
author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.
and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.
and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and
Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},
journal={Journal of Machine Learning Research},
volume={12},
pages={2825--2830},
year={2011}
}
'''
_DESCRIPTION = '''\
Mean Squared Error(MSE) is the average of the square of difference between the predicted
and actual values.
'''
_KWARGS_DESCRIPTION = '''
Args:
predictions: array-like of shape (n_samples,) or (n_samples, n_outputs)
Estimated target values.
references: array-like of shape (n_samples,) or (n_samples, n_outputs)
Ground truth (correct) target values.
sample_weight: array-like of shape (n_samples,), default=None
Sample weights.
multioutput: {"raw_values", "uniform_average"} or array-like of shape (n_outputs,), default="uniform_average"
Defines aggregating of multiple output values. Array-like value defines weights used to average errors.
"raw_values" : Returns a full set of errors in case of multioutput input.
"uniform_average" : Errors of all outputs are averaged with uniform weight.
squared : bool, default=True
If True returns MSE value, if False returns RMSE (Root Mean Squared Error) value.
Returns:
mse : mean squared error.
Examples:
>>> mse_metric = datasets.load_metric("mse")
>>> predictions = [2.5, 0.0, 2, 8]
>>> references = [3, -0.5, 2, 7]
>>> results = mse_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'mse\': 0.375}
>>> rmse_result = mse_metric.compute(predictions=predictions, references=references, squared=False)
>>> print(rmse_result)
{\'mse\': 0.6123724356957945}
If you\'re using multi-dimensional lists, then set the config as follows :
>>> mse_metric = datasets.load_metric("mse", "multilist")
>>> predictions = [[0.5, 1], [-1, 1], [7, -6]]
>>> references = [[0, 2], [-1, 2], [8, -5]]
>>> results = mse_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'mse\': 0.7083333333333334}
>>> results = mse_metric.compute(predictions=predictions, references=references, multioutput=\'raw_values\')
>>> print(results) # doctest: +NORMALIZE_WHITESPACE
{\'mse\': array([0.41666667, 1. ])}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION)
class Mse(datasets.Metric):
    """Mean squared error metric, wrapping sklearn.metrics.mean_squared_error."""

    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(self._get_feature_types() ) , reference_urls=[
"https://scikit-learn.org/stable/modules/generated/sklearn.metrics.mean_squared_error.html"
] , )
    def _get_feature_types(self):
if self.config_name == "multilist":
return {
"predictions": datasets.Sequence(datasets.Value("float" ) ),
"references": datasets.Sequence(datasets.Value("float" ) ),
}
else:
return {
"predictions": datasets.Value("float" ),
"references": datasets.Value("float" ),
}
    def _compute(self, predictions, references, sample_weight=None, multioutput="uniform_average", squared=True):
        mse = mean_squared_error(
            references, predictions, sample_weight=sample_weight, multioutput=multioutput, squared=squared
        )
        return {"mse": mse}
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
'''configuration_biogpt''': ['''BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''BioGptConfig'''],
'''tokenization_biogpt''': ['''BioGptTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_biogpt"] = [
'''BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''BioGptForCausalLM''',
'''BioGptForTokenClassification''',
'''BioGptForSequenceClassification''',
'''BioGptModel''',
'''BioGptPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_biogpt import BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP, BioGptConfig
from .tokenization_biogpt import BioGptTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_biogpt import (
BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST,
BioGptForCausalLM,
BioGptForSequenceClassification,
BioGptForTokenClassification,
BioGptModel,
BioGptPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
"""Single-bit manipulation helpers."""


def set_bit(number: int, position: int) -> int:
    """Set (turn on) the bit at `position` in `number`."""
    return number | (1 << position)


def clear_bit(number: int, position: int) -> int:
    """Clear (turn off) the bit at `position` in `number`."""
    return number & ~(1 << position)


def flip_bit(number: int, position: int) -> int:
    """Flip (toggle) the bit at `position` in `number`."""
    return number ^ (1 << position)


def is_bit_set(number: int, position: int) -> bool:
    """Return True if the bit at `position` in `number` is set."""
    return ((number >> position) & 1) == 1


def get_bit(number: int, position: int) -> int:
    """Return the bit (0 or 1) at `position` in `number`."""
    return int((number & (1 << position)) != 0)
if __name__ == "__main__":
import doctest
doctest.testmod()
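
# Worked example (my addition), with 13 = 0b1101:
#   set_bit(13, 1)    -> 15  (0b1111)
#   clear_bit(13, 2)  -> 9   (0b1001)
#   flip_bit(13, 0)   -> 12  (0b1100)
#   is_bit_set(13, 3) -> True
#   get_bit(13, 1)    -> 0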
"""Find the line in base_exp.txt whose base**exponent value is the greatest."""
import os
from math import log10


def solution(data_file: str = "base_exp.txt") -> int:
    """Return the 1-based line number holding the largest base**exponent."""
    largest: float = 0
    result = 0
    for i, line in enumerate(open(os.path.join(os.path.dirname(__file__), data_file))):
        a, x = list(map(int, line.split(",")))
        if x * log10(a) > largest:
            largest = x * log10(a)
            result = i + 1
    return result
if __name__ == "__main__":
print(solution())
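
# Why logarithms (my note): comparing base**exp directly would materialize huge
# integers; since log10 is monotonic, a**x > b**y iff x*log10(a) > y*log10(b),
# so each line costs O(1). E.g. 2**10 vs 3**6: 10*log10(2) ~= 3.01 beats
# 6*log10(3) ~= 2.86, and indeed 1024 > 729.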
"""Minimax value of a full binary game tree whose leaves hold the scores."""
from __future__ import annotations

import math


def minimax(depth: int, node_index: int, is_max: bool, scores: list[int], height: float) -> int:
    """Return the optimal value for the node at (depth, node_index), alternating max/min levels."""
    if depth < 0:
        raise ValueError("Depth cannot be less than 0")
    if not scores:
        raise ValueError("Scores cannot be empty")
    if depth == height:
        return scores[node_index]
    return (
        max(
            minimax(depth + 1, node_index * 2, False, scores, height),
            minimax(depth + 1, node_index * 2 + 1, False, scores, height),
        )
        if is_max
        else min(
            minimax(depth + 1, node_index * 2, True, scores, height),
            minimax(depth + 1, node_index * 2 + 1, True, scores, height),
        )
    )


def main() -> None:
    scores = [90, 23, 6, 33, 21, 65, 123, 34423]
    height = math.log(len(scores), 2)
    print(f"Optimal value : {minimax(0, 0, True, scores, height)}")
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
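
# Worked example (my note) for scores = [90, 23, 6, 33, 21, 65, 123, 34423]:
#   depth 2 (max): max(90,23)=90, max(6,33)=33, max(21,65)=65, max(123,34423)=34423
#   depth 1 (min): min(90,33)=33, min(65,34423)=65
#   depth 0 (max): max(33,65)=65  -> prints "Optimal value : 65"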
"""Deprecated feature-extractor alias for ImageGPT, kept for backward compatibility."""
import warnings

from ...utils import logging
from .image_processing_imagegpt import ImageGPTImageProcessor


logger = logging.get_logger(__name__)


class ImageGPTFeatureExtractor(ImageGPTImageProcessor):
    def __init__(self, *args, **kwargs):
        warnings.warn(
            "The class ImageGPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use ImageGPTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
"""Rayleigh quotient of a Hermitian matrix and a vector."""
from typing import Any

import numpy as np


def is_hermitian(matrix: np.ndarray) -> bool:
    """Return True if the matrix equals its own conjugate transpose."""
    return np.array_equal(matrix, matrix.conjugate().T)


def rayleigh_quotient(a: np.ndarray, v: np.ndarray) -> Any:
    """Return the Rayleigh quotient (v* A v) / (v* v) for Hermitian matrix `a` and vector `v`."""
    v_star = v.conjugate().T
    v_star_dot = v_star.dot(a)
    assert isinstance(v_star_dot, np.ndarray)
    return (v_star_dot.dot(v)) / (v_star.dot(v))


def tests() -> None:
    a = np.array([[2, 2 + 1j, 4], [2 - 1j, 3, 1j], [4, -1j, 1]])
    v = np.array([[1], [2], [3]])
    assert is_hermitian(a), f"{a} is not hermitian."
    print(rayleigh_quotient(a, v))

    a = np.array([[1, 2, 4], [2, 3, -1], [4, -1, 1]])
    assert is_hermitian(a), f"{a} is not hermitian."
    assert rayleigh_quotient(a, v) == float(3)
if __name__ == "__main__":
import doctest
doctest.testmod()
tests()
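
# Note (my addition): for a Hermitian matrix A the Rayleigh quotient is always
# real and bounded by the extreme eigenvalues, min(eig) <= R(A, v) <= max(eig);
# this is the quantity that power iteration drives toward the top eigenvalue.
# Quick numpy check against the second example above, whose quotient is exactly 3:
#   w = np.linalg.eigvalsh(np.array([[1, 2, 4], [2, 3, -1], [4, -1, 1]]))
#   assert w.min() <= 3 <= w.max()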
"""Unidirectional and bidirectional breadth-first search on a small grid."""
from __future__ import annotations

import time

Path = list[tuple[int, int]]

grid = [
    [0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0, 0],
    [1, 0, 1, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 1, 0, 0],
]

delta = [[-1, 0], [0, -1], [1, 0], [0, 1]]  # up, left, down, right


class Node:
    def __init__(self, pos_x: int, pos_y: int, goal_x: int, goal_y: int, parent: Node | None):
        self.pos_x = pos_x
        self.pos_y = pos_y
        self.pos = (pos_y, pos_x)
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.parent = parent


class BreadthFirstSearch:
    def __init__(self, start: tuple[int, int], goal: tuple[int, int]):
        self.start = Node(start[1], start[0], goal[1], goal[0], None)
        self.target = Node(goal[1], goal[0], goal[1], goal[0], None)

        self.node_queue = [self.start]
        self.reached = False

    def search(self) -> Path | None:
        while self.node_queue:
            current_node = self.node_queue.pop(0)

            if current_node.pos == self.target.pos:
                self.reached = True
                return self.retrace_path(current_node)

            successors = self.get_successors(current_node)

            for node in successors:
                self.node_queue.append(node)

        if not self.reached:
            return [self.start.pos]
        return None

    def get_successors(self, parent: Node) -> list[Node]:
        """Return the free in-grid neighbours of `parent`."""
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]
            if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(grid) - 1):
                continue

            if grid[pos_y][pos_x] != 0:
                continue

            successors.append(
                Node(pos_x, pos_y, self.target.pos_y, self.target.pos_x, parent)
            )
        return successors

    def retrace_path(self, node: Node | None) -> Path:
        """Retrace the path from `node` back to the start via parent links."""
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x))
            current_node = current_node.parent
        path.reverse()
        return path


class BidirectionalBreadthFirstSearch:
    def __init__(self, start: tuple[int, int], goal: tuple[int, int]):
        self.fwd_bfs = BreadthFirstSearch(start, goal)
        self.bwd_bfs = BreadthFirstSearch(goal, start)
        self.reached = False

    def search(self) -> Path | None:
        while self.fwd_bfs.node_queue or self.bwd_bfs.node_queue:
            current_fwd_node = self.fwd_bfs.node_queue.pop(0)
            current_bwd_node = self.bwd_bfs.node_queue.pop(0)

            if current_bwd_node.pos == current_fwd_node.pos:
                self.reached = True
                return self.retrace_bidirectional_path(current_fwd_node, current_bwd_node)

            self.fwd_bfs.target = current_bwd_node
            self.bwd_bfs.target = current_fwd_node

            successors = {
                self.fwd_bfs: self.fwd_bfs.get_successors(current_fwd_node),
                self.bwd_bfs: self.bwd_bfs.get_successors(current_bwd_node),
            }

            for bfs in [self.fwd_bfs, self.bwd_bfs]:
                for node in successors[bfs]:
                    bfs.node_queue.append(node)

        if not self.reached:
            return [self.fwd_bfs.start.pos]
        return None

    def retrace_bidirectional_path(self, fwd_node: Node, bwd_node: Node) -> Path:
        fwd_path = self.fwd_bfs.retrace_path(fwd_node)
        bwd_path = self.bwd_bfs.retrace_path(bwd_node)
        bwd_path.pop()
        bwd_path.reverse()
        path = fwd_path + bwd_path
        return path


if __name__ == "__main__":
    # all coordinates are given in format [y,x]
    import doctest

    doctest.testmod()
    init = (0, 0)
    goal = (len(grid) - 1, len(grid[0]) - 1)
    for elem in grid:
        print(elem)

    start_bfs_time = time.time()
    bfs = BreadthFirstSearch(init, goal)
    path = bfs.search()
    bfs_time = time.time() - start_bfs_time

    print("Unidirectional BFS computation time : ", bfs_time)

    start_bd_bfs_time = time.time()
    bd_bfs = BidirectionalBreadthFirstSearch(init, goal)
    bd_path = bd_bfs.search()
    bd_bfs_time = time.time() - start_bd_bfs_time

    print("Bidirectional BFS computation time : ", bd_bfs_time)
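
# Design note (my addition): with branching factor b and solution depth d,
# plain BFS explores on the order of b**d nodes, while the bidirectional
# variant grows two frontiers of depth ~d/2 (about b**(d/2) nodes each), which
# is why the second timing printed above is usually smaller on larger grids.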
"""simple docstring"""
import argparse
import json
import os
import fairseq
import torch
from torch import nn
from transformers import (
    Speech2Text2Config,
    Speech2Text2ForCausalLM,
    Speech2Text2Tokenizer,
    SpeechEncoderDecoderConfig,
    SpeechEncoderDecoderModel,
    Wav2Vec2Config,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2Model,
    logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

MAPPING = {
"post_extract_proj": "feature_projection.projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
"fc2": "encoder.layers.*.feed_forward.output_dense",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "encoder.layer_norm",
"w2v_model.layer_norm": "feature_projection.layer_norm",
"quantizer.weight_proj": "quantizer.weight_proj",
"quantizer.vars": "quantizer.codevectors",
"project_q": "project_q",
"final_proj": "project_hid",
"w2v_encoder.proj": "lm_head",
"mask_emb": "masked_spec_embed",
}
TOP_LEVEL_KEYS = [
"lm_head",
"quantizer.weight_proj",
"quantizer.codevectors",
"project_q",
"project_hid",
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
        f" {value.shape} for {full_name}"
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
def recursively_load_weights_wav2vec2(fairseq_model, hf_model):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.feature_extractor

    # if encoder has different dim to decoder -> use proj_weight
    proj_weight = None

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        elif name.split(".")[0] == "proj":
            proj_weight = fairseq_model.proj
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")

    return proj_weight
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"
                " found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def create_vocab_dict(dict_path):
    with open(dict_path, "r", encoding="utf-8") as f:
        lines = f.readlines()
        words = [line.split(" ")[0] for line in lines]

    num_words = len(words)

    vocab_dict = {
        "<s>": 0,
        "<pad>": 1,
        "</s>": 2,
        "<unk>": 3,
    }

    vocab_dict.update(dict(zip(words, range(4, num_words + 4))))
    return vocab_dict
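
# Example (my illustration): a fairseq dict file containing the two lines
# "low 104" and "er 53" yields
#   {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3, "low": 4, "er": 5}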
@torch.no_grad()
def convert_wav2vec2_checkpoint(
    checkpoint_path,
    pytorch_dump_folder_path,
    dict_path,
    encoder_config_path,
    decoder_config_path,
    vocab_size,
    num_decoder_layers,
):
    """
    Copy/paste/tweak the fairseq model's weights to the transformers design.
    """
    encoder_config = Wav2Vec2Config.from_pretrained(encoder_config_path)
    decoder_config = Speech2Text2Config.from_pretrained(
        decoder_config_path, vocab_size=vocab_size, decoder_layers=num_decoder_layers, do_stable_layer_norm=True
    )

    feature_extractor = Wav2Vec2FeatureExtractor(
        feature_size=1,
        sampling_rate=16000,
        padding_value=0,
        do_normalize=True,
        return_attention_mask=True,
    )

    model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
        [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
    )
    model = model[0].eval()

    # set weights for wav2vec2 encoder
    hf_encoder = Wav2Vec2Model(encoder_config)
    projection_layer = recursively_load_weights_wav2vec2(model.encoder, hf_encoder)

    hf_decoder = Speech2Text2ForCausalLM(decoder_config)
    missing_keys, unexpected_keys = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict(), strict=False)

    # set output linear layer
    unexpected_keys.remove("embed_out")
    hf_decoder.lm_head.weight = nn.Parameter(model.decoder.embed_out.detach())

    # layer norm is init to identity matrix so leaving it is fine
    logger.warning(f"The following keys are missing when loading the decoder weights: {missing_keys}")
    logger.warning(f"The following keys are unexpected when loading the decoder weights: {unexpected_keys}")

    hf_wav2vec = SpeechEncoderDecoderModel(encoder=hf_encoder, decoder=hf_decoder)
    hf_wav2vec.config.tie_word_embeddings = False

    # add projection layer
    hf_wav2vec.enc_to_dec_proj.weight = nn.Parameter(projection_layer.weight)
    hf_wav2vec.enc_to_dec_proj.bias = nn.Parameter(projection_layer.bias)

    vocab_dict = create_vocab_dict(dict_path)

    with open(os.path.join(pytorch_dump_folder_path, "vocab.json"), "w") as fp:
        json.dump(vocab_dict, fp)

    tokenizer = Speech2Text2Tokenizer(os.path.join(pytorch_dump_folder_path, "vocab.json"))
    tokenizer.save_pretrained(pytorch_dump_folder_path)

    config = hf_wav2vec.config.to_dict()
    config["pad_token_id"] = tokenizer.pad_token_id
    config["bos_token_id"] = tokenizer.bos_token_id
    config["eos_token_id"] = tokenizer.eos_token_id
    config["tokenizer_class"] = "speech_to_text_2"
    config["feature_extractor_type"] = "wav2vec2"

    hf_wav2vec.config = SpeechEncoderDecoderConfig.from_dict(config)

    hf_wav2vec.save_pretrained(pytorch_dump_folder_path)
    feature_extractor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
parser.add_argument(
"--encoder_config_path",
default="facebook/wav2vec2-large-lv60",
type=str,
help="Path to hf encoder wav2vec2 checkpoint config",
)
parser.add_argument(
"--decoder_config_path",
default="facebook/s2t-small-mustc-en-fr-st",
type=str,
help="Path to hf decoder s2t checkpoint config",
)
parser.add_argument("--vocab_size", default=1_0_2_2_4, type=int, help="Vocab size of decoder")
parser.add_argument("--num_decoder_layers", default=7, type=int, help="Number of decoder layers")
    args = parser.parse_args()
    convert_wav2vec2_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.dict_path,
encoder_config_path=args.encoder_config_path,
decoder_config_path=args.decoder_config_path,
vocab_size=args.vocab_size,
num_decoder_layers=args.num_decoder_layers,
)
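
# Example invocation (my illustration; the script name and all paths below are
# placeholders, not values taken from this file):
#   python convert_wav2vec2_seq2seq_original_to_pytorch.py \
#       --checkpoint_path /path/to/checkpoint.pt \
#       --dict_path /path/to/dict.ltr.txt \
#       --pytorch_dump_folder_path ./wav2vec2-speech2text2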
"""simple docstring"""
import gc
import unittest
from diffusers import FlaxControlNetModel, FlaxStableDiffusionControlNetPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
def snake_case ( self : Union[str, Any] )-> Dict:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
def snake_case ( self : str )-> Any:
lowerCamelCase__ , lowerCamelCase__ : Optional[Any] =FlaxControlNetModel.from_pretrained(
'''lllyasviel/sd-controlnet-canny''', from_pt=lowerCamelCase, dtype=jnp.bfloataa )
lowerCamelCase__ , lowerCamelCase__ : Optional[int] =FlaxStableDiffusionControlNetPipeline.from_pretrained(
'''runwayml/stable-diffusion-v1-5''', controlnet=lowerCamelCase, from_pt=lowerCamelCase, dtype=jnp.bfloataa )
lowerCamelCase__ : Optional[int] =controlnet_params
lowerCamelCase__ : Dict ='''bird'''
lowerCamelCase__ : List[str] =jax.device_count()
lowerCamelCase__ : Optional[Any] =pipe.prepare_text_inputs([prompts] * num_samples )
lowerCamelCase__ : Dict =load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png''' )
lowerCamelCase__ : List[Any] =pipe.prepare_image_inputs([canny_image] * num_samples )
lowerCamelCase__ : Optional[int] =jax.random.PRNGKey(0 )
lowerCamelCase__ : Dict =jax.random.split(lowerCamelCase, jax.device_count() )
lowerCamelCase__ : Tuple =replicate(lowerCamelCase )
lowerCamelCase__ : Tuple =shard(lowerCamelCase )
lowerCamelCase__ : Optional[int] =shard(lowerCamelCase )
lowerCamelCase__ : Tuple =pipe(
prompt_ids=lowerCamelCase, image=lowerCamelCase, params=lowerCamelCase, prng_seed=lowerCamelCase, num_inference_steps=50, jit=lowerCamelCase, ).images
assert images.shape == (jax.device_count(), 1, 768, 512, 3)
lowerCamelCase__ : Dict =images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
lowerCamelCase__ : Any =images[0, 253:256, 253:256, -1]
lowerCamelCase__ : Optional[Any] =jnp.asarray(jax.device_get(image_slice.flatten() ) )
lowerCamelCase__ : Dict =jnp.array(
[0.167_969, 0.116_699, 0.081_543, 0.154_297, 0.132_812, 0.108_887, 0.169_922, 0.169_922, 0.205_078] )
print(F'''output_slice: {output_slice}''' )
assert jnp.abs(output_slice - expected_slice ).max() < 1E-2
def snake_case ( self : Optional[int] )-> Optional[int]:
lowerCamelCase__ , lowerCamelCase__ : Dict =FlaxControlNetModel.from_pretrained(
'''lllyasviel/sd-controlnet-openpose''', from_pt=lowerCamelCase, dtype=jnp.bfloataa )
lowerCamelCase__ , lowerCamelCase__ : List[Any] =FlaxStableDiffusionControlNetPipeline.from_pretrained(
'''runwayml/stable-diffusion-v1-5''', controlnet=lowerCamelCase, from_pt=lowerCamelCase, dtype=jnp.bfloataa )
lowerCamelCase__ : Optional[Any] =controlnet_params
lowerCamelCase__ : int ='''Chef in the kitchen'''
lowerCamelCase__ : Optional[Any] =jax.device_count()
lowerCamelCase__ : Any =pipe.prepare_text_inputs([prompts] * num_samples )
lowerCamelCase__ : Any =load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/pose.png''' )
lowerCamelCase__ : List[Any] =pipe.prepare_image_inputs([pose_image] * num_samples )
lowerCamelCase__ : Tuple =jax.random.PRNGKey(0 )
lowerCamelCase__ : Optional[Any] =jax.random.split(lowerCamelCase, jax.device_count() )
lowerCamelCase__ : int =replicate(lowerCamelCase )
lowerCamelCase__ : List[Any] =shard(lowerCamelCase )
lowerCamelCase__ : int =shard(lowerCamelCase )
lowerCamelCase__ : Tuple =pipe(
prompt_ids=lowerCamelCase, image=lowerCamelCase, params=lowerCamelCase, prng_seed=lowerCamelCase, num_inference_steps=50, jit=lowerCamelCase, ).images
assert images.shape == (jax.device_count(), 1, 768, 512, 3)
lowerCamelCase__ : Dict =images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
lowerCamelCase__ : List[str] =images[0, 253:256, 253:256, -1]
lowerCamelCase__ : int =jnp.asarray(jax.device_get(image_slice.flatten() ) )
lowerCamelCase__ : Any =jnp.array(
[[0.271_484, 0.261_719, 0.275_391, 0.277_344, 0.279_297, 0.291_016, 0.294_922, 0.302_734, 0.302_734]] )
print(F'''output_slice: {output_slice}''' )
assert jnp.abs(output_slice - expected_slice ).max() < 1E-2
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

REALM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''google/realm-cc-news-pretrained-embedder''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/config.json'''
),
'''google/realm-cc-news-pretrained-encoder''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/config.json'''
),
'''google/realm-cc-news-pretrained-scorer''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/config.json'''
),
'''google/realm-cc-news-pretrained-openqa''': (
        '''https://huggingface.co/google/realm-cc-news-pretrained-openqa/resolve/main/config.json'''
),
'''google/realm-orqa-nq-openqa''': '''https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/config.json''',
'''google/realm-orqa-nq-reader''': '''https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/config.json''',
'''google/realm-orqa-wq-openqa''': '''https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/config.json''',
'''google/realm-orqa-wq-reader''': '''https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/config.json''',
# See all REALM models at https://huggingface.co/models?filter=realm
}
class RealmConfig(PretrainedConfig):
    """Configuration class for REALM models (embedder, encoder, scorer, reader, open-QA)."""

    model_type = "realm"

    def __init__(
        self,
        vocab_size=30_522,
        hidden_size=768,
        retriever_proj_size=128,
        num_hidden_layers=12,
        num_attention_heads=12,
        num_candidates=8,
        intermediate_size=3_072,
        hidden_act="gelu_new",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        span_hidden_size=256,
        max_span_width=10,
        reader_layer_norm_eps=1e-3,
        reader_beam_size=5,
        reader_seq_len=320,
        num_block_records=13_353_718,
        searcher_beam_size=5_000,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        # Common config
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.retriever_proj_size = retriever_proj_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_candidates = num_candidates
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps

        # Reader config
        self.span_hidden_size = span_hidden_size
        self.max_span_width = max_span_width
        self.reader_layer_norm_eps = reader_layer_norm_eps
        self.reader_beam_size = reader_beam_size
        self.reader_seq_len = reader_seq_len

        # Retrieval config
        self.num_block_records = num_block_records
        self.searcher_beam_size = searcher_beam_size
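
# Example (my addition): the zero-argument constructor reproduces the defaults
# listed above.
#   config = RealmConfig()
#   config.num_candidates      # -> 8
#   config.searcher_beam_size  # -> 5000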
import os
import sys
import unittest
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, 'utils'))
import get_test_info # noqa: E402
from get_test_info import ( # noqa: E402
get_model_to_test_mapping,
get_model_to_tester_mapping,
get_test_to_tester_mapping,
)
BERT_TEST_FILE = os.path.join("tests", "models", "bert", "test_modeling_bert.py")
BLIP_TEST_FILE = os.path.join("tests", "models", "blip", "test_modeling_blip.py")
class GetTestInfoTester(unittest.TestCase):
    def test_get_test_to_tester_mapping(self):
        bert_test_tester_mapping = get_test_to_tester_mapping(BERT_TEST_FILE)
        blip_test_tester_mapping = get_test_to_tester_mapping(BLIP_TEST_FILE)

        EXPECTED_BERT_MAPPING = {'BertModelTest': 'BertModelTester'}
        EXPECTED_BLIP_MAPPING = {
'BlipModelTest': 'BlipModelTester',
'BlipTextImageModelTest': 'BlipTextImageModelsModelTester',
'BlipTextModelTest': 'BlipTextModelTester',
'BlipTextRetrievalModelTest': 'BlipTextRetrievalModelTester',
'BlipVQAModelTest': 'BlipVQAModelTester',
'BlipVisionModelTest': 'BlipVisionModelTester',
}
        self.assertEqual(get_test_info.to_json(bert_test_tester_mapping), EXPECTED_BERT_MAPPING)
        self.assertEqual(get_test_info.to_json(blip_test_tester_mapping), EXPECTED_BLIP_MAPPING)

    def test_get_model_to_test_mapping(self):
        bert_model_test_mapping = get_model_to_test_mapping(BERT_TEST_FILE)
        blip_model_test_mapping = get_model_to_test_mapping(BLIP_TEST_FILE)
        EXPECTED_BERT_MAPPING = {
'BertForMaskedLM': ['BertModelTest'],
'BertForMultipleChoice': ['BertModelTest'],
'BertForNextSentencePrediction': ['BertModelTest'],
'BertForPreTraining': ['BertModelTest'],
'BertForQuestionAnswering': ['BertModelTest'],
'BertForSequenceClassification': ['BertModelTest'],
'BertForTokenClassification': ['BertModelTest'],
'BertLMHeadModel': ['BertModelTest'],
'BertModel': ['BertModelTest'],
}
        EXPECTED_BLIP_MAPPING = {
'BlipForConditionalGeneration': ['BlipTextImageModelTest'],
'BlipForImageTextRetrieval': ['BlipTextRetrievalModelTest'],
'BlipForQuestionAnswering': ['BlipVQAModelTest'],
'BlipModel': ['BlipModelTest'],
'BlipTextModel': ['BlipTextModelTest'],
'BlipVisionModel': ['BlipVisionModelTest'],
}
        self.assertEqual(get_test_info.to_json(bert_model_test_mapping), EXPECTED_BERT_MAPPING)
        self.assertEqual(get_test_info.to_json(blip_model_test_mapping), EXPECTED_BLIP_MAPPING)

    def test_get_model_to_tester_mapping(self):
        bert_model_tester_mapping = get_model_to_tester_mapping(BERT_TEST_FILE)
        blip_model_tester_mapping = get_model_to_tester_mapping(BLIP_TEST_FILE)

        EXPECTED_BERT_MAPPING = {
'BertForMaskedLM': ['BertModelTester'],
'BertForMultipleChoice': ['BertModelTester'],
'BertForNextSentencePrediction': ['BertModelTester'],
'BertForPreTraining': ['BertModelTester'],
'BertForQuestionAnswering': ['BertModelTester'],
'BertForSequenceClassification': ['BertModelTester'],
'BertForTokenClassification': ['BertModelTester'],
'BertLMHeadModel': ['BertModelTester'],
'BertModel': ['BertModelTester'],
}
        EXPECTED_BLIP_MAPPING = {
'BlipForConditionalGeneration': ['BlipTextImageModelsModelTester'],
'BlipForImageTextRetrieval': ['BlipTextRetrievalModelTester'],
'BlipForQuestionAnswering': ['BlipVQAModelTester'],
'BlipModel': ['BlipModelTester'],
'BlipTextModel': ['BlipTextModelTester'],
'BlipVisionModel': ['BlipVisionModelTester'],
}
        self.assertEqual(get_test_info.to_json(bert_model_tester_mapping), EXPECTED_BERT_MAPPING)
        self.assertEqual(get_test_info.to_json(blip_model_tester_mapping), EXPECTED_BLIP_MAPPING)
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""facebook/xlm-roberta-xl""": """https://huggingface.co/facebook/xlm-roberta-xl/resolve/main/config.json""",
"""facebook/xlm-roberta-xxl""": """https://huggingface.co/facebook/xlm-roberta-xxl/resolve/main/config.json""",
# See all XLM-RoBERTa-XL models at https://huggingface.co/models?filter=xlm-roberta-xl
}
class XLMRobertaXLConfig(PretrainedConfig):
    model_type = "xlm-roberta-xl"

    def __init__(
        self,
        vocab_size=250_880,
        hidden_size=2560,
        num_hidden_layers=36,
        num_attention_heads=32,
        intermediate_size=10_240,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=514,
        type_vocab_size=1,
        initializer_range=0.02,
        layer_norm_eps=1e-05,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class XLMRobertaXLOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
from typing import Callable, List, Optional, Tuple, Union
import torch
from transformers import CLIPTextModel, CLIPTokenizer
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin, Transformer2DModel, VQModel
from ...schedulers import VQDiffusionScheduler
from ...utils import logging
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
class LearnedClassifierFreeSamplingEmbeddings(ModelMixin, ConfigMixin):
    """Utility class for storing learned text embeddings for classifier-free sampling."""

    @register_to_config
    def __init__(self, learnable: bool, hidden_size: Optional[int] = None, length: Optional[int] = None):
        super().__init__()

        self.learnable = learnable

        if self.learnable:
            assert hidden_size is not None, "learnable=True requires `hidden_size` to be set"
            assert length is not None, "learnable=True requires `length` to be set"

            embeddings = torch.zeros(length, hidden_size)
        else:
            embeddings = None

        self.embeddings = torch.nn.Parameter(embeddings)
class UpperCAmelCase_ ( _lowercase):
snake_case__ = 42
snake_case__ = 42
snake_case__ = 42
snake_case__ = 42
snake_case__ = 42
snake_case__ = 42
def __init__( self : List[str] , __UpperCamelCase : VQModel , __UpperCamelCase : CLIPTextModel , __UpperCamelCase : CLIPTokenizer , __UpperCamelCase : TransformeraDModel , __UpperCamelCase : VQDiffusionScheduler , __UpperCamelCase : LearnedClassifierFreeSamplingEmbeddings , ) -> Optional[int]:
super().__init__()
self.register_modules(
vqvae=__UpperCamelCase , transformer=__UpperCamelCase , text_encoder=__UpperCamelCase , tokenizer=__UpperCamelCase , scheduler=__UpperCamelCase , learned_classifier_free_sampling_embeddings=__UpperCamelCase , )
def _UpperCamelCase ( self : Optional[Any] , __UpperCamelCase : Dict , __UpperCamelCase : Dict , __UpperCamelCase : List[str] ) -> str:
_UpperCamelCase = len(__UpperCamelCase ) if isinstance(__UpperCamelCase , __UpperCamelCase ) else 1
# get prompt text embeddings
_UpperCamelCase = self.tokenizer(
__UpperCamelCase , padding='''max_length''' , max_length=self.tokenizer.model_max_length , return_tensors='''pt''' , )
_UpperCamelCase = text_inputs.input_ids
if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
_UpperCamelCase = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
logger.warning(
'''The following part of your input was truncated because CLIP can only handle sequences up to'''
F''' {self.tokenizer.model_max_length} tokens: {removed_text}''' )
_UpperCamelCase = text_input_ids[:, : self.tokenizer.model_max_length]
_UpperCamelCase = self.text_encoder(text_input_ids.to(self.device ) )[0]
# NOTE: This additional step of normalizing the text embeddings is from VQ-Diffusion.
# While CLIP does normalize the pooled output of the text transformer when combining
# the image and text embeddings, CLIP does not directly normalize the last hidden state.
#
# CLIP normalizing the pooled output.
# https://github.com/huggingface/transformers/blob/d92e22d1f28324f513f3080e5c47c071a3916721/src/transformers/models/clip/modeling_clip.py#L1052-L1053
_UpperCamelCase = prompt_embeds / prompt_embeds.norm(dim=-1 , keepdim=__UpperCamelCase )
# duplicate text embeddings for each generation per prompt
_UpperCamelCase = prompt_embeds.repeat_interleave(__UpperCamelCase , dim=0 )
if do_classifier_free_guidance:
if self.learned_classifier_free_sampling_embeddings.learnable:
_UpperCamelCase = self.learned_classifier_free_sampling_embeddings.embeddings
_UpperCamelCase = negative_prompt_embeds.unsqueeze(0 ).repeat(__UpperCamelCase , 1 , 1 )
else:
_UpperCamelCase = [''''''] * batch_size
_UpperCamelCase = text_input_ids.shape[-1]
_UpperCamelCase = self.tokenizer(
__UpperCamelCase , padding='''max_length''' , max_length=__UpperCamelCase , truncation=__UpperCamelCase , return_tensors='''pt''' , )
_UpperCamelCase = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# See comment for normalizing text embeddings
_UpperCamelCase = negative_prompt_embeds / negative_prompt_embeds.norm(dim=-1 , keepdim=__UpperCamelCase )
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
_UpperCamelCase = negative_prompt_embeds.shape[1]
_UpperCamelCase = negative_prompt_embeds.repeat(1 , __UpperCamelCase , 1 )
_UpperCamelCase = negative_prompt_embeds.view(batch_size * num_images_per_prompt , __UpperCamelCase , -1 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
_UpperCamelCase = torch.cat([negative_prompt_embeds, prompt_embeds] )
return prompt_embeds
@torch.no_grad()
    def __call__(
        self,
        prompt: Union[str, List[str]],
        num_inference_steps: int = 100,
        guidance_scale: float = 5.0,
        truncation_rate: float = 1.0,
        num_images_per_prompt: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        latents: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
        callback_steps: int = 1,
    ) -> Union[ImagePipelineOutput, Tuple]:
        if isinstance(prompt, str):
            batch_size = 1
        elif isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")

        batch_size = batch_size * num_images_per_prompt

        do_classifier_free_guidance = guidance_scale > 1.0

        text_embeddings = self._encode_prompt(prompt, num_images_per_prompt, do_classifier_free_guidance)

        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
        ):
            raise ValueError(
                f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
                f" {type(callback_steps)}."
            )

        # get the initial completely masked latents unless the user supplied it
        latents_shape = (batch_size, self.transformer.num_latent_pixels)
        if latents is None:
            mask_class = self.transformer.num_vector_embeds - 1
            latents = torch.full(latents_shape, mask_class).to(self.device)
        else:
            if latents.shape != latents_shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}")
            if (latents < 0).any() or (latents >= self.transformer.num_vector_embeds).any():
                raise ValueError(
                    "Unexpected latents value(s). All latents must be valid embedding indices i.e. in the range 0,"
                    f" {self.transformer.num_vector_embeds - 1} (inclusive)."
                )
            latents = latents.to(self.device)

        # set timesteps
        self.scheduler.set_timesteps(num_inference_steps, device=self.device)

        timesteps_tensor = self.scheduler.timesteps.to(self.device)

        sample = latents

        for i, t in enumerate(self.progress_bar(timesteps_tensor)):
            # expand the sample if we are doing classifier free guidance
            latent_model_input = torch.cat([sample] * 2) if do_classifier_free_guidance else sample

            # predict the un-noised image
            # model_output == `log_p_x_0`
            model_output = self.transformer(latent_model_input, encoder_hidden_states=text_embeddings, timestep=t).sample

            if do_classifier_free_guidance:
                model_output_uncond, model_output_text = model_output.chunk(2)
                model_output = model_output_uncond + guidance_scale * (model_output_text - model_output_uncond)
                model_output -= torch.logsumexp(model_output, dim=1, keepdim=True)

            model_output = self.truncate(model_output, truncation_rate)

            # remove `log(0)`'s (`-inf`s)
            model_output = model_output.clamp(-70)

            # compute the previous noisy sample x_t -> x_t-1
            sample = self.scheduler.step(model_output, timestep=t, sample=sample, generator=generator).prev_sample

            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                callback(i, t, sample)

        embedding_channels = self.vqvae.config.vq_embed_dim
        embeddings_shape = (batch_size, self.transformer.height, self.transformer.width, embedding_channels)
        embeddings = self.vqvae.quantize.get_codebook_entry(sample, shape=embeddings_shape)
        image = self.vqvae.decode(embeddings, force_not_quantize=True).sample

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)

    def truncate(self, log_p_x_0: torch.FloatTensor, truncation_rate: float) -> torch.FloatTensor:
        sorted_log_p_x_0, indices = torch.sort(log_p_x_0, 1, descending=True)
        sorted_p_x_0 = torch.exp(sorted_log_p_x_0)
        keep_mask = sorted_p_x_0.cumsum(dim=1) < truncation_rate

        # Ensure that at least the largest probability is not zeroed out
        all_true = torch.full_like(keep_mask[:, 0:1, :], True)
        keep_mask = torch.cat((all_true, keep_mask), dim=1)
        keep_mask = keep_mask[:, :-1, :]

        keep_mask = keep_mask.gather(1, indices.argsort(1))

        rv = log_p_x_0.clone()

        rv[~keep_mask] = -torch.inf  # -inf = log(0)

        return rv
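
# A minimal standalone sketch of the truncation idea above on a toy distribution
# (pure PyTorch, independent of the pipeline; the numbers are made up, and it
# assumes `torch` is imported at the top of this module, as it is for diffusers
# pipelines). Classes are kept greedily by probability until the cumulative mass
# reaches the truncation rate, and the argmax is always retained:
if __name__ == "__main__":
    toy_log_p = torch.log(torch.tensor([[[0.05], [0.60], [0.25], [0.10]]]))  # (batch, classes, pixels)
    sorted_log_p, toy_indices = torch.sort(toy_log_p, 1, descending=True)
    keep = torch.exp(sorted_log_p).cumsum(dim=1) < 0.8
    # always keep the most likely class, then drop the overflowing entry
    keep = torch.cat((torch.full_like(keep[:, :1, :], True), keep), dim=1)[:, :-1, :]
    keep = keep.gather(1, toy_indices.argsort(1))
    truncated = toy_log_p.clone()
    truncated[~keep] = -torch.inf
    print(truncated)  # only the 0.60 and 0.25 classes survive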
import argparse
import os
import re
import packaging.version
PATH_TO_EXAMPLES = "examples/"
REPLACE_PATTERNS = {
    "examples": (re.compile(r'^check_min_version\("[^"]+"\)\s*$', re.MULTILINE), 'check_min_version("VERSION")\n'),
    "init": (re.compile(r'^__version__\s+=\s+"([^"]+)"\s*$', re.MULTILINE), '__version__ = "VERSION"\n'),
    "setup": (re.compile(r'^(\s*)version\s*=\s*"[^"]+",', re.MULTILINE), r'\1version="VERSION",'),
    "doc": (re.compile(r'^(\s*)release\s*=\s*"[^"]+"$', re.MULTILINE), 'release = "VERSION"\n'),
}
REPLACE_FILES = {
    "init": "src/diffusers/__init__.py",
    "setup": "setup.py",
}
README_FILE = "README.md"


def update_version_in_file(fname, version, pattern):
    """Update the version in one file, using the replace pattern registered for it."""
    with open(fname, "r", encoding="utf-8", newline="\n") as f:
        code = f.read()
    re_pattern, replace = REPLACE_PATTERNS[pattern]
    replace = replace.replace("VERSION", version)
    code = re_pattern.sub(replace, code)
    with open(fname, "w", encoding="utf-8", newline="\n") as f:
        f.write(code)


def update_version_in_examples(version):
    """Update the pinned version in every example script."""
    for folder, directories, fnames in os.walk(PATH_TO_EXAMPLES):
        # Removing some of the folders with non-actively maintained examples from the walk
        if "research_projects" in directories:
            directories.remove("research_projects")
        if "legacy" in directories:
            directories.remove("legacy")
        for fname in fnames:
            if fname.endswith(".py"):
                update_version_in_file(os.path.join(folder, fname), version, pattern="examples")


def global_version_update(version, patch=False):
    """Update the version in all needed files."""
    for pattern, fname in REPLACE_FILES.items():
        update_version_in_file(fname, version, pattern)
    if not patch:
        update_version_in_examples(version)


def clean_main_ref_in_model_list():
    """Replace the links from main doc to stable doc in the model list of the README."""
    _start_prompt = "🤗 Transformers currently provides the following architectures"
    _end_prompt = "1. Want to contribute a new model?"
    with open(README_FILE, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    # Find the start of the list.
    start_index = 0
    while not lines[start_index].startswith(_start_prompt):
        start_index += 1
    start_index += 1

    index = start_index
    # Update the lines in the model list.
    while not lines[index].startswith(_end_prompt):
        if lines[index].startswith("1."):
            lines[index] = lines[index].replace(
                "https://huggingface.co/docs/diffusers/main/model_doc",
                "https://huggingface.co/docs/diffusers/model_doc",
            )
        index += 1

    with open(README_FILE, "w", encoding="utf-8", newline="\n") as f:
        f.writelines(lines)


def get_version():
    """Read the current version from the main __init__."""
    with open(REPLACE_FILES["init"], "r") as f:
        code = f.read()
    default_version = REPLACE_PATTERNS["init"][0].search(code).groups()[0]
    return packaging.version.parse(default_version)


def pre_release_work(patch=False):
    """Do all the necessary pre-release steps."""
    # First let's get the default version: base version if we are in dev, bump minor otherwise.
    default_version = get_version()
    if patch and default_version.is_devrelease:
        raise ValueError("Can't create a patch version from the dev branch, checkout a released version!")
    if default_version.is_devrelease:
        default_version = default_version.base_version
    elif patch:
        default_version = f"{default_version.major}.{default_version.minor}.{default_version.micro + 1}"
    else:
        default_version = f"{default_version.major}.{default_version.minor + 1}.0"

    # Now let's ask nicely if that's the right one.
    version = input(f"Which version are you releasing? [{default_version}]")
    if len(version) == 0:
        version = default_version
    print(f"Updating version to {version}.")
    global_version_update(version, patch=patch)


def post_release_work():
    """Do all the necessary post-release steps."""
    # First let's get the current version.
    current_version = get_version()
    dev_version = f"{current_version.major}.{current_version.minor + 1}.0.dev0"
    current_version = current_version.base_version

    # Check with the user we got that right.
    version = input(f"Which version are we developing now? [{dev_version}]")
    if len(version) == 0:
        version = dev_version
    print(f"Updating version to {version}.")
    global_version_update(version)
    # print("Cleaning main README, don't forget to run `make fix-copies`.")
    # clean_main_ref_in_model_list()


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--post_release", action="store_true", help="Whether this is pre or post release.")
    parser.add_argument("--patch", action="store_true", help="Whether or not this is a patch release.")
    args = parser.parse_args()
    if not args.post_release:
        pre_release_work(patch=args.patch)
    elif args.patch:
        print("Nothing to do after a patch :-)")
    else:
        post_release_work()
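
# A quick sketch of what the "init" pattern above does (the sample string is
# made up for illustration):
#
#     >>> pattern, replacement = REPLACE_PATTERNS["init"]
#     >>> pattern.sub(replacement.replace("VERSION", "0.17.0"), '__version__ = "0.17.0.dev0"\n')
#     '__version__ = "0.17.0"\n'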
from manim import *
class Stage5(Scene):
    def construct(self):
        mem = Rectangle(height=0.5, width=0.5)
        meta_mem = Rectangle(height=0.25, width=0.25)
        fill = Rectangle(height=0.46, width=0.46).set_stroke(width=0)

        cpu_left_col_base = [mem.copy() for i in range(6)]
        cpu_right_col_base = [mem.copy() for i in range(6)]
        cpu_left_col = VGroup(*cpu_left_col_base).arrange(UP, buff=0)
        cpu_right_col = VGroup(*cpu_right_col_base).arrange(UP, buff=0)
        cpu_rects = VGroup(cpu_left_col, cpu_right_col).arrange(RIGHT, buff=0)
        cpu_text = Text("CPU", font_size=24)
        cpu = Group(cpu_rects, cpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        cpu.move_to([-2.5, -0.5, 0])
        self.add(cpu)

        gpu_base = [mem.copy() for i in range(4)]
        gpu_rect = VGroup(*gpu_base).arrange(UP, buff=0)
        gpu_text = Text("GPU", font_size=24)
        gpu = Group(gpu_rect, gpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        gpu.move_to([-1, -1, 0])
        self.add(gpu)

        model_base = [mem.copy() for i in range(6)]
        model_rect = VGroup(*model_base).arrange(RIGHT, buff=0)
        model_text = Text("Model", font_size=24)
        model = Group(model_rect, model_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        model.move_to([3, -1.0, 0])
        self.add(model)

        model_arr = []
        model_cpu_arr = []
        model_meta_arr = []

        for i, rect in enumerate(model_base):
            rect.set_stroke(YELLOW)
            cpu_target = Rectangle(height=0.46 / 4, width=0.46 / 3).set_stroke(width=0.0).set_fill(YELLOW, opacity=0.7)

            if i == 0:
                cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT), buff=0.02, direction=UP)
                cpu_target.set_x(cpu_target.get_x() + 0.1)
            elif i == 3:
                cpu_target.next_to(model_cpu_arr[0], direction=UP, buff=0.0)
            else:
                cpu_target.next_to(model_cpu_arr[i - 1], direction=RIGHT, buff=0.0)
            self.add(cpu_target)
            model_cpu_arr.append(cpu_target)

        self.add(*model_arr, *model_cpu_arr, *model_meta_arr)

        checkpoint_base = [mem.copy() for i in range(6)]
        checkpoint_rect = VGroup(*checkpoint_base).arrange(RIGHT, buff=0)
        checkpoint_text = Text("Loaded Checkpoint", font_size=24)
        checkpoint = Group(checkpoint_rect, checkpoint_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        checkpoint.move_to([3, 0.5, 0])
        self.add(checkpoint)

        ckpt_arr = []
        ckpt_cpu_arr = []

        for i, rect in enumerate(checkpoint_base):
            target = fill.copy().set_fill(BLUE, opacity=0.7)
            target.move_to(rect)
            ckpt_arr.append(target)

            cpu_target = target.copy()
            if i < 5:
                cpu_target.move_to(cpu_left_col_base[i + 1])
            else:
                cpu_target.move_to(cpu_right_col_base[i - 5])
            ckpt_cpu_arr.append(cpu_target)
        self.add(*ckpt_arr, *ckpt_cpu_arr)

        key = Square(side_length=2.2)
        key.move_to([-5, 2, 0])

        key_text = MarkupText(
            f"<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model",
            font_size=18,
        )
        key_text.move_to([-5, 2.4, 0])

        self.add(key_text, key)

        blue_text = MarkupText(
            f"<span fgcolor='{BLUE}'>●</span> Checkpoint",
            font_size=18,
        )
        blue_text.next_to(key_text, DOWN * 2.4, aligned_edge=key_text.get_left())
        self.add(blue_text)

        step_6 = MarkupText(
            "Based on the passed in configuration, weights are stored in\na variety of np.memmaps on disk or to a particular device.",
            font_size=24,
        )
        step_6.move_to([2, 2, 0])

        disk_left_col_base = [meta_mem.copy() for i in range(6)]
        disk_right_col_base = [meta_mem.copy() for i in range(6)]
        disk_left_col = VGroup(*disk_left_col_base).arrange(UP, buff=0)
        disk_right_col = VGroup(*disk_right_col_base).arrange(UP, buff=0)
        disk_rects = VGroup(disk_left_col, disk_right_col).arrange(RIGHT, buff=0)
        disk_text = Text("Disk", font_size=24)
        disk = Group(disk_rects, disk_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        disk.move_to([-4.0, -1.25, 0])
        self.play(Write(step_6, run_time=3), Write(disk_text, run_time=1), Create(disk_rects, run_time=1))

        animations = []
        for i, rect in enumerate(ckpt_cpu_arr):
            target = rect.copy()
            target.generate_target()
            target.target.move_to(disk_left_col_base[i]).scale(0.5)
            animations.append(MoveToTarget(target, run_time=1.5))
        self.play(*animations)

        self.play(FadeOut(step_6))

        step_7 = MarkupText("Then, the checkpoint is removed from memory\nthrough garbage collection.", font_size=24)
        step_7.move_to([2, 2, 0])

        self.play(Write(step_7, run_time=3))

        self.play(
            FadeOut(checkpoint_rect, checkpoint_text, *ckpt_arr, *ckpt_cpu_arr),
        )

        self.wait()
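
# To render this scene locally with manim (the module name `stage_5.py` and the
# scene class name are reconstructions, so treat both as assumptions):
#
#     manim -pql stage_5.py Stage5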
'''simple docstring'''
import argparse
import glob
import importlib.util
import os
import re
import black
from doc_builder.style_doc import style_docstrings_in_code
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
DIFFUSERS_PATH = "src/diffusers"
REPO_PATH = "."

# This is to make sure the diffusers module imported is the one in the repo.
spec = importlib.util.spec_from_file_location(
    "diffusers",
    os.path.join(DIFFUSERS_PATH, "__init__.py"),
    submodule_search_locations=[DIFFUSERS_PATH],
)
diffusers_module = spec.loader.load_module()


def _should_continue(line, indent):
    return line.startswith(indent) or len(line) <= 1 or re.search(r"^\s*\)(\s*->.*:|:)\s*$", line) is not None


def find_code_in_diffusers(object_name):
    """Find and return the source code of `object_name`."""
    parts = object_name.split(".")
    i = 0

    # First let's find the module where our object lives.
    module = parts[i]
    while i < len(parts) and not os.path.isfile(os.path.join(DIFFUSERS_PATH, f"{module}.py")):
        i += 1
        if i < len(parts):
            module = os.path.join(module, parts[i])
    if i >= len(parts):
        raise ValueError(f"`object_name` should begin with the name of a module of diffusers but got {object_name}.")

    with open(os.path.join(DIFFUSERS_PATH, f"{module}.py"), "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    # Now let's find the class / func in the code!
    indent = ""
    line_index = 0
    for name in parts[i + 1 :]:
        while (
            line_index < len(lines) and re.search(rf"^{indent}(class|def)\s+{name}(\(|\:)", lines[line_index]) is None
        ):
            line_index += 1
        indent += "    "
        line_index += 1

    if line_index >= len(lines):
        raise ValueError(f" {object_name} does not match any function or class in {module}.")

    # We found the beginning of the class / func, now let's find the end (when the indent diminishes).
    start_index = line_index
    while line_index < len(lines) and _should_continue(lines[line_index], indent):
        line_index += 1
    # Clean up empty lines at the end (if any).
    while len(lines[line_index - 1]) <= 1:
        line_index -= 1

    code_lines = lines[start_index:line_index]
    return "".join(code_lines)


_re_copy_warning = re.compile(r"^(\s*)#\s*Copied from\s+diffusers\.(\S+\.\S+)\s*($|\S.*$)")
_re_replace_pattern = re.compile(r"^\s*(\S+)->(\S+)(\s+.*|$)")
_re_fill_pattern = re.compile(r"<FILL\s+[^>]*>")


def get_indent(code):
    lines = code.split("\n")
    idx = 0
    while idx < len(lines) and len(lines[idx]) == 0:
        idx += 1
    if idx < len(lines):
        return re.search(r"^(\s*)\S", lines[idx]).groups()[0]
    return ""


def blackify(code):
    """Applies the black part of our `make style` command to `code`."""
    has_indent = len(get_indent(code)) > 0
    if has_indent:
        code = f"class Bla:\n{code}"
    mode = black.Mode(target_versions={black.TargetVersion.PY37}, line_length=119, preview=True)
    result = black.format_str(code, mode=mode)
    result, _ = style_docstrings_in_code(result)
    return result[len("class Bla:\n") :] if has_indent else result


def is_copy_consistent(filename, overwrite=False):
    """
    Check if the code commented as a copy in `filename` matches the original.
    Return the differences or overwrites the content depending on `overwrite`.
    """
    with open(filename, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    diffs = []
    line_index = 0
    # Not a for loop cause `lines` is going to change (if `overwrite=True`).
    while line_index < len(lines):
        search = _re_copy_warning.search(lines[line_index])
        if search is None:
            line_index += 1
            continue

        # There is some copied code here, let's retrieve the original.
        indent, object_name, replace_pattern = search.groups()
        theoretical_code = find_code_in_diffusers(object_name)
        theoretical_indent = get_indent(theoretical_code)

        start_index = line_index + 1 if indent == theoretical_indent else line_index + 2
        indent = theoretical_indent
        line_index = start_index

        # Loop to check the observed code, stop when indentation diminishes or if we see a End copy comment.
        should_continue = True
        while line_index < len(lines) and should_continue:
            line_index += 1
            if line_index >= len(lines):
                break
            line = lines[line_index]
            should_continue = _should_continue(line, indent) and re.search(f"^{indent}# End copy", line) is None
        # Clean up empty lines at the end (if any).
        while len(lines[line_index - 1]) <= 1:
            line_index -= 1

        observed_code_lines = lines[start_index:line_index]
        observed_code = "".join(observed_code_lines)

        # Remove any nested `Copied from` comments to avoid circular copies
        theoretical_code = [line for line in theoretical_code.split("\n") if _re_copy_warning.search(line) is None]
        theoretical_code = "\n".join(theoretical_code)

        # Before comparing, use the `replace_pattern` on the original code.
        if len(replace_pattern) > 0:
            patterns = replace_pattern.replace("with", "").split(",")
            patterns = [_re_replace_pattern.search(p) for p in patterns]
            for pattern in patterns:
                if pattern is None:
                    continue
                obj1, obj2, option = pattern.groups()
                theoretical_code = re.sub(obj1, obj2, theoretical_code)
                if option.strip() == "all-casing":
                    theoretical_code = re.sub(obj1.lower(), obj2.lower(), theoretical_code)
                    theoretical_code = re.sub(obj1.upper(), obj2.upper(), theoretical_code)

            # Blackify after replacement. To be able to do that, we need the header (class or function definition)
            # from the previous line
            theoretical_code = blackify(lines[start_index - 1] + theoretical_code)
            theoretical_code = theoretical_code[len(lines[start_index - 1]) :]

        # Test for a diff and act accordingly.
        if observed_code != theoretical_code:
            diffs.append([object_name, start_index])
            if overwrite:
                lines = lines[:start_index] + [theoretical_code] + lines[line_index:]
                line_index = start_index + 1

    if overwrite and len(diffs) > 0:
        # Warn the user a file has been modified.
        print(f"Detected changes, rewriting {filename}.")
        with open(filename, "w", encoding="utf-8", newline="\n") as f:
            f.writelines(lines)
    return diffs


def check_copies(overwrite: bool = False):
    all_files = glob.glob(os.path.join(DIFFUSERS_PATH, "**/*.py"), recursive=True)
    diffs = []
    for filename in all_files:
        new_diffs = is_copy_consistent(filename, overwrite)
        diffs += [f"- {filename}: copy does not match {d[0]} at line {d[1]}" for d in new_diffs]
    if not overwrite and len(diffs) > 0:
        diff = "\n".join(diffs)
        raise Exception(
            "Found the following copy inconsistencies:\n"
            + diff
            + "\nRun `make fix-copies` or `python utils/check_copies.py --fix_and_overwrite` to fix them."
        )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()

    check_copies(args.fix_and_overwrite)
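
# A small illustration of the copy marker that `_re_copy_warning` matches (the
# object path below is made up for the example):
#
#     >>> match = _re_copy_warning.search(
#     ...     "    # Copied from diffusers.models.attention.BasicTransformerBlock with Basic->Cool"
#     ... )
#     >>> match.groups()
#     ('    ', 'models.attention.BasicTransformerBlock', 'with Basic->Cool')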
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {"configuration_fnet": ["FNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "FNetConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_fnet"] = ["FNetTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_fnet_fast"] = ["FNetTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_fnet"] = [
"FNET_PRETRAINED_MODEL_ARCHIVE_LIST",
"FNetForMaskedLM",
"FNetForMultipleChoice",
"FNetForNextSentencePrediction",
"FNetForPreTraining",
"FNetForQuestionAnswering",
"FNetForSequenceClassification",
"FNetForTokenClassification",
"FNetLayer",
"FNetModel",
"FNetPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_fnet import FNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_fnet import FNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_fnet_fast import FNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_fnet import (
FNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FNetForMaskedLM,
FNetForMultipleChoice,
FNetForNextSentencePrediction,
FNetForPreTraining,
FNetForQuestionAnswering,
FNetForSequenceClassification,
FNetForTokenClassification,
FNetLayer,
FNetModel,
FNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
'''simple docstring'''
# Author: OMKAR PATHAK, Nwachukwu Chidiebere
# Use a Python dictionary to construct the graph.
from __future__ import annotations
from pprint import pformat
from typing import Generic, TypeVar
T = TypeVar("T")


class GraphAdjacencyList(Generic[T]):
    """
    Adjacency List type Graph Data Structure that accounts for directed and
    undirected Graphs.
    """

    def __init__(self, directed: bool = True) -> None:
        self.adj_list: dict[T, list[T]] = {}  # dictionary of lists
        self.directed = directed

    def add_edge(self, source_vertex: T, destination_vertex: T) -> GraphAdjacencyList[T]:
        """
        Connects vertices together by creating an edge from source vertex to
        destination vertex. Vertices will be created if not found in the graph.
        """
        if not self.directed:  # For undirected graphs
            # if both source vertex and destination vertex are both present in the
            # adjacency list, add destination vertex to source vertex list of adjacent
            # vertices and add source vertex to destination vertex list of adjacent
            # vertices.
            if source_vertex in self.adj_list and destination_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
                self.adj_list[destination_vertex].append(source_vertex)
            # if only source vertex is present in adjacency list, add destination vertex
            # to source vertex list of adjacent vertices, then create a new vertex with
            # destination vertex as key and assign a list containing the source vertex
            # as it's first adjacent vertex.
            elif source_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
                self.adj_list[destination_vertex] = [source_vertex]
            # if only destination vertex is present in adjacency list, add source vertex
            # to destination vertex list of adjacent vertices, then create a new vertex
            # with source vertex as key and assign a list containing the destination
            # vertex as it's first adjacent vertex.
            elif destination_vertex in self.adj_list:
                self.adj_list[destination_vertex].append(source_vertex)
                self.adj_list[source_vertex] = [destination_vertex]
            # if both source vertex and destination vertex are not present in adjacency
            # list, create a new vertex with source vertex as key and assign a list
            # containing the destination vertex as it's first adjacent vertex also
            # create a new vertex with destination vertex as key and assign a list
            # containing the source vertex as it's first adjacent vertex.
            else:
                self.adj_list[source_vertex] = [destination_vertex]
                self.adj_list[destination_vertex] = [source_vertex]
        else:  # For directed graphs
            # if both source vertex and destination vertex are present in adjacency
            # list, add destination vertex to source vertex list of adjacent vertices.
            if source_vertex in self.adj_list and destination_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
            # if only source vertex is present in adjacency list, add destination
            # vertex to source vertex list of adjacent vertices and create a new vertex
            # with destination vertex as key, which has no adjacent vertex
            elif source_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
                self.adj_list[destination_vertex] = []
            # if only destination vertex is present in adjacency list, create a new
            # vertex with source vertex as key and assign a list containing destination
            # vertex as first adjacent vertex
            elif destination_vertex in self.adj_list:
                self.adj_list[source_vertex] = [destination_vertex]
            # if both source vertex and destination vertex are not present in adjacency
            # list, create a new vertex with source vertex as key and a list containing
            # destination vertex as it's first adjacent vertex. Then create a new vertex
            # with destination vertex as key, which has no adjacent vertex
            else:
                self.adj_list[source_vertex] = [destination_vertex]
                self.adj_list[destination_vertex] = []

        return self

    def __repr__(self) -> str:
        return pformat(self.adj_list)
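
# A short usage sketch (hypothetical vertices, default directed graph):
#
#     >>> graph = GraphAdjacencyList[int]()
#     >>> graph.add_edge(0, 1).add_edge(1, 2)
#     {0: [1], 1: [2], 2: []}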
'''simple docstring'''
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import DetrConfig, MaskFormerConfig, SwinConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskFormerForInstanceSegmentation, MaskFormerModel
if is_vision_available():
from transformers import MaskFormerImageProcessor
if is_vision_available():
from PIL import Image
class MaskFormerModelTester:
    def __init__(
        self,
        parent,
        batch_size=2,
        is_training=True,
        use_auxiliary_loss=False,
        num_queries=10,
        num_channels=3,
        min_size=32 * 4,
        max_size=32 * 6,
        num_labels=4,
        mask_feature_size=32,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.is_training = is_training
        self.use_auxiliary_loss = use_auxiliary_loss
        self.num_queries = num_queries
        self.num_channels = num_channels
        self.min_size = min_size
        self.max_size = max_size
        self.num_labels = num_labels
        self.mask_feature_size = mask_feature_size

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size]).to(
            torch_device
        )

        pixel_mask = torch.ones([self.batch_size, self.min_size, self.max_size], device=torch_device)

        mask_labels = (
            torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size], device=torch_device) > 0.5
        ).float()
        class_labels = (torch.rand((self.batch_size, self.num_labels), device=torch_device) > 0.5).long()

        config = self.get_config()
        return config, pixel_values, pixel_mask, mask_labels, class_labels

    def get_config(self):
        return MaskFormerConfig.from_backbone_and_decoder_configs(
            backbone_config=SwinConfig(
                depths=[1, 1, 1, 1],
            ),
            decoder_config=DetrConfig(
                decoder_ffn_dim=128,
                num_queries=self.num_queries,
                decoder_attention_heads=2,
                d_model=self.mask_feature_size,
            ),
            mask_feature_size=self.mask_feature_size,
            fpn_feature_size=self.mask_feature_size,
            num_channels=self.num_channels,
            num_labels=self.num_labels,
        )

    def prepare_config_and_inputs_for_common(self):
        config, pixel_values, pixel_mask, mask_labels, class_labels = self.prepare_config_and_inputs()
        inputs_dict = {"pixel_values": pixel_values, "pixel_mask": pixel_mask}
        return config, inputs_dict

    def check_output_hidden_state(self, output, config):
        encoder_hidden_states = output.encoder_hidden_states
        pixel_decoder_hidden_states = output.pixel_decoder_hidden_states
        transformer_decoder_hidden_states = output.transformer_decoder_hidden_states

        self.parent.assertTrue(len(encoder_hidden_states), len(config.backbone_config.depths))
        self.parent.assertTrue(len(pixel_decoder_hidden_states), len(config.backbone_config.depths))
        self.parent.assertTrue(len(transformer_decoder_hidden_states), config.decoder_config.decoder_layers)

    def create_and_check_maskformer_model(self, config, pixel_values, pixel_mask, output_hidden_states=False):
        with torch.no_grad():
            model = MaskFormerModel(config=config)
            model.to(torch_device)
            model.eval()

            output = model(pixel_values=pixel_values, pixel_mask=pixel_mask)
            output = model(pixel_values, output_hidden_states=True)
        # the correct shape of output.transformer_decoder_hidden_states ensure the correcteness of the
        # encoder and pixel decoder
        self.parent.assertEqual(
            output.transformer_decoder_last_hidden_state.shape,
            (self.batch_size, self.num_queries, self.mask_feature_size),
        )
        # let's ensure the other two hidden state exists
        self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None)
        self.parent.assertTrue(output.encoder_last_hidden_state is not None)

        if output_hidden_states:
            self.check_output_hidden_state(output, config)

    def create_and_check_maskformer_instance_segmentation_head_model(
        self, config, pixel_values, pixel_mask, mask_labels, class_labels
    ):
        model = MaskFormerForInstanceSegmentation(config=config)
        model.to(torch_device)
        model.eval()

        def comm_check_on_output(result):
            # let's still check that all the required stuff is there
            self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None)
            self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None)
            self.parent.assertTrue(result.encoder_last_hidden_state is not None)
            # okay, now we need to check the logits shape
            # due to the encoder compression, masks have a //4 spatial size
            self.parent.assertEqual(
                result.masks_queries_logits.shape,
                (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4),
            )
            # + 1 for null class
            self.parent.assertEqual(
                result.class_queries_logits.shape, (self.batch_size, self.num_queries, self.num_labels + 1)
            )

        with torch.no_grad():
            result = model(pixel_values=pixel_values, pixel_mask=pixel_mask)
            result = model(pixel_values)

            comm_check_on_output(result)

            result = model(
                pixel_values=pixel_values, pixel_mask=pixel_mask, mask_labels=mask_labels, class_labels=class_labels
            )

            comm_check_on_output(result)

        self.parent.assertTrue(result.loss is not None)
        self.parent.assertEqual(result.loss.shape, torch.Size([1]))
@require_torch
class MaskFormerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (MaskFormerModel, MaskFormerForInstanceSegmentation) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": MaskFormerModel, "image-segmentation": MaskFormerForInstanceSegmentation}
        if is_torch_available()
        else {}
    )

    is_encoder_decoder = False
    test_pruning = False
    test_head_masking = False
    test_missing_keys = False

    def setUp(self):
        self.model_tester = MaskFormerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MaskFormerConfig, has_text_modality=False)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_maskformer_model(self):
        config, inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.create_and_check_maskformer_model(config, **inputs, output_hidden_states=False)

    def test_maskformer_instance_segmentation_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_maskformer_instance_segmentation_head_model(*config_and_inputs)

    @unittest.skip(reason="MaskFormer does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="MaskFormer does not have a get_input_embeddings method")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="MaskFormer is not a generative model")
    def test_generate_without_input_ids(self):
        pass

    @unittest.skip(reason="MaskFormer does not use token embeddings")
    def test_resize_tokens_embeddings(self):
        pass

    @require_torch_multi_gpu
    @unittest.skip(
        reason="MaskFormer has some layers using `add_module` which doesn't work well with `nn.DataParallel`"
    )
    def test_multi_gpu_data_parallel_forward(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    @slow
    def test_model_from_pretrained(self):
        for model_name in ["facebook/maskformer-swin-small-coco"]:
            model = MaskFormerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_model_with_labels(self):
        size = (self.model_tester.min_size,) * 2
        inputs = {
            "pixel_values": torch.randn((2, 3, *size), device=torch_device),
            "mask_labels": torch.randn((2, 10, *size), device=torch_device),
            "class_labels": torch.zeros(2, 10, device=torch_device).long(),
        }

        model = MaskFormerForInstanceSegmentation(MaskFormerConfig()).to(torch_device)
        outputs = model(**inputs)
        self.assertTrue(outputs.loss is not None)

    def test_hidden_states_output(self):
        config, inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.create_and_check_maskformer_model(config, **inputs, output_hidden_states=True)

    def test_attention_outputs(self):
        config, inputs = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config).to(torch_device)
            outputs = model(**inputs, output_attentions=True)
            self.assertTrue(outputs.attentions is not None)

    def test_training(self):
        if not self.model_tester.is_training:
            return
        # only MaskFormerForInstanceSegmentation has the loss
        model_class = self.all_model_classes[1]
        config, pixel_values, pixel_mask, mask_labels, class_labels = self.model_tester.prepare_config_and_inputs()

        model = model_class(config)
        model.to(torch_device)
        model.train()

        loss = model(pixel_values, mask_labels=mask_labels, class_labels=class_labels).loss
        loss.backward()

    def test_retain_grad_hidden_states_attentions(self):
        # only MaskFormerForInstanceSegmentation has the loss
        model_class = self.all_model_classes[1]
        config, pixel_values, pixel_mask, mask_labels, class_labels = self.model_tester.prepare_config_and_inputs()
        config.output_hidden_states = True
        config.output_attentions = True

        model = model_class(config)
        model.to(torch_device)
        model.train()

        outputs = model(pixel_values, mask_labels=mask_labels, class_labels=class_labels)

        encoder_hidden_states = outputs.encoder_hidden_states[0]
        encoder_hidden_states.retain_grad()

        pixel_decoder_hidden_states = outputs.pixel_decoder_hidden_states[0]
        pixel_decoder_hidden_states.retain_grad()
        # we requires_grad=True in inputs_embeds (line 2152), the original implementation don't
        transformer_decoder_hidden_states = outputs.transformer_decoder_hidden_states[0]
        transformer_decoder_hidden_states.retain_grad()

        attentions = outputs.attentions[0]
        attentions.retain_grad()

        outputs.loss.backward(retain_graph=True)

        self.assertIsNotNone(encoder_hidden_states.grad)
        self.assertIsNotNone(pixel_decoder_hidden_states.grad)
        self.assertIsNotNone(transformer_decoder_hidden_states.grad)
        self.assertIsNotNone(attentions.grad)
TOLERANCE = 1e-4


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_vision
@slow
class MaskFormerModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            MaskFormerImageProcessor.from_pretrained("facebook/maskformer-swin-small-coco")
            if is_vision_available()
            else None
        )

    def test_inference_no_head(self):
        model = MaskFormerModel.from_pretrained("facebook/maskformer-swin-small-coco").to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(image, return_tensors="pt").to(torch_device)
        inputs_shape = inputs["pixel_values"].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0)
        # check size
        self.assertEqual(inputs_shape, (1, 3, 800, 1088))

        with torch.no_grad():
            outputs = model(**inputs)

        expected_slice_hidden_state = torch.tensor(
            [[-0.0482, 0.9228, 0.4951], [-0.2547, 0.8017, 0.8527], [-0.0069, 0.3385, -0.0089]]
        ).to(torch_device)
        self.assertTrue(
            torch.allclose(
                outputs.encoder_last_hidden_state[0, 0, :3, :3], expected_slice_hidden_state, atol=TOLERANCE
            )
        )

        expected_slice_hidden_state = torch.tensor(
            [[-0.8422, -0.8434, -0.9718], [-1.0144, -0.5565, -0.4195], [-1.0038, -0.4484, -0.1961]]
        ).to(torch_device)
        self.assertTrue(
            torch.allclose(
                outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3], expected_slice_hidden_state, atol=TOLERANCE
            )
        )

        expected_slice_hidden_state = torch.tensor(
            [[0.2852, -0.0159, 0.9735], [0.6254, 0.1858, 0.8529], [-0.0680, -0.4116, 1.8413]]
        ).to(torch_device)
        self.assertTrue(
            torch.allclose(
                outputs.transformer_decoder_last_hidden_state[0, :3, :3], expected_slice_hidden_state, atol=TOLERANCE
            )
        )

    def test_inference_instance_segmentation_head(self):
        model = (
            MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-swin-small-coco")
            .to(torch_device)
            .eval()
        )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(image, return_tensors="pt").to(torch_device)
        inputs_shape = inputs["pixel_values"].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0)
        # check size
        self.assertEqual(inputs_shape, (1, 3, 800, 1088))

        with torch.no_grad():
            outputs = model(**inputs)
        # masks_queries_logits
        masks_queries_logits = outputs.masks_queries_logits
        self.assertEqual(
            masks_queries_logits.shape,
            (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4),
        )
        expected_slice = [
            [-1.3737124, -1.7724937, -1.9364233],
            [-1.5977281, -1.9867939, -2.1523695],
            [-1.5795398, -1.9269832, -2.093942],
        ]
        expected_slice = torch.tensor(expected_slice).to(torch_device)
        self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3], expected_slice, atol=TOLERANCE))
        # class_queries_logits
        class_queries_logits = outputs.class_queries_logits
        self.assertEqual(
            class_queries_logits.shape, (1, model.config.decoder_config.num_queries, model.config.num_labels + 1)
        )
        expected_slice = torch.tensor(
            [
                [1.6512e00, -5.2572e00, -3.3519e00],
                [3.6169e-02, -5.9025e00, -2.9313e00],
                [1.0766e-04, -7.7630e00, -5.1263e00],
            ]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3], expected_slice, atol=TOLERANCE))

    def test_inference_instance_segmentation_head_resnet_backbone(self):
        model = (
            MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-resnet101-coco-stuff")
            .to(torch_device)
            .eval()
        )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(image, return_tensors="pt").to(torch_device)
        inputs_shape = inputs["pixel_values"].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0)
        # check size
        self.assertEqual(inputs_shape, (1, 3, 800, 1088))

        with torch.no_grad():
            outputs = model(**inputs)
        # masks_queries_logits
        masks_queries_logits = outputs.masks_queries_logits
        self.assertEqual(
            masks_queries_logits.shape,
            (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4),
        )
        expected_slice = [[-0.9046, -2.6366, -4.6062], [-3.4179, -5.7890, -8.8057], [-4.9179, -7.6560, -10.7711]]
        expected_slice = torch.tensor(expected_slice).to(torch_device)
        self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3], expected_slice, atol=TOLERANCE))
        # class_queries_logits
        class_queries_logits = outputs.class_queries_logits
        self.assertEqual(
            class_queries_logits.shape, (1, model.config.decoder_config.num_queries, model.config.num_labels + 1)
        )
        expected_slice = torch.tensor(
            [[4.7188, -3.2585, -2.8857], [6.6871, -2.9181, -1.2487], [7.2449, -2.2764, -2.1874]]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3], expected_slice, atol=TOLERANCE))

    def test_with_segmentation_maps_and_loss(self):
        model = (
            MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-swin-small-coco")
            .to(torch_device)
            .eval()
        )
        image_processor = self.default_image_processor

        inputs = image_processor(
            [np.zeros((3, 800, 1333)), np.zeros((3, 800, 1333))],
            segmentation_maps=[np.zeros((384, 384)).astype(np.float32), np.zeros((384, 384)).astype(np.float32)],
            return_tensors="pt",
        )

        inputs["pixel_values"] = inputs["pixel_values"].to(torch_device)
        inputs["mask_labels"] = [el.to(torch_device) for el in inputs["mask_labels"]]
        inputs["class_labels"] = [el.to(torch_device) for el in inputs["class_labels"]]

        with torch.no_grad():
            outputs = model(**inputs)

        self.assertTrue(outputs.loss is not None)
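
# A typical local invocation for this kind of test module, following the
# transformers convention of gating integration tests behind RUN_SLOW (the file
# path is an assumption about where this module lives in the repo):
#
#     RUN_SLOW=1 python -m pytest tests/models/maskformer/test_modeling_maskformer.py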
"""simple docstring"""
from __future__ import annotations
def fractional_knapsack(value: list[int], weight: list[int], capacity: int) -> tuple[float, list[float]]:
    index = list(range(len(value)))
    ratio = [v / w for v, w in zip(value, weight)]
    index.sort(key=lambda i: ratio[i], reverse=True)

    max_value: float = 0
    fractions: list[float] = [0] * len(value)
    for i in index:
        if weight[i] <= capacity:
            fractions[i] = 1
            max_value += value[i]
            capacity -= weight[i]
        else:
            fractions[i] = capacity / weight[i]
            max_value += value[i] * capacity / weight[i]
            break

    return max_value, fractions
if __name__ == "__main__":
import doctest
doctest.testmod()
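
# Worked example (made-up data): values [60, 100, 120], weights [10, 20, 30] and
# capacity 50. The value/weight ratios are 6, 5 and 4, so the first two items are
# taken whole and 20/30 of the third, for 60 + 100 + 120 * 20 / 30 = 240.0:
#
#     >>> fractional_knapsack([60, 100, 120], [10, 20, 30], 50)
#     (240.0, [1, 1, 0.6666666666666666])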
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"configuration_blip_2": [
"BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP",
"Blip2Config",
"Blip2QFormerConfig",
"Blip2VisionConfig",
],
"processing_blip_2": ["Blip2Processor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_blip_2"] = [
"BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST",
"Blip2Model",
"Blip2QFormerModel",
"Blip2PreTrainedModel",
"Blip2ForConditionalGeneration",
"Blip2VisionModel",
]
if TYPE_CHECKING:
    from .configuration_blip_2 import (
        BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Blip2Config,
        Blip2QFormerConfig,
        Blip2VisionConfig,
    )
    from .processing_blip_2 import Blip2Processor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_blip_2 import (
            BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST,
            Blip2ForConditionalGeneration,
            Blip2Model,
            Blip2PreTrainedModel,
            Blip2QFormerModel,
            Blip2VisionModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
'''simple docstring'''
def no_repeated_characters(input_str: str) -> bool:
    """Return True if no character in `input_str` occurs more than once."""
    bitmap = 0
    for ch in input_str:
        ch_unicode = ord(ch)
        ch_bit_index_on = pow(2, ch_unicode)
        # If we already turned on bit for current character's unicode
        if bitmap >> ch_unicode & 1 == 1:
            return False
        bitmap |= ch_bit_index_on
    return True
if __name__ == "__main__":
import doctest
doctest.testmod()
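
# Usage sketch:
#
#     >>> no_repeated_characters("abcdef")
#     True
#     >>> no_repeated_characters("hello")
#     False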
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
    "configuration_resnet": ["RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "ResNetConfig", "ResNetOnnxConfig"]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_resnet"] = [
'''RESNET_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ResNetForImageClassification''',
'''ResNetModel''',
'''ResNetPreTrainedModel''',
'''ResNetBackbone''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_resnet"] = [
'''TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFResNetForImageClassification''',
'''TFResNetModel''',
'''TFResNetPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_resnet"] = [
'''FlaxResNetForImageClassification''',
'''FlaxResNetModel''',
'''FlaxResNetPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_resnet import RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP, ResNetConfig, ResNetOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_resnet import (
RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
ResNetBackbone,
ResNetForImageClassification,
ResNetModel,
ResNetPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_resnet import (
TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
TFResNetForImageClassification,
TFResNetModel,
TFResNetPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_resnet import FlaxResNetForImageClassification, FlaxResNetModel, FlaxResNetPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
"""simple docstring"""
# DISCLAIMER: This code is strongly influenced by https://github.com/pesser/pytorch_diffusion
# and https://github.com/hojonathanho/diffusion
import math
from dataclasses import dataclass
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from diffusers.configuration_utils import ConfigMixin, register_to_config
from diffusers.schedulers.scheduling_utils import SchedulerMixin
from diffusers.utils import BaseOutput, deprecate
@dataclass
# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->DDIM
class DDIMSchedulerOutput(BaseOutput):
    prev_sample: torch.FloatTensor
    pred_original_sample: Optional[torch.FloatTensor] = None


def betas_for_alpha_bar(
    num_diffusion_timesteps,
    max_beta=0.999,
    alpha_transform_type="cosine",
):
    """
    Create a beta schedule that discretizes the given alpha_t_bar function, which defines the cumulative product of
    (1-beta) over time from t = [0, 1].
    """
    if alpha_transform_type == "cosine":

        def alpha_bar_fn(t):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2

    elif alpha_transform_type == "exp":

        def alpha_bar_fn(t):
            return math.exp(t * -12.0)

    else:
        raise ValueError(f"Unsupported alpha_tranform_type: {alpha_transform_type}")

    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta))
    return torch.tensor(betas, dtype=torch.float32)


class DDIMInverseScheduler(SchedulerMixin, ConfigMixin):
    """
    DDIMInverseScheduler is the reverse scheduler of DDIM: it walks a clean sample forward toward noise.
    """

    order = 1

    @register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 1000,
        beta_start: float = 0.0001,
        beta_end: float = 0.02,
        beta_schedule: str = "linear",
        trained_betas: Optional[Union[np.ndarray, List[float]]] = None,
        clip_sample: bool = True,
        set_alpha_to_zero: bool = True,
        steps_offset: int = 0,
        prediction_type: str = "epsilon",
        clip_sample_range: float = 1.0,
        **kwargs,
    ):
        if kwargs.get("set_alpha_to_one", None) is not None:
            deprecation_message = (
                "The `set_alpha_to_one` argument is deprecated. Please use `set_alpha_to_zero` instead."
            )
            deprecate("set_alpha_to_one", "1.0.0", deprecation_message, standard_warn=False)
            set_alpha_to_zero = kwargs["set_alpha_to_one"]
        if trained_betas is not None:
            self.betas = torch.tensor(trained_betas, dtype=torch.float32)
        elif beta_schedule == "linear":
            self.betas = torch.linspace(beta_start, beta_end, num_train_timesteps, dtype=torch.float32)
        elif beta_schedule == "scaled_linear":
            # this schedule is very specific to the latent diffusion model.
            self.betas = (
                torch.linspace(beta_start**0.5, beta_end**0.5, num_train_timesteps, dtype=torch.float32) ** 2
            )
        elif beta_schedule == "squaredcos_cap_v2":
            # Glide cosine schedule
            self.betas = betas_for_alpha_bar(num_train_timesteps)
        else:
            raise NotImplementedError(f"{beta_schedule} does is not implemented for {self.__class__}")

        self.alphas = 1.0 - self.betas
        self.alphas_cumprod = torch.cumprod(self.alphas, dim=0)

        # At every step in inverted ddim, we are looking into the next alphas_cumprod
        # For the final step, there is no next alphas_cumprod, and the index is out of bounds
        # `set_alpha_to_zero` decides whether we set this parameter simply to zero
        # in this case, self.step() just output the predicted noise
        # or whether we use the final alpha of the "non-previous" one.
        self.final_alpha_cumprod = torch.tensor(0.0) if set_alpha_to_zero else self.alphas_cumprod[-1]

        # standard deviation of the initial noise distribution
        self.init_noise_sigma = 1.0

        # setable values
        self.num_inference_steps = None
        self.timesteps = torch.from_numpy(np.arange(0, num_train_timesteps).copy().astype(np.int64))

    def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = None) -> torch.FloatTensor:
        return sample

    def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None):
        if num_inference_steps > self.config.num_train_timesteps:
            raise ValueError(
                f"`num_inference_steps`: {num_inference_steps} cannot be larger than `self.config.train_timesteps`:"
                f" {self.config.num_train_timesteps} as the unet model trained with this scheduler can only handle"
                f" maximal {self.config.num_train_timesteps} timesteps."
            )

        self.num_inference_steps = num_inference_steps
        step_ratio = self.config.num_train_timesteps // self.num_inference_steps
        # creates integer timesteps by multiplying by ratio
        # casting to int to avoid issues when num_inference_step is power of 3
        timesteps = (np.arange(0, num_inference_steps) * step_ratio).round().copy().astype(np.int64)
        self.timesteps = torch.from_numpy(timesteps).to(device)
        self.timesteps += self.config.steps_offset

    def step(
        self,
        model_output: torch.FloatTensor,
        timestep: int,
        sample: torch.FloatTensor,
        eta: float = 0.0,
        use_clipped_model_output: bool = False,
        variance_noise: Optional[torch.FloatTensor] = None,
        return_dict: bool = True,
    ) -> Union[DDIMSchedulerOutput, Tuple]:
        prev_timestep = timestep + self.config.num_train_timesteps // self.num_inference_steps

        # 2. compute alphas, betas
        # change original implementation to exactly match noise levels for analogous forward process
        alpha_prod_t = self.alphas_cumprod[timestep]
        alpha_prod_t_prev = (
            self.alphas_cumprod[prev_timestep]
            if prev_timestep < self.config.num_train_timesteps
            else self.final_alpha_cumprod
        )

        beta_prod_t = 1 - alpha_prod_t

        # 3. compute predicted original sample from predicted noise also called
        # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
        if self.config.prediction_type == "epsilon":
            pred_original_sample = (sample - beta_prod_t**0.5 * model_output) / alpha_prod_t**0.5
            pred_epsilon = model_output
        elif self.config.prediction_type == "sample":
            pred_original_sample = model_output
            pred_epsilon = (sample - alpha_prod_t**0.5 * pred_original_sample) / beta_prod_t**0.5
        elif self.config.prediction_type == "v_prediction":
            pred_original_sample = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
            pred_epsilon = (alpha_prod_t**0.5) * model_output + (beta_prod_t**0.5) * sample
        else:
            raise ValueError(
                f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample`, or"
                " `v_prediction`"
            )

        # 4. Clip or threshold "predicted x_0"
        if self.config.clip_sample:
            pred_original_sample = pred_original_sample.clamp(
                -self.config.clip_sample_range, self.config.clip_sample_range
            )

        # 5. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
        pred_sample_direction = (1 - alpha_prod_t_prev) ** 0.5 * pred_epsilon

        # 6. compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
        prev_sample = alpha_prod_t_prev**0.5 * pred_original_sample + pred_sample_direction

        if not return_dict:
            return (prev_sample, pred_original_sample)
        return DDIMSchedulerOutput(prev_sample=prev_sample, pred_original_sample=pred_original_sample)

    def __len__(self):
        return self.config.num_train_timesteps
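
# A hedged usage sketch of the inverse scheduler (the checkpoint name and the
# pipeline wiring are illustrative assumptions, not taken from this file):
#
#     from diffusers import DDIMInverseScheduler, StableDiffusionPipeline
#
#     pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")
#     inverse_scheduler = DDIMInverseScheduler.from_config(pipe.scheduler.config)
#     inverse_scheduler.set_timesteps(50)
#     # each `inverse_scheduler.step(...)` then walks a clean latent toward noise,
#     # the opposite direction of regular DDIM sampling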
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

SWIN2SR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "caidas/swin2sr-classicalsr-x2-64": (
        "https://huggingface.co/caidas/swin2sr-classicalsr-x2-64/resolve/main/config.json"
    ),
}


class Swin2SRConfig(PretrainedConfig):
    model_type = "swin2sr"

    attribute_map = {
        "hidden_size": "embed_dim",
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        image_size=64,
        patch_size=1,
        num_channels=3,
        embed_dim=180,
        depths=[6, 6, 6, 6, 6, 6],
        num_heads=[6, 6, 6, 6, 6, 6],
        window_size=8,
        mlp_ratio=2.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        upscale=2,
        img_range=1.0,
        resi_connection="1conv",
        upsampler="pixelshuffle",
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.upscale = upscale
        self.img_range = img_range
        self.resi_connection = resi_connection
        self.upsampler = upsampler
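
# Minimal usage sketch: the default values mirror the
# caidas/swin2sr-classicalsr-x2-64 architecture referenced in the archive map above.
#
#     >>> config = Swin2SRConfig()
#     >>> config.upscale
#     2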
"""simple docstring"""
def or_gate(input_1: int, input_2: int) -> int:
    """
    Calculate OR of the input values

    >>> or_gate(0, 0)
    0
    >>> or_gate(0, 1)
    1
    >>> or_gate(1, 0)
    1
    >>> or_gate(1, 1)
    1
    """
    return int((input_1, input_2).count(1) != 0)


def test_or_gate() -> None:
    """
    Tests the or_gate function
    """
    assert or_gate(0, 0) == 0
    assert or_gate(0, 1) == 1
    assert or_gate(1, 0) == 1
    assert or_gate(1, 1) == 1


if __name__ == "__main__":
    print(or_gate(0, 1))
    print(or_gate(1, 0))
    print(or_gate(0, 0))
    print(or_gate(1, 1))
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from .feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from .utils import PaddingStrategy, TensorType, is_tf_tensor, is_torch_tensor, logging, to_numpy
a__ : Union[str, Any] = logging.get_logger(__name__)
class UpperCamelCase_ ( UpperCamelCase):
"""simple docstring"""
def __init__( self : int , UpperCAmelCase__ : int , UpperCAmelCase__ : int , UpperCAmelCase__ : float , **UpperCAmelCase__ : List[str] ) -> Union[str, Any]:
__SCREAMING_SNAKE_CASE = feature_size
__SCREAMING_SNAKE_CASE = sampling_rate
__SCREAMING_SNAKE_CASE = padding_value
__SCREAMING_SNAKE_CASE = kwargs.pop("padding_side" , "right" )
__SCREAMING_SNAKE_CASE = kwargs.pop("return_attention_mask" , UpperCAmelCase__ )
super().__init__(**UpperCAmelCase__ )
def UpperCAmelCase_ ( self : Optional[int] , UpperCAmelCase__ : Union[
BatchFeature,
List[BatchFeature],
Dict[str, BatchFeature],
Dict[str, List[BatchFeature]],
List[Dict[str, BatchFeature]],
] , UpperCAmelCase__ : Union[bool, str, PaddingStrategy] = True , UpperCAmelCase__ : Optional[int] = None , UpperCAmelCase__ : bool = False , UpperCAmelCase__ : Optional[int] = None , UpperCAmelCase__ : Optional[bool] = None , UpperCAmelCase__ : Optional[Union[str, TensorType]] = None , ) -> BatchFeature:
# If we have a list of dicts, let's convert it in a dict of lists
# We do this to allow using this method as a collate_fn function in PyTorch Dataloader
if isinstance(UpperCAmelCase__ , (list, tuple) ) and isinstance(processed_features[0] , (dict, BatchFeature) ):
__SCREAMING_SNAKE_CASE = {
key: [example[key] for example in processed_features] for key in processed_features[0].keys()
}
# The model's main input name, usually `input_values`, has be passed for padding
if self.model_input_names[0] not in processed_features:
raise ValueError(
"You should supply an instance of `transformers.BatchFeature` or list of `transformers.BatchFeature`"
F""" to this method that includes {self.model_input_names[0]}, but you provided"""
F""" {list(processed_features.keys() )}""" )
__SCREAMING_SNAKE_CASE = processed_features[self.model_input_names[0]]
__SCREAMING_SNAKE_CASE = (
return_attention_mask if return_attention_mask is not None else self.return_attention_mask
)
if len(UpperCAmelCase__ ) == 0:
if return_attention_mask:
__SCREAMING_SNAKE_CASE = []
return processed_features
# If we have PyTorch/TF tensors or lists as inputs, we cast them as Numpy arrays
# and rebuild them afterwards if no return_tensors is specified
# Note that we lose the specific device the tensor may be on for PyTorch
__SCREAMING_SNAKE_CASE = required_input[0]
if isinstance(UpperCAmelCase__ , (list, tuple) ):
# first_element might be an empty list/tuple in some edge cases so we grab the first non empty element.
__SCREAMING_SNAKE_CASE = 0
while len(required_input[index] ) == 0:
index += 1
if index < len(UpperCAmelCase__ ):
__SCREAMING_SNAKE_CASE = required_input[index][0]
if return_tensors is None:
if is_tf_tensor(UpperCAmelCase__ ):
__SCREAMING_SNAKE_CASE = "tf"
elif is_torch_tensor(UpperCAmelCase__ ):
__SCREAMING_SNAKE_CASE = "pt"
elif isinstance(UpperCAmelCase__ , (int, float, list, tuple, np.ndarray) ):
__SCREAMING_SNAKE_CASE = "np"
else:
raise ValueError(
F"""type of {first_element} unknown: {type(UpperCAmelCase__ )}. """
"Should be one of a python, numpy, pytorch or tensorflow object." )
for key, value in processed_features.items():
if isinstance(value[0] , (int, float) ):
__SCREAMING_SNAKE_CASE = to_numpy(UpperCAmelCase__ )
else:
__SCREAMING_SNAKE_CASE = [to_numpy(UpperCAmelCase__ ) for v in value]
# Convert padding_strategy in PaddingStrategy
__SCREAMING_SNAKE_CASE = self._get_padding_strategies(padding=UpperCAmelCase__ , max_length=UpperCAmelCase__ )
__SCREAMING_SNAKE_CASE = processed_features[self.model_input_names[0]]
__SCREAMING_SNAKE_CASE = len(UpperCAmelCase__ )
if not all(len(UpperCAmelCase__ ) == batch_size for v in processed_features.values() ):
raise ValueError("Some items in the output dictionary have a different batch size than others." )
__SCREAMING_SNAKE_CASE = []
for i in range(UpperCAmelCase__ ):
__SCREAMING_SNAKE_CASE = {k: v[i] for k, v in processed_features.items()}
# truncation
__SCREAMING_SNAKE_CASE = self._truncate(
UpperCAmelCase__ , max_length=UpperCAmelCase__ , pad_to_multiple_of=UpperCAmelCase__ , truncation=UpperCAmelCase__ , )
truncated_inputs.append(UpperCAmelCase__ )
if padding_strategy == PaddingStrategy.LONGEST:
# make sure that `max_length` cannot be longer than the longest truncated length
__SCREAMING_SNAKE_CASE = max(len(input_slice[self.model_input_names[0]] ) for input_slice in truncated_inputs )
__SCREAMING_SNAKE_CASE = PaddingStrategy.MAX_LENGTH
__SCREAMING_SNAKE_CASE = {}
for i in range(UpperCAmelCase__ ):
# padding
__SCREAMING_SNAKE_CASE = self._pad(
truncated_inputs[i] , max_length=UpperCAmelCase__ , padding_strategy=UpperCAmelCase__ , pad_to_multiple_of=UpperCAmelCase__ , return_attention_mask=UpperCAmelCase__ , )
for key, value in outputs.items():
if key not in batch_outputs:
__SCREAMING_SNAKE_CASE = []
if value.dtype is np.dtype(np.floataa ):
__SCREAMING_SNAKE_CASE = value.astype(np.floataa )
batch_outputs[key].append(UpperCAmelCase__ )
return BatchFeature(UpperCAmelCase__ , tensor_type=UpperCAmelCase__ )
def UpperCAmelCase_ ( self : Optional[int] , UpperCAmelCase__ : Union[Dict[str, np.ndarray], BatchFeature] , UpperCAmelCase__ : Optional[int] = None , UpperCAmelCase__ : PaddingStrategy = PaddingStrategy.DO_NOT_PAD , UpperCAmelCase__ : Optional[int] = None , UpperCAmelCase__ : Optional[bool] = None , ) -> dict:
__SCREAMING_SNAKE_CASE = processed_features[self.model_input_names[0]]
if padding_strategy == PaddingStrategy.LONGEST:
__SCREAMING_SNAKE_CASE = len(UpperCAmelCase__ )
if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
__SCREAMING_SNAKE_CASE = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
__SCREAMING_SNAKE_CASE = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(UpperCAmelCase__ ) < max_length
if return_attention_mask and "attention_mask" not in processed_features:
__SCREAMING_SNAKE_CASE = np.ones(len(UpperCAmelCase__ ) , dtype=np.intaa )
if needs_to_be_padded:
__SCREAMING_SNAKE_CASE = max_length - len(UpperCAmelCase__ )
if self.padding_side == "right":
if return_attention_mask:
__SCREAMING_SNAKE_CASE = np.pad(
processed_features["attention_mask"] , (0, difference) )
__SCREAMING_SNAKE_CASE = ((0, difference), (0, 0)) if self.feature_size > 1 else (0, difference)
__SCREAMING_SNAKE_CASE = np.pad(
UpperCAmelCase__ , UpperCAmelCase__ , "constant" , constant_values=self.padding_value )
elif self.padding_side == "left":
if return_attention_mask:
__SCREAMING_SNAKE_CASE = np.pad(
processed_features["attention_mask"] , (difference, 0) )
__SCREAMING_SNAKE_CASE = ((difference, 0), (0, 0)) if self.feature_size > 1 else (difference, 0)
__SCREAMING_SNAKE_CASE = np.pad(
UpperCAmelCase__ , UpperCAmelCase__ , "constant" , constant_values=self.padding_value )
else:
raise ValueError("Invalid padding strategy:" + str(self.padding_side ) )
return processed_features
def UpperCAmelCase_ ( self : Union[str, Any] , UpperCAmelCase__ : Union[Dict[str, np.ndarray], BatchFeature] , UpperCAmelCase__ : Optional[int] = None , UpperCAmelCase__ : Optional[int] = None , UpperCAmelCase__ : Optional[bool] = None , ) -> str:
if not truncation:
return processed_features
elif truncation and max_length is None:
raise ValueError("When setting ``truncation=True``, make sure that ``max_length`` is defined." )
__SCREAMING_SNAKE_CASE = processed_features[self.model_input_names[0]]
# find `max_length` that fits `pad_to_multiple_of`
if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
__SCREAMING_SNAKE_CASE = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
__SCREAMING_SNAKE_CASE = len(UpperCAmelCase__ ) > max_length
if needs_to_be_truncated:
__SCREAMING_SNAKE_CASE = processed_features[self.model_input_names[0]][:max_length]
if "attention_mask" in processed_features:
__SCREAMING_SNAKE_CASE = processed_features["attention_mask"][:max_length]
return processed_features
def UpperCAmelCase_ ( self : Optional[int] , UpperCAmelCase__ : Optional[Any]=False , UpperCAmelCase__ : Optional[int]=None ) -> str:
# Get padding strategy
if padding is not False:
if padding is True:
__SCREAMING_SNAKE_CASE = PaddingStrategy.LONGEST # Default to pad to the longest sequence in the batch
elif not isinstance(UpperCAmelCase__ , UpperCAmelCase__ ):
__SCREAMING_SNAKE_CASE = PaddingStrategy(UpperCAmelCase__ )
elif isinstance(UpperCAmelCase__ , UpperCAmelCase__ ):
__SCREAMING_SNAKE_CASE = padding
else:
__SCREAMING_SNAKE_CASE = PaddingStrategy.DO_NOT_PAD
# Set max length if needed
if max_length is None:
if padding_strategy == PaddingStrategy.MAX_LENGTH:
raise ValueError(
F"""When setting ``padding={PaddingStrategy.MAX_LENGTH}``, make sure that max_length is defined""" )
# Test if we have a padding value
if padding_strategy != PaddingStrategy.DO_NOT_PAD and (self.padding_value is None):
raise ValueError(
"Asking to pad but the feature_extractor does not have a padding value. Please select a value to use"
" as `padding_value`. For example: `feature_extractor.padding_value = 0.0`." )
return padding_strategy
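# Self-contained NumPy sketch of the right-padding logic used by `_pad` above:
# pad 1-D feature arrays to a common length and build the matching attention
# mask (array values are illustrative).
def _right_padding_sketch():
    batch = [np.array([1.0, 2.0, 3.0]), np.array([4.0, 5.0])]
    max_len = max(len(x) for x in batch)
    padded = [
        np.pad(x, (0, max_len - len(x)), "constant", constant_values=0.0) for x in batch
    ]
    attention_mask = [
        np.pad(np.ones(len(x), dtype=np.int32), (0, max_len - len(x))) for x in batch
    ]
    return np.stack(padded), np.stack(attention_mask)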
| 54 | 1 |
from __future__ import annotations
from statistics import mean
def calculate_waitingtime(
    arrival_time: list[int], burst_time: list[int], no_of_processes: int
) -> list[int]:
    """simple docstring"""
    waiting_time = [0] * no_of_processes
    remaining_time = [0] * no_of_processes
    # Initialize remaining_time to burst_time.
    for i in range(no_of_processes):
        remaining_time[i] = burst_time[i]
    ready_process: list[int] = []
    completed = 0
    total_time = 0
    # When processes are not completed,
    # a process whose arrival time has passed
    # and has remaining execution time is put into the ready_process.
    # The shortest process in the ready_process, target_process, is executed.
    while completed != no_of_processes:
        ready_process = []
        target_process = -1
        for i in range(no_of_processes):
            if (arrival_time[i] <= total_time) and (remaining_time[i] > 0):
                ready_process.append(i)
        if len(ready_process) > 0:
            target_process = ready_process[0]
            for i in ready_process:
                if remaining_time[i] < remaining_time[target_process]:
                    target_process = i
            total_time += burst_time[target_process]
            completed += 1
            remaining_time[target_process] = 0
            waiting_time[target_process] = (
                total_time - arrival_time[target_process] - burst_time[target_process]
            )
        else:
            total_time += 1
    return waiting_time
def calculate_turnaroundtime(
    burst_time: list[int], no_of_processes: int, waiting_time: list[int]
) -> list[int]:
    """simple docstring"""
    turn_around_time = [0] * no_of_processes
    for i in range(no_of_processes):
        turn_around_time[i] = burst_time[i] + waiting_time[i]
    return turn_around_time
if __name__ == "__main__":
    print("[TEST CASE 01]")
    no_of_processes = 4
    burst_time = [2, 5, 3, 7]
    arrival_time = [0, 0, 0, 0]
    waiting_time = calculate_waitingtime(arrival_time, burst_time, no_of_processes)
    turn_around_time = calculate_turnaroundtime(
        burst_time, no_of_processes, waiting_time
    )
# Printing the Result
print("PID\tBurst Time\tArrival Time\tWaiting Time\tTurnaround Time")
for i, process_id in enumerate(list(range(1, 5))):
print(
f"{process_id}\t{burst_time[i]}\t\t\t{arrival_time[i]}\t\t\t\t"
f"{waiting_time[i]}\t\t\t\t{turn_around_time[i]}"
)
print(f"\nAverage waiting time = {mean(waiting_time):.5f}")
print(f"Average turnaround time = {mean(turn_around_time):.5f}")
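    # Additional hedged check: with staggered arrivals the CPU idles until the
    # second job arrives, so both jobs start on arrival and wait zero time.
    assert calculate_waitingtime([0, 5], [2, 2], 2) == [0, 0]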
| 354 |
import argparse
import dataclasses
import json
import logging
import os
import shutil
from typing import List, Optional
import datasets
from accelerate import Accelerator
from datasets import load_dataset
from finetuning import finetune
from tqdm.auto import tqdm
import transformers
from transformers import AutoConfig, set_seed
from transformers.trainer_utils import IntervalStrategy
logger = logging.getLogger(__name__)
_A = "pytorch_model.bin"
@dataclasses.dataclass
class STModelArguments:
    model_name_or_path: str = dataclasses.field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models."} )
    cache_dir: Optional[str] = dataclasses.field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co."}, )
@dataclasses.dataclass
class STDataArguments:
    train_file: str = dataclasses.field(metadata={"help": "A csv or a json file containing the training data."} )
    infer_file: str = dataclasses.field(metadata={"help": "A csv or a json file containing the data to predict on."} )
    eval_file: Optional[str] = dataclasses.field(
        default=None, metadata={"help": "A csv or a json file containing the validation data."} )
    task_name: Optional[str] = dataclasses.field(
        default=None, metadata={"help": "The name of the task to train on."}, )
    label_list: Optional[List[str]] = dataclasses.field(
        default=None, metadata={"help": "The list of labels for the task."} )
@dataclasses.dataclass
class STTrainingArguments:
    output_dir: str = dataclasses.field(
        metadata={"help": "The output directory where the model predictions and checkpoints will be written."} )
    eval_metric: Optional[str] = dataclasses.field(
        default="accuracy", metadata={"help": "The evaluation metric used for the task."} )
    evaluation_strategy: Optional[str] = dataclasses.field(
        default="no", metadata={
            "help": "The evaluation strategy to adopt during training. Possible values are: [\"no\", \"step\", \"epoch\"]"
        }, )
    early_stopping_patience: Optional[int] = dataclasses.field(
        default=10, metadata={"help": "Number of evaluation calls with no improvement after which training will be stopped."}, )
    early_stopping_threshold: Optional[float] = dataclasses.field(
        default=0.0, metadata={
            "help": "How much the specified evaluation metric must improve to satisfy early stopping conditions."
        }, )
    do_filter_by_confidence: Optional[bool] = dataclasses.field(
        default=False, metadata={"help": "Whether to filter the pseudo-labeled data based on the confidence score."}, )
    do_filter_by_val_performance: Optional[bool] = dataclasses.field(
        default=False, metadata={"help": "Whether to filter the pseudo-labeled data based on the validation performance."}, )
    finetune_on_labeled_data: Optional[bool] = dataclasses.field(
        default=False, metadata={"help": "Whether to fine-tune on labeled data after pseudo training."}, )
    confidence_threshold: Optional[float] = dataclasses.field(
        default=0.0, metadata={"help": "Confidence threshold for pseudo-labeled data filtering."}, )
    max_selftrain_iterations: Optional[int] = dataclasses.field(
        default=100, metadata={"help": "Maximum number of self-training iterations."}, )
    seed: Optional[int] = dataclasses.field(
        default=None, metadata={"help": "Random seed for initialization."}, )
def create_pseudo_labeled_data(args, infer_input, infer_output, eval_result, idalabel, next_data_dir):
"""simple docstring"""
snake_case = datasets.concatenate_datasets([infer_input, infer_output] , axis=1 )
if args.do_filter_by_confidence:
snake_case = dataset.filter(lambda A__ : example["probability"] > args.confidence_threshold )
if args.do_filter_by_val_performance:
assert eval_result >= 0.0 and eval_result <= 1.0
snake_case = int(eval_result * len(A__ ) )
print(A__ )
snake_case = dataset.sort("probability" , reverse=A__ )
snake_case = dataset.select(range(A__ ) )
snake_case = dataset.remove_columns(["label", "probability"] )
snake_case = dataset.rename_column("prediction" , "label" )
snake_case = dataset.map(lambda A__ : {"label": idalabel[example["label"]]} )
snake_case = dataset.shuffle(seed=args.seed )
snake_case = os.path.join(A__ , F'train_pseudo.{args.data_file_extension}' )
if args.data_file_extension == "csv":
dataset.to_csv(A__ , index=A__ )
else:
dataset.to_json(A__ )
def selftrain(model_name_or_path, train_file, infer_file, output_dir, **kwargs):
"""simple docstring"""
snake_case = Accelerator()
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , level=logging.INFO , )
logger.info(accelerator.state )
# Setup logging, we only want one process per machine to log things on the
# screen. accelerator.is_local_main_process is only True for one process per
# machine.
logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR )
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_info()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
snake_case = STModelArguments(model_name_or_path=A__ )
snake_case = STDataArguments(train_file=A__ , infer_file=A__ )
snake_case = STTrainingArguments(output_dir=A__ )
snake_case = argparse.Namespace()
for arg_class in (model_args, data_args, training_args):
for key, value in vars(A__ ).items():
setattr(A__ , A__ , A__ )
for key, value in kwargs.items():
if hasattr(A__ , A__ ):
setattr(A__ , A__ , A__ )
# Sanity checks
snake_case = {}
snake_case = None
# You need to provide the training data and the data to predict on
assert args.train_file is not None
assert args.infer_file is not None
snake_case = args.train_file
snake_case = args.infer_file
if args.evaluation_strategy != IntervalStrategy.NO.value:
assert args.eval_file is not None
snake_case = args.eval_file
for key in data_files:
snake_case = data_files[key].split("." )[-1]
assert extension in ["csv", "json"], F'`{key}_file` should be a csv or a json file.'
if args.data_file_extension is None:
snake_case = extension
else:
assert extension == args.data_file_extension, F'`{key}_file` should be a {args.data_file_extension} file`.'
assert (
args.eval_metric in datasets.list_metrics()
), F'{args.eval_metric} not in the list of supported metrics {datasets.list_metrics()}.'
# If passed along, set the training seed now.
if args.seed is not None:
set_seed(args.seed )
logger.info("Creating the initial data directory for self-training..." )
snake_case = F'{args.output_dir}/self-train_iter-{{}}'.format
snake_case = data_dir_format(0 )
if accelerator.is_main_process:
if args.output_dir is not None:
os.makedirs(args.output_dir , exist_ok=A__ )
os.makedirs(A__ , exist_ok=A__ )
accelerator.wait_for_everyone()
snake_case = None
snake_case = None
snake_case = 0
snake_case = False
# Show the progress bar
snake_case = tqdm(range(args.max_selftrain_iterations ) , disable=not accelerator.is_local_main_process )
# Self-train
for iteration in range(0 , int(args.max_selftrain_iterations ) ):
snake_case = data_dir_format(A__ )
assert os.path.exists(A__ )
# Stage 1: initial fine-tuning for iteration = 0 or pseudo-training for
# iteration > 0
snake_case = os.path.join(A__ , "stage-1" )
snake_case = {
"accelerator": accelerator,
"model_name_or_path": args.model_name_or_path,
"cache_dir": args.cache_dir,
"do_train": True,
"train_file": data_files["train"] if iteration == 0 else data_files["train_pseudo"],
"do_eval": True if args.eval_file is not None else False,
"eval_file": data_files["eval"],
"do_predict": True,
"infer_file": data_files["infer"],
"task_name": args.task_name,
"label_list": args.label_list,
"output_dir": current_output_dir,
"eval_metric": args.eval_metric,
"evaluation_strategy": args.evaluation_strategy,
"early_stopping_patience": args.early_stopping_patience,
"early_stopping_threshold": args.early_stopping_threshold,
"seed": args.seed,
}
# Add additional training arguments
for key, value in kwargs.items():
if key not in arguments_dict and not hasattr(A__ , A__ ):
arguments_dict.update({key: value} )
snake_case = os.path.join(A__ , "best-checkpoint" , A__ )
if os.path.exists(A__ ):
logger.info(
"Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 1." , A__ , A__ , )
else:
logger.info("***** Running self-training: iteration: %d, stage: 1 *****" , A__ )
finetune(**A__ )
accelerator.wait_for_everyone()
assert os.path.exists(A__ )
logger.info("Self-training job completed: iteration: %d, stage: 1." , A__ )
if iteration > 0 and args.finetune_on_labeled_data:
# Stage 2 (optional): fine-tuning on the original labeled data
snake_case = os.path.join(A__ , "best-checkpoint" )
snake_case = os.path.join(A__ , "stage-2" )
# Update arguments_dict
snake_case = model_path
snake_case = data_files["train"]
snake_case = current_output_dir
snake_case = os.path.join(A__ , "best-checkpoint" , A__ )
if os.path.exists(A__ ):
logger.info(
"Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 2." , A__ , A__ , )
else:
logger.info("***** Running self-training: iteration: %d, stage: 2 *****" , A__ )
finetune(**A__ )
accelerator.wait_for_everyone()
assert os.path.exists(A__ )
logger.info("Self-training job completed: iteration: %d, stage: 2." , A__ )
snake_case = iteration
snake_case = data_dir_format(iteration + 1 )
snake_case = AutoConfig.from_pretrained(os.path.join(A__ , "best-checkpoint" ) )
snake_case = config.idalabel
snake_case = os.path.join(A__ , "eval_results_best-checkpoint.json" )
snake_case = os.path.join(A__ , "test_results_best-checkpoint.json" )
assert os.path.exists(A__ )
with open(A__ , "r" ) as f:
snake_case = float(json.load(A__ )[args.eval_metric] )
snake_case = os.path.join(A__ , "infer_output_best-checkpoint.csv" )
assert os.path.exists(A__ )
# Loading the dataset from local csv or json files.
snake_case = load_dataset(args.data_file_extension , data_files={"data": data_files["infer"]} )["data"]
snake_case = load_dataset("csv" , data_files={"data": infer_output_file} )["data"]
if accelerator.is_main_process:
os.makedirs(A__ , exist_ok=A__ )
shutil.copy(A__ , os.path.join(A__ , F'eval_results_iter-{iteration}.json' ) )
if os.path.exists(A__ ):
shutil.copy(A__ , os.path.join(A__ , F'test_results_iter-{iteration}.json' ) )
create_pseudo_labeled_data(A__ , A__ , A__ , A__ , A__ , A__ )
accelerator.wait_for_everyone()
snake_case = os.path.join(A__ , F'train_pseudo.{args.data_file_extension}' )
if args.evaluation_strategy != IntervalStrategy.NO.value:
snake_case = eval_result
if best_iteration is None:
snake_case = new_iteration
snake_case = new_eval_result
else:
if new_eval_result - best_eval_result > args.early_stopping_threshold:
snake_case = new_iteration
snake_case = new_eval_result
snake_case = 0
else:
if new_eval_result == best_eval_result:
snake_case = new_iteration
snake_case = new_eval_result
early_stopping_patience_counter += 1
if early_stopping_patience_counter >= args.early_stopping_patience:
snake_case = True
progress_bar.update(1 )
if should_training_stop:
break
if best_iteration is not None:
# Save the best iteration
logger.info("Best iteration: %d" , A__ )
logger.info("Best evaluation result: %s = %f" , args.eval_metric , A__ )
accelerator.wait_for_everyone()
if accelerator.is_main_process:
shutil.copy(
os.path.join(A__ , F'eval_results_iter-{iteration}.json' ) , os.path.join(A__ , "eval_results_best-iteration.json" ) , )
else:
# Assume that the last iteration is the best
logger.info("Best iteration: %d" , args.max_selftrain_iterations - 1 )
logger.info("Best evaluation result: %s = %f" , args.eval_metric , A__ )
accelerator.wait_for_everyone()
if accelerator.is_main_process:
shutil.copy(
os.path.join(A__ , F'eval_results_iter-{args.max_selftrain_iterations - 1}.json' ) , os.path.join(A__ , "eval_results_best-iteration.json" ) , )
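# Hedged sketch of the confidence-based filtering idea used by
# `create_pseudo_labeled_data` above, on a plain list of dicts instead of a
# `datasets.Dataset`; the rows and threshold are made up for illustration.
def _confidence_filter_sketch(confidence_threshold=0.8):
    pseudo_rows = [
        {"text": "a", "prediction": 1, "probability": 0.95},
        {"text": "b", "prediction": 0, "probability": 0.55},
    ]
    kept = [row for row in pseudo_rows if row["probability"] > confidence_threshold]
    return [{"text": row["text"], "label": row["prediction"]} for row in kept]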
| 137 | 0 |
import gc
import threading
import time
import psutil
import torch
class PeakCPUMemory:
    """simple docstring"""
    def __init__(self):
        self.process = psutil.Process()
        self.peak_monitoring = False
    def peak_monitor(self):
        self.cpu_memory_peak = -1
        while True:
            self.cpu_memory_peak = max(self.process.memory_info().rss, self.cpu_memory_peak)
            # can't sleep or will not catch the peak right (this comment is here on purpose)
            if not self.peak_monitoring:
                break
    def start(self):
        self.peak_monitoring = True
        self.thread = threading.Thread(target=self.peak_monitor)
        self.thread.daemon = True
        self.thread.start()
    def stop(self):
        self.peak_monitoring = False
        self.thread.join()
        return self.cpu_memory_peak
cpu_peak_tracker = PeakCPUMemory()
def start_measure():
    # Time
    measures = {"time": time.time()}
    gc.collect()
    torch.cuda.empty_cache()
    # CPU mem
    measures["cpu"] = psutil.Process().memory_info().rss
    cpu_peak_tracker.start()
    # GPU mem
    for i in range(torch.cuda.device_count()):
        measures[str(i)] = torch.cuda.memory_allocated(i)
    torch.cuda.reset_peak_memory_stats()
    return measures
def end_measure(start_measures):
    # Time
    measures = {"time": time.time() - start_measures["time"]}
    gc.collect()
    torch.cuda.empty_cache()
    # CPU mem
    measures["cpu"] = (psutil.Process().memory_info().rss - start_measures["cpu"]) / 2**20
    measures["cpu-peak"] = (cpu_peak_tracker.stop() - start_measures["cpu"]) / 2**20
    # GPU mem
    for i in range(torch.cuda.device_count()):
        measures[str(i)] = (torch.cuda.memory_allocated(i) - start_measures[str(i)]) / 2**20
        measures[f"{i}-peak"] = (torch.cuda.max_memory_allocated(i) - start_measures[str(i)]) / 2**20
    return measures
def log_measures(measures, description):
    print(f"{description}:")
    print(f"- Time: {measures['time']:.2f}s")
    for i in range(torch.cuda.device_count()):
        print(f"- GPU {i} allocated: {measures[str(i)]:.2f}MiB")
        peak = measures[f"{i}-peak"]
        print(f"- GPU {i} peak: {peak:.2f}MiB")
    print(f"- CPU RAM allocated: {measures['cpu']:.2f}MiB")
    print(f"- CPU RAM peak: {measures['cpu-peak']:.2f}MiB")
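# Hedged, GPU-free sketch of the same before/after bookkeeping using only
# `psutil`: measure RSS growth around an allocation kept alive on purpose.
def _cpu_memory_sketch():
    before = psutil.Process().memory_info().rss
    buffer = bytearray(8 * 2**20)  # allocate ~8 MiB and keep a reference
    after = psutil.Process().memory_info().rss
    return len(buffer), (after - before) / 2**20  # bytes kept alive, MiB grown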
| 114 |
import random
import unittest
import numpy as np
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionImgaImgPipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class OnnxStableDiffusionImgaImgPipelineFastTests(OnnxPipelineTesterMixin, unittest.TestCase):
    """simple docstring"""
    hub_checkpoint = "hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline"
def UpperCAmelCase ( self : List[str] , __lowercase : Optional[Any]=0 ) -> Any:
__UpperCAmelCase : Any = floats_tensor((1, 3, 128, 128) , rng=random.Random(__lowercase ) )
__UpperCAmelCase : int = np.random.RandomState(__lowercase )
__UpperCAmelCase : Optional[Any] = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""image""": image,
"""generator""": generator,
"""num_inference_steps""": 3,
"""strength""": 0.75,
"""guidance_scale""": 7.5,
"""output_type""": """numpy""",
}
return inputs
def UpperCAmelCase ( self : Optional[int] ) -> Optional[Any]:
__UpperCAmelCase : List[Any] = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
pipe.set_progress_bar_config(disable=__lowercase )
__UpperCAmelCase : int = self.get_dummy_inputs()
__UpperCAmelCase : Optional[Any] = pipe(**__lowercase ).images
__UpperCAmelCase : Union[str, Any] = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 128, 128, 3)
__UpperCAmelCase : List[str] = np.array([0.69_643, 0.58_484, 0.50_314, 0.58_760, 0.55_368, 0.59_643, 0.51_529, 0.41_217, 0.49_087] )
assert np.abs(image_slice - expected_slice ).max() < 1e-1
def UpperCAmelCase ( self : Optional[Any] ) -> List[str]:
__UpperCAmelCase : Dict = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
__UpperCAmelCase : int = PNDMScheduler.from_config(pipe.scheduler.config , skip_prk_steps=__lowercase )
pipe.set_progress_bar_config(disable=__lowercase )
__UpperCAmelCase : Any = self.get_dummy_inputs()
__UpperCAmelCase : Tuple = pipe(**__lowercase ).images
__UpperCAmelCase : Dict = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
__UpperCAmelCase : str = np.array([0.61_737, 0.54_642, 0.53_183, 0.54_465, 0.52_742, 0.60_525, 0.49_969, 0.40_655, 0.48_154] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
def UpperCAmelCase ( self : str ) -> Tuple:
__UpperCAmelCase : Tuple = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
__UpperCAmelCase : int = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=__lowercase )
# warmup pass to apply optimizations
__UpperCAmelCase : Optional[int] = pipe(**self.get_dummy_inputs() )
__UpperCAmelCase : Tuple = self.get_dummy_inputs()
__UpperCAmelCase : Any = pipe(**__lowercase ).images
__UpperCAmelCase : List[Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
__UpperCAmelCase : Optional[int] = np.array([0.52_761, 0.59_977, 0.49_033, 0.49_619, 0.54_282, 0.50_311, 0.47_600, 0.40_918, 0.45_203] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
def UpperCAmelCase ( self : Optional[Any] ) -> str:
__UpperCAmelCase : int = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
__UpperCAmelCase : Union[str, Any] = EulerDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=__lowercase )
__UpperCAmelCase : List[str] = self.get_dummy_inputs()
__UpperCAmelCase : int = pipe(**__lowercase ).images
__UpperCAmelCase : Dict = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
__UpperCAmelCase : Tuple = np.array([0.52_911, 0.60_004, 0.49_229, 0.49_805, 0.54_502, 0.50_680, 0.47_777, 0.41_028, 0.45_304] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
def UpperCAmelCase ( self : int ) -> Any:
__UpperCAmelCase : Any = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
__UpperCAmelCase : List[Any] = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=__lowercase )
__UpperCAmelCase : Union[str, Any] = self.get_dummy_inputs()
__UpperCAmelCase : int = pipe(**__lowercase ).images
__UpperCAmelCase : List[Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
__UpperCAmelCase : List[str] = np.array([0.52_911, 0.60_004, 0.49_229, 0.49_805, 0.54_502, 0.50_680, 0.47_777, 0.41_028, 0.45_304] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
def UpperCAmelCase ( self : Tuple ) -> str:
__UpperCAmelCase : Dict = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
__UpperCAmelCase : str = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=__lowercase )
__UpperCAmelCase : Optional[Any] = self.get_dummy_inputs()
__UpperCAmelCase : int = pipe(**__lowercase ).images
__UpperCAmelCase : List[Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
__UpperCAmelCase : Union[str, Any] = np.array([0.65_331, 0.58_277, 0.48_204, 0.56_059, 0.53_665, 0.56_235, 0.50_969, 0.40_009, 0.46_552] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionImgaImgPipelineIntegrationTests(unittest.TestCase):
"""simple docstring"""
@property
def UpperCAmelCase ( self : Dict ) -> List[Any]:
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def UpperCAmelCase ( self : Tuple ) -> Tuple:
__UpperCAmelCase : Optional[int] = ort.SessionOptions()
__UpperCAmelCase : List[Any] = False
return options
def UpperCAmelCase ( self : List[str] ) -> Tuple:
__UpperCAmelCase : List[Any] = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/img2img/sketch-mountains-input.jpg""" )
__UpperCAmelCase : Dict = init_image.resize((768, 512) )
# using the PNDM scheduler by default
__UpperCAmelCase : Any = OnnxStableDiffusionImgaImgPipeline.from_pretrained(
"""CompVis/stable-diffusion-v1-4""" , revision="""onnx""" , safety_checker=__lowercase , feature_extractor=__lowercase , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=__lowercase )
__UpperCAmelCase : Dict = """A fantasy landscape, trending on artstation"""
__UpperCAmelCase : str = np.random.RandomState(0 )
__UpperCAmelCase : Optional[Any] = pipe(
prompt=__lowercase , image=__lowercase , strength=0.75 , guidance_scale=7.5 , num_inference_steps=10 , generator=__lowercase , output_type="""np""" , )
__UpperCAmelCase : str = output.images
__UpperCAmelCase : int = images[0, 255:258, 383:386, -1]
assert images.shape == (1, 512, 768, 3)
__UpperCAmelCase : Union[str, Any] = np.array([0.4_909, 0.5_059, 0.5_372, 0.4_623, 0.4_876, 0.5_049, 0.4_820, 0.4_956, 0.5_019] )
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2
def UpperCAmelCase ( self : Optional[Any] ) -> str:
__UpperCAmelCase : Tuple = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/img2img/sketch-mountains-input.jpg""" )
__UpperCAmelCase : int = init_image.resize((768, 512) )
__UpperCAmelCase : Tuple = LMSDiscreteScheduler.from_pretrained(
"""runwayml/stable-diffusion-v1-5""" , subfolder="""scheduler""" , revision="""onnx""" )
__UpperCAmelCase : Tuple = OnnxStableDiffusionImgaImgPipeline.from_pretrained(
"""runwayml/stable-diffusion-v1-5""" , revision="""onnx""" , scheduler=__lowercase , safety_checker=__lowercase , feature_extractor=__lowercase , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=__lowercase )
__UpperCAmelCase : Dict = """A fantasy landscape, trending on artstation"""
__UpperCAmelCase : int = np.random.RandomState(0 )
__UpperCAmelCase : Optional[int] = pipe(
prompt=__lowercase , image=__lowercase , strength=0.75 , guidance_scale=7.5 , num_inference_steps=20 , generator=__lowercase , output_type="""np""" , )
__UpperCAmelCase : Union[str, Any] = output.images
__UpperCAmelCase : Union[str, Any] = images[0, 255:258, 383:386, -1]
assert images.shape == (1, 512, 768, 3)
__UpperCAmelCase : str = np.array([0.8_043, 0.926, 0.9_581, 0.8_119, 0.8_954, 0.913, 0.7_209, 0.7_463, 0.7_431] )
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2
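# Hedged sketch of the scheduler-swapping pattern the tests above rely on: a
# diffusers scheduler can be rebuilt from another scheduler's config via
# `from_config` (assumes `diffusers` is installed; no model download needed).
def _scheduler_swap_sketch():
    scheduler = PNDMScheduler()  # imported at the top of this file
    swapped = LMSDiscreteScheduler.from_config(scheduler.config)
    return type(swapped).__name__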
| 114 | 1 |
from __future__ import annotations
import requests
def get_hackernews_story(story_id: str) -> dict:
    '''simple docstring'''
    url = f"https://hacker-news.firebaseio.com/v0/item/{story_id}.json?print=pretty"
    return requests.get(url).json()
def hackernews_top_stories(max_stories: int = 10) -> list[dict]:
    '''simple docstring'''
    url = "https://hacker-news.firebaseio.com/v0/topstories.json?print=pretty"
    story_ids = requests.get(url).json()[:max_stories]
    return [get_hackernews_story(story_id) for story_id in story_ids]
def hackernews_top_stories_as_markdown(max_stories: int = 10) -> str:
    '''simple docstring'''
    stories = hackernews_top_stories(max_stories)
    return "\n".join("* [{title}]({url})".format(**story) for story in stories)
if __name__ == "__main__":
print(hackernews_top_stories_as_markdown())
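    # Offline sketch of the same markdown formatting, using made-up stories so
    # no network request is needed.
    fake_stories = [
        {"title": "Example story", "url": "https://example.com"},
        {"title": "Another story", "url": "https://example.org"},
    ]
    markdown = "\n".join("* [{title}]({url})".format(**story) for story in fake_stories)
    assert markdown.startswith("* [Example story]")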
| 358 | from __future__ import annotations
def carrier_concentration(electron_conc: float, hole_conc: float, intrinsic_conc: float) -> tuple:
'''simple docstring'''
if (electron_conc, hole_conc, intrinsic_conc).count(0 ) != 1:
raise ValueError("You cannot supply more or less than 2 values" )
elif electron_conc < 0:
raise ValueError("Electron concentration cannot be negative in a semiconductor" )
elif hole_conc < 0:
raise ValueError("Hole concentration cannot be negative in a semiconductor" )
elif intrinsic_conc < 0:
raise ValueError(
"Intrinsic concentration cannot be negative in a semiconductor" )
elif electron_conc == 0:
return (
"electron_conc",
intrinsic_conc**2 / hole_conc,
)
elif hole_conc == 0:
return (
"hole_conc",
intrinsic_conc**2 / electron_conc,
)
elif intrinsic_conc == 0:
return (
"intrinsic_conc",
(electron_conc * hole_conc) ** 0.5,
)
else:
return (-1, -1)
if __name__ == "__main__":
import doctest
doctest.testmod()
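    # Worked check of the mass-action law n * p = n_i**2 implemented above:
    # with n = 25 and n_i = 10, the missing hole concentration is 100 / 25 = 4.
    assert carrier_concentration(25, 0, 10) == ("hole_conc", 4.0)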
| 143 | 0 |
from __future__ import annotations
def fractional_knapsack(value: list[int], weight: list[int], capacity: int) -> tuple[float, list[float]]:
    index = list(range(len(value)))
    ratio = [v / w for v, w in zip(value, weight)]
    index.sort(key=lambda i: ratio[i], reverse=True)
    max_value: float = 0
    fractions: list[float] = [0] * len(value)
    for i in index:
        if weight[i] <= capacity:
            fractions[i] = 1
            max_value += value[i]
            capacity -= weight[i]
        else:
            fractions[i] = capacity / weight[i]
            max_value += value[i] * capacity / weight[i]
            break
    return max_value, fractions
if __name__ == "__main__":
import doctest
doctest.testmod()
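    # Worked example for the greedy routine above: the classic instance where
    # the last item is taken fractionally (2/3 of item 2 fits in what is left).
    max_value, fractions = fractional_knapsack([60, 100, 120], [10, 20, 30], 50)
    assert max_value == 240.0
    assert fractions == [1, 1, 2 / 3]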
| 11 |
import re
from flax.core.frozen_dict import freeze
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.experimental import PartitionSpec as P
# Sentinels
lowerCAmelCase__ = object()
# For specifying empty leaf dict `{}`
lowerCAmelCase__ = object()
def _UpperCAmelCase (UpperCamelCase__ : str , UpperCamelCase__ : Union[str, Any] ):
_A : str = tuple((re.compile(x + "$" ) for x in qs) )
for i in range(len(UpperCamelCase__ ) - len(UpperCamelCase__ ) + 1 ):
_A : Tuple = [x.match(UpperCamelCase__ ) for x, y in zip(UpperCamelCase__ , ks[i:] )]
if matches and all(UpperCamelCase__ ):
return True
return False
def _UpperCAmelCase (UpperCamelCase__ : str ):
def replace(UpperCamelCase__ : List[str] , UpperCamelCase__ : Optional[int] ):
for rule, replacement in rules:
if _match(UpperCamelCase__ , UpperCamelCase__ ):
return replacement
return val
return replace
def _UpperCAmelCase ():
return [
# embeddings
(("transformer", "wpe", "embedding"), P("mp" , UpperCamelCase__ )),
(("transformer", "wte", "embedding"), P("mp" , UpperCamelCase__ )),
# atention
(("attention", "(q_proj|k_proj|v_proj)", "kernel"), P(UpperCamelCase__ , "mp" )),
(("attention", "out_proj", "kernel"), P("mp" , UpperCamelCase__ )),
(("attention", "out_proj", "bias"), None),
# mlp
(("mlp", "c_fc", "kernel"), P(UpperCamelCase__ , "mp" )),
(("mlp", "c_fc", "bias"), P("mp" )),
(("mlp", "c_proj", "kernel"), P("mp" , UpperCamelCase__ )),
(("mlp", "c_proj", "bias"), None),
# layer norms
((r"ln_\d+", "bias"), None),
((r"\d+", r"ln_\d+", "scale"), None),
(("ln_f", "bias"), None),
(("ln_f", "scale"), None),
]
def _UpperCAmelCase (UpperCamelCase__ : List[str] ):
_A : int = _get_partition_rules()
_A : Optional[int] = _replacement_rules(UpperCamelCase__ )
_A : Optional[int] = {k: _unmatched for k in flatten_dict(UpperCamelCase__ )}
_A : List[str] = {k: replace(UpperCamelCase__ , UpperCamelCase__ ) for k, v in initd.items()}
assert _unmatched not in result.values(), "Incomplete partition spec."
return freeze(unflatten_dict(UpperCamelCase__ ) )
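# Pure-Python sketch of the rule matching above: a flattened parameter key (a
# tuple of names) matches when some contiguous slice satisfies every regex.
def _match_sketch(qs, ks):
    qts = tuple(re.compile(x + "$") for x in qs)
    return any(
        all(r.match(k) for r, k in zip(qts, ks[i:]))
        for i in range(len(ks) - len(qts) + 1)
    )
assert _match_sketch(("attention", "out_proj", "kernel"),
                     ("transformer", "h", "0", "attention", "out_proj", "kernel"))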
| 11 | 1 |
'''simple docstring'''
DOOMSDAY_LEAP = [4, 1, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
DOOMSDAY_NOT_LEAP = [3, 7, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
WEEK_DAY_NAMES = {
    0: "Sunday",
    1: "Monday",
    2: "Tuesday",
    3: "Wednesday",
    4: "Thursday",
    5: "Friday",
    6: "Saturday",
}
def get_week_day(year: int, month: int, day: int) -> str:
    assert len(str(year)) > 2, "year should be in YYYY format"
    assert 1 <= month <= 12, "month should be between 1 to 12"
    assert 1 <= day <= 31, "day should be between 1 to 31"
    # Doomsday algorithm:
    century = year // 100
    century_anchor = (5 * (century % 4) + 2) % 7
    centurian = year % 100
    centurian_m = centurian % 12
    dooms_day = (
        (centurian // 12) + centurian_m + (centurian_m // 4) + century_anchor
    ) % 7
    day_anchor = (
        DOOMSDAY_NOT_LEAP[month - 1]
        if (year % 4 != 0) or (centurian == 0 and (year % 400) != 0)
        else DOOMSDAY_LEAP[month - 1]
    )
    week_day = (dooms_day + day - day_anchor) % 7
    return WEEK_DAY_NAMES[week_day]
if __name__ == "__main__":
import doctest
doctest.testmod()
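    # Sanity checks (dates verified against a calendar): both 2000-01-01 and
    # 2020-10-24 fell on a Saturday.
    assert get_week_day(2000, 1, 1) == "Saturday"
    assert get_week_day(2020, 10, 24) == "Saturday"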
| 353 |
'''simple docstring'''
def price_plus_tax(price: float, tax_rate: float) -> float:
return price * (1 + tax_rate)
if __name__ == "__main__":
print(F"""{price_plus_tax(1_0_0, 0.25) = }""")
print(F"""{price_plus_tax(1_25.50, 0.05) = }""")
| 0 | 0 |
"""simple docstring"""
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import torch
from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available
@dataclass
class TextToVideoSDPipelineOutput(BaseOutput):
    frames: Union[List[np.ndarray], torch.FloatTensor]
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipeline_text_to_video_synth import TextToVideoSDPipeline
from .pipeline_text_to_video_synth_imgaimg import VideoToVideoSDPipeline # noqa: F401
from .pipeline_text_to_video_zero import TextToVideoZeroPipeline
| 172 | """simple docstring"""
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
from .feature_extraction_wavaveca import WavaVecaFeatureExtractor
from .tokenization_wavaveca import WavaVecaCTCTokenizer
class WavaVecaProcessor(ProcessorMixin):
    feature_extractor_class = "Wav2Vec2FeatureExtractor"
    tokenizer_class = "AutoTokenizer"
def __init__(self : int , _A : List[str] , _A : str) -> str:
super().__init__(_A , _A)
__snake_case : Tuple = self.feature_extractor
__snake_case : str = False
@classmethod
def _lowercase (cls : Union[str, Any] , _A : Optional[Any] , **_A : str) -> List[Any]:
try:
return super().from_pretrained(_A , **_A)
except OSError:
warnings.warn(
f"Loading a tokenizer inside {cls.__name__} from a config that does not"
' include a `tokenizer_class` attribute is deprecated and will be '
'removed in v5. Please add `\'tokenizer_class\': \'Wav2Vec2CTCTokenizer\'`'
' attribute to either your `config.json` or `tokenizer_config.json` '
'file to suppress this warning: ' , _A , )
__snake_case : List[str] = WavaVecaFeatureExtractor.from_pretrained(_A , **_A)
__snake_case : Any = WavaVecaCTCTokenizer.from_pretrained(_A , **_A)
return cls(feature_extractor=_A , tokenizer=_A)
def __call__(self : int , *_A : List[Any] , **_A : str) -> str:
# For backward compatibility
if self._in_target_context_manager:
return self.current_processor(*_A , **_A)
if "raw_speech" in kwargs:
warnings.warn('Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.')
__snake_case : int = kwargs.pop('raw_speech')
else:
__snake_case : Optional[Any] = kwargs.pop('audio' , _A)
__snake_case : Tuple = kwargs.pop('sampling_rate' , _A)
__snake_case : Any = kwargs.pop('text' , _A)
if len(_A) > 0:
__snake_case : Any = args[0]
__snake_case : Dict = args[1:]
if audio is None and text is None:
raise ValueError('You need to specify either an `audio` or `text` input to process.')
if audio is not None:
__snake_case : str = self.feature_extractor(_A , *_A , sampling_rate=_A , **_A)
if text is not None:
__snake_case : List[str] = self.tokenizer(_A , **_A)
if text is None:
return inputs
elif audio is None:
return encodings
else:
__snake_case : List[str] = encodings['input_ids']
return inputs
def _lowercase (self : str , *_A : Optional[Any] , **_A : int) -> Any:
# For backward compatibility
if self._in_target_context_manager:
return self.current_processor.pad(*_A , **_A)
__snake_case : Optional[int] = kwargs.pop('input_features' , _A)
__snake_case : List[Any] = kwargs.pop('labels' , _A)
if len(_A) > 0:
__snake_case : Tuple = args[0]
__snake_case : Union[str, Any] = args[1:]
if input_features is not None:
__snake_case : Optional[Any] = self.feature_extractor.pad(_A , *_A , **_A)
if labels is not None:
__snake_case : Tuple = self.tokenizer.pad(_A , **_A)
if labels is None:
return input_features
elif input_features is None:
return labels
else:
__snake_case : str = labels['input_ids']
return input_features
def _lowercase (self : Union[str, Any] , *_A : Any , **_A : List[Any]) -> List[Any]:
return self.tokenizer.batch_decode(*_A , **_A)
def _lowercase (self : Union[str, Any] , *_A : Dict , **_A : Union[str, Any]) -> Any:
return self.tokenizer.decode(*_A , **_A)
@contextmanager
def _lowercase (self : List[str]) -> int:
warnings.warn(
'`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your '
'labels by using the argument `text` of the regular `__call__` method (either in the same call as '
'your audio inputs, or in a separate call.')
__snake_case : Dict = True
__snake_case : Union[str, Any] = self.tokenizer
yield
__snake_case : Optional[Any] = self.feature_extractor
__snake_case : int = False
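# Hedged, dependency-free sketch of the `as_target_processor` pattern above: a
# context manager that temporarily swaps which sub-processor handles calls
# (class and attribute names here are illustrative, not the transformers API).
class _SketchProcessor:
    def __init__(self, feature_extractor, tokenizer):
        self.feature_extractor = feature_extractor
        self.tokenizer = tokenizer
        self.current_processor = feature_extractor

    @contextmanager
    def as_target_processor(self):
        self.current_processor = self.tokenizer
        try:
            yield
        finally:
            self.current_processor = self.feature_extractor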
| 172 | 1 |
import os
import numpy
import onnx
def _is_equal_tensor_proto(a, b):
    name_a = a.name
    name_b = b.name
    a.name = ""
    b.name = ""
    res = a == b  # compare the protos with their names blanked out
    a.name = name_a
    b.name = name_b
    return res
def _node_replace_input_with(node_proto, name, new_name):
    for i, input_name in enumerate(node_proto.input):
        if input_name == name:
            node_proto.input.insert(i, new_name)
            node_proto.input.pop(i + 1)
    if node_proto.op_type == "If":
        _graph_replace_input_with(node_proto.attribute[0].g, name, new_name)
        _graph_replace_input_with(node_proto.attribute[1].g, name, new_name)
    if node_proto.op_type == "Loop":
        _graph_replace_input_with(node_proto.attribute[0].g, name, new_name)
def _graph_replace_input_with(graph_proto, name, new_name):
    for n in graph_proto.node:
        _node_replace_input_with(n, name, new_name)
def _remove_dup_initializers_from_model(model, model_without_ext, ind_to_replace):
    inits_with_data = list(model.graph.initializer)
    inits = list(model_without_ext.graph.initializer)
    for i, ref_i in ind_to_replace:
        assert inits_with_data[i].name == inits[i].name
        assert inits_with_data[ref_i].name == inits[ref_i].name
        assert i > ref_i
        name_i = inits[i].name
        name_ref = inits[ref_i].name
        model_without_ext.graph.initializer.remove(inits[i])
        # replace every use of the removed initializer with the kept one
        _graph_replace_input_with(model_without_ext.graph, name_i, name_ref)
def remove_dup_initializers(onnx_file_path):
    model_file_folder = os.path.dirname(onnx_file_path)
    model_file_name = os.path.basename(onnx_file_path)
    model = onnx.load(os.path.join(model_file_folder, model_file_name))
    inits = list(model.graph.initializer)
    dup_set = set()
    dup_map = {}
    ind_to_replace = []
    total_reduced_size = 0
    for i in range(len(inits)):
        if i in dup_set:
            continue
        for j in range(i + 1, len(inits)):
            if j in dup_set:
                continue
            if _is_equal_tensor_proto(inits[i], inits[j]):
                dup_set.add(i)
                dup_set.add(j)
                dtype = inits[j].data_type
                mem_size = numpy.prod(inits[j].dims)
                if dtype == 1:  # float32
                    mem_size *= 4
                elif dtype == 6:  # int32
                    mem_size *= 4
                elif dtype == 7 or dtype == 11:  # int64 / double
                    mem_size *= 8
                else:
                    print("unexpected data type: ", dtype)
                total_reduced_size += mem_size
                name_i = inits[i].name
                name_j = inits[j].name
                if name_i in dup_map:
                    dup_map[name_i].append(name_j)
                else:
                    dup_map[name_i] = [name_j]
                ind_to_replace.append((j, i))
    print("total reduced size: ", total_reduced_size / 1024 / 1024 / 1024, "GB")
    ind_to_replace = sorted(ind_to_replace)
    # dedupe in place: the same loaded model serves as source and target
    _remove_dup_initializers_from_model(model, model, ind_to_replace)
    new_model_file_name = "optimized_" + model_file_name
    new_model_path = os.path.join(model_file_folder, new_model_file_name)
    onnx.save(model, new_model_path)
    return new_model_path
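# Hedged NumPy sketch of the core dedup idea above: find duplicate arrays by
# comparing dtype, shape, and raw bytes, keeping the first occurrence of each.
def _find_duplicate_arrays_sketch(arrays):
    seen = {}
    duplicates = []  # (duplicate_index, first_index) pairs
    for idx, arr in enumerate(arrays):
        key = (arr.dtype.str, arr.shape, arr.tobytes())
        if key in seen:
            duplicates.append((idx, seen[key]))
        else:
            seen[key] = idx
    return duplicates
assert _find_duplicate_arrays_sketch([numpy.zeros(2), numpy.ones(2), numpy.zeros(2)]) == [(2, 0)]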
| 369 |
import hashlib
import unittest
from transformers import MODEL_FOR_DEPTH_ESTIMATION_MAPPING, is_torch_available, is_vision_available
from transformers.pipelines import DepthEstimationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
else:
    class Image:
        @staticmethod
        def open(*args, **kwargs):
            """simple docstring"""
            pass
def hashimage(image: Image) -> str:
    m = hashlib.md5(image.tobytes())
    return m.hexdigest()
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class DepthEstimationPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_DEPTH_ESTIMATION_MAPPING
def __SCREAMING_SNAKE_CASE( self , _A , _A , _A ):
"""simple docstring"""
__lowerCAmelCase = DepthEstimationPipeline(model=_A , image_processor=_A )
return depth_estimator, [
"./tests/fixtures/tests_samples/COCO/000000039769.png",
"./tests/fixtures/tests_samples/COCO/000000039769.png",
]
def __SCREAMING_SNAKE_CASE( self , _A , _A ):
"""simple docstring"""
__lowerCAmelCase = depth_estimator("./tests/fixtures/tests_samples/COCO/000000039769.png" )
self.assertEqual({"predicted_depth": ANY(torch.Tensor ), "depth": ANY(Image.Image )} , _A )
import datasets
__lowerCAmelCase = datasets.load_dataset("hf-internal-testing/fixtures_image_utils" , "image" , split="test" )
__lowerCAmelCase = depth_estimator(
[
Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ),
"http://images.cocodataset.org/val2017/000000039769.jpg",
# RGBA
dataset[0]["file"],
# LA
dataset[1]["file"],
# L
dataset[2]["file"],
] )
self.assertEqual(
[
{"predicted_depth": ANY(torch.Tensor ), "depth": ANY(Image.Image )},
{"predicted_depth": ANY(torch.Tensor ), "depth": ANY(Image.Image )},
{"predicted_depth": ANY(torch.Tensor ), "depth": ANY(Image.Image )},
{"predicted_depth": ANY(torch.Tensor ), "depth": ANY(Image.Image )},
{"predicted_depth": ANY(torch.Tensor ), "depth": ANY(Image.Image )},
] , _A , )
@require_tf
@unittest.skip("Depth estimation is not implemented in TF" )
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
pass
@slow
@require_torch
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase = "Intel/dpt-large"
__lowerCAmelCase = pipeline("depth-estimation" , model=_A )
__lowerCAmelCase = depth_estimator("http://images.cocodataset.org/val2017/000000039769.jpg" )
__lowerCAmelCase = hashimage(outputs["depth"] )
# This seems flaky.
# self.assertEqual(outputs["depth"], "1a39394e282e9f3b0741a90b9f108977")
self.assertEqual(nested_simplify(outputs["predicted_depth"].max().item() ) , 29.3_04 )
self.assertEqual(nested_simplify(outputs["predicted_depth"].min().item() ) , 2.6_62 )
@require_torch
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
self.skipTest("There is not hf-internal-testing tiny model for either GLPN nor DPT" )
| 102 | 0 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_flava import FlavaImageProcessor
SCREAMING_SNAKE_CASE_: str =logging.get_logger(__name__)
class FlavaFeatureExtractor(FlavaImageProcessor):
    def __init__(self, *args, **kwargs):
        warnings.warn(
            "The class FlavaFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use FlavaImageProcessor instead.", FutureWarning, )
        super().__init__(*args, **kwargs)
| 1 |
import enum
import os
from hashlib import sha256
from typing import Optional
from .. import config
from .logging import get_logger
logger = get_logger(__name__)
class VerificationMode(enum.Enum):
    ALL_CHECKS = "all_checks"
    BASIC_CHECKS = "basic_checks"
    NO_CHECKS = "no_checks"
class ChecksumVerificationException(Exception):
    pass
class UnexpectedDownloadedFile(ChecksumVerificationException):
    pass
class ExpectedMoreDownloadedFiles(ChecksumVerificationException):
    pass
class NonMatchingChecksumError(ChecksumVerificationException):
    pass
def verify_checksums(expected_checksums, recorded_checksums, verification_name=None):
    if expected_checksums is None:
        logger.info("Unable to verify checksums.")
        return
    if len(set(expected_checksums) - set(recorded_checksums)) > 0:
        raise ExpectedMoreDownloadedFiles(str(set(expected_checksums) - set(recorded_checksums)))
    if len(set(recorded_checksums) - set(expected_checksums)) > 0:
        raise UnexpectedDownloadedFile(str(set(recorded_checksums) - set(expected_checksums)))
    bad_urls = [url for url in expected_checksums if expected_checksums[url] != recorded_checksums[url]]
    for_verification_name = " for " + verification_name if verification_name is not None else ""
    if len(bad_urls) > 0:
        raise NonMatchingChecksumError(
            f"Checksums didn't match{for_verification_name}:\n"
            f"{bad_urls}\n"
            "Set `verification_mode='no_checks'` to skip checksums verification and ignore this error"
        )
    logger.info("All the checksums matched successfully" + for_verification_name)
class SplitsVerificationException(Exception):
    pass
class UnexpectedSplits(SplitsVerificationException):
    pass
class ExpectedMoreSplits(SplitsVerificationException):
    pass
class NonMatchingSplitsSizesError(SplitsVerificationException):
    pass
def verify_splits(expected_splits, recorded_splits):
    if expected_splits is None:
        logger.info("Unable to verify splits sizes.")
        return
    if len(set(expected_splits) - set(recorded_splits)) > 0:
        raise ExpectedMoreSplits(str(set(expected_splits) - set(recorded_splits)))
    if len(set(recorded_splits) - set(expected_splits)) > 0:
        raise UnexpectedSplits(str(set(recorded_splits) - set(expected_splits)))
    bad_splits = [
        {"expected": expected_splits[name], "recorded": recorded_splits[name]}
        for name in expected_splits
        if expected_splits[name].num_examples != recorded_splits[name].num_examples
    ]
    if len(bad_splits) > 0:
        raise NonMatchingSplitsSizesError(str(bad_splits))
    logger.info("All the splits matched successfully.")
def get_size_checksum_dict(path, record_checksum=True):
    if record_checksum:
        m = sha256()
        with open(path, "rb") as f:
            for chunk in iter(lambda: f.read(1 << 20), b""):
                m.update(chunk)
        checksum = m.hexdigest()
    else:
        checksum = None
    return {"num_bytes": os.path.getsize(path), "checksum": checksum}
def is_small_dataset(dataset_size):
    if dataset_size and config.IN_MEMORY_MAX_SIZE:
        return dataset_size < config.IN_MEMORY_MAX_SIZE
    else:
        return False
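# Hedged usage sketch: hash a file in 1 MiB chunks via the helper above
# (works for any readable path; the path argument is up to the caller).
def _checksum_sketch(path):
    info = get_size_checksum_dict(path)
    return info["num_bytes"], info["checksum"][:16]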
| 137 | 0 |
ERROR_MSG = "Input must be a string of 8 numbers plus letter"
LOOKUP_LETTERS = "TRWAGMYFPDXBNJZSQVHLCKE"
def is_spain_national_id(spanish_id: str) -> bool:
    '''simple docstring'''
    if not isinstance(spanish_id, str):
        msg = f"Expected string as input, found {type(spanish_id).__name__}"
        raise TypeError(msg)
    spanish_id_clean = spanish_id.replace("-", "").upper()
    if len(spanish_id_clean) != 9:
        raise ValueError(ERROR_MSG)
    try:
        number = int(spanish_id_clean[0:8])
        letter = spanish_id_clean[8]
    except ValueError as ex:
        raise ValueError(ERROR_MSG) from ex
    if letter.isdigit():
        raise ValueError(ERROR_MSG)
    return letter == LOOKUP_LETTERS[number % 23]
if __name__ == "__main__":
import doctest
doctest.testmod()
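    # Worked check: 12345678 % 23 == 14 and LOOKUP_LETTERS[14] == "Z", so the
    # classic sample DNI "12345678Z" validates, with or without the dash.
    assert is_spain_national_id("12345678Z")
    assert is_spain_national_id("12345678-Z")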
| 362 |
_snake_case = 8.3144598
def rms_speed_of_molecule(temperature: float, molar_mass: float) -> float:
'''simple docstring'''
if temperature < 0:
raise Exception("Temperature cannot be less than 0 K" )
if molar_mass <= 0:
raise Exception("Molar mass cannot be less than or equal to 0 kg/mol" )
else:
return (3 * UNIVERSAL_GAS_CONSTANT * temperature / molar_mass) ** 0.5
if __name__ == "__main__":
import doctest
# run doctest
doctest.testmod()
# example
    temperature = 300
    molar_mass = 28
    vrms = rms_speed_of_molecule(temperature, molar_mass)
print(f'''Vrms of Nitrogen gas at 300 K is {vrms} m/s''')
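    # Worked check with SI units (hedged note): for v_rms = sqrt(3RT/M) to come
    # out in m/s, the molar mass must be given in kg/mol, so nitrogen is 0.028
    # rather than 28; that yields roughly 517 m/s at 300 K.
    vrms_si = rms_speed_of_molecule(300, 0.028)
    assert abs(vrms_si - 517) < 1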
| 300 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
A_ :Any = {'''configuration_xglm''': ['''XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''XGLMConfig''']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ :Any = ['''XGLMTokenizer''']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ :Optional[int] = ['''XGLMTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_xglm'''] = [
'''XGLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''XGLMForCausalLM''',
'''XGLMModel''',
'''XGLMPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_flax_xglm'''] = [
'''FlaxXGLMForCausalLM''',
'''FlaxXGLMModel''',
'''FlaxXGLMPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_tf_xglm'''] = [
'''TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFXGLMForCausalLM''',
'''TFXGLMModel''',
'''TFXGLMPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm import XGLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm_fast import XGLMTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
TFXGLMPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
| 71 | import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTConfig, DeiTForImageClassificationWithTeacher, DeiTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
lowerCAmelCase__ : int = logging.get_logger(__name__)
def create_rename_keys(config, base_model=False):
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F"""blocks.{i}.norm1.weight""", F"""deit.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((F"""blocks.{i}.norm1.bias""", F"""deit.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append((F"""blocks.{i}.attn.proj.weight""", F"""deit.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append((F"""blocks.{i}.attn.proj.bias""", F"""deit.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append((F"""blocks.{i}.norm2.weight""", F"""deit.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((F"""blocks.{i}.norm2.bias""", F"""deit.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append((F"""blocks.{i}.mlp.fc1.weight""", F"""deit.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append((F"""blocks.{i}.mlp.fc1.bias""", F"""deit.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append((F"""blocks.{i}.mlp.fc2.weight""", F"""deit.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((F"""blocks.{i}.mlp.fc2.bias""", F"""deit.encoder.layer.{i}.output.dense.bias""") )
# projection layer + position embeddings
rename_keys.extend(
[
('cls_token', 'deit.embeddings.cls_token'),
('dist_token', 'deit.embeddings.distillation_token'),
('patch_embed.proj.weight', 'deit.embeddings.patch_embeddings.projection.weight'),
('patch_embed.proj.bias', 'deit.embeddings.patch_embeddings.projection.bias'),
('pos_embed', 'deit.embeddings.position_embeddings'),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('norm.weight', 'layernorm.weight'),
('norm.bias', 'layernorm.bias'),
('pre_logits.fc.weight', 'pooler.dense.weight'),
('pre_logits.fc.bias', 'pooler.dense.bias'),
] )
# if just the base model, we should remove "deit" from all keys that start with "deit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith('deit') else pair for pair in rename_keys]
else:
# layernorm + classification heads
rename_keys.extend(
[
('norm.weight', 'deit.layernorm.weight'),
('norm.bias', 'deit.layernorm.bias'),
('head.weight', 'cls_classifier.weight'),
('head.bias', 'cls_classifier.bias'),
('head_dist.weight', 'distillation_classifier.weight'),
('head_dist.bias', 'distillation_classifier.bias'),
] )
return rename_keys
def read_in_q_k_v(state_dict, config, base_model=False):
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ''
        else:
            prefix = 'deit.'
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(F"""blocks.{i}.attn.qkv.weight""")
        in_proj_bias = state_dict.pop(F"""blocks.{i}.attn.qkv.bias""")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[F"""{prefix}encoder.layer.{i}.attention.attention.query.weight"""] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[F"""{prefix}encoder.layer.{i}.attention.attention.query.bias"""] = in_proj_bias[: config.hidden_size]
        state_dict[F"""{prefix}encoder.layer.{i}.attention.attention.key.weight"""] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[F"""{prefix}encoder.layer.{i}.attention.attention.key.bias"""] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[F"""{prefix}encoder.layer.{i}.attention.attention.value.weight"""] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[F"""{prefix}encoder.layer.{i}.attention.attention.value.bias"""] = in_proj_bias[-config.hidden_size :]
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def prepare_img():
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_deit_checkpoint(deit_name, pytorch_dump_folder_path):
    config = DeiTConfig()
    # all deit models have fine-tuned heads
    base_model = False
    # dataset (fine-tuned on ImageNet 2012), patch_size and image_size
    config.num_labels = 1000
    repo_id = 'huggingface/label-files'
    filename = 'imagenet-1k-id2label.json'
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type='dataset'), 'r'))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    config.patch_size = int(deit_name[-6:-4])
    config.image_size = int(deit_name[-3:])
    # size of the architecture
    if deit_name[9:].startswith('tiny'):
        config.hidden_size = 192
        config.intermediate_size = 768
        config.num_hidden_layers = 12
        config.num_attention_heads = 3
    elif deit_name[9:].startswith('small'):
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 6
    if deit_name[9:].startswith('base'):
        pass
    elif deit_name[4:].startswith('large'):
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
    # load original model from timm
    timm_model = timm.create_model(deit_name, pretrained=True)
    timm_model.eval()
    # load state_dict of original model, remove and rename some keys
    state_dict = timm_model.state_dict()
    rename_keys = create_rename_keys(config, base_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model)
    # load HuggingFace model
    model = DeiTForImageClassificationWithTeacher(config).eval()
    model.load_state_dict(state_dict)
    # Check outputs on an image, prepared by DeiTImageProcessor
    size = int(
        (256 / 224) * config.image_size)  # to maintain same ratio w.r.t. 224 images, see https://github.com/facebookresearch/deit/blob/ab5715372db8c6cad5740714b2216d55aeae052e/datasets.py#L103
    image_processor = DeiTImageProcessor(size=size, crop_size=config.image_size)
    encoding = image_processor(images=prepare_img(), return_tensors='pt')
    pixel_values = encoding['pixel_values']
    outputs = model(pixel_values)
    timm_logits = timm_model(pixel_values)
    assert timm_logits.shape == outputs.logits.shape
    assert torch.allclose(timm_logits, outputs.logits, atol=1e-3)
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(F"""Saving model {deit_name} to {pytorch_dump_folder_path}""")
    model.save_pretrained(pytorch_dump_folder_path)
    print(F"""Saving image processor to {pytorch_dump_folder_path}""")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--deit_name''',
default='''vit_deit_base_distilled_patch16_224''',
type=str,
help='''Name of the DeiT timm model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
    args = parser.parse_args()
convert_deit_checkpoint(args.deit_name, args.pytorch_dump_folder_path)
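    # Example invocation (an assumption based on the flags defined above; the
    # script filename is hypothetical):
    #   python convert_deit_timm_to_pytorch.py \
    #       --deit_name vit_deit_base_distilled_patch16_224 \
    #       --pytorch_dump_folder_path ./deit-base-distilled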
| 143 | 0 |
"""simple docstring"""
import argparse
import json
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import AutoImageProcessor, SwinConfig, SwinForImageClassification
def get_swin_config(swin_name):
    """Build a SwinConfig from a timm checkpoint name such as swin_tiny_patch4_window7_224."""
    config = SwinConfig()
    name_split = swin_name.split('_')
    model_size = name_split[1]
    img_size = int(name_split[4])
    window_size = int(name_split[3][-1])
    if model_size == "tiny":
        embed_dim = 96
        depths = (2, 2, 6, 2)
        num_heads = (3, 6, 12, 24)
    elif model_size == "small":
        embed_dim = 96
        depths = (2, 2, 18, 2)
        num_heads = (3, 6, 12, 24)
    elif model_size == "base":
        embed_dim = 128
        depths = (2, 2, 18, 2)
        num_heads = (4, 8, 16, 32)
    else:
        embed_dim = 192
        depths = (2, 2, 18, 2)
        num_heads = (6, 12, 24, 48)
    if "in22k" in swin_name:
        num_classes = 21841
    else:
        num_classes = 1000
        repo_id = 'huggingface/label-files'
        filename = 'imagenet-1k-id2label.json'
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type='dataset'), 'r'))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    config.image_size = img_size
    config.num_labels = num_classes
    config.embed_dim = embed_dim
    config.depths = depths
    config.num_heads = num_heads
    config.window_size = window_size
    return config
def rename_key(name):
    """Map a timm parameter name to its Transformers equivalent."""
    if "patch_embed.proj" in name:
        name = name.replace('patch_embed.proj', 'embeddings.patch_embeddings.projection')
    if "patch_embed.norm" in name:
        name = name.replace('patch_embed.norm', 'embeddings.norm')
    if "layers" in name:
        name = 'encoder.' + name
    if "attn.proj" in name:
        name = name.replace('attn.proj', 'attention.output.dense')
    if "attn" in name:
        name = name.replace('attn', 'attention.self')
    if "norm1" in name:
        name = name.replace('norm1', 'layernorm_before')
    if "norm2" in name:
        name = name.replace('norm2', 'layernorm_after')
    if "mlp.fc1" in name:
        name = name.replace('mlp.fc1', 'intermediate.dense')
    if "mlp.fc2" in name:
        name = name.replace('mlp.fc2', 'output.dense')
    if name == "norm.weight":
        name = 'layernorm.weight'
    if name == "norm.bias":
        name = 'layernorm.bias'
    if "head" in name:
        name = name.replace('head', 'classifier')
    else:
        name = 'swin.' + name
    return name
def convert_state_dict(orig_state_dict, model):
    """Rename timm state-dict keys and split fused qkv projections into q/k/v."""
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)
        if "mask" in key:
            continue
        elif "qkv" in key:
            key_split = key.split('.')
            layer_num = int(key_split[1])
            block_num = int(key_split[3])
            dim = model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
            if "weight" in key:
                orig_state_dict[
                    F"""swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.weight"""
                ] = val[:dim, :]
                orig_state_dict[
                    F"""swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.weight"""
                ] = val[dim : dim * 2, :]
                orig_state_dict[
                    F"""swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.weight"""
                ] = val[-dim:, :]
            else:
                orig_state_dict[
                    F"""swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.bias"""
                ] = val[:dim]
                orig_state_dict[
                    F"""swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.bias"""
                ] = val[dim : dim * 2]
                orig_state_dict[
                    F"""swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.bias"""
                ] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val
    return orig_state_dict
def convert_swin_checkpoint(swin_name, pytorch_dump_folder_path):
    timm_model = timm.create_model(swin_name, pretrained=True)
    timm_model.eval()
    config = get_swin_config(swin_name)
    model = SwinForImageClassification(config)
    model.eval()
    new_state_dict = convert_state_dict(timm_model.state_dict(), model)
    model.load_state_dict(new_state_dict)
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    image_processor = AutoImageProcessor.from_pretrained('microsoft/{}'.format(swin_name.replace('_', '-')))
    image = Image.open(requests.get(url, stream=True).raw)
    inputs = image_processor(images=image, return_tensors='pt')
    timm_outs = timm_model(inputs['pixel_values'])
    hf_outs = model(**inputs).logits
    assert torch.allclose(timm_outs, hf_outs, atol=1e-3)
    print(f"""Saving model {swin_name} to {pytorch_dump_folder_path}""")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"""Saving image processor to {pytorch_dump_folder_path}""")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--swin_name",
default="swin_tiny_patch4_window7_224",
type=str,
help="Name of the Swin timm model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
    args = parser.parse_args()
convert_swin_checkpoint(args.swin_name, args.pytorch_dump_folder_path)
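    # Example invocation (an assumption based on the flags defined above; the
    # script filename is hypothetical):
    #   python convert_swin_timm_to_pytorch.py \
    #       --swin_name swin_tiny_patch4_window7_224 \
    #       --pytorch_dump_folder_path ./swin-tiny-patch4-window7-224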
| 149 | """simple docstring"""
from typing import Optional
import numpy as np
import torch
from torch import nn
from transformers import GPT2Config, GPT2LMHeadModel
from transformers.modeling_utils import ModuleUtilsMixin
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class UniDiffuserTextDecoder(ModelMixin, ConfigMixin, ModuleUtilsMixin):
    """Text decoder: a GPT-2 LM head conditioned on a projected prefix embedding."""

    _keys_to_ignore_on_load_unexpected = [r"""h\.\d+\.attn\.bias""", r"""h\.\d+\.attn\.masked_bias"""]

    @register_to_config
    def __init__( self , prefix_length: int , prefix_inner_dim: int , prefix_hidden_dim: Optional[int] = None , vocab_size: int = 50257 , n_positions: int = 1024 , n_embd: int = 768 , n_layer: int = 12 , n_head: int = 12 , n_inner: Optional[int] = None , activation_function: str = "gelu_new" , resid_pdrop: float = 0.1 , embd_pdrop: float = 0.1 , attn_pdrop: float = 0.1 , layer_norm_epsilon: float = 1e-5 , initializer_range: float = 0.02 , scale_attn_weights: bool = True , use_cache: bool = True , scale_attn_by_inverse_layer_idx: bool = False , reorder_and_upcast_attn: bool = False , ):
        super().__init__()
        self.prefix_length = prefix_length
        if prefix_inner_dim != n_embd and prefix_hidden_dim is None:
            raise ValueError(
                F"""`prefix_hidden_dim` cannot be `None` when `prefix_inner_dim`: {prefix_hidden_dim} and"""
                F""" `n_embd`: {n_embd} are not equal.""")
        self.prefix_inner_dim = prefix_inner_dim
        self.prefix_hidden_dim = prefix_hidden_dim
        self.encode_prefix = (
            nn.Linear(self.prefix_inner_dim, self.prefix_hidden_dim)
            if self.prefix_hidden_dim is not None
            else nn.Identity()
        )
        self.decode_prefix = (
            nn.Linear(self.prefix_hidden_dim, n_embd) if self.prefix_hidden_dim is not None else nn.Identity()
        )
        gpt_config = GPT2Config(
            vocab_size=vocab_size, n_positions=n_positions, n_embd=n_embd, n_layer=n_layer, n_head=n_head, n_inner=n_inner, activation_function=activation_function, resid_pdrop=resid_pdrop, embd_pdrop=embd_pdrop, attn_pdrop=attn_pdrop, layer_norm_epsilon=layer_norm_epsilon, initializer_range=initializer_range, scale_attn_weights=scale_attn_weights, use_cache=use_cache, scale_attn_by_inverse_layer_idx=scale_attn_by_inverse_layer_idx, reorder_and_upcast_attn=reorder_and_upcast_attn, )
        self.transformer = GPT2LMHeadModel(gpt_config)
    def forward( self , input_ids , prefix_embeds , attention_mask = None , labels = None , ):
        embedding_text = self.transformer.transformer.wte(input_ids)
        hidden = self.encode_prefix(prefix_embeds)
        prefix_embeds = self.decode_prefix(hidden)
        embedding_cat = torch.cat((prefix_embeds, embedding_text), dim=1)
        if labels is not None:
            dummy_token = self.get_dummy_token(input_ids.shape[0], input_ids.device)
            labels = torch.cat((dummy_token, input_ids), dim=1)
        out = self.transformer(inputs_embeds=embedding_cat, labels=labels, attention_mask=attention_mask)
        if self.prefix_hidden_dim is not None:
            return out, hidden
        else:
            return out

    def get_dummy_token( self , batch_size , device ):
        return torch.zeros(batch_size, self.prefix_length, dtype=torch.int64, device=device)

    def encode( self , prefix ):
        return self.encode_prefix(prefix)
    @torch.no_grad()
    def generate_captions( self , features , eos_token_id , device ):
        """Generate captions from CLIP features, one beam search per feature."""
        features = torch.split(features, 1, dim=0)
        generated_tokens = []
        generated_seq_lengths = []
        for feature in features:
            feature = self.decode_prefix(feature.to(device))  # back to the clip feature
            # Only support beam search for now
            output_tokens, seq_lengths = self.generate_beam(
                input_embeds=feature, device=device, eos_token_id=eos_token_id)
            generated_tokens.append(output_tokens[0])
            generated_seq_lengths.append(seq_lengths[0])
        generated_tokens = torch.stack(generated_tokens)
        generated_seq_lengths = torch.stack(generated_seq_lengths)
        return generated_tokens, generated_seq_lengths
    @torch.no_grad()
    def generate_beam( self , input_ids = None , input_embeds = None , device = None , beam_size = 5 , entry_length = 67 , temperature = 1.0 , eos_token_id = None , ):
        stop_token_index = eos_token_id
        tokens = None
        scores = None
        seq_lengths = torch.ones(beam_size, device=device, dtype=torch.int)
        is_stopped = torch.zeros(beam_size, device=device, dtype=torch.bool)
        if input_embeds is not None:
            generated = input_embeds
        else:
            generated = self.transformer.transformer.wte(input_ids)
        for i in range(entry_length):
            outputs = self.transformer(inputs_embeds=generated)
            logits = outputs.logits
            logits = logits[:, -1, :] / (temperature if temperature > 0 else 1.0)
            logits = logits.softmax(-1).log()
            if scores is None:
                scores, next_tokens = logits.topk(beam_size, -1)
                generated = generated.expand(beam_size, *generated.shape[1:])
                next_tokens, scores = next_tokens.permute(1, 0), scores.squeeze(0)
                if tokens is None:
                    tokens = next_tokens
                else:
                    tokens = tokens.expand(beam_size, *tokens.shape[1:])
                    tokens = torch.cat((tokens, next_tokens), dim=1)
            else:
                logits[is_stopped] = -float(np.inf)
                logits[is_stopped, 0] = 0
                scores_sum = scores[:, None] + logits
                seq_lengths[~is_stopped] += 1
                scores_sum_average = scores_sum / seq_lengths[:, None]
                scores_sum_average, next_tokens = scores_sum_average.view(-1).topk(beam_size, -1)
                next_tokens_source = next_tokens // scores_sum.shape[1]
                seq_lengths = seq_lengths[next_tokens_source]
                next_tokens = next_tokens % scores_sum.shape[1]
                next_tokens = next_tokens.unsqueeze(1)
                tokens = tokens[next_tokens_source]
                tokens = torch.cat((tokens, next_tokens), dim=1)
                generated = generated[next_tokens_source]
                scores = scores_sum_average * seq_lengths
                is_stopped = is_stopped[next_tokens_source]
            next_token_embed = self.transformer.transformer.wte(next_tokens.squeeze()).view(generated.shape[0], 1, -1)
            generated = torch.cat((generated, next_token_embed), dim=1)
            is_stopped = is_stopped + next_tokens.eq(stop_token_index).squeeze()
            if is_stopped.all():
                break
        scores = scores / seq_lengths
        order = scores.argsort(descending=True)
        # tokens tensors are already padded to max_seq_length
        output_texts = [tokens[i] for i in order]
        output_texts = torch.stack(output_texts, dim=0)
        seq_lengths = torch.tensor([seq_lengths[i] for i in order], dtype=seq_lengths.dtype)
        return output_texts, seq_lengths
| 149 | 1 |
import math
from typing import Callable, List, Optional, Union
import numpy as np
import PIL
import torch
from PIL import Image
from transformers import CLIPTextModel, CLIPTokenizer
from diffusers.models import AutoencoderKL, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_upscale import StableDiffusionUpscalePipeline
from diffusers.schedulers import DDIMScheduler, DDPMScheduler, LMSDiscreteScheduler, PNDMScheduler
def make_transparency_mask(size, overlap_pixels, remove_borders=[]):
    """Build an alpha mask that linearly fades out over the tile's overlap region."""
    size_x = size[0] - overlap_pixels * 2
    size_y = size[1] - overlap_pixels * 2
    for letter in ["l", "r"]:
        if letter in remove_borders:
            size_x += overlap_pixels
    for letter in ["t", "b"]:
        if letter in remove_borders:
            size_y += overlap_pixels
    mask = np.ones((size_y, size_x), dtype=np.uint8) * 255
    mask = np.pad(mask, mode='linear_ramp', pad_width=overlap_pixels, end_values=0)
    if "l" in remove_borders:
        mask = mask[:, overlap_pixels : mask.shape[1]]
    if "r" in remove_borders:
        mask = mask[:, 0 : mask.shape[1] - overlap_pixels]
    if "t" in remove_borders:
        mask = mask[overlap_pixels : mask.shape[0], :]
    if "b" in remove_borders:
        mask = mask[0 : mask.shape[0] - overlap_pixels, :]
    return mask
def clamp(n, smallest, largest):
    return max(smallest, min(n, largest))
def clamp_rect(rect: [int], min: [int], max: [int]):
    return (
        clamp(rect[0], min[0], max[0]),
        clamp(rect[1], min[1], max[1]),
        clamp(rect[2], min[0], max[0]),
        clamp(rect[3], min[1], max[1]),
    )
def add_overlap_rect(rect: [int], overlap: int, image_size: [int]):
    rect = list(rect)
    rect[0] -= overlap
    rect[1] -= overlap
    rect[2] += overlap
    rect[3] += overlap
    rect = clamp_rect(rect, [0, 0], [image_size[0], image_size[1]])
    return rect
def squeeze_tile(tile, original_image, original_slice, slice_x):
    result = Image.new('RGB', (tile.size[0] + original_slice, tile.size[1]))
    result.paste(
        original_image.resize((tile.size[0], tile.size[1]), Image.BICUBIC).crop(
            (slice_x, 0, slice_x + original_slice, tile.size[1])), (0, 0), )
    result.paste(tile, (original_slice, 0))
    return result
def unsqueeze_tile(tile, original_image_slice):
    crop_rect = (original_image_slice * 4, 0, tile.size[0], tile.size[1])
    tile = tile.crop(crop_rect)
    return tile
def next_divisible(n, d):
    divisor = n % d
    return n - divisor
class StableDiffusionTiledUpscalePipeline(StableDiffusionUpscalePipeline):
    def __init__( self , vae , text_encoder , tokenizer , unet , low_res_scheduler , scheduler , max_noise_level = 350 , ):
        super().__init__(
            vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, unet=unet, low_res_scheduler=low_res_scheduler, scheduler=scheduler, max_noise_level=max_noise_level, )
    def _process_tile( self , original_image_slice , x , y , tile_size , tile_border , image , final_image , **kwargs ):
        """Upscale one tile and paste it into `final_image` with a feathered alpha mask."""
        torch.manual_seed(0)
        crop_rect = (
            min(image.size[0] - (tile_size + original_image_slice), x * tile_size),
            min(image.size[1] - (tile_size + original_image_slice), y * tile_size),
            min(image.size[0], (x + 1) * tile_size),
            min(image.size[1], (y + 1) * tile_size),
        )
        crop_rect_with_overlap = add_overlap_rect(crop_rect, tile_border, image.size)
        tile = image.crop(crop_rect_with_overlap)
        translated_slice_x = ((crop_rect[0] + ((crop_rect[2] - crop_rect[0]) / 2)) / image.size[0]) * tile.size[0]
        translated_slice_x = translated_slice_x - (original_image_slice / 2)
        translated_slice_x = max(0, translated_slice_x)
        to_input = squeeze_tile(tile, image, original_image_slice, translated_slice_x)
        orig_input_size = to_input.size
        to_input = to_input.resize((tile_size, tile_size), Image.BICUBIC)
        upscaled_tile = super(StableDiffusionTiledUpscalePipeline, self).__call__(image=to_input, **kwargs).images[0]
        upscaled_tile = upscaled_tile.resize((orig_input_size[0] * 4, orig_input_size[1] * 4), Image.BICUBIC)
        upscaled_tile = unsqueeze_tile(upscaled_tile, original_image_slice)
        upscaled_tile = upscaled_tile.resize((tile.size[0] * 4, tile.size[1] * 4), Image.BICUBIC)
        remove_borders = []
        if x == 0:
            remove_borders.append('l')
        elif crop_rect[2] == image.size[0]:
            remove_borders.append('r')
        if y == 0:
            remove_borders.append('t')
        elif crop_rect[3] == image.size[1]:
            remove_borders.append('b')
        transparency_mask = Image.fromarray(
            make_transparency_mask(
                (upscaled_tile.size[0], upscaled_tile.size[1]), tile_border * 4, remove_borders=remove_borders), mode='L', )
        final_image.paste(
            upscaled_tile, (crop_rect_with_overlap[0] * 4, crop_rect_with_overlap[1] * 4), transparency_mask)
@torch.no_grad()
    def __call__( self , prompt , image , num_inference_steps = 75 , guidance_scale = 9.0 , noise_level = 50 , negative_prompt = None , num_images_per_prompt = 1 , eta = 0.0 , generator = None , latents = None , callback = None , callback_steps = 1 , tile_size = 128 , tile_border = 32 , original_image_slice = 32 , ):
        # `callback_steps` is an assumed name for the extra default-1 parameter in
        # the obfuscated signature; it is unused below, matching the call flow here.
        final_image = Image.new('RGB', (image.size[0] * 4, image.size[1] * 4))
        tcx = math.ceil(image.size[0] / tile_size)
        tcy = math.ceil(image.size[1] / tile_size)
        total_tile_count = tcx * tcy
        current_count = 0
        for y in range(tcy):
            for x in range(tcx):
                self._process_tile(
                    original_image_slice, x, y, tile_size, tile_border, image, final_image, prompt=prompt, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, noise_level=noise_level, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, )
                current_count += 1
                if callback is not None:
                    callback({'progress': current_count / total_tile_count, 'image': final_image})
        return final_image
def main():
    model_id = 'stabilityai/stable-diffusion-x4-upscaler'
    pipe = StableDiffusionTiledUpscalePipeline.from_pretrained(model_id, revision='fp16', torch_dtype=torch.float16)
    pipe = pipe.to('cuda')
    image = Image.open('../../docs/source/imgs/diffusers_library.jpg')

    def callback(obj):
        print(F'''progress: {obj["progress"]:.4f}''')
        obj["image"].save('diffusers_library_progress.jpg')

    final_image = pipe(image=image, prompt='Black font, white background, vector', noise_level=40, callback=callback)
    final_image.save('diffusers_library.jpg')
if __name__ == "__main__":
main()
| 205 |
def _a ( a :float , a :float ) -> float:
return price * (1 + tax_rate)
if __name__ == "__main__":
print(f"""{price_plus_tax(100, 0.25) = }""")
print(f"""{price_plus_tax(125.50, 0.05) = }""")
| 0 | 0 |
from collections.abc import Generator
from math import sin
def to_little_endian(string_32):
    if len(string_32) != 32:
        raise ValueError("Input must be of length 32")
    little_endian = b""
    for i in [3, 2, 1, 0]:
        little_endian += string_32[8 * i : 8 * i + 8]
    return little_endian
def reformat_hex(i):
    if i < 0:
        raise ValueError("Input must be non-negative")
    hex_rep = format(i, "08x")[-8:]
    little_endian_hex = b""
    for i in [3, 2, 1, 0]:
        little_endian_hex += hex_rep[2 * i : 2 * i + 2].encode("utf-8")
    return little_endian_hex
def preprocess(message):
    bit_string = b""
    for char in message:
        bit_string += format(char, "08b").encode("utf-8")
    start_len = format(len(bit_string), "064b").encode("utf-8")
    # Pad bit_string to a multiple of 512 chars
    bit_string += b"1"
    while len(bit_string) % 512 != 448:
        bit_string += b"0"
    bit_string += to_little_endian(start_len[32:]) + to_little_endian(start_len[:32])
    return bit_string
def get_block_words(bit_string):
    if len(bit_string) % 512 != 0:
        raise ValueError("Input must have length that's a multiple of 512")
    for pos in range(0, len(bit_string), 512):
        block = bit_string[pos : pos + 512]
        block_words = []
        for i in range(0, 512, 32):
            block_words.append(int(to_little_endian(block[i : i + 32]), 2))
        yield block_words
def not_32(i):
    if i < 0:
        raise ValueError("Input must be non-negative")
    i_str = format(i, "032b")
    new_str = ""
    for c in i_str:
        new_str += "1" if c == "0" else "0"
    return int(new_str, 2)
def sum_32(a, b):
    return (a + b) % 2**32
def left_rotate_32(i, shift):
    if i < 0:
        raise ValueError("Input must be non-negative")
    if shift < 0:
        raise ValueError("Shift must be non-negative")
    return ((i << shift) ^ (i >> (32 - shift))) % 2**32
def md5_me(message):
    bit_string = preprocess(message)
    added_consts = [int(2**32 * abs(sin(i + 1))) for i in range(64)]
    # Starting states
    a0 = 0x67452301
    b0 = 0xEFCDAB89
    c0 = 0x98BADCFE
    d0 = 0x10325476
    shift_amounts = [
7,
12,
17,
22,
7,
12,
17,
22,
7,
12,
17,
22,
7,
12,
17,
22,
5,
9,
14,
20,
5,
9,
14,
20,
5,
9,
14,
20,
5,
9,
14,
20,
4,
11,
16,
23,
4,
11,
16,
23,
4,
11,
16,
23,
4,
11,
16,
23,
6,
10,
15,
21,
6,
10,
15,
21,
6,
10,
15,
21,
6,
10,
15,
21,
]
    # Process bit string in chunks, each with 16 32-char words
    for block_words in get_block_words(bit_string):
        a = a0
        b = b0
        c = c0
        d = d0
        # Hash current chunk
        for i in range(64):
            if i <= 15:
                # f = (b & c) | (not_32(b) & d) # Alternate definition for f
                f = d ^ (b & (c ^ d))
                g = i
            elif i <= 31:
                # f = (d & b) | (not_32(d) & c) # Alternate definition for f
                f = c ^ (d & (b ^ c))
                g = (5 * i + 1) % 16
            elif i <= 47:
                f = b ^ c ^ d
                g = (3 * i + 5) % 16
            else:
                f = c ^ (b | not_32(d))
                g = (7 * i) % 16
            f = (f + a + added_consts[i] + block_words[g]) % 2**32
            a = d
            d = c
            c = b
            b = sum_32(b, left_rotate_32(f, shift_amounts[i]))
        # Add hashed chunk to running total
        a0 = sum_32(a0, a)
        b0 = sum_32(b0, b)
        c0 = sum_32(c0, c)
        d0 = sum_32(d0, d)
    digest = reformat_hex(a0) + reformat_hex(b0) + reformat_hex(c0) + reformat_hex(d0)
    return digest
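# Usage sketch with the standard MD5 test vectors (these hold assuming the
# restored implementation above matches RFC 1321, which its logic follows):
# >>> md5_me(b"")
# b'd41d8cd98f00b204e9800998ecf8427e'
# >>> md5_me(b"The quick brown fox jumps over the lazy dog")
# b'9e107d9d372bb6826bd81d3542a419d6'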
if __name__ == "__main__":
import doctest
doctest.testmod() | 256 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__SCREAMING_SNAKE_CASE = {"""configuration_ibert""": ["""IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """IBertConfig""", """IBertOnnxConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_ibert"""] = [
"""IBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""IBertForMaskedLM""",
"""IBertForMultipleChoice""",
"""IBertForQuestionAnswering""",
"""IBertForSequenceClassification""",
"""IBertForTokenClassification""",
"""IBertModel""",
"""IBertPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_ibert import IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, IBertConfig, IBertOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ibert import (
IBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
IBertForMaskedLM,
IBertForMultipleChoice,
IBertForQuestionAnswering,
IBertForSequenceClassification,
IBertForTokenClassification,
IBertModel,
IBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__) | 256 | 1 |
'''simple docstring'''
from __future__ import annotations
ELECTRON_CHARGE = 1.6021E-19  # units = C


def carrier_concentration(conductivity: float, electron_conc: float, mobility: float, ) -> tuple[str, float]:
if (conductivity, electron_conc, mobility).count(0 ) != 1:
raise ValueError('''You cannot supply more or less than 2 values''' )
elif conductivity < 0:
raise ValueError('''Conductivity cannot be negative''' )
elif electron_conc < 0:
raise ValueError('''Electron concentration cannot be negative''' )
elif mobility < 0:
raise ValueError('''mobility cannot be negative''' )
elif conductivity == 0:
return (
"conductivity",
mobility * electron_conc * ELECTRON_CHARGE,
)
elif electron_conc == 0:
return (
"electron_conc",
conductivity / (mobility * ELECTRON_CHARGE),
)
else:
return (
"mobility",
conductivity / (electron_conc * ELECTRON_CHARGE),
)
if __name__ == "__main__":
import doctest
doctest.testmod()
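# Worked example (my own arithmetic, not from the original file): with exactly
# one unknown set to 0, the function solves for it, e.g.
# carrier_concentration(conductivity=0, electron_conc=1e20, mobility=0.01)
# == ('conductivity', 0.01 * 1e20 * 1.6021e-19) == ('conductivity', 0.16021)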
| 198 |
"""simple docstring"""
import pytest
import datasets
# Import fixture modules as plugins
SCREAMING_SNAKE_CASE : List[Any] = ["""tests.fixtures.files""", """tests.fixtures.hub""", """tests.fixtures.fsspec"""]
def pytest_collection_modifyitems(config, items):
    """Mark every test that is not tagged integration/unit as a unit test."""
    for item in items:
        if any(marker in item.keywords for marker in ['''integration''', '''unit''']):
            continue
        item.add_marker(pytest.mark.unit)
def pytest_configure(config):
    config.addinivalue_line('''markers''', '''torchaudio_latest: mark test to run with torchaudio>=0.12''')
@pytest.fixture(autouse=True)
def set_test_cache_config(tmp_path_factory, monkeypatch):
    # Redirect all datasets cache directories to a temporary location for tests.
    test_hf_cache_home = tmp_path_factory.getbasetemp() / '''cache'''
    test_hf_datasets_cache = test_hf_cache_home / '''datasets'''
    test_hf_metrics_cache = test_hf_cache_home / '''metrics'''
    test_hf_modules_cache = test_hf_cache_home / '''modules'''
    monkeypatch.setattr('''datasets.config.HF_DATASETS_CACHE''', str(test_hf_datasets_cache))
    monkeypatch.setattr('''datasets.config.HF_METRICS_CACHE''', str(test_hf_metrics_cache))
    monkeypatch.setattr('''datasets.config.HF_MODULES_CACHE''', str(test_hf_modules_cache))
    test_downloaded_datasets_path = test_hf_datasets_cache / '''downloads'''
    monkeypatch.setattr('''datasets.config.DOWNLOADED_DATASETS_PATH''', str(test_downloaded_datasets_path))
    test_extracted_datasets_path = test_hf_datasets_cache / '''downloads''' / '''extracted'''
    monkeypatch.setattr('''datasets.config.EXTRACTED_DATASETS_PATH''', str(test_extracted_datasets_path))
@pytest.fixture(autouse=True, scope='''session''')
def disable_tqdm_output():
    datasets.disable_progress_bar()
@pytest.fixture(autouse=True)
def set_update_download_counts_to_false(monkeypatch):
    monkeypatch.setattr('''datasets.config.HF_UPDATE_DOWNLOAD_COUNTS''', False)
@pytest.fixture
def set_sqlalchemy_silence_uber_warning(monkeypatch):
    monkeypatch.setattr('''sqlalchemy.util.deprecations.SILENCE_UBER_WARNING''', True)
| 102 | 0 |
'''simple docstring'''
import argparse
import logging
import pickle
from collections import Counter
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''', datefmt='''%m/%d/%Y %H:%M:%S''', level=logging.INFO
)
logger = logging.getLogger(__name__)
if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description='''Token Counts for smoothing the masking probabilities in MLM (cf XLM/word2vec)'''
    )
    parser.add_argument(
        '''--data_file''', type=str, default='''data/dump.bert-base-uncased.pickle''', help='''The binarized dataset.'''
    )
    parser.add_argument(
        '''--token_counts_dump''', type=str, default='''data/token_counts.bert-base-uncased.pickle''', help='''The dump file.'''
    )
    parser.add_argument('''--vocab_size''', default=30522, type=int)
    args = parser.parse_args()

    logger.info(F'Loading data from {args.data_file}')
    with open(args.data_file, '''rb''') as fp:
        data = pickle.load(fp)
    logger.info('''Counting occurrences for MLM.''')
    counter = Counter()
    for tk_ids in data:
        counter.update(tk_ids)
    counts = [0] * args.vocab_size
    for k, v in counter.items():
        counts[k] = v
    logger.info(F'Dump to {args.token_counts_dump}')
    with open(args.token_counts_dump, '''wb''') as handle:
        pickle.dump(counts, handle, protocol=pickle.HIGHEST_PROTOCOL)
| 16 |
'''simple docstring'''
class Node:
    def __init__( self , data , previous=None , next_node=None ):
        self.data = data
        self.previous = previous
        self.next = next_node

    def __str__( self ) -> str:
        return f'{self.data}'

    def get_data( self ):
        return self.data

    def get_next( self ):
        return self.next

    def get_previous( self ):
        return self.previous
class LinkedListIterator:
    def __init__( self , head ):
        self.current = head

    def __iter__( self ):
        return self

    def __next__( self ):
        if not self.current:
            raise StopIteration
        else:
            value = self.current.get_data()
            self.current = self.current.get_next()
            return value
class LinkedList:
    def __init__( self ):
        self.head = None  # First node in list
        self.tail = None  # Last node in list

    def __str__( self ) -> str:
        current = self.head
        nodes = []
        while current is not None:
            nodes.append(current.get_data())
            current = current.get_next()
        return " ".join(str(node) for node in nodes)
    def __contains__( self , value ):
        current = self.head
        while current:
            if current.get_data() == value:
                return True
            current = current.get_next()
        return False

    def __iter__( self ):
        return LinkedListIterator(self.head)
    def get_head_data( self ):
        if self.head:
            return self.head.get_data()
        return None

    def get_tail_data( self ):
        if self.tail:
            return self.tail.get_data()
        return None
    def set_head( self , node ) -> None:
        if self.head is None:
            self.head = node
            self.tail = node
        else:
            self.insert_before_node(self.head, node)

    def set_tail( self , node ) -> None:
        if self.head is None:
            self.set_head(node)
        else:
            self.insert_after_node(self.tail, node)
    def insert( self , value ) -> None:
        node = Node(value)
        if self.head is None:
            self.set_head(node)
        else:
            self.set_tail(node)
    def insert_before_node( self , node , node_to_insert ) -> None:
        node_to_insert.next = node
        node_to_insert.previous = node.previous
        if node.get_previous() is None:
            self.head = node_to_insert
        else:
            node.previous.next = node_to_insert
        node.previous = node_to_insert

    def insert_after_node( self , node , node_to_insert ) -> None:
        node_to_insert.previous = node
        node_to_insert.next = node.next
        if node.get_next() is None:
            self.tail = node_to_insert
        else:
            node.next.previous = node_to_insert
        node.next = node_to_insert
    def insert_at_position( self , position , value ) -> None:
        current_position = 1
        new_node = Node(value)
        node = self.head
        while node:
            if current_position == position:
                self.insert_before_node(node, new_node)
                return
            current_position += 1
            node = node.next
        self.insert_after_node(self.tail, new_node)
    def get_node( self , item ) -> Node:
        node = self.head
        while node:
            if node.get_data() == item:
                return node
            node = node.get_next()
        raise Exception("""Node not found""")

    def delete_value( self , value ):
        if (node := self.get_node(value)) is not None:
            if node == self.head:
                self.head = self.head.get_next()
            if node == self.tail:
                self.tail = self.tail.get_previous()
            self.remove_node_pointers(node)
    @staticmethod
    def remove_node_pointers(node ) -> None:
        if node.get_next():
            node.next.previous = node.previous
        if node.get_previous():
            node.previous.next = node.next
        node.next = None
        node.previous = None

    def is_empty( self ):
        return self.head is None
if __name__ == "__main__":
    import doctest

    doctest.testmod()
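# Usage sketch (my addition, mirroring the API restored above):
# >>> linked_list = LinkedList()
# >>> linked_list.insert(1)
# >>> linked_list.insert(2)
# >>> linked_list.insert(3)
# >>> str(linked_list)
# '1 2 3'
# >>> 2 in linked_list
# True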
| 16 | 1 |
from __future__ import annotations
def resistor_parallel(resistors: list[float]) -> float:
    """Equivalent resistance of resistors in parallel: 1 / (1/R1 + 1/R2 + ...)."""
    first_sum = 0.00
    index = 0
    for resistor in resistors:
        if resistor <= 0:
            msg = F'''Resistor at index {index} has a negative or zero value!'''
            raise ValueError(msg)
        first_sum += 1 / float(resistor)
        index += 1
    return 1 / first_sum


def resistor_series(resistors: list[float]) -> float:
    """Equivalent resistance of resistors in series: R1 + R2 + ..."""
    sum_r = 0.00
    index = 0
    for resistor in resistors:
        sum_r += resistor
        if resistor < 0:
            msg = F'''Resistor at index {index} has a negative value!'''
            raise ValueError(msg)
        index += 1
    return sum_r
if __name__ == "__main__":
import doctest
doctest.testmod()
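    # Worked example (my own arithmetic, not from the original file):
    # resistor_parallel([2, 4]) == 1 / (1 / 2 + 1 / 4) == 4 / 3 ~= 1.3333
    # resistor_series([2, 4]) == 6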
| 15 |
import torch
from diffusers import DDPMScheduler
from .test_schedulers import SchedulerCommonTest
class DDPMSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DDPMScheduler,)

    def get_scheduler_config( self , **kwargs ):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "variance_type": "fixed_small",
            "clip_sample": True,
        }
        config.update(**kwargs)
        return config
    def test_timesteps( self ):
        for timesteps in [1, 5, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas( self ):
        for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1], [0.002, 0.02, 0.2, 2]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules( self ):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_variance_type( self ):
        for variance in ["fixed_small", "fixed_large", "other"]:
            self.check_over_configs(variance_type=variance)

    def test_clip_sample( self ):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)

    def test_thresholding( self ):
        self.check_over_configs(thresholding=False)
        for threshold in [0.5, 1.0, 2.0]:
            for prediction_type in ["epsilon", "sample", "v_prediction"]:
                self.check_over_configs(
                    thresholding=True, prediction_type=prediction_type, sample_max_value=threshold, )

    def test_prediction_type( self ):
        for prediction_type in ["epsilon", "sample", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_time_indices( self ):
        for t in [0, 500, 999]:
            self.check_over_forward(time_step=t)
    def test_variance( self ):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        assert torch.sum(torch.abs(scheduler._get_variance(0) - 0.0)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(487) - 0.00979)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(999) - 0.02)) < 1e-5
    def test_full_loop_no_noise( self ):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        num_trained_timesteps = len(scheduler)
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)
        for t in reversed(range(num_trained_timesteps)):
            # 1. predict noise residual
            residual = model(sample, t)
            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample
            # if t > 0:
            #     noise = self.dummy_sample_deter
            #     variance = scheduler.get_variance(t) ** (0.5) * noise
            #
            #     sample = pred_prev_sample + variance
            sample = pred_prev_sample
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 258.9606) < 1e-2
        assert abs(result_mean.item() - 0.3372) < 1e-3
    def test_full_loop_with_v_prediction( self ):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)
        num_trained_timesteps = len(scheduler)
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)
        for t in reversed(range(num_trained_timesteps)):
            # 1. predict noise residual
            residual = model(sample, t)
            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample
            # if t > 0:
            #     noise = self.dummy_sample_deter
            #     variance = scheduler.get_variance(t) ** (0.5) * noise
            #
            #     sample = pred_prev_sample + variance
            sample = pred_prev_sample
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 202.0296) < 1e-2
        assert abs(result_mean.item() - 0.2631) < 1e-3
    def test_custom_timesteps( self ):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        timesteps = [100, 87, 50, 1, 0]
        scheduler.set_timesteps(timesteps=timesteps)
        scheduler_timesteps = scheduler.timesteps
        for i, timestep in enumerate(scheduler_timesteps):
            if i == len(scheduler_timesteps) - 1:
                expected_prev_t = -1
            else:
                expected_prev_t = timesteps[i + 1]
            prev_t = scheduler.previous_timestep(timestep)
            prev_t = prev_t.item()
            self.assertEqual(prev_t, expected_prev_t)
    def test_custom_timesteps_increasing_order( self ):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        timesteps = [100, 87, 50, 51, 0]
        with self.assertRaises(ValueError, msg="`custom_timesteps` must be in descending order."):
            scheduler.set_timesteps(timesteps=timesteps)

    def test_custom_timesteps_passing_both_num_inference_steps_and_timesteps( self ):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        timesteps = [100, 87, 50, 1, 0]
        num_inference_steps = len(timesteps)
        with self.assertRaises(ValueError, msg="Can only pass one of `num_inference_steps` or `custom_timesteps`."):
            scheduler.set_timesteps(num_inference_steps=num_inference_steps, timesteps=timesteps)

    def test_custom_timesteps_too_large( self ):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        timesteps = [scheduler.config.num_train_timesteps]
        with self.assertRaises(
            ValueError, msg="`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}", ):
            scheduler.set_timesteps(timesteps=timesteps)
| 300 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {'configuration_yolos': ['YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP', 'YolosConfig', 'YolosOnnxConfig']}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['feature_extraction_yolos'] = ['YolosFeatureExtractor']
    _import_structure['image_processing_yolos'] = ['YolosImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_yolos'] = [
'YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST',
'YolosForObjectDetection',
'YolosModel',
'YolosPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_yolos import YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP, YolosConfig, YolosOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_yolos import YolosFeatureExtractor
from .image_processing_yolos import YolosImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_yolos import (
YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST,
YolosForObjectDetection,
YolosModel,
YolosPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) | 151 |
import argparse
import json
import os
import time
import zipfile
from get_ci_error_statistics import download_artifact, get_artifacts_links
from transformers import logging
logger = logging.get_logger(__name__)
def extract_warnings_from_single_artifact(artifact_path, targets):
    """Extract warnings from a downloaded artifact (in .zip format)."""
    selected_warnings = set()
    buffer = []

    def parse_line(fp):
        for line in fp:
            if isinstance(line, bytes):
                line = line.decode("UTF-8")
            if "warnings summary (final)" in line:
                continue
            # This means we are outside the body of a warning
            elif not line.startswith(" "):
                # process a single warning and move it to `selected_warnings`.
                if len(buffer) > 0:
                    warning = "\n".join(buffer)
                    # Only keep the warnings specified in `targets`
                    if any(F': {x}: ' in warning for x in targets):
                        selected_warnings.add(warning)
                    buffer.clear()
                continue
            else:
                line = line.strip()
                buffer.append(line)

    if from_gh:
        for filename in os.listdir(artifact_path):
            file_path = os.path.join(artifact_path, filename)
            if not os.path.isdir(file_path):
                # read the file
                if filename != "warnings.txt":
                    continue
                with open(file_path) as fp:
                    parse_line(fp)
    else:
        try:
            with zipfile.ZipFile(artifact_path) as z:
                for filename in z.namelist():
                    if not os.path.isdir(filename):
                        # read the file
                        if filename != "warnings.txt":
                            continue
                        with z.open(filename) as fp:
                            parse_line(fp)
        except Exception:
            logger.warning(
                F'{artifact_path} is either an invalid zip file or something else wrong. This file is skipped.')
    return selected_warnings
def extract_warnings(artifact_dir, targets):
    """Extract warnings from all artifact files."""
    selected_warnings = set()
    paths = [os.path.join(artifact_dir, p) for p in os.listdir(artifact_dir) if (p.endswith(".zip") or from_gh)]
    for p in paths:
        selected_warnings.update(extract_warnings_from_single_artifact(p, targets))
    return selected_warnings
if __name__ == "__main__":
    def list_str(values):
        return values.split(",")
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument('--workflow_run_id', type=str, required=True, help='A GitHub Actions workflow run id.')
parser.add_argument(
'--output_dir',
type=str,
required=True,
help='Where to store the downloaded artifacts and other result files.',
)
parser.add_argument('--token', default=None, type=str, help='A token that has actions:read permission.')
# optional parameters
parser.add_argument(
'--targets',
default='DeprecationWarning,UserWarning,FutureWarning',
type=list_str,
help='Comma-separated list of target warning(s) which we want to extract.',
)
parser.add_argument(
'--from_gh',
action='store_true',
help='If running from a GitHub action workflow and collecting warnings from its artifacts.',
)
    args = parser.parse_args()
    from_gh = args.from_gh
if from_gh:
# The artifacts have to be downloaded using `actions/download-artifact@v3`
pass
else:
os.makedirs(args.output_dir, exist_ok=True)
# get download links
        artifacts = get_artifacts_links(args.workflow_run_id, token=args.token)
with open(os.path.join(args.output_dir, 'artifacts.json'), 'w', encoding='UTF-8') as fp:
json.dump(artifacts, fp, ensure_ascii=False, indent=4)
# download artifacts
for idx, (name, url) in enumerate(artifacts.items()):
print(name)
print(url)
print('=' * 80)
download_artifact(name, url, args.output_dir, args.token)
# Be gentle to GitHub
time.sleep(1)
# extract warnings from artifacts
    selected_warnings = extract_warnings(args.output_dir, args.targets)
    selected_warnings = sorted(selected_warnings)
with open(os.path.join(args.output_dir, 'selected_warnings.json'), 'w', encoding='UTF-8') as fp:
json.dump(selected_warnings, fp, ensure_ascii=False, indent=4) | 151 | 1 |
from maths.prime_check import is_prime
def twin_prime(number: int) -> int:
    """Return number + 2 if number and number + 2 are both prime, else -1."""
    if not isinstance(number, int):
        msg = F"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if is_prime(number) and is_prime(number + 2):
        return number + 2
    else:
        return -1
if __name__ == "__main__":
import doctest
doctest.testmod()
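    # Worked examples (my own, not from the original file):
    # twin_prime(3) == 5   (3 and 5 are both prime)
    # twin_prime(7) == -1  (9 = 7 + 2 is not prime)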
| 149 |
import torch
from diffusers import DDIMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class DDIMParallelSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DDIMParallelScheduler,)
    forward_default_kwargs = (("eta", 0.0), ("num_inference_steps", 50))

    def get_scheduler_config( self , **kwargs ):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "clip_sample": True,
        }
        config.update(**kwargs)
        return config
    def full_loop( self , **config ):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config)
        scheduler = scheduler_class(**scheduler_config)
        num_inference_steps, eta = 10, 0.0
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)
        for t in scheduler.timesteps:
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample, eta).prev_sample
        return sample
    def test_timesteps( self ):
        for timesteps in [100, 500, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_steps_offset( self ):
        for steps_offset in [0, 1]:
            self.check_over_configs(steps_offset=steps_offset)
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(steps_offset=1)
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(5)
        assert torch.equal(scheduler.timesteps, torch.LongTensor([801, 601, 401, 201, 1]))
    def test_betas( self ):
        for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1], [0.002, 0.02, 0.2, 2]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules( self ):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type( self ):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_clip_sample( self ):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)

    def test_timestep_spacing( self ):
        for timestep_spacing in ["trailing", "leading"]:
            self.check_over_configs(timestep_spacing=timestep_spacing)

    def test_rescale_betas_zero_snr( self ):
        for rescale_betas_zero_snr in [True, False]:
            self.check_over_configs(rescale_betas_zero_snr=rescale_betas_zero_snr)

    def test_thresholding( self ):
        self.check_over_configs(thresholding=False)
        for threshold in [0.5, 1.0, 2.0]:
            for prediction_type in ["epsilon", "v_prediction"]:
                self.check_over_configs(
                    thresholding=True, prediction_type=prediction_type, sample_max_value=threshold, )

    def test_time_indices( self ):
        for t in [1, 10, 49]:
            self.check_over_forward(time_step=t)

    def test_inference_steps( self ):
        for t, num_inference_steps in zip([1, 10, 50], [10, 50, 500]):
            self.check_over_forward(time_step=t, num_inference_steps=num_inference_steps)

    def test_eta( self ):
        for t, eta in zip([1, 10, 49], [0.0, 0.5, 1.0]):
            self.check_over_forward(time_step=t, eta=eta)
def UpperCAmelCase_ ( self: Optional[int] ):
'''simple docstring'''
UpperCamelCase__: Any = self.scheduler_classes[0]
UpperCamelCase__: Optional[int] = self.get_scheduler_config()
UpperCamelCase__: str = scheduler_class(**__lowerCamelCase )
assert torch.sum(torch.abs(scheduler._get_variance(0 , 0 ) - 0.0 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(420 , 400 ) - 0.14_771 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(980 , 960 ) - 0.32_460 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(0 , 0 ) - 0.0 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(487 , 486 ) - 0.00_979 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(999 , 998 ) - 0.02 ) ) < 1e-5
def UpperCAmelCase_ ( self: Optional[Any] ):
'''simple docstring'''
UpperCamelCase__: List[Any] = self.scheduler_classes[0]
UpperCamelCase__: Union[str, Any] = self.get_scheduler_config()
UpperCamelCase__: Any = scheduler_class(**__lowerCamelCase )
UpperCamelCase__ , UpperCamelCase__: Union[str, Any] = 10, 0.0
scheduler.set_timesteps(__lowerCamelCase )
UpperCamelCase__: Tuple = self.dummy_model()
UpperCamelCase__: Union[str, Any] = self.dummy_sample_deter
UpperCamelCase__: Dict = self.dummy_sample_deter + 0.1
UpperCamelCase__: Dict = self.dummy_sample_deter - 0.1
UpperCamelCase__: int = samplea.shape[0]
UpperCamelCase__: List[str] = torch.stack([samplea, samplea, samplea] , dim=0 )
UpperCamelCase__: Union[str, Any] = torch.arange(__lowerCamelCase )[0:3, None].repeat(1 , __lowerCamelCase )
UpperCamelCase__: str = model(samples.flatten(0 , 1 ) , timesteps.flatten(0 , 1 ) )
UpperCamelCase__: Optional[int] = scheduler.batch_step_no_noise(__lowerCamelCase , timesteps.flatten(0 , 1 ) , samples.flatten(0 , 1 ) , __lowerCamelCase )
UpperCamelCase__: Dict = torch.sum(torch.abs(__lowerCamelCase ) )
UpperCamelCase__: Tuple = torch.mean(torch.abs(__lowerCamelCase ) )
assert abs(result_sum.item() - 1_147.7_904 ) < 1e-2
assert abs(result_mean.item() - 0.4_982 ) < 1e-3
def UpperCAmelCase_ ( self: Optional[Any] ):
'''simple docstring'''
UpperCamelCase__: str = self.full_loop()
UpperCamelCase__: List[str] = torch.sum(torch.abs(__lowerCamelCase ) )
UpperCamelCase__: Any = torch.mean(torch.abs(__lowerCamelCase ) )
assert abs(result_sum.item() - 172.0_067 ) < 1e-2
assert abs(result_mean.item() - 0.223_967 ) < 1e-3
def UpperCAmelCase_ ( self: str ):
'''simple docstring'''
UpperCamelCase__: Optional[int] = self.full_loop(prediction_type="v_prediction" )
UpperCamelCase__: List[Any] = torch.sum(torch.abs(__lowerCamelCase ) )
UpperCamelCase__: Optional[Any] = torch.mean(torch.abs(__lowerCamelCase ) )
assert abs(result_sum.item() - 52.5_302 ) < 1e-2
assert abs(result_mean.item() - 0.0_684 ) < 1e-3
def UpperCAmelCase_ ( self: List[str] ):
'''simple docstring'''
UpperCamelCase__: Any = self.full_loop(set_alpha_to_one=__lowerCamelCase , beta_start=0.01 )
UpperCamelCase__: Optional[Any] = torch.sum(torch.abs(__lowerCamelCase ) )
UpperCamelCase__: Optional[Any] = torch.mean(torch.abs(__lowerCamelCase ) )
assert abs(result_sum.item() - 149.8_295 ) < 1e-2
assert abs(result_mean.item() - 0.1_951 ) < 1e-3
def UpperCAmelCase_ ( self: List[Any] ):
'''simple docstring'''
UpperCamelCase__: Tuple = self.full_loop(set_alpha_to_one=__lowerCamelCase , beta_start=0.01 )
UpperCamelCase__: Optional[int] = torch.sum(torch.abs(__lowerCamelCase ) )
UpperCamelCase__: List[Any] = torch.mean(torch.abs(__lowerCamelCase ) )
assert abs(result_sum.item() - 149.0_784 ) < 1e-2
assert abs(result_mean.item() - 0.1_941 ) < 1e-3
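# A compact sketch of the DDIM update rule these tests exercise (epsilon prediction,
# Song et al. 2020, eq. 12). The linear `alphas_cumprod` below is a toy schedule for
# illustration, not the scheduler's real buffers:
import torch
alphas_cumprod = torch.linspace(0.999, 0.01, 1000)  # toy \bar{alpha}_t, decreasing in t
def ddim_step(sample, eps, t, t_prev, eta=0.0):
    a_t = alphas_cumprod[t]
    a_prev = alphas_cumprod[t_prev] if t_prev >= 0 else torch.tensor(1.0)
    pred_x0 = (sample - (1 - a_t).sqrt() * eps) / a_t.sqrt()  # reconstruct x_0
    sigma = eta * ((1 - a_prev) / (1 - a_t)).sqrt() * (1 - a_t / a_prev).sqrt()
    direction = (1 - a_prev - sigma**2).sqrt() * eps  # "direction pointing to x_t"
    return a_prev.sqrt() * pred_x0 + direction + sigma * torch.randn_like(sample)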
| 149 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowerCamelCase__ = {
"configuration_mctct": ["MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP", "MCTCTConfig"],
"feature_extraction_mctct": ["MCTCTFeatureExtractor"],
"processing_mctct": ["MCTCTProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = [
"MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST",
"MCTCTForCTC",
"MCTCTModel",
"MCTCTPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_mctct import MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP, MCTCTConfig
from .feature_extraction_mctct import MCTCTFeatureExtractor
from .processing_mctct import MCTCTProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mctct import MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST, MCTCTForCTC, MCTCTModel, MCTCTPreTrainedModel
else:
import sys
lowerCamelCase__ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
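# The `_LazyModule` assignment above defers heavy submodule imports until first
# attribute access. The same idea with PEP 562's module-level __getattr__, shown
# with stdlib targets so the sketch runs anywhere (transformers' real class also
# handles dir(), pickling and module specs):
import importlib
_LAZY_ATTRS = {"sqrt": "math", "dataclass": "dataclasses"}
def __getattr__(name):
    if name in _LAZY_ATTRS:
        return getattr(importlib.import_module(_LAZY_ATTRS[name]), name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")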
| 310 |
"""simple docstring"""
import inspect
import unittest
from transformers import ViTConfig
from transformers.testing_utils import (
require_accelerate,
require_torch,
require_torch_gpu,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTForImageClassification, ViTForMaskedImageModeling, ViTModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class __SCREAMING_SNAKE_CASE :
'''simple docstring'''
def __init__( self : Dict , __a : List[Any] , __a : str=13 , __a : Any=30 , __a : List[str]=2 , __a : Dict=3 , __a : Union[str, Any]=True , __a : Dict=True , __a : List[str]=32 , __a : Tuple=5 , __a : str=4 , __a : List[str]=37 , __a : Tuple="gelu" , __a : str=0.1 , __a : Optional[int]=0.1 , __a : Union[str, Any]=10 , __a : Optional[Any]=0.02 , __a : List[Any]=None , __a : str=2 , ) -> int:
_UpperCamelCase : Tuple = parent
_UpperCamelCase : str = batch_size
_UpperCamelCase : Tuple = image_size
_UpperCamelCase : List[str] = patch_size
_UpperCamelCase : Dict = num_channels
_UpperCamelCase : List[str] = is_training
_UpperCamelCase : Any = use_labels
_UpperCamelCase : int = hidden_size
_UpperCamelCase : List[Any] = num_hidden_layers
_UpperCamelCase : Union[str, Any] = num_attention_heads
_UpperCamelCase : Optional[int] = intermediate_size
_UpperCamelCase : Any = hidden_act
_UpperCamelCase : Dict = hidden_dropout_prob
_UpperCamelCase : Dict = attention_probs_dropout_prob
_UpperCamelCase : Optional[int] = type_sequence_label_size
_UpperCamelCase : int = initializer_range
_UpperCamelCase : Optional[int] = scope
_UpperCamelCase : Any = encoder_stride
# in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
_UpperCamelCase : Optional[int] = (image_size // patch_size) ** 2
_UpperCamelCase : Optional[int] = num_patches + 1
def __SCREAMING_SNAKE_CASE ( self : int ) -> Optional[Any]:
_UpperCamelCase : Tuple = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_UpperCamelCase : Union[str, Any] = None
if self.use_labels:
_UpperCamelCase : Optional[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_UpperCamelCase : Any = self.get_config()
return config, pixel_values, labels
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> List[str]:
return ViTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__a , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
def __SCREAMING_SNAKE_CASE ( self : Tuple , __a : Optional[int] , __a : Union[str, Any] , __a : Tuple ) -> Union[str, Any]:
_UpperCamelCase : Optional[Any] = ViTModel(config=__a )
model.to(__a )
model.eval()
_UpperCamelCase : Tuple = model(__a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __SCREAMING_SNAKE_CASE ( self : Dict , __a : str , __a : Optional[int] , __a : int ) -> Optional[int]:
_UpperCamelCase : Tuple = ViTForMaskedImageModeling(config=__a )
model.to(__a )
model.eval()
_UpperCamelCase : Any = model(__a )
self.parent.assertEqual(
result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
_UpperCamelCase : Union[str, Any] = 1
_UpperCamelCase : Union[str, Any] = ViTForMaskedImageModeling(__a )
model.to(__a )
model.eval()
_UpperCamelCase : List[Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
_UpperCamelCase : Dict = model(__a )
self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def __SCREAMING_SNAKE_CASE ( self : List[Any] , __a : Tuple , __a : int , __a : Dict ) -> int:
_UpperCamelCase : Any = self.type_sequence_label_size
_UpperCamelCase : Optional[Any] = ViTForImageClassification(__a )
model.to(__a )
model.eval()
_UpperCamelCase : int = model(__a , labels=__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
_UpperCamelCase : Tuple = 1
_UpperCamelCase : Union[str, Any] = ViTForImageClassification(__a )
model.to(__a )
model.eval()
_UpperCamelCase : Optional[int] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
_UpperCamelCase : List[Any] = model(__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def __SCREAMING_SNAKE_CASE ( self : str ) -> Tuple:
_UpperCamelCase : Dict = self.prepare_config_and_inputs()
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase = config_and_inputs
_UpperCamelCase : Union[str, Any] = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :Optional[Any] = (
(
ViTModel,
ViTForImageClassification,
ViTForMaskedImageModeling,
)
if is_torch_available()
else ()
)
SCREAMING_SNAKE_CASE__ :Any = (
{"feature-extraction": ViTModel, "image-classification": ViTForImageClassification}
if is_torch_available()
else {}
)
SCREAMING_SNAKE_CASE__ :str = True
SCREAMING_SNAKE_CASE__ :List[Any] = False
SCREAMING_SNAKE_CASE__ :int = False
SCREAMING_SNAKE_CASE__ :int = False
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> List[Any]:
_UpperCamelCase : Dict = ViTModelTester(self )
_UpperCamelCase : Any = ConfigTester(self , config_class=__a , has_text_modality=__a , hidden_size=37 )
def __SCREAMING_SNAKE_CASE ( self : str ) -> Optional[Any]:
self.config_tester.run_common_tests()
@unittest.skip(reason="ViT does not use inputs_embeds" )
def __SCREAMING_SNAKE_CASE ( self : int ) -> List[str]:
pass
def __SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Union[str, Any]:
_UpperCamelCase, _UpperCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCamelCase : List[Any] = model_class(__a )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
_UpperCamelCase : Any = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__a , nn.Linear ) )
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Optional[Any]:
_UpperCamelCase, _UpperCamelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCamelCase : Any = model_class(__a )
_UpperCamelCase : Any = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_UpperCamelCase : List[str] = [*signature.parameters.keys()]
_UpperCamelCase : Optional[Any] = ["pixel_values"]
self.assertListEqual(arg_names[:1] , __a )
def __SCREAMING_SNAKE_CASE ( self : Any ) -> int:
_UpperCamelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__a )
def __SCREAMING_SNAKE_CASE ( self : str ) -> List[str]:
_UpperCamelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*__a )
def __SCREAMING_SNAKE_CASE ( self : Dict ) -> Union[str, Any]:
_UpperCamelCase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__a )
@slow
def __SCREAMING_SNAKE_CASE ( self : str ) -> List[str]:
for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_UpperCamelCase : List[str] = ViTModel.from_pretrained(__a )
self.assertIsNotNone(__a )
def lowercase__ ( ) -> str:
"""simple docstring"""
_UpperCamelCase : Tuple = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def __SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Optional[int]:
return ViTImageProcessor.from_pretrained("google/vit-base-patch16-224" ) if is_vision_available() else None
@slow
def __SCREAMING_SNAKE_CASE ( self : Tuple ) -> Dict:
_UpperCamelCase : List[Any] = ViTForImageClassification.from_pretrained("google/vit-base-patch16-224" ).to(__a )
_UpperCamelCase : str = self.default_image_processor
_UpperCamelCase : List[Any] = prepare_img()
_UpperCamelCase : Any = image_processor(images=__a , return_tensors="pt" ).to(__a )
# forward pass
with torch.no_grad():
_UpperCamelCase : Dict = model(**__a )
# verify the logits
_UpperCamelCase : Tuple = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , __a )
_UpperCamelCase : str = torch.tensor([-0.27_44, 0.82_15, -0.08_36] ).to(__a )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __a , atol=1e-4 ) )
@slow
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> str:
# ViT models have an `interpolate_pos_encoding` argument in their forward method,
# allowing to interpolate the pre-trained position embeddings in order to use
# the model on higher resolutions. The DINO model by Facebook AI leverages this
# to visualize self-attention on higher resolution images.
_UpperCamelCase : List[str] = ViTModel.from_pretrained("facebook/dino-vits8" ).to(__a )
_UpperCamelCase : Union[str, Any] = ViTImageProcessor.from_pretrained("facebook/dino-vits8" , size=480 )
_UpperCamelCase : List[str] = prepare_img()
_UpperCamelCase : int = image_processor(images=__a , return_tensors="pt" )
_UpperCamelCase : Any = inputs.pixel_values.to(__a )
# forward pass
with torch.no_grad():
_UpperCamelCase : str = model(__a , interpolate_pos_encoding=__a )
# verify the logits
_UpperCamelCase : int = torch.Size((1, 3601, 384) )
self.assertEqual(outputs.last_hidden_state.shape , __a )
_UpperCamelCase : int = torch.tensor(
[[4.23_40, 4.39_06, -6.66_92], [4.54_63, 1.89_28, -6.72_57], [4.44_29, 0.84_96, -5.85_85]] ).to(__a )
self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3] , __a , atol=1e-4 ) )
@slow
@require_accelerate
@require_torch_gpu
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Any:
_UpperCamelCase : Tuple = ViTModel.from_pretrained("facebook/dino-vits8" , torch_dtype=torch.floataa , device_map="auto" )
_UpperCamelCase : int = self.default_image_processor
_UpperCamelCase : Dict = prepare_img()
_UpperCamelCase : Union[str, Any] = image_processor(images=__a , return_tensors="pt" )
_UpperCamelCase : Any = inputs.pixel_values.to(__a )
# forward pass to make sure inference works in fp16
with torch.no_grad():
_UpperCamelCase : int = model(__a )
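# The tester above sets seq_length = (image_size // patch_size) ** 2 + 1; a minimal
# patch-embedding sketch showing where that count comes from (toy sizes mirroring the
# tester defaults, not the real model dims):
import torch
from torch import nn
image_size, patch_size, hidden_size = 30, 2, 32
proj = nn.Conv2d(3, hidden_size, kernel_size=patch_size, stride=patch_size)
pixel_values = torch.randn(1, 3, image_size, image_size)
patch_tokens = proj(pixel_values).flatten(2).transpose(1, 2)  # (1, num_patches, hidden)
assert patch_tokens.shape[1] == (image_size // patch_size) ** 2  # the +1 is the [CLS] token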
| 310 | 1 |
"""simple docstring"""
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DetaImageProcessor
class UpperCAmelCase_ ( unittest.TestCase):
def __init__( self : Tuple , __UpperCamelCase : List[Any] , __UpperCamelCase : str=7 , __UpperCamelCase : Optional[int]=3 , __UpperCamelCase : List[Any]=30 , __UpperCamelCase : List[Any]=400 , __UpperCamelCase : List[str]=True , __UpperCamelCase : List[Any]=None , __UpperCamelCase : List[Any]=True , __UpperCamelCase : Optional[Any]=[0.5, 0.5, 0.5] , __UpperCamelCase : str=[0.5, 0.5, 0.5] , __UpperCamelCase : Optional[int]=True , __UpperCamelCase : List[Any]=1 / 255 , __UpperCamelCase : Tuple=True , ) -> Optional[Any]:
# by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
_UpperCamelCase = size if size is not None else {'''shortest_edge''': 18, '''longest_edge''': 1333}
_UpperCamelCase = parent
_UpperCamelCase = batch_size
_UpperCamelCase = num_channels
_UpperCamelCase = min_resolution
_UpperCamelCase = max_resolution
_UpperCamelCase = do_resize
_UpperCamelCase = size
_UpperCamelCase = do_normalize
_UpperCamelCase = image_mean
_UpperCamelCase = image_std
_UpperCamelCase = do_rescale
_UpperCamelCase = rescale_factor
_UpperCamelCase = do_pad
def _UpperCamelCase ( self : Tuple ) -> int:
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
def _UpperCamelCase ( self : int , __UpperCamelCase : Optional[Any] , __UpperCamelCase : Dict=False ) -> int:
if not batched:
_UpperCamelCase = image_inputs[0]
if isinstance(__UpperCamelCase , Image.Image ):
_UpperCamelCase , _UpperCamelCase = image.size
else:
_UpperCamelCase , _UpperCamelCase = image.shape[1], image.shape[2]
if w < h:
_UpperCamelCase = int(self.size['''shortest_edge'''] * h / w )
_UpperCamelCase = self.size['''shortest_edge''']
elif w > h:
_UpperCamelCase = self.size['''shortest_edge''']
_UpperCamelCase = int(self.size['''shortest_edge'''] * w / h )
else:
_UpperCamelCase = self.size['''shortest_edge''']
_UpperCamelCase = self.size['''shortest_edge''']
else:
_UpperCamelCase = []
for image in image_inputs:
_UpperCamelCase , _UpperCamelCase = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
_UpperCamelCase = max(__UpperCamelCase , key=lambda __UpperCamelCase : item[0] )[0]
_UpperCamelCase = max(__UpperCamelCase , key=lambda __UpperCamelCase : item[1] )[1]
return expected_height, expected_width
@require_torch
@require_vision
class UpperCAmelCase_ ( _lowercase , unittest.TestCase):
snake_case__ = DetaImageProcessor if is_vision_available() else None
def _UpperCamelCase ( self : Optional[Any] ) -> Optional[int]:
_UpperCamelCase = DetaImageProcessingTester(self )
@property
def _UpperCamelCase ( self : str ) -> Dict:
return self.image_processor_tester.prepare_image_processor_dict()
def _UpperCamelCase ( self : Union[str, Any] ) -> Dict:
_UpperCamelCase = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__UpperCamelCase , '''image_mean''' ) )
self.assertTrue(hasattr(__UpperCamelCase , '''image_std''' ) )
self.assertTrue(hasattr(__UpperCamelCase , '''do_normalize''' ) )
self.assertTrue(hasattr(__UpperCamelCase , '''do_resize''' ) )
self.assertTrue(hasattr(__UpperCamelCase , '''do_rescale''' ) )
self.assertTrue(hasattr(__UpperCamelCase , '''do_pad''' ) )
self.assertTrue(hasattr(__UpperCamelCase , '''size''' ) )
def _UpperCamelCase ( self : Any ) -> Tuple:
_UpperCamelCase = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'''shortest_edge''': 18, '''longest_edge''': 1333} )
self.assertEqual(image_processor.do_pad , __UpperCamelCase )
def _UpperCamelCase ( self : str ) -> int:
pass
def _UpperCamelCase ( self : Tuple ) -> Any:
# Initialize image_processing
_UpperCamelCase = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_UpperCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=__UpperCamelCase )
for image in image_inputs:
self.assertIsInstance(__UpperCamelCase , Image.Image )
# Test not batched input
_UpperCamelCase = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
_UpperCamelCase , _UpperCamelCase = self.image_processor_tester.get_expected_values(__UpperCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
_UpperCamelCase , _UpperCamelCase = self.image_processor_tester.get_expected_values(__UpperCamelCase , batched=__UpperCamelCase )
_UpperCamelCase = image_processing(__UpperCamelCase , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def _UpperCamelCase ( self : Any ) -> List[str]:
# Initialize image_processing
_UpperCamelCase = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
_UpperCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=__UpperCamelCase , numpify=__UpperCamelCase )
for image in image_inputs:
self.assertIsInstance(__UpperCamelCase , np.ndarray )
# Test not batched input
_UpperCamelCase = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
_UpperCamelCase , _UpperCamelCase = self.image_processor_tester.get_expected_values(__UpperCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
_UpperCamelCase = image_processing(__UpperCamelCase , return_tensors='''pt''' ).pixel_values
_UpperCamelCase , _UpperCamelCase = self.image_processor_tester.get_expected_values(__UpperCamelCase , batched=__UpperCamelCase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def _UpperCamelCase ( self : Tuple ) -> Union[str, Any]:
# Initialize image_processing
_UpperCamelCase = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
_UpperCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=__UpperCamelCase , torchify=__UpperCamelCase )
for image in image_inputs:
self.assertIsInstance(__UpperCamelCase , torch.Tensor )
# Test not batched input
_UpperCamelCase = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
_UpperCamelCase , _UpperCamelCase = self.image_processor_tester.get_expected_values(__UpperCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
_UpperCamelCase = image_processing(__UpperCamelCase , return_tensors='''pt''' ).pixel_values
_UpperCamelCase , _UpperCamelCase = self.image_processor_tester.get_expected_values(__UpperCamelCase , batched=__UpperCamelCase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
def _UpperCamelCase ( self : Union[str, Any] ) -> Tuple:
# prepare image and target
_UpperCamelCase = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
with open('''./tests/fixtures/tests_samples/COCO/coco_annotations.txt''' , '''r''' ) as f:
_UpperCamelCase = json.loads(f.read() )
_UpperCamelCase = {'''image_id''': 3_9769, '''annotations''': target}
# encode them
_UpperCamelCase = DetaImageProcessor()
_UpperCamelCase = image_processing(images=__UpperCamelCase , annotations=__UpperCamelCase , return_tensors='''pt''' )
# verify pixel values
_UpperCamelCase = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding['''pixel_values'''].shape , __UpperCamelCase )
_UpperCamelCase = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] )
self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , __UpperCamelCase , atol=1E-4 ) )
# verify area
_UpperCamelCase = torch.tensor([5_8_8_7.9_6_0_0, 1_1_2_5_0.2_0_6_1, 4_8_9_3_5_3.8_4_3_8, 8_3_7_1_2_2.7_5_0_0, 1_4_7_9_6_7.5_1_5_6, 1_6_5_7_3_2.3_4_3_8] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , __UpperCamelCase ) )
# verify boxes
_UpperCamelCase = torch.Size([6, 4] )
self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , __UpperCamelCase )
_UpperCamelCase = torch.tensor([0.5_5_0_3, 0.2_7_6_5, 0.0_6_0_4, 0.2_2_1_5] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , __UpperCamelCase , atol=1E-3 ) )
# verify image_id
_UpperCamelCase = torch.tensor([3_9769] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , __UpperCamelCase ) )
# verify is_crowd
_UpperCamelCase = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , __UpperCamelCase ) )
# verify class_labels
_UpperCamelCase = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , __UpperCamelCase ) )
# verify orig_size
_UpperCamelCase = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , __UpperCamelCase ) )
# verify size
_UpperCamelCase = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , __UpperCamelCase ) )
@slow
def _UpperCamelCase ( self : List[str] ) -> Dict:
# prepare image, target and masks_path
_UpperCamelCase = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
with open('''./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt''' , '''r''' ) as f:
_UpperCamelCase = json.loads(f.read() )
_UpperCamelCase = {'''file_name''': '''000000039769.png''', '''image_id''': 3_9769, '''segments_info''': target}
_UpperCamelCase = pathlib.Path('''./tests/fixtures/tests_samples/COCO/coco_panoptic''' )
# encode them
_UpperCamelCase = DetaImageProcessor(format='''coco_panoptic''' )
_UpperCamelCase = image_processing(images=__UpperCamelCase , annotations=__UpperCamelCase , masks_path=__UpperCamelCase , return_tensors='''pt''' )
# verify pixel values
_UpperCamelCase = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding['''pixel_values'''].shape , __UpperCamelCase )
_UpperCamelCase = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] )
self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , __UpperCamelCase , atol=1E-4 ) )
# verify area
_UpperCamelCase = torch.tensor([1_4_7_9_7_9.6_8_7_5, 1_6_5_5_2_7.0_4_6_9, 4_8_4_6_3_8.5_9_3_8, 1_1_2_9_2.9_3_7_5, 5_8_7_9.6_5_6_2, 7_6_3_4.1_1_4_7] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , __UpperCamelCase ) )
# verify boxes
_UpperCamelCase = torch.Size([6, 4] )
self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , __UpperCamelCase )
_UpperCamelCase = torch.tensor([0.2_6_2_5, 0.5_4_3_7, 0.4_6_8_8, 0.8_6_2_5] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , __UpperCamelCase , atol=1E-3 ) )
# verify image_id
_UpperCamelCase = torch.tensor([3_9769] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , __UpperCamelCase ) )
# verify is_crowd
_UpperCamelCase = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , __UpperCamelCase ) )
# verify class_labels
_UpperCamelCase = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , __UpperCamelCase ) )
# verify masks
_UpperCamelCase = 82_2873
self.assertEqual(encoding['''labels'''][0]['''masks'''].sum().item() , __UpperCamelCase )
# verify orig_size
_UpperCamelCase = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , __UpperCamelCase ) )
# verify size
_UpperCamelCase = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , __UpperCamelCase ) )
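# `get_expected_values` above implements the "shortest edge" resize rule; the same
# arithmetic as a standalone helper, returning (height, width) like the tester does:
def shortest_edge_size(height, width, shortest_edge=18):
    if width < height:
        return int(shortest_edge * height / width), shortest_edge
    if width > height:
        return shortest_edge, int(shortest_edge * width / height)
    return shortest_edge, shortest_edge
print(shortest_edge_size(400, 300))  # (24, 18): the shorter 300px edge is scaled to 18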
| 256 |
"""simple docstring"""
import unittest
from transformers import load_tool
from .test_tools_common import ToolTesterMixin
UpperCAmelCase = """
Hugging Face was founded in 2016 by French entrepreneurs Clément Delangue, Julien Chaumond, and Thomas Wolf originally as a company that developed a chatbot app targeted at teenagers.[2] After open-sourcing the model behind the chatbot, the company pivoted to focus on being a platform for machine learning.
In March 2021, Hugging Face raised $40 million in a Series B funding round.[3]
On April 28, 2021, the company launched the BigScience Research Workshop in collaboration with several other research groups to release an open large language model.[4] In 2022, the workshop concluded with the announcement of BLOOM, a multilingual large language model with 176 billion parameters.[5]
"""
class UpperCAmelCase_ ( unittest.TestCase , _lowercase):
def _UpperCamelCase ( self : List[str] ) -> Tuple:
_UpperCamelCase = load_tool('''text-question-answering''' )
self.tool.setup()
_UpperCamelCase = load_tool('''text-question-answering''' , remote=__UpperCamelCase )
def _UpperCamelCase ( self : Union[str, Any] ) -> int:
_UpperCamelCase = self.tool(__UpperCamelCase , '''What did Hugging Face do in April 2021?''' )
self.assertEqual(__UpperCamelCase , '''launched the BigScience Research Workshop''' )
def _UpperCamelCase ( self : Optional[Any] ) -> Optional[Any]:
_UpperCamelCase = self.remote_tool(__UpperCamelCase , '''What did Hugging Face do in April 2021?''' )
self.assertEqual(__UpperCamelCase , '''launched the BigScience Research Workshop''' )
def _UpperCamelCase ( self : Dict ) -> int:
_UpperCamelCase = self.tool(text=__UpperCamelCase , question='''What did Hugging Face do in April 2021?''' )
self.assertEqual(__UpperCamelCase , '''launched the BigScience Research Workshop''' )
def _UpperCamelCase ( self : Optional[int] ) -> Union[str, Any]:
_UpperCamelCase = self.remote_tool(text=__UpperCamelCase , question='''What did Hugging Face do in April 2021?''' )
self.assertEqual(__UpperCamelCase , '''launched the BigScience Research Workshop''' )
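# The tool above wraps a question-answering model; for comparison, a plain pipeline
# call gives similar behaviour (the checkpoint here is an arbitrary extractive-QA
# example, not necessarily what `load_tool` loads under the hood):
from transformers import pipeline
qa = pipeline("question-answering", model="distilbert-base-cased-distilled-squad")
result = qa(question="What did Hugging Face do in April 2021?", context=UpperCAmelCase)
print(result["answer"])  # expected to land on the BigScience workshop launch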
| 256 | 1 |
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
import torch.nn.functional as F
from transformers import (
ClapTextConfig,
ClapTextModelWithProjection,
RobertaTokenizer,
SpeechTaHifiGan,
SpeechTaHifiGanConfig,
)
from diffusers import (
AudioLDMPipeline,
AutoencoderKL,
DDIMScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.utils import is_xformers_available, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from ..pipeline_params import TEXT_TO_AUDIO_BATCH_PARAMS, TEXT_TO_AUDIO_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class UpperCamelCase ( lowercase , unittest.TestCase ):
UpperCAmelCase : str = AudioLDMPipeline
UpperCAmelCase : str = TEXT_TO_AUDIO_PARAMS
UpperCAmelCase : Union[str, Any] = TEXT_TO_AUDIO_BATCH_PARAMS
UpperCAmelCase : str = frozenset(
[
"""num_inference_steps""",
"""num_waveforms_per_prompt""",
"""generator""",
"""latents""",
"""output_type""",
"""return_dict""",
"""callback""",
"""callback_steps""",
] )
def _lowercase (self : int) -> Union[str, Any]:
torch.manual_seed(0)
__snake_case : str = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=(32, 64) , class_embed_type='simple_projection' , projection_class_embeddings_input_dim=32 , class_embeddings_concat=_A , )
__snake_case : int = DDIMScheduler(
beta_start=0.00_085 , beta_end=0.012 , beta_schedule='scaled_linear' , clip_sample=_A , set_alpha_to_one=_A , )
torch.manual_seed(0)
__snake_case : Optional[Any] = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=1 , out_channels=1 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , )
torch.manual_seed(0)
__snake_case : Tuple = ClapTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , projection_dim=32 , )
__snake_case : Tuple = ClapTextModelWithProjection(_A)
__snake_case : Optional[Any] = RobertaTokenizer.from_pretrained('hf-internal-testing/tiny-random-roberta' , model_max_length=77)
__snake_case : Union[str, Any] = SpeechTaHifiGanConfig(
model_in_dim=8 , sampling_rate=1_60_00 , upsample_initial_channel=16 , upsample_rates=[2, 2] , upsample_kernel_sizes=[4, 4] , resblock_kernel_sizes=[3, 7] , resblock_dilation_sizes=[[1, 3, 5], [1, 3, 5]] , normalize_before=_A , )
__snake_case : List[Any] = SpeechTaHifiGan(_A)
__snake_case : Tuple = {
'unet': unet,
'scheduler': scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'vocoder': vocoder,
}
return components
def _lowercase (self : List[str] , _A : Dict , _A : List[Any]=0) -> Dict:
if str(_A).startswith('mps'):
__snake_case : List[Any] = torch.manual_seed(_A)
else:
__snake_case : Tuple = torch.Generator(device=_A).manual_seed(_A)
__snake_case : Tuple = {
'prompt': 'A hammer hitting a wooden surface',
'generator': generator,
'num_inference_steps': 2,
'guidance_scale': 6.0,
}
return inputs
def _lowercase (self : Optional[Any]) -> Dict:
__snake_case : Optional[int] = 'cpu' # ensure determinism for the device-dependent torch.Generator
__snake_case : Dict = self.get_dummy_components()
__snake_case : Union[str, Any] = AudioLDMPipeline(**_A)
__snake_case : Any = audioldm_pipe.to(_A)
audioldm_pipe.set_progress_bar_config(disable=_A)
__snake_case : List[str] = self.get_dummy_inputs(_A)
__snake_case : str = audioldm_pipe(**_A)
__snake_case : Tuple = output.audios[0]
assert audio.ndim == 1
assert len(_A) == 2_56
__snake_case : Any = audio[:10]
__snake_case : Optional[Any] = np.array(
[-0.0_050, 0.0_050, -0.0_060, 0.0_033, -0.0_026, 0.0_033, -0.0_027, 0.0_033, -0.0_028, 0.0_033])
assert np.abs(audio_slice - expected_slice).max() < 1E-2
def _lowercase (self : Optional[Any]) -> Union[str, Any]:
__snake_case : Any = self.get_dummy_components()
__snake_case : Optional[int] = AudioLDMPipeline(**_A)
__snake_case : List[Any] = audioldm_pipe.to(_A)
__snake_case : List[str] = audioldm_pipe.to(_A)
audioldm_pipe.set_progress_bar_config(disable=_A)
__snake_case : Union[str, Any] = self.get_dummy_inputs(_A)
__snake_case : Dict = 3 * [inputs['prompt']]
# forward
__snake_case : List[Any] = audioldm_pipe(**_A)
__snake_case : Union[str, Any] = output.audios[0]
__snake_case : List[str] = self.get_dummy_inputs(_A)
__snake_case : Optional[Any] = 3 * [inputs.pop('prompt')]
__snake_case : Dict = audioldm_pipe.tokenizer(
_A , padding='max_length' , max_length=audioldm_pipe.tokenizer.model_max_length , truncation=_A , return_tensors='pt' , )
__snake_case : Tuple = text_inputs['input_ids'].to(_A)
__snake_case : str = audioldm_pipe.text_encoder(
_A , )
__snake_case : Union[str, Any] = prompt_embeds.text_embeds
# additional L_2 normalization over each hidden-state
__snake_case : Optional[int] = F.normalize(_A , dim=-1)
__snake_case : Any = prompt_embeds
# forward
__snake_case : Tuple = audioldm_pipe(**_A)
__snake_case : Optional[Any] = output.audios[0]
assert np.abs(audio_a - audio_a).max() < 1E-2
def _lowercase (self : Tuple) -> int:
__snake_case : Optional[Any] = self.get_dummy_components()
__snake_case : str = AudioLDMPipeline(**_A)
__snake_case : List[str] = audioldm_pipe.to(_A)
__snake_case : Dict = audioldm_pipe.to(_A)
audioldm_pipe.set_progress_bar_config(disable=_A)
__snake_case : str = self.get_dummy_inputs(_A)
__snake_case : str = 3 * ['this is a negative prompt']
__snake_case : Any = negative_prompt
__snake_case : Optional[int] = 3 * [inputs['prompt']]
# forward
__snake_case : str = audioldm_pipe(**_A)
__snake_case : Union[str, Any] = output.audios[0]
__snake_case : Dict = self.get_dummy_inputs(_A)
__snake_case : Any = 3 * [inputs.pop('prompt')]
__snake_case : Dict = []
for p in [prompt, negative_prompt]:
__snake_case : int = audioldm_pipe.tokenizer(
_A , padding='max_length' , max_length=audioldm_pipe.tokenizer.model_max_length , truncation=_A , return_tensors='pt' , )
__snake_case : Optional[Any] = text_inputs['input_ids'].to(_A)
__snake_case : Tuple = audioldm_pipe.text_encoder(
_A , )
__snake_case : Union[str, Any] = text_embeds.text_embeds
# additional L_2 normalization over each hidden-state
__snake_case : Tuple = F.normalize(_A , dim=-1)
embeds.append(_A)
__snake_case , __snake_case : Tuple = embeds
# forward
__snake_case : int = audioldm_pipe(**_A)
__snake_case : List[Any] = output.audios[0]
assert np.abs(audio_a - audio_a).max() < 1E-2
def _lowercase (self : Optional[Any]) -> Union[str, Any]:
__snake_case : Tuple = 'cpu' # ensure determinism for the device-dependent torch.Generator
__snake_case : Any = self.get_dummy_components()
__snake_case : int = PNDMScheduler(skip_prk_steps=_A)
__snake_case : Any = AudioLDMPipeline(**_A)
__snake_case : Dict = audioldm_pipe.to(_A)
audioldm_pipe.set_progress_bar_config(disable=_A)
__snake_case : Tuple = self.get_dummy_inputs(_A)
__snake_case : Tuple = 'egg cracking'
__snake_case : Union[str, Any] = audioldm_pipe(**_A , negative_prompt=_A)
__snake_case : Dict = output.audios[0]
assert audio.ndim == 1
assert len(_A) == 2_56
__snake_case : Any = audio[:10]
__snake_case : Tuple = np.array(
[-0.0_051, 0.0_050, -0.0_060, 0.0_034, -0.0_026, 0.0_033, -0.0_027, 0.0_033, -0.0_028, 0.0_032])
assert np.abs(audio_slice - expected_slice).max() < 1E-2
def _lowercase (self : List[str]) -> str:
__snake_case : Optional[Any] = 'cpu' # ensure determinism for the device-dependent torch.Generator
__snake_case : Union[str, Any] = self.get_dummy_components()
__snake_case : Union[str, Any] = PNDMScheduler(skip_prk_steps=_A)
__snake_case : List[str] = AudioLDMPipeline(**_A)
__snake_case : List[Any] = audioldm_pipe.to(_A)
audioldm_pipe.set_progress_bar_config(disable=_A)
__snake_case : List[Any] = 'A hammer hitting a wooden surface'
# test num_waveforms_per_prompt=1 (default)
__snake_case : Dict = audioldm_pipe(_A , num_inference_steps=2).audios
assert audios.shape == (1, 2_56)
# test num_waveforms_per_prompt=1 (default) for batch of prompts
__snake_case : Tuple = 2
__snake_case : Tuple = audioldm_pipe([prompt] * batch_size , num_inference_steps=2).audios
assert audios.shape == (batch_size, 2_56)
# test num_waveforms_per_prompt for single prompt
__snake_case : List[Any] = 2
__snake_case : List[Any] = audioldm_pipe(_A , num_inference_steps=2 , num_waveforms_per_prompt=_A).audios
assert audios.shape == (num_waveforms_per_prompt, 2_56)
# test num_waveforms_per_prompt for batch of prompts
__snake_case : Optional[Any] = 2
__snake_case : Dict = audioldm_pipe(
[prompt] * batch_size , num_inference_steps=2 , num_waveforms_per_prompt=_A).audios
assert audios.shape == (batch_size * num_waveforms_per_prompt, 2_56)
def _lowercase (self : List[Any]) -> Tuple:
__snake_case : List[Any] = 'cpu' # ensure determinism for the device-dependent torch.Generator
__snake_case : int = self.get_dummy_components()
__snake_case : Union[str, Any] = AudioLDMPipeline(**_A)
__snake_case : Union[str, Any] = audioldm_pipe.to(_A)
audioldm_pipe.set_progress_bar_config(disable=_A)
__snake_case : Optional[Any] = audioldm_pipe.vocoder.config.sampling_rate
__snake_case : Any = self.get_dummy_inputs(_A)
__snake_case : Tuple = audioldm_pipe(audio_length_in_s=0.016 , **_A)
__snake_case : Dict = output.audios[0]
assert audio.ndim == 1
assert len(_A) / vocoder_sampling_rate == 0.016
__snake_case : Any = audioldm_pipe(audio_length_in_s=0.032 , **_A)
__snake_case : int = output.audios[0]
assert audio.ndim == 1
assert len(_A) / vocoder_sampling_rate == 0.032
def _lowercase (self : Tuple) -> Optional[Any]:
__snake_case : Optional[Any] = self.get_dummy_components()
__snake_case : Dict = AudioLDMPipeline(**_A)
__snake_case : int = audioldm_pipe.to(_A)
audioldm_pipe.set_progress_bar_config(disable=_A)
__snake_case : Any = ['hey']
__snake_case : Any = audioldm_pipe(_A , num_inference_steps=1)
__snake_case : Optional[Any] = output.audios.shape
assert audio_shape == (1, 2_56)
__snake_case : Any = audioldm_pipe.vocoder.config
config.model_in_dim *= 2
__snake_case : Tuple = SpeechTaHifiGan(_A).to(_A)
__snake_case : Tuple = audioldm_pipe(_A , num_inference_steps=1)
__snake_case : Any = output.audios.shape
# waveform shape is unchanged, we just have 2x the number of mel channels in the spectrogram
assert audio_shape == (1, 2_56)
def _lowercase (self : Optional[Any]) -> Tuple:
self._test_attention_slicing_forward_pass(test_mean_pixel_difference=_A)
def _lowercase (self : Tuple) -> Optional[Any]:
self._test_inference_batch_single_identical(test_mean_pixel_difference=_A)
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , )
def _lowercase (self : List[Any]) -> Any:
self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=_A)
@slow
class UpperCamelCase ( unittest.TestCase ):
def _lowercase (self : int) -> Optional[int]:
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _lowercase (self : int , _A : Union[str, Any] , _A : Any="cpu" , _A : Optional[Any]=torch.floataa , _A : Any=0) -> Any:
__snake_case : Tuple = torch.Generator(device=_A).manual_seed(_A)
__snake_case : Dict = np.random.RandomState(_A).standard_normal((1, 8, 1_28, 16))
__snake_case : Tuple = torch.from_numpy(_A).to(device=_A , dtype=_A)
__snake_case : Tuple = {
'prompt': 'A hammer hitting a wooden surface',
'latents': latents,
'generator': generator,
'num_inference_steps': 3,
'guidance_scale': 2.5,
}
return inputs
def _lowercase (self : Optional[Any]) -> int:
__snake_case : Any = AudioLDMPipeline.from_pretrained('cvssp/audioldm')
__snake_case : List[str] = audioldm_pipe.to(_A)
audioldm_pipe.set_progress_bar_config(disable=_A)
__snake_case : int = self.get_inputs(_A)
__snake_case : Tuple = 25
__snake_case : Dict = audioldm_pipe(**_A).audios[0]
assert audio.ndim == 1
assert len(_A) == 8_19_20
__snake_case : Union[str, Any] = audio[7_72_30:7_72_40]
__snake_case : List[str] = np.array(
[-0.4_884, -0.4_607, 0.0_023, 0.5_007, 0.5_896, 0.5_151, 0.3_813, -0.0_208, -0.3_687, -0.4_315])
__snake_case : List[str] = np.abs(expected_slice - audio_slice).max()
assert max_diff < 1E-2
def _lowercase (self : Dict) -> str:
__snake_case : Dict = AudioLDMPipeline.from_pretrained('cvssp/audioldm')
__snake_case : Union[str, Any] = LMSDiscreteScheduler.from_config(audioldm_pipe.scheduler.config)
__snake_case : str = audioldm_pipe.to(_A)
audioldm_pipe.set_progress_bar_config(disable=_A)
__snake_case : Dict = self.get_inputs(_A)
__snake_case : Optional[int] = audioldm_pipe(**_A).audios[0]
assert audio.ndim == 1
assert len(_A) == 8_19_20
__snake_case : List[str] = audio[2_77_80:2_77_90]
__snake_case : List[str] = np.array([-0.2_131, -0.0_873, -0.0_124, -0.0_189, 0.0_569, 0.1_373, 0.1_883, 0.2_886, 0.3_297, 0.2_212])
__snake_case : Optional[int] = np.abs(expected_slice - audio_slice).max()
assert max_diff < 3E-2
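# Several tests above L2-normalise the CLAP text embeddings with F.normalize before
# reusing them as prompt_embeds; a tiny demonstration of that normalisation step:
import torch
import torch.nn.functional as F
embeds = torch.randn(2, 32)
unit_embeds = F.normalize(embeds, dim=-1)  # p=2 by default
assert torch.allclose(unit_embeds.norm(dim=-1), torch.ones(2), atol=1e-6)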
| 95 |
"""simple docstring"""
def __UpperCAmelCase ( list_data : list , key : int , left : int = 0 , right : int = 0 ) -> int:
    '''simple docstring'''
    right = right or len(list_data ) - 1
    if left > right:
        return -1
    elif list_data[left] == key:
        return left
    elif list_data[right] == key:
        return right
    else:
        return __UpperCAmelCase(list_data , key , left + 1 , right - 1 )
if __name__ == "__main__":
    import doctest
    doctest.testmod()
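# The search above walks inward from both ends recursively; an iterative equivalent
# with the same return-(-1)-on-miss contract avoids Python's recursion limit on
# long inputs:
def search_iterative(list_data: list, key: int) -> int:
    left, right = 0, len(list_data) - 1
    while left <= right:
        if list_data[left] == key:
            return left
        if list_data[right] == key:
            return right
        left, right = left + 1, right - 1
    return -1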
| 95 | 1 |
"""simple docstring"""
import argparse
import logging
import pickle
from collections import Counter
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO
)
lowerCAmelCase_ = logging.getLogger(__name__)
if __name__ == "__main__":
lowerCAmelCase_ = argparse.ArgumentParser(
description='Token Counts for smoothing the masking probabilities in MLM (cf XLM/word2vec)'
)
parser.add_argument(
'--data_file', type=str, default='data/dump.bert-base-uncased.pickle', help='The binarized dataset.'
)
parser.add_argument(
'--token_counts_dump', type=str, default='data/token_counts.bert-base-uncased.pickle', help='The dump file.'
)
parser.add_argument('--vocab_size', default=30_522, type=int)
lowerCAmelCase_ = parser.parse_args()
logger.info(F'''Loading data from {args.data_file}''')
with open(args.data_file, 'rb') as fp:
lowerCAmelCase_ = pickle.load(fp)
logger.info('Counting occurrences for MLM.')
lowerCAmelCase_ = Counter()
for tk_ids in data:
counter.update(tk_ids)
lowerCAmelCase_ = [0] * args.vocab_size
for k, v in counter.items():
lowerCAmelCase_ = v
logger.info(F'''Dump to {args.token_counts_dump}''')
with open(args.token_counts_dump, 'wb') as handle:
pickle.dump(counts, handle, protocol=pickle.HIGHEST_PROTOCOL)
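# These counts are meant to smooth MLM masking probabilities (cf. XLM/word2vec): rare
# tokens get boosted via count**(-alpha). A sketch of that downstream use; alpha=0.7 is
# an assumed smoothing exponent, configurable in the actual training script:
import numpy as np
token_counts = np.array([1_000, 100, 10, 0], dtype=np.float64)
alpha = 0.7
weights = np.maximum(token_counts, 1) ** -alpha  # clamp zeros so unseen tokens stay maskable
mask_probs = weights / weights.sum()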
| 16 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
lowerCAmelCase_ = {
'google/tapas-base-finetuned-sqa': (
'https://huggingface.co/google/tapas-base-finetuned-sqa/resolve/main/config.json'
),
'google/tapas-base-finetuned-wtq': (
'https://huggingface.co/google/tapas-base-finetuned-wtq/resolve/main/config.json'
),
'google/tapas-base-finetuned-wikisql-supervised': (
'https://huggingface.co/google/tapas-base-finetuned-wikisql-supervised/resolve/main/config.json'
),
'google/tapas-base-finetuned-tabfact': (
'https://huggingface.co/google/tapas-base-finetuned-tabfact/resolve/main/config.json'
),
}
class __A ( A_ ):
'''simple docstring'''
lowerCAmelCase : str = "tapas"
def __init__( self : List[Any] ,_snake_case : Dict=30_522 ,_snake_case : Union[str, Any]=768 ,_snake_case : int=12 ,_snake_case : Union[str, Any]=12 ,_snake_case : Union[str, Any]=3_072 ,_snake_case : List[Any]="gelu" ,_snake_case : Optional[int]=0.1 ,_snake_case : Tuple=0.1 ,_snake_case : List[Any]=1_024 ,_snake_case : Any=[3, 256, 256, 2, 256, 256, 10] ,_snake_case : List[Any]=0.02 ,_snake_case : Union[str, Any]=1e-12 ,_snake_case : str=0 ,_snake_case : Any=10.0 ,_snake_case : int=0 ,_snake_case : Optional[Any]=1.0 ,_snake_case : List[str]=None ,_snake_case : Tuple=1.0 ,_snake_case : Tuple=False ,_snake_case : List[Any]=None ,_snake_case : int=1.0 ,_snake_case : List[Any]=1.0 ,_snake_case : Optional[int]=False ,_snake_case : Optional[int]=False ,_snake_case : Optional[int]="ratio" ,_snake_case : Any=None ,_snake_case : Union[str, Any]=None ,_snake_case : List[str]=64 ,_snake_case : Optional[Any]=32 ,_snake_case : Optional[Any]=False ,_snake_case : Optional[int]=True ,_snake_case : Dict=False ,_snake_case : Tuple=False ,_snake_case : int=True ,_snake_case : List[str]=False ,_snake_case : Dict=None ,_snake_case : Optional[int]=None ,**_snake_case : int ,) -> List[Any]:
"""simple docstring"""
super().__init__(pad_token_id=_snake_case ,**_snake_case )
# BERT hyperparameters (with updated max_position_embeddings and type_vocab_sizes)
lowercase__ : Optional[int] = vocab_size
lowercase__ : List[str] = hidden_size
lowercase__ : Any = num_hidden_layers
lowercase__ : Optional[Any] = num_attention_heads
lowercase__ : Optional[int] = hidden_act
lowercase__ : List[Any] = intermediate_size
lowercase__ : List[Any] = hidden_dropout_prob
lowercase__ : Dict = attention_probs_dropout_prob
lowercase__ : str = max_position_embeddings
lowercase__ : Dict = type_vocab_sizes
lowercase__ : Optional[Any] = initializer_range
lowercase__ : Dict = layer_norm_eps
# Fine-tuning task hyperparameters
lowercase__ : Any = positive_label_weight
lowercase__ : int = num_aggregation_labels
lowercase__ : List[str] = aggregation_loss_weight
lowercase__ : Optional[int] = use_answer_as_supervision
lowercase__ : Optional[Any] = answer_loss_importance
lowercase__ : Union[str, Any] = use_normalized_answer_loss
lowercase__ : str = huber_loss_delta
lowercase__ : str = temperature
lowercase__ : int = aggregation_temperature
lowercase__ : List[Any] = use_gumbel_for_cells
lowercase__ : Tuple = use_gumbel_for_aggregation
lowercase__ : Union[str, Any] = average_approximation_function
lowercase__ : Union[str, Any] = cell_selection_preference
lowercase__ : Any = answer_loss_cutoff
lowercase__ : List[Any] = max_num_rows
lowercase__ : str = max_num_columns
lowercase__ : int = average_logits_per_cell
lowercase__ : str = select_one_column
lowercase__ : str = allow_empty_column_selection
lowercase__ : Any = init_cell_selection_weights_to_zero
lowercase__ : Optional[int] = reset_position_index_per_cell
lowercase__ : Union[str, Any] = disable_per_token_loss
# Aggregation hyperparameters
lowercase__ : Optional[Any] = aggregation_labels
lowercase__ : List[Any] = no_aggregation_label_index
if isinstance(self.aggregation_labels ,_snake_case ):
lowercase__ : Union[str, Any] = {int(_snake_case ): v for k, v in aggregation_labels.items()}
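# Config classes like this one are plain hyperparameter containers; the usual pattern
# is to instantiate with task-specific overrides (the values below mirror the public
# TapasConfig WTQ recipe this obfuscated copy corresponds to, shown as an illustration):
from transformers import TapasConfig
config = TapasConfig(num_aggregation_labels=4, answer_loss_cutoff=0.664)
assert config.model_type == "tapas"
print(config.to_dict()["num_aggregation_labels"])  # 4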
| 16 | 1 |
'''simple docstring'''
from __future__ import annotations
def max_sum_in_array( array : list , k : int ) -> int:
    if len(array ) < k or k < 0:
        raise ValueError('''Invalid Input''' )
    current_sum = max_sum = sum(array[:k] )
    for i in range(len(array ) - k ):
        current_sum = current_sum - array[i] + array[i + k]
        max_sum = max(max_sum , current_sum )
    return max_sum
if __name__ == "__main__":
    from doctest import testmod
    from random import randint
    testmod()
    array = [randint(-1_000, 1_000) for i in range(100)]
    k = randint(0, 110)
    print(f"""The maximum sum of {k} consecutive elements is {max_sum_in_array(array,k)}""")
'''simple docstring'''
from typing import Dict, Iterable, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends
if is_vision_available():
import PIL
# soft dependency
if is_pytesseract_available():
import pytesseract
__UpperCAmelCase = logging.get_logger(__name__)
def _snake_case ( A , A , A ) -> Optional[Any]:
return [
int(1000 * (box[0] / width) ),
int(1000 * (box[1] / height) ),
int(1000 * (box[2] / width) ),
int(1000 * (box[3] / height) ),
]
def _snake_case ( A , A , A ) -> Union[str, Any]:
lowerCAmelCase__ = to_pil_image(A )
lowerCAmelCase__ , lowerCAmelCase__ = pil_image.size
lowerCAmelCase__ = pytesseract.image_to_data(A , lang=A , output_type='''dict''' , config=A )
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = data['''text'''], data['''left'''], data['''top'''], data['''width'''], data['''height''']
# filter empty words and corresponding coordinates
lowerCAmelCase__ = [idx for idx, word in enumerate(A ) if not word.strip()]
lowerCAmelCase__ = [word for idx, word in enumerate(A ) if idx not in irrelevant_indices]
lowerCAmelCase__ = [coord for idx, coord in enumerate(A ) if idx not in irrelevant_indices]
lowerCAmelCase__ = [coord for idx, coord in enumerate(A ) if idx not in irrelevant_indices]
lowerCAmelCase__ = [coord for idx, coord in enumerate(A ) if idx not in irrelevant_indices]
lowerCAmelCase__ = [coord for idx, coord in enumerate(A ) if idx not in irrelevant_indices]
# turn coordinates into (left, top, left+width, top+height) format
lowerCAmelCase__ = []
for x, y, w, h in zip(A , A , A , A ):
lowerCAmelCase__ = [x, y, x + w, y + h]
actual_boxes.append(A )
# finally, normalize the bounding boxes
lowerCAmelCase__ = []
for box in actual_boxes:
normalized_boxes.append(normalize_box(A , A , A ) )
assert len(A ) == len(A ), "Not as many words as there are bounding boxes"
return words, normalized_boxes
class LayoutLMv3ImageProcessor(BaseImageProcessor):
    r"""Constructs a LayoutLMv3 image processor (resize, rescale, normalize, optional Tesseract OCR)."""

    model_input_names = ["pixel_values"]

    def __init__(self, do_resize=True, size=None, resample=PILImageResampling.BILINEAR, do_rescale=True, rescale_value=1 / 255, do_normalize=True, image_mean=None, image_std=None, apply_ocr=True, ocr_lang=None, tesseract_config="", **kwargs) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 224, "width": 224}
        size = get_size_dict(size)
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_value
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
        self.apply_ocr = apply_ocr
        self.ocr_lang = ocr_lang
        self.tesseract_config = tesseract_config

    def resize(self, image, size, resample=PILImageResampling.BILINEAR, data_format=None, **kwargs) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}")
        output_size = (size["height"], size["width"])
        # the bare `resize` below is the module-level image transform, not this method
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def rescale(self, image, scale, data_format=None, **kwargs) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image, mean, std, data_format=None, **kwargs) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(self, images, do_resize=None, size=None, resample=None, do_rescale=None, rescale_factor=None, do_normalize=None, image_mean=None, image_std=None, apply_ocr=None, ocr_lang=None, tesseract_config=None, return_tensors=None, data_format=ChannelDimension.FIRST, **kwargs) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size)
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        apply_ocr = apply_ocr if apply_ocr is not None else self.apply_ocr
        ocr_lang = ocr_lang if ocr_lang is not None else self.ocr_lang
        tesseract_config = tesseract_config if tesseract_config is not None else self.tesseract_config
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )
        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("If do_normalize is True, image_mean and image_std must be specified.")
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        # Tesseract OCR to get words + normalized bounding boxes
        if apply_ocr:
            requires_backends(self, "pytesseract")
            words_batch = []
            boxes_batch = []
            for image in images:
                words, boxes = apply_tesseract(image, ocr_lang, tesseract_config)
                words_batch.append(words)
                boxes_batch.append(boxes)
        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = BatchFeature(data={"pixel_values": images}, tensor_type=return_tensors)
        if apply_ocr:
            data["words"] = words_batch
            data["boxes"] = boxes_batch
        return data | 228 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"configuration_informer": [
"INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"InformerConfig",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_informer"] = [
"INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"InformerForPrediction",
"InformerModel",
"InformerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_informer import INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, InformerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_informer import (
INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
InformerForPrediction,
InformerModel,
InformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
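# Usage sketch (added for illustration): once the lazy module is installed in
# sys.modules, `from ...informer import InformerConfig` stays cheap, and the
# torch-backed modeling classes are only imported on first attribute access.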
| 151 |
'''simple docstring'''
import argparse
import os
import torch
from diffusers import (
CMStochasticIterativeScheduler,
ConsistencyModelPipeline,
    UNet2DModel,
)
TEST_UNET_CONFIG = {
"sample_size": 32,
"in_channels": 3,
"out_channels": 3,
"layers_per_block": 2,
"num_class_embeds": 1000,
"block_out_channels": [32, 64],
"attention_head_dim": 8,
"down_block_types": [
"ResnetDownsampleBlock2D",
"AttnDownBlock2D",
],
"up_block_types": [
"AttnUpBlock2D",
"ResnetUpsampleBlock2D",
],
"resnet_time_scale_shift": "scale_shift",
"upsample_type": "resnet",
"downsample_type": "resnet",
}
IMAGENET_64_UNET_CONFIG = {
"sample_size": 64,
"in_channels": 3,
"out_channels": 3,
"layers_per_block": 3,
"num_class_embeds": 1000,
"block_out_channels": [192, 192 * 2, 192 * 3, 192 * 4],
"attention_head_dim": 64,
"down_block_types": [
"ResnetDownsampleBlock2D",
"AttnDownBlock2D",
"AttnDownBlock2D",
"AttnDownBlock2D",
],
"up_block_types": [
"AttnUpBlock2D",
"AttnUpBlock2D",
"AttnUpBlock2D",
"ResnetUpsampleBlock2D",
],
"resnet_time_scale_shift": "scale_shift",
"upsample_type": "resnet",
"downsample_type": "resnet",
}
LSUN_256_UNET_CONFIG = {
"sample_size": 256,
"in_channels": 3,
"out_channels": 3,
"layers_per_block": 2,
"num_class_embeds": None,
"block_out_channels": [256, 256, 256 * 2, 256 * 2, 256 * 4, 256 * 4],
"attention_head_dim": 64,
"down_block_types": [
"ResnetDownsampleBlock2D",
"ResnetDownsampleBlock2D",
"ResnetDownsampleBlock2D",
"AttnDownBlock2D",
"AttnDownBlock2D",
"AttnDownBlock2D",
],
"up_block_types": [
"AttnUpBlock2D",
"AttnUpBlock2D",
"AttnUpBlock2D",
"ResnetUpsampleBlock2D",
"ResnetUpsampleBlock2D",
"ResnetUpsampleBlock2D",
],
"resnet_time_scale_shift": "default",
"upsample_type": "resnet",
"downsample_type": "resnet",
}
CD_SCHEDULER_CONFIG = {
"num_train_timesteps": 40,
"sigma_min": 0.002,
"sigma_max": 80.0,
}
CT_IMAGENET_64_SCHEDULER_CONFIG = {
"num_train_timesteps": 201,
"sigma_min": 0.002,
"sigma_max": 80.0,
}
CT_LSUN_256_SCHEDULER_CONFIG = {
"num_train_timesteps": 151,
"sigma_min": 0.002,
"sigma_max": 80.0,
}
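
# Illustrative helper (an assumption, not part of the original script): the
# sigma_min/sigma_max bounds above span a Karras-style (rho = 7) noise schedule
# that consistency models discretize over num_train_timesteps steps.
def _example_karras_sigmas(num_steps, sigma_min, sigma_max, rho=7.0):
    ramp = [i / (num_steps - 1) for i in range(num_steps)]
    min_inv_rho = sigma_min ** (1 / rho)
    max_inv_rho = sigma_max ** (1 / rho)
    # interpolate in 1/rho space, then raise back to the rho power
    return [(max_inv_rho + r * (min_inv_rho - max_inv_rho)) ** rho for r in ramp]
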
def strabool(v):
    if isinstance(v, bool):
        return v
    if v.lower() in ("yes", "true", "t", "y", "1"):
        return True
    elif v.lower() in ("no", "false", "f", "n", "0"):
        return False
    else:
        raise argparse.ArgumentTypeError("boolean value expected")
def convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=False):
    new_checkpoint[f"{new_prefix}.norm1.weight"] = checkpoint[f"{old_prefix}.in_layers.0.weight"]
    new_checkpoint[f"{new_prefix}.norm1.bias"] = checkpoint[f"{old_prefix}.in_layers.0.bias"]
    new_checkpoint[f"{new_prefix}.conv1.weight"] = checkpoint[f"{old_prefix}.in_layers.2.weight"]
    new_checkpoint[f"{new_prefix}.conv1.bias"] = checkpoint[f"{old_prefix}.in_layers.2.bias"]
    new_checkpoint[f"{new_prefix}.time_emb_proj.weight"] = checkpoint[f"{old_prefix}.emb_layers.1.weight"]
    new_checkpoint[f"{new_prefix}.time_emb_proj.bias"] = checkpoint[f"{old_prefix}.emb_layers.1.bias"]
    new_checkpoint[f"{new_prefix}.norm2.weight"] = checkpoint[f"{old_prefix}.out_layers.0.weight"]
    new_checkpoint[f"{new_prefix}.norm2.bias"] = checkpoint[f"{old_prefix}.out_layers.0.bias"]
    new_checkpoint[f"{new_prefix}.conv2.weight"] = checkpoint[f"{old_prefix}.out_layers.3.weight"]
    new_checkpoint[f"{new_prefix}.conv2.bias"] = checkpoint[f"{old_prefix}.out_layers.3.bias"]
    if has_skip:
        new_checkpoint[f"{new_prefix}.conv_shortcut.weight"] = checkpoint[f"{old_prefix}.skip_connection.weight"]
        new_checkpoint[f"{new_prefix}.conv_shortcut.bias"] = checkpoint[f"{old_prefix}.skip_connection.bias"]
    return new_checkpoint
def convert_attention(checkpoint, new_checkpoint, old_prefix, new_prefix, attention_dim=None):
    weight_q, weight_k, weight_v = checkpoint[f"{old_prefix}.qkv.weight"].chunk(3, dim=0)
    bias_q, bias_k, bias_v = checkpoint[f"{old_prefix}.qkv.bias"].chunk(3, dim=0)
    new_checkpoint[f"{new_prefix}.group_norm.weight"] = checkpoint[f"{old_prefix}.norm.weight"]
    new_checkpoint[f"{new_prefix}.group_norm.bias"] = checkpoint[f"{old_prefix}.norm.bias"]
    new_checkpoint[f"{new_prefix}.to_q.weight"] = weight_q.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_q.bias"] = bias_q.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_k.weight"] = weight_k.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_k.bias"] = bias_k.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_v.weight"] = weight_v.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_v.bias"] = bias_v.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_out.0.weight"] = (
        checkpoint[f"{old_prefix}.proj_out.weight"].squeeze(-1).squeeze(-1)
    )
    new_checkpoint[f"{new_prefix}.to_out.0.bias"] = checkpoint[f"{old_prefix}.proj_out.bias"].squeeze(-1).squeeze(-1)
    return new_checkpoint
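# Shape note (added for illustration): the original checkpoints store qkv as a
# fused conv weight of shape (3*C, C, 1, 1); chunking along dim 0 and squeezing
# the trailing 1x1 conv dims yields the three (C, C) linear weights above.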
def con_pt_to_diffuser(checkpoint_path, unet_config):
    checkpoint = torch.load(checkpoint_path, map_location="cpu")
    new_checkpoint = {}
    new_checkpoint["time_embedding.linear_1.weight"] = checkpoint["time_embed.0.weight"]
    new_checkpoint["time_embedding.linear_1.bias"] = checkpoint["time_embed.0.bias"]
    new_checkpoint["time_embedding.linear_2.weight"] = checkpoint["time_embed.2.weight"]
    new_checkpoint["time_embedding.linear_2.bias"] = checkpoint["time_embed.2.bias"]
    if unet_config["num_class_embeds"] is not None:
        new_checkpoint["class_embedding.weight"] = checkpoint["label_emb.weight"]
    new_checkpoint["conv_in.weight"] = checkpoint["input_blocks.0.0.weight"]
    new_checkpoint["conv_in.bias"] = checkpoint["input_blocks.0.0.bias"]
    down_block_types = unet_config["down_block_types"]
    layers_per_block = unet_config["layers_per_block"]
    attention_head_dim = unet_config["attention_head_dim"]
    channels_list = unet_config["block_out_channels"]
    current_layer = 1
    prev_channels = channels_list[0]
    for i, layer_type in enumerate(down_block_types):
        current_channels = channels_list[i]
        downsample_block_has_skip = current_channels != prev_channels
        if layer_type == "ResnetDownsampleBlock2D":
            for j in range(layers_per_block):
                new_prefix = f"down_blocks.{i}.resnets.{j}"
                old_prefix = f"input_blocks.{current_layer}.0"
                has_skip = True if j == 0 and downsample_block_has_skip else False
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=has_skip)
                current_layer += 1
        elif layer_type == "AttnDownBlock2D":
            for j in range(layers_per_block):
                new_prefix = f"down_blocks.{i}.resnets.{j}"
                old_prefix = f"input_blocks.{current_layer}.0"
                has_skip = True if j == 0 and downsample_block_has_skip else False
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=has_skip)
                new_prefix = f"down_blocks.{i}.attentions.{j}"
                old_prefix = f"input_blocks.{current_layer}.1"
                new_checkpoint = convert_attention(
                    checkpoint, new_checkpoint, old_prefix, new_prefix, attention_head_dim
                )
                current_layer += 1
        if i != len(down_block_types) - 1:
            new_prefix = f"down_blocks.{i}.downsamplers.0"
            old_prefix = f"input_blocks.{current_layer}.0"
            new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)
            current_layer += 1
        prev_channels = current_channels
    # hardcoded the mid-block for now
    new_prefix = "mid_block.resnets.0"
    old_prefix = "middle_block.0"
    new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)
    new_prefix = "mid_block.attentions.0"
    old_prefix = "middle_block.1"
    new_checkpoint = convert_attention(checkpoint, new_checkpoint, old_prefix, new_prefix, attention_head_dim)
    new_prefix = "mid_block.resnets.1"
    old_prefix = "middle_block.2"
    new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)
    current_layer = 0
    up_block_types = unet_config["up_block_types"]
    for i, layer_type in enumerate(up_block_types):
        if layer_type == "ResnetUpsampleBlock2D":
            for j in range(layers_per_block + 1):
                new_prefix = f"up_blocks.{i}.resnets.{j}"
                old_prefix = f"output_blocks.{current_layer}.0"
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=True)
                current_layer += 1
            if i != len(up_block_types) - 1:
                new_prefix = f"up_blocks.{i}.upsamplers.0"
                old_prefix = f"output_blocks.{current_layer-1}.1"
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)
        elif layer_type == "AttnUpBlock2D":
            for j in range(layers_per_block + 1):
                new_prefix = f"up_blocks.{i}.resnets.{j}"
                old_prefix = f"output_blocks.{current_layer}.0"
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=True)
                new_prefix = f"up_blocks.{i}.attentions.{j}"
                old_prefix = f"output_blocks.{current_layer}.1"
                new_checkpoint = convert_attention(
                    checkpoint, new_checkpoint, old_prefix, new_prefix, attention_head_dim
                )
                current_layer += 1
            if i != len(up_block_types) - 1:
                new_prefix = f"up_blocks.{i}.upsamplers.0"
                old_prefix = f"output_blocks.{current_layer-1}.2"
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)
    new_checkpoint["conv_norm_out.weight"] = checkpoint["out.0.weight"]
    new_checkpoint["conv_norm_out.bias"] = checkpoint["out.0.bias"]
    new_checkpoint["conv_out.weight"] = checkpoint["out.2.weight"]
    new_checkpoint["conv_out.bias"] = checkpoint["out.2.bias"]
    return new_checkpoint
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--unet_path", default=None, type=str, required=True, help="Path to the unet.pt to convert.")
    parser.add_argument(
        "--dump_path", default=None, type=str, required=True, help="Path to output the converted UNet model."
    )
    parser.add_argument("--class_cond", default=True, type=str, help="Whether the model is class-conditional.")
    args = parser.parse_args()
    args.class_cond = strabool(args.class_cond)
    ckpt_name = os.path.basename(args.unet_path)
    print(f"Checkpoint: {ckpt_name}")
    # Get U-Net config
    if "imagenet64" in ckpt_name:
        unet_config = IMAGENET_64_UNET_CONFIG
    elif "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
        unet_config = LSUN_256_UNET_CONFIG
    elif "test" in ckpt_name:
        unet_config = TEST_UNET_CONFIG
    else:
        raise ValueError(f"Checkpoint type {ckpt_name} is not currently supported.")
    if not args.class_cond:
        unet_config["num_class_embeds"] = None
    converted_unet_ckpt = con_pt_to_diffuser(args.unet_path, unet_config)
    image_unet = UNet2DModel(**unet_config)
    image_unet.load_state_dict(converted_unet_ckpt)
    # Get scheduler config
    if "cd" in ckpt_name or "test" in ckpt_name:
        scheduler_config = CD_SCHEDULER_CONFIG
    elif "ct" in ckpt_name and "imagenet64" in ckpt_name:
        scheduler_config = CT_IMAGENET_64_SCHEDULER_CONFIG
    elif "ct" in ckpt_name and "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
        scheduler_config = CT_LSUN_256_SCHEDULER_CONFIG
    else:
        raise ValueError(f"Checkpoint type {ckpt_name} is not currently supported.")
    cm_scheduler = CMStochasticIterativeScheduler(**scheduler_config)
    consistency_model = ConsistencyModelPipeline(unet=image_unet, scheduler=cm_scheduler)
    consistency_model.save_pretrained(args.dump_path)
| 151 | 1 |
'''simple docstring'''
from __future__ import annotations
from math import pi, sqrt
def resonant_frequency(inductance: float, capacitance: float) -> tuple[str, float]:
if inductance <= 0:
raise ValueError("Inductance cannot be 0 or negative" )
elif capacitance <= 0:
raise ValueError("Capacitance cannot be 0 or negative" )
else:
return (
"Resonant frequency",
float(1 / (2 * pi * (sqrt(inductance * capacitance ))) ),
)
if __name__ == "__main__":
import doctest
doctest.testmod()
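    # Example added for illustration: a 10 mH inductor with a 100 nF capacitor
    # resonates near 5.03 kHz, since f = 1 / (2 * pi * sqrt(L * C)).
    print(resonant_frequency(inductance=10e-3, capacitance=100e-9))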
| 37 |
'''simple docstring'''
from datetime import datetime as dt
import os
from github import Github
lowerCAmelCase__ : Union[str, Any] = [
"good first issue",
"good second issue",
"good difficult issue",
"feature request",
"new model",
"wip",
]
def main():
    g = Github(os.environ["GITHUB_TOKEN"])
    repo = g.get_repo("huggingface/transformers")
    open_issues = repo.get_issues(state="open")
    for issue in open_issues:
        comments = sorted([comment for comment in issue.get_comments()], key=lambda i: i.created_at, reverse=True)
        last_comment = comments[0] if len(comments) > 0 else None
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and (dt.utcnow() - issue.updated_at).days > 7
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# print(f"Would close issue {issue.number} since it has been 7 days of inactivity since bot mention.")
issue.edit(state="closed" )
elif (
(dt.utcnow() - issue.updated_at).days > 23
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# print(f"Would add stale comment to {issue.number}")
issue.create_comment(
"This issue has been automatically marked as stale because it has not had "
"recent activity. If you think this still needs to be addressed "
"please comment on this thread.\n\nPlease note that issues that do not follow the "
"[contributing guidelines](https://github.com/huggingface/transformers/blob/main/CONTRIBUTING.md) "
"are likely to be ignored." )
if __name__ == "__main__":
main()
| 37 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {'''configuration_vit_mae''': ['''VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ViTMAEConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_vit_mae"] = [
'''VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ViTMAEForPreTraining''',
'''ViTMAELayer''',
'''ViTMAEModel''',
'''ViTMAEPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_vit_mae"] = [
'''TFViTMAEForPreTraining''',
'''TFViTMAEModel''',
'''TFViTMAEPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_vit_mae import VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMAEConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_mae import (
VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMAEForPreTraining,
ViTMAELayer,
ViTMAEModel,
ViTMAEPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit_mae import TFViTMAEForPreTraining, TFViTMAEModel, TFViTMAEPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 310 |
import json
import os
import unittest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class MgpstrTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MgpstrTokenizer
    test_rust_tokenizer = False
    from_pretrained_kwargs = {}
    test_seq2seq = False

    def setUp(self):
        super().setUp()
        # fmt: off
        vocab = ['[GO]', '[s]', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z']
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")

    def get_tokenizer(self, **kwargs):
        return MgpstrTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "tester"
        output_text = "tester"
        return input_text, output_text

    @unittest.skip("MGP-STR always lower cases letters.")
    def test_added_tokens_do_lower_case(self):
        pass

    def test_add_special_tokens(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                special_token = "[SPECIAL_TOKEN]"
                tokenizer.add_special_tokens({"cls_token": special_token})
                encoded_special_token = tokenizer.encode([special_token], add_special_tokens=False)
                self.assertEqual(len(encoded_special_token), 1)
                decoded = tokenizer.decode(encoded_special_token, skip_special_tokens=True)
                self.assertTrue(special_token not in decoded)

    def test_internal_consistency(self):
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                input_text, output_text = self.get_input_output_texts(tokenizer)
                tokens = tokenizer.tokenize(input_text)
                ids = tokenizer.convert_tokens_to_ids(tokens)
                ids_2 = tokenizer.encode(input_text, add_special_tokens=False)
                self.assertListEqual(ids, ids_2)
                tokens_2 = tokenizer.convert_ids_to_tokens(ids)
                self.assertNotEqual(len(tokens_2), 0)
                text_2 = tokenizer.decode(ids)
                self.assertIsInstance(text_2, str)
                self.assertEqual(text_2.replace(" ", ""), output_text)

    @unittest.skip("MGP-STR tokenizer only handles one sequence.")
    def test_maximum_encoding_length_pair_input(self):
        pass

    @unittest.skip("inputs cannot be pretokenized in MgpstrTokenizer")
    def test_pretokenized_inputs(self):
        pass
| 310 | 1 |
'''simple docstring'''
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
if is_torch_available():
import torch
from transformers import XLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_torch
class XLMRobertaModelIntegrationTest(unittest.TestCase):
    @slow
    def test_xlm_roberta_base(self):
        model = XLMRobertaModel.from_pretrained("xlm-roberta-base")
        input_ids = torch.tensor([[0, 581, 10269, 83, 99942, 136, 60742, 23, 70, 80583, 18276, 2]])
        # The dog is cute and lives in the garden house
        expected_output_shape = torch.Size((1, 12, 768))  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = torch.tensor(
            [[-0.0101, 0.1218, -0.0803, 0.0801, 0.1327, 0.0776, -0.1215, 0.2383, 0.3338, 0.3106, 0.0300, 0.0252]]
        )
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base')
        # xlmr.eval()
        # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
        with torch.no_grad():
            output = model(input_ids)["last_hidden_state"].detach()
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of last dim
        self.assertTrue(torch.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3))

    @slow
    def test_xlm_roberta_large(self):
        model = XLMRobertaModel.from_pretrained("xlm-roberta-large")
        input_ids = torch.tensor([[0, 581, 10269, 83, 99942, 136, 60742, 23, 70, 80583, 18276, 2]])
        # The dog is cute and lives in the garden house
        expected_output_shape = torch.Size((1, 12, 1024))  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = torch.tensor(
            [[-0.0699, -0.0318, 0.0705, -0.1241, 0.0999, -0.0520, 0.1004, -0.1838, -0.4704, 0.1437, 0.0821, 0.0126]]
        )
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.large')
        # xlmr.eval()
        # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
        with torch.no_grad():
            output = model(input_ids)["last_hidden_state"].detach()
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of last dim
        self.assertTrue(torch.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3)) | 283 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_table_transformer': [
'TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'TableTransformerConfig',
'TableTransformerOnnxConfig',
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_table_transformer"] = [
'TABLE_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'TableTransformerForObjectDetection',
'TableTransformerModel',
'TableTransformerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_table_transformer import (
TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TableTransformerConfig,
TableTransformerOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_table_transformer import (
TABLE_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TableTransformerForObjectDetection,
TableTransformerModel,
TableTransformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) | 283 | 1 |
import os
from huggingface_hub.constants import HUGGINGFACE_HUB_CACHE, hf_cache_home
default_cache_path = HUGGINGFACE_HUB_CACHE
CONFIG_NAME = "config.json"
WEIGHTS_NAME = "diffusion_pytorch_model.bin"
FLAX_WEIGHTS_NAME = "diffusion_flax_model.msgpack"
ONNX_WEIGHTS_NAME = "model.onnx"
SAFETENSORS_WEIGHTS_NAME = "diffusion_pytorch_model.safetensors"
ONNX_EXTERNAL_WEIGHTS_NAME = "weights.pb"
HUGGINGFACE_CO_RESOLVE_ENDPOINT = "https://huggingface.co"
DIFFUSERS_CACHE = default_cache_path
DIFFUSERS_DYNAMIC_MODULE_NAME = "diffusers_modules"
HF_MODULES_CACHE = os.getenv("HF_MODULES_CACHE", os.path.join(hf_cache_home, "modules"))
DEPRECATED_REVISION_ARGS = ["fp16", "non-ema"]
TEXT_ENCODER_ATTN_MODULE = ".self_attn"
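# Usage sketch (added for illustration): downstream loaders join these names
# onto a model directory when resolving files, e.g.
#   os.path.join(model_dir, WEIGHTS_NAME) -> "<model_dir>/diffusion_pytorch_model.bin"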
| 95 |
import torch
from transformers import PreTrainedModel, XLMRobertaConfig, XLMRobertaModel
class MCLIPConfig(XLMRobertaConfig):
    model_type = "M-CLIP"

    def __init__(self, transformerDimSize=1024, imageDimSize=768, **kwargs):
        self.transformerDimensions = transformerDimSize
        self.numDims = imageDimSize
        super().__init__(**kwargs)


class MultilingualCLIP(PreTrainedModel):
    config_class = MCLIPConfig

    def __init__(self, config, *args, **kwargs):
        super().__init__(config, *args, **kwargs)
        self.transformer = XLMRobertaModel(config)
        self.LinearTransformation = torch.nn.Linear(
            in_features=config.transformerDimensions, out_features=config.numDims
        )

    def forward(self, input_ids, attention_mask):
        embs = self.transformer(input_ids=input_ids, attention_mask=attention_mask)[0]
        # mean-pool token embeddings over the attention mask before projecting
        embs2 = (embs * attention_mask.unsqueeze(2)).sum(dim=1) / attention_mask.sum(dim=1)[:, None]
        return self.LinearTransformation(embs2), embs
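

if __name__ == "__main__":
    # Smoke test added for illustration (hypothetical tiny config, not an
    # official checkpoint): mean pooling yields one embedding per sentence,
    # which the linear head projects into the shared CLIP space.
    config = MCLIPConfig(
        transformerDimSize=32, imageDimSize=16, vocab_size=100, hidden_size=32,
        num_hidden_layers=1, num_attention_heads=2, intermediate_size=64, max_position_embeddings=64,
    )
    model = MultilingualCLIP(config)
    input_ids = torch.randint(0, 100, (2, 8))
    attention_mask = torch.ones_like(input_ids)
    projected, embeddings = model(input_ids, attention_mask)
    print(projected.shape, embeddings.shape)  # torch.Size([2, 16]) torch.Size([2, 8, 32])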
| 95 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
"configuration_squeezebert": [
"SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP",
"SqueezeBertConfig",
"SqueezeBertOnnxConfig",
],
"tokenization_squeezebert": ["SqueezeBertTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_squeezebert_fast"] = ["SqueezeBertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_squeezebert"] = [
"SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"SqueezeBertForMaskedLM",
"SqueezeBertForMultipleChoice",
"SqueezeBertForQuestionAnswering",
"SqueezeBertForSequenceClassification",
"SqueezeBertForTokenClassification",
"SqueezeBertModel",
"SqueezeBertModule",
"SqueezeBertPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_squeezebert import (
SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
SqueezeBertConfig,
SqueezeBertOnnxConfig,
)
from .tokenization_squeezebert import SqueezeBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_squeezebert_fast import SqueezeBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_squeezebert import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
SqueezeBertModule,
SqueezeBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) | 351 |
'''simple docstring'''
def speed_of_sound_in_a_fluid(density: float, bulk_modulus: float) -> float:
'''simple docstring'''
if density <= 0:
raise ValueError("Impossible fluid density" )
if bulk_modulus <= 0:
raise ValueError("Impossible bulk modulus" )
return (bulk_modulus / density) ** 0.5
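

def example_speed_in_water() -> float:
    # Worked example (added for illustration): water at ~20 °C, with density
    # 998 kg/m^3 and bulk modulus 2.15 GPa, gives c = sqrt(K / rho) ≈ 1468 m/s.
    return speed_of_sound_in_a_fluid(density=998, bulk_modulus=2.15e9)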
if __name__ == "__main__":
import doctest
doctest.testmod() | 31 | 0 |
"""simple docstring"""
import random
import unittest
import numpy as np
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
    OnnxStableDiffusionImg2ImgPipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class OnnxStableDiffusionImg2ImgPipelineFastTests(OnnxPipelineTesterMixin, unittest.TestCase):
    hub_checkpoint = "hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline"

    def get_dummy_inputs(self, seed=0):
        image = floats_tensor((1, 3, 128, 128), rng=random.Random(seed))
        generator = np.random.RandomState(seed)
        inputs = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''image''': image,
'''generator''': generator,
'''num_inference_steps''': 3,
'''strength''': 0.7_5,
'''guidance_scale''': 7.5,
'''output_type''': '''numpy''',
}
return inputs
    def test_pipeline_default(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()
        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.69643, 0.58484, 0.50314, 0.58760, 0.55368, 0.59643, 0.51529, 0.41217, 0.49087])
        assert np.abs(image_slice - expected_slice).max() < 1e-1
    def test_pipeline_pndm(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = PNDMScheduler.from_config(pipe.scheduler.config, skip_prk_steps=True)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.61737, 0.54642, 0.53183, 0.54465, 0.52742, 0.60525, 0.49969, 0.40655, 0.48154])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
    def test_pipeline_lms(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)
        # warmup pass to apply optimizations
        _ = pipe(**self.get_dummy_inputs())
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.52761, 0.59977, 0.49033, 0.49619, 0.54282, 0.50311, 0.47600, 0.40918, 0.45203])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
    def test_pipeline_euler(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.52911, 0.60004, 0.49229, 0.49805, 0.54502, 0.50680, 0.47777, 0.41028, 0.45304])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
    def test_pipeline_euler_ancestral(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.52911, 0.60004, 0.49229, 0.49805, 0.54502, 0.50680, 0.47777, 0.41028, 0.45304])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
    def test_pipeline_dpm_multistep(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.65331, 0.58277, 0.48204, 0.56059, 0.53665, 0.56235, 0.50969, 0.40009, 0.46552])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionImg2ImgPipelineIntegrationTests(unittest.TestCase):
    @property
    def gpu_provider(self):
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def gpu_options(self):
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options
    def test_inference_default_pndm(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        init_image = init_image.resize((768, 512))
        # using the PNDM scheduler by default
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4", revision="onnx", safety_checker=None, feature_extractor=None, provider=self.gpu_provider, sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)
        prompt = "A fantasy landscape, trending on artstation"
        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt, image=init_image, strength=0.75, guidance_scale=7.5, num_inference_steps=10, generator=generator, output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]
        assert images.shape == (1, 512, 768, 3)
        expected_slice = np.array([0.4909, 0.5059, 0.5372, 0.4623, 0.4876, 0.5049, 0.4820, 0.4956, 0.5019])
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2
    def test_inference_k_lms(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        init_image = init_image.resize((768, 512))
        lms_scheduler = LMSDiscreteScheduler.from_pretrained(
            "runwayml/stable-diffusion-v1-5", subfolder="scheduler", revision="onnx"
        )
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5", revision="onnx", scheduler=lms_scheduler, safety_checker=None, feature_extractor=None, provider=self.gpu_provider, sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)
        prompt = "A fantasy landscape, trending on artstation"
        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt, image=init_image, strength=0.75, guidance_scale=7.5, num_inference_steps=20, generator=generator, output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]
        assert images.shape == (1, 512, 768, 3)
        expected_slice = np.array([0.8043, 0.926, 0.9581, 0.8119, 0.8954, 0.913, 0.7209, 0.7463, 0.7431])
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2 | 160 |
from __future__ import annotations
from collections.abc import Iterator
class Node:
    def __init__(self, value: int) -> None:
        self.value = value
        self.left: Node | None = None
        self.right: Node | None = None


class BinaryTree:
    def __init__(self, tree: Node) -> None:
        self.tree = tree

    def depth_first_search(self, node: Node | None) -> int:
        """Recursively sum the values of all nodes reachable from `node`."""
        if node is None:
            return 0
        return node.value + (
            self.depth_first_search(node.left) + self.depth_first_search(node.right)
        )

    def __iter__(self) -> Iterator[int]:
        yield self.depth_first_search(self.tree)
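
def example_tree_sum() -> int:
    # Example added for illustration: DFS-summing the tree 1 <- (2, 3)
    # yields 1 + 2 + 3 = 6.
    root = Node(1)
    root.left = Node(2)
    root.right = Node(3)
    return next(iter(BinaryTree(root)))
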
if __name__ == "__main__":
import doctest
doctest.testmod()
| 228 | 0 |
import json
import logging
import os
import re
import sys
from dataclasses import dataclass, field
from typing import Any, Dict, List, Optional, Union
import datasets
import numpy as np
import torch
import torchaudio
from packaging import version
from torch import nn
import transformers
from transformers import (
    HfArgumentParser,
    Trainer,
    TrainingArguments,
    Wav2Vec2CTCTokenizer,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2ForCTC,
    Wav2Vec2Processor,
    is_apex_available,
    set_seed,
)
from transformers.trainer_utils import get_last_checkpoint, is_main_process
if is_apex_available():
from apex import amp
if version.parse(version.parse(torch.__version__).base_version) >= version.parse("1.6"):
    _is_native_amp_available = True
from torch.cuda.amp import autocast
logger = logging.getLogger(__name__)
def list_field(default=None, metadata=None):
    """Helper for list-valued dataclass defaults (mutable defaults need a factory)."""
    return field(default_factory=lambda: default, metadata=metadata)
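# Note added for clarity: dataclasses reject mutable defaults like
# `field(default=[...])`, so list-valued arguments below go through
# `list_field`, which wraps the list in a `default_factory` instead.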
@dataclass
class ModelArguments:
    """Arguments pertaining to which model/config/tokenizer we are going to fine-tune from."""

    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    freeze_feature_extractor: Optional[bool] = field(
        default=True, metadata={"help": "Whether to freeze the feature extractor layers of the model."}
    )
    attention_dropout: Optional[float] = field(
        default=0.1, metadata={"help": "The dropout ratio for the attention probabilities."}
    )
    activation_dropout: Optional[float] = field(
        default=0.1, metadata={"help": "The dropout ratio for activations inside the fully connected layer."}
    )
    hidden_dropout: Optional[float] = field(
        default=0.1, metadata={
            "help": "The dropout probability for all fully connected layers in the embeddings, encoder, and pooler."
        },
    )
    feat_proj_dropout: Optional[float] = field(
        default=0.1, metadata={"help": "The dropout probability for all 1D convolutional layers in feature extractor."},
    )
    mask_time_prob: Optional[float] = field(
        default=0.05, metadata={
            "help": (
                "Probability of each feature vector along the time axis to be chosen as the start of the vector "
                "span to be masked. Approximately ``mask_time_prob * sequence_length // mask_time_length`` feature "
                "vectors will be masked along the time axis. This is only relevant if ``apply_spec_augment is True``."
            )
        },
    )
    layerdrop: Optional[float] = field(default=0.0, metadata={"help": "The LayerDrop probability."})
@dataclass
class DataTrainingArguments:
    """Arguments pertaining to what data we are going to input our model for training and eval."""

    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    train_split_name: Optional[str] = field(
        default="train+validation", metadata={
            "help": "The name of the training data set split to use (via the datasets library). Defaults to 'train'"
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached preprocessed datasets or not."}
    )
    preprocessing_num_workers: Optional[int] = field(
        default=None, metadata={"help": "The number of processes to use for the preprocessing."},
    )
    max_train_samples: Optional[int] = field(
        default=None, metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_val_samples: Optional[int] = field(
        default=None, metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of validation examples to this "
                "value if set."
            )
        },
    )
    chars_to_ignore: List[str] = list_field(
        default=[",", "?", ".", "!", "-", ";", ":", "\"\"", "%", "'", "\"", "�"],
        metadata={"help": "A list of characters to remove from the transcripts."},
    )
@dataclass
class DataCollatorCTCWithPadding:
    """Data collator that dynamically pads the received inputs and labels."""

    processor: Wav2Vec2Processor
    padding: Union[bool, str] = True
    max_length: Optional[int] = None
    max_length_labels: Optional[int] = None
    pad_to_multiple_of: Optional[int] = None
    pad_to_multiple_of_labels: Optional[int] = None

    def __call__(self, features: List[Dict[str, Union[List[int], torch.Tensor]]]) -> Dict[str, torch.Tensor]:
        # split inputs and labels since they have to be of different lengths and need
        # different padding methods
        input_features = [{"input_values": feature["input_values"]} for feature in features]
        label_features = [{"input_ids": feature["labels"]} for feature in features]
        batch = self.processor.pad(
            input_features, padding=self.padding, max_length=self.max_length, pad_to_multiple_of=self.pad_to_multiple_of, return_tensors="pt",
        )
        labels_batch = self.processor.pad(
            labels=label_features, padding=self.padding, max_length=self.max_length_labels, pad_to_multiple_of=self.pad_to_multiple_of_labels, return_tensors="pt",
        )
        # replace padding with -100 to ignore loss correctly
        labels = labels_batch["input_ids"].masked_fill(labels_batch.attention_mask.ne(1), -100)
        batch["labels"] = labels
        return batch
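# Note added for clarity: positions set to -100 above are ignored by the CTC
# loss, so padded label timesteps contribute nothing to training.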
class CTCTrainer(Trainer):
    def training_step(self, model: nn.Module, inputs: Dict[str, Union[torch.Tensor, Any]]) -> torch.Tensor:
        """Perform a training step on a batch of inputs."""
        model.train()
        inputs = self._prepare_inputs(inputs)
        if self.use_amp:
            with autocast():
                loss = self.compute_loss(model, inputs)
        else:
            loss = self.compute_loss(model, inputs)
        if self.args.n_gpu > 1:
            if model.module.config.ctc_loss_reduction == "mean":
                loss = loss.mean()
            elif model.module.config.ctc_loss_reduction == "sum":
                loss = loss.sum() / (inputs["labels"] >= 0).sum()
            else:
                raise ValueError(f"{model.config.ctc_loss_reduction} is not valid. Choose one of ['mean', 'sum']")
        if self.args.gradient_accumulation_steps > 1:
            loss = loss / self.args.gradient_accumulation_steps
        if self.use_amp:
            self.scaler.scale(loss).backward()
        elif self.use_apex:
            with amp.scale_loss(loss, self.optimizer) as scaled_loss:
                scaled_loss.backward()
        elif self.deepspeed:
            self.deepspeed.backward(loss)
        else:
            loss.backward()
        return loss.detach()
def main():
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()
# Detecting last checkpoint.
UpperCAmelCase = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
UpperCAmelCase = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F"""Output directory ({training_args.output_dir}) already exists and is not empty. """
'''Use --overwrite_output_dir to overcome.''' )
elif last_checkpoint is not None:
logger.info(
F"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
'''the `--output_dir` or add `--overwrite_output_dir` to train from scratch.''' )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , handlers=[logging.StreamHandler(sys.stdout )] , )
logger.setLevel(logging.INFO if is_main_process(training_args.local_rank ) else logging.WARN )
# Log on each process the small summary:
logger.warning(
F"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"""
+ F"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
logger.info('''Training/evaluation parameters %s''' , A__ )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets:
UpperCAmelCase = datasets.load_dataset(
'''common_voice''' , data_args.dataset_config_name , split=data_args.train_split_name )
UpperCAmelCase = datasets.load_dataset('''common_voice''' , data_args.dataset_config_name , split='''test''' )
# Create and save tokenizer
UpperCAmelCase = F"""[{''.join(data_args.chars_to_ignore )}]"""
def remove_special_characters(A__: Any ):
UpperCAmelCase = re.sub(A__ , '''''' , batch['''sentence'''] ).lower() + ''' '''
return batch
UpperCAmelCase = train_dataset.map(A__ , remove_columns=['''sentence'''] )
UpperCAmelCase = eval_dataset.map(A__ , remove_columns=['''sentence'''] )
def extract_all_chars(A__: Union[str, Any] ):
UpperCAmelCase = ''' '''.join(batch['''text'''] )
UpperCAmelCase = list(set(A__ ) )
return {"vocab": [vocab], "all_text": [all_text]}
UpperCAmelCase = train_dataset.map(
A__ , batched=A__ , batch_size=-1 , keep_in_memory=A__ , remove_columns=train_dataset.column_names , )
UpperCAmelCase = train_dataset.map(
A__ , batched=A__ , batch_size=-1 , keep_in_memory=A__ , remove_columns=eval_dataset.column_names , )
UpperCAmelCase = list(set(vocab_train['''vocab'''][0] ) | set(vocab_test['''vocab'''][0] ) )
UpperCAmelCase = {v: k for k, v in enumerate(A__ )}
UpperCAmelCase = vocab_dict[''' ''']
del vocab_dict[" "]
UpperCAmelCase = len(A__ )
UpperCAmelCase = len(A__ )
with open('''vocab.json''' , '''w''' ) as vocab_file:
json.dump(A__ , A__ )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
UpperCAmelCase = WavaVecaCTCTokenizer(
'''vocab.json''' , unk_token='''[UNK]''' , pad_token='''[PAD]''' , word_delimiter_token='''|''' , )
UpperCAmelCase = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=1_6000 , padding_value=0.0 , do_normalize=A__ , return_attention_mask=A__ )
UpperCAmelCase = WavaVecaProcessor(feature_extractor=A__ , tokenizer=A__ )
UpperCAmelCase = WavaVecaForCTC.from_pretrained(
model_args.model_name_or_path , cache_dir=model_args.cache_dir , activation_dropout=model_args.activation_dropout , attention_dropout=model_args.attention_dropout , hidden_dropout=model_args.hidden_dropout , feat_proj_dropout=model_args.feat_proj_dropout , mask_time_prob=model_args.mask_time_prob , gradient_checkpointing=training_args.gradient_checkpointing , layerdrop=model_args.layerdrop , ctc_loss_reduction='''mean''' , pad_token_id=processor.tokenizer.pad_token_id , vocab_size=len(processor.tokenizer ) , )
if data_args.max_train_samples is not None:
UpperCAmelCase = min(len(A__ ) , data_args.max_train_samples )
UpperCAmelCase = train_dataset.select(range(A__ ) )
if data_args.max_val_samples is not None:
UpperCAmelCase = eval_dataset.select(range(data_args.max_val_samples ) )
UpperCAmelCase = torchaudio.transforms.Resample(4_8000 , 1_6000 )
# Preprocessing the datasets.
# We need to read the aduio files as arrays and tokenize the targets.
def speech_file_to_array_fn(A__: str ):
UpperCAmelCase , UpperCAmelCase = torchaudio.load(batch['''path'''] )
UpperCAmelCase = resampler(A__ ).squeeze().numpy()
UpperCAmelCase = 1_6000
UpperCAmelCase = batch['''text''']
return batch
UpperCAmelCase = train_dataset.map(
A__ , remove_columns=train_dataset.column_names , num_proc=data_args.preprocessing_num_workers , )
UpperCAmelCase = eval_dataset.map(
A__ , remove_columns=eval_dataset.column_names , num_proc=data_args.preprocessing_num_workers , )
def prepare_dataset(A__: List[str] ):
# check that all files have the correct sampling rate
assert (
len(set(batch['''sampling_rate'''] ) ) == 1
), F"""Make sure all inputs have the same sampling rate of {processor.feature_extractor.sampling_rate}."""
UpperCAmelCase = processor(
audio=batch['''speech'''] , text=batch['''target_text'''] , sampling_rate=batch['''sampling_rate'''][0] )
batch.update(A__ )
return batch
UpperCAmelCase = train_dataset.map(
A__ , remove_columns=train_dataset.column_names , batch_size=training_args.per_device_train_batch_size , batched=A__ , num_proc=data_args.preprocessing_num_workers , )
UpperCAmelCase = eval_dataset.map(
A__ , remove_columns=eval_dataset.column_names , batch_size=training_args.per_device_train_batch_size , batched=A__ , num_proc=data_args.preprocessing_num_workers , )
# Metric
UpperCAmelCase = datasets.load_metric('''wer''' )
def compute_metrics(A__: Optional[Any] ):
UpperCAmelCase = pred.predictions
UpperCAmelCase = np.argmax(A__ , axis=-1 )
UpperCAmelCase = processor.tokenizer.pad_token_id
UpperCAmelCase = processor.batch_decode(A__ )
# we do not want to group tokens when computing the metrics
UpperCAmelCase = processor.batch_decode(pred.label_ids , group_tokens=A__ )
UpperCAmelCase = wer_metric.compute(predictions=A__ , references=A__ )
return {"wer": wer}
if model_args.freeze_feature_extractor:
model.freeze_feature_extractor()
# Data collator
UpperCAmelCase = DataCollatorCTCWithPadding(processor=A__ , padding=A__ )
# Initialize our Trainer
UpperCAmelCase = CTCTrainer(
model=A__ , data_collator=A__ , args=A__ , compute_metrics=A__ , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , tokenizer=processor.feature_extractor , )
# Training
if training_args.do_train:
if last_checkpoint is not None:
UpperCAmelCase = last_checkpoint
elif os.path.isdir(model_args.model_name_or_path ):
UpperCAmelCase = model_args.model_name_or_path
else:
UpperCAmelCase = None
# Save the feature_extractor and the tokenizer
if is_main_process(training_args.local_rank ):
processor.save_pretrained(training_args.output_dir )
UpperCAmelCase = trainer.train(resume_from_checkpoint=A__ )
trainer.save_model()
UpperCAmelCase = train_result.metrics
UpperCAmelCase = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(A__ )
)
UpperCAmelCase = min(A__ , len(A__ ) )
trainer.log_metrics('''train''' , A__ )
trainer.save_metrics('''train''' , A__ )
trainer.save_state()
# Evaluation
UpperCAmelCase = {}
if training_args.do_eval:
logger.info('''*** Evaluate ***''' )
UpperCAmelCase = trainer.evaluate()
UpperCAmelCase = data_args.max_val_samples if data_args.max_val_samples is not None else len(A__ )
UpperCAmelCase = min(A__ , len(A__ ) )
trainer.log_metrics('''eval''' , A__ )
trainer.save_metrics('''eval''' , A__ )
return results
if __name__ == "__main__":
main()
| 152 |
import argparse
import re
import requests
import torch
# git clone https://github.com/salesforce/BLIP.git
from models.blip import blip_decoder
from models.blip_itm import blip_itm
from models.blip_vqa import blip_vqa
from PIL import Image
from torchvision import transforms
from torchvision.transforms.functional import InterpolationMode
from transformers import (
BertTokenizer,
BlipConfig,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
)
def load_demo_image(image_size, device):
    img_url = "https://storage.googleapis.com/sfr-vision-language-research/BLIP/demo.jpg"
    raw_image = Image.open(requests.get(img_url, stream=True).raw).convert("RGB")
    transform = transforms.Compose(
        [
            transforms.Resize((image_size, image_size), interpolation=InterpolationMode.BICUBIC),
            transforms.ToTensor(),
            transforms.Normalize((0.48145466, 0.4578275, 0.40821073), (0.26862954, 0.26130258, 0.27577711)),
        ]
    )
    image = transform(raw_image).unsqueeze(0).to(device)
    return image
def rename_key(key):
    if "visual_encoder" in key:
        key = re.sub("visual_encoder*", "vision_model.encoder", key)
    if "blocks" in key:
        key = re.sub(r"blocks", "layers", key)
    if "attn" in key:
        key = re.sub(r"attn", "self_attn", key)
    if "norm1" in key:
        key = re.sub(r"norm1", "layer_norm1", key)
    if "norm2" in key:
        key = re.sub(r"norm2", "layer_norm2", key)
    if "encoder.norm" in key:
        key = re.sub(r"encoder.norm", "post_layernorm", key)
    if "encoder.patch_embed.proj" in key:
        key = re.sub(r"encoder.patch_embed.proj", "embeddings.patch_embedding", key)
    if "encoder.pos_embed" in key:
        key = re.sub(r"encoder.pos_embed", "embeddings.position_embedding", key)
    if "encoder.cls_token" in key:
        key = re.sub(r"encoder.cls_token", "embeddings.class_embedding", key)
    if "self_attn" in key:
        key = re.sub(r"self_attn.proj", "self_attn.projection", key)
    return key
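# Example mapping (added for illustration):
#   "visual_encoder.blocks.0.attn.qkv.weight"
#     -> "vision_model.encoder.layers.0.self_attn.qkv.weight"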
@torch.no_grad()
def convert_blip_checkpoint(pytorch_dump_folder_path, config_path=None):
    if config_path is not None:
        config = BlipConfig.from_pretrained(config_path)
    else:
        config = BlipConfig(projection_dim=512, text_config={}, vision_config={})
    hf_model = BlipForConditionalGeneration(config).eval()
    model_url = "https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_capfilt_large.pth"
    pt_model = blip_decoder(pretrained=model_url, image_size=384, vit="base")
    pt_model = pt_model.eval()
    modified_state_dict = pt_model.state_dict()
    for key in modified_state_dict.copy():
        value = modified_state_dict.pop(key)
        renamed_key = rename_key(key)
        modified_state_dict[renamed_key] = value
    hf_model.load_state_dict(modified_state_dict)
    image_size = 384
    image = load_demo_image(image_size=image_size, device="cpu")
    tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
    input_ids = tokenizer(["a picture of"]).input_ids
    out = hf_model.generate(image, input_ids)
    assert out[0].tolist() == [30522, 1037, 3861, 1997, 1037, 2450, 3564, 2006, 1996, 3509, 2007, 2014, 3899, 102]
    out = hf_model.generate(image)
    assert out[0].tolist() == [30522, 1037, 2450, 3564, 2006, 1996, 3509, 2007, 2014, 3899, 102]
    if pytorch_dump_folder_path is not None:
        hf_model.save_pretrained(pytorch_dump_folder_path)
    # model_url = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_vqa.pth'
    model_url = (
        "https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_vqa_capfilt_large.pth"
    )
    vqa_model = blip_vqa(pretrained=model_url, image_size=image_size, vit="base")
    vqa_model.eval()
    modified_state_dict = vqa_model.state_dict()
    for key in modified_state_dict.copy():
        value = modified_state_dict.pop(key)
        renamed_key = rename_key(key)
        modified_state_dict[renamed_key] = value
    hf_vqa_model = BlipForQuestionAnswering(config)
    hf_vqa_model.load_state_dict(modified_state_dict)
    question = ["How many dogs are in this image?"]
    question_input_ids = tokenizer(question, return_tensors="pt").input_ids
    answer = hf_vqa_model.generate(question_input_ids, image)
    print(tokenizer.decode(answer[0]))
    assert tokenizer.decode(answer[0]) == "[UNK] 1 [SEP]"
    if pytorch_dump_folder_path is not None:
        hf_vqa_model.save_pretrained(pytorch_dump_folder_path + "_vqa")
    model_url = "https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_retrieval_coco.pth"
    itm_model = blip_itm(pretrained=model_url, image_size=image_size, vit="base")
    itm_model.eval()
    modified_state_dict = itm_model.state_dict()
    for key in modified_state_dict.copy():
        value = modified_state_dict.pop(key)
        renamed_key = rename_key(key)
        modified_state_dict[renamed_key] = value
    hf_itm_model = BlipForImageTextRetrieval(config)
    question = ["A picture of a woman with a dog sitting in a beach"]
    question_input_ids = tokenizer(
        question, return_tensors="pt", padding="max_length", truncation=True, max_length=35,
    ).input_ids
    hf_itm_model.load_state_dict(modified_state_dict)
    hf_itm_model.eval()
    out_itm = hf_itm_model(question_input_ids, image, use_itm_head=True)
    out = hf_itm_model(question_input_ids, image, use_itm_head=False)
    assert out[0].item() == 0.21106874942779541
    assert torch.nn.functional.softmax(out_itm[0], dim=1)[:, 1].item() == 0.45698845386505127
    if pytorch_dump_folder_path is not None:
        hf_itm_model.save_pretrained(pytorch_dump_folder_path + "_itm")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    args = parser.parse_args()

    convert_blip_checkpoint(args.pytorch_dump_folder_path, args.config_path)
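# Usage sketch (the script file name and output paths are illustrative, not taken from
# the source; the two flags are the ones defined above):
#
#   python convert_blip_checkpoint.py \
#       --pytorch_dump_folder_path ./blip-converted \
#       --config_path ./blip_config.json        # optional; defaults to a fresh BlipConfig
#
# The captioning, VQA and image-text-matching checkpoints end up in
# ./blip-converted, ./blip-converted_vqa and ./blip-converted_itm respectively.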
| 152 | 1 |
'''simple docstring'''
# coding=utf-8
# Copyright 2020 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this script dumps information about the environment
import os
import sys
import transformers
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"  # silence TensorFlow's C++ log spam before it is imported
print('''Python version:''', sys.version)
print('''transformers version:''', transformers.__version__)
try:
import torch
print('''Torch version:''', torch.__version__)
print('''Cuda available:''', torch.cuda.is_available())
print('''Cuda version:''', torch.version.cuda)
print('''CuDNN version:''', torch.backends.cudnn.version())
print('''Number of GPUs available:''', torch.cuda.device_count())
print('''NCCL version:''', torch.cuda.nccl.version())
except ImportError:
print('''Torch version:''', None)
try:
import deepspeed
print('''DeepSpeed version:''', deepspeed.__version__)
except ImportError:
print('''DeepSpeed version:''', None)
try:
import tensorflow as tf
print('''TensorFlow version:''', tf.__version__)
print('''TF GPUs available:''', bool(tf.config.list_physical_devices('''GPU''')))
print('''Number of TF GPUs available:''', len(tf.config.list_physical_devices('''GPU''')))
except ImportError:
print('''TensorFlow version:''', None)
| 37 |
'''simple docstring'''
from sklearn.metrics import f1_score, matthews_corrcoef
import datasets
from .record_evaluation import evaluate as evaluate_record
_CITATION = '''\
@article{wang2019superglue,
title={SuperGLUE: A Stickier Benchmark for General-Purpose Language Understanding Systems},
author={Wang, Alex and Pruksachatkun, Yada and Nangia, Nikita and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R},
journal={arXiv preprint arXiv:1905.00537},
year={2019}
}
'''
_DESCRIPTION = '''\
SuperGLUE (https://super.gluebenchmark.com/) is a new benchmark styled after
GLUE with a new set of more difficult language understanding tasks, improved
resources, and a new public leaderboard.
'''
_KWARGS_DESCRIPTION = '''
Compute SuperGLUE evaluation metric associated to each SuperGLUE dataset.
Args:
predictions: list of predictions to score. Depending on the SuperGlUE subset:
- for \'record\': list of question-answer dictionaries with the following keys:
- \'idx\': index of the question as specified by the dataset
- \'prediction_text\': the predicted answer text
- for \'multirc\': list of question-answer dictionaries with the following keys:
- \'idx\': index of the question-answer pair as specified by the dataset
- \'prediction\': the predicted answer label
- otherwise: list of predicted labels
references: list of reference labels. Depending on the SuperGLUE subset:
- for \'record\': list of question-answers dictionaries with the following keys:
- \'idx\': index of the question as specified by the dataset
- \'answers\': list of possible answers
- otherwise: list of reference labels
Returns: depending on the SuperGLUE subset:
- for \'record\':
- \'exact_match\': Exact match between answer and gold answer
- \'f1\': F1 score
- for \'multirc\':
- \'exact_match\': Exact match between answer and gold answer
- \'f1_m\': Per-question macro-F1 score
- \'f1_a\': Average F1 score over all answers
- for \'axb\':
\'matthews_correlation\': Matthew Correlation
- for \'cb\':
- \'accuracy\': Accuracy
- \'f1\': F1 score
- for all others:
- \'accuracy\': Accuracy
Examples:
>>> super_glue_metric = datasets.load_metric(\'super_glue\', \'copa\') # any of ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]
>>> predictions = [0, 1]
>>> references = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'accuracy\': 1.0}
>>> super_glue_metric = datasets.load_metric(\'super_glue\', \'cb\')
>>> predictions = [0, 1]
>>> references = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'accuracy\': 1.0, \'f1\': 1.0}
>>> super_glue_metric = datasets.load_metric(\'super_glue\', \'record\')
>>> predictions = [{\'idx\': {\'passage\': 0, \'query\': 0}, \'prediction_text\': \'answer\'}]
>>> references = [{\'idx\': {\'passage\': 0, \'query\': 0}, \'answers\': [\'answer\', \'another_answer\']}]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'exact_match\': 1.0, \'f1\': 1.0}
>>> super_glue_metric = datasets.load_metric(\'super_glue\', \'multirc\')
>>> predictions = [{\'idx\': {\'answer\': 0, \'paragraph\': 0, \'question\': 0}, \'prediction\': 0}, {\'idx\': {\'answer\': 1, \'paragraph\': 2, \'question\': 3}, \'prediction\': 1}]
>>> references = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'exact_match\': 1.0, \'f1_m\': 1.0, \'f1_a\': 1.0}
>>> super_glue_metric = datasets.load_metric(\'super_glue\', \'axb\')
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'matthews_correlation\': 1.0}
'''
def simple_accuracy(preds, labels):
    return float((preds == labels).mean())
def acc_and_f1(preds, labels, f1_avg="binary"):
    acc = simple_accuracy(preds, labels)
    f1 = float(f1_score(y_true=labels, y_pred=preds, average=f1_avg))
    return {
        "accuracy": acc,
        "f1": f1,
    }
def evaluate_multirc(ids_preds, labels):
    # group all (prediction, label) pairs belonging to the same question
    question_map = {}
    for id_pred, label in zip(ids_preds, labels):
        question_id = f"{id_pred['idx']['paragraph']}-{id_pred['idx']['question']}"
        pred = id_pred["prediction"]
        if question_id in question_map:
            question_map[question_id].append((pred, label))
        else:
            question_map[question_id] = [(pred, label)]
    f1s, ems = [], []
    for question, preds_labels in question_map.items():
        question_preds, question_labels = zip(*preds_labels)
        f1 = f1_score(y_true=question_labels, y_pred=question_preds, average="macro")
        f1s.append(f1)
        em = int(sum(pred == label for pred, label in preds_labels) == len(preds_labels))
        ems.append(em)
    f1_m = float(sum(f1s) / len(f1s))
    em = sum(ems) / len(ems)
    f1_a = float(f1_score(y_true=labels, y_pred=[id_pred["prediction"] for id_pred in ids_preds]))
    return {"exact_match": em, "f1_m": f1_m, "f1_a": f1_a}
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class SuperGlue(datasets.Metric):
'''simple docstring'''
    def _info(self):
if self.config_name not in [
"boolq",
"cb",
"copa",
"multirc",
"record",
"rte",
"wic",
"wsc",
"wsc.fixed",
"axb",
"axg",
]:
raise KeyError(
"""You should supply a configuration name selected in """
"""[\"boolq\", \"cb\", \"copa\", \"multirc\", \"record\", \"rte\", \"wic\", \"wsc\", \"wsc.fixed\", \"axb\", \"axg\",]""" )
return datasets.MetricInfo(
description=_DESCRIPTION ,citation=_CITATION ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features(self._get_feature_types() ) ,codebase_urls=[] ,reference_urls=[] ,format="""numpy""" if not self.config_name == """record""" and not self.config_name == """multirc""" else None ,)
    def _get_feature_types(self):
if self.config_name == "record":
return {
"predictions": {
"idx": {
"passage": datasets.Value("""int64""" ),
"query": datasets.Value("""int64""" ),
},
"prediction_text": datasets.Value("""string""" ),
},
"references": {
"idx": {
"passage": datasets.Value("""int64""" ),
"query": datasets.Value("""int64""" ),
},
"answers": datasets.Sequence(datasets.Value("""string""" ) ),
},
}
elif self.config_name == "multirc":
return {
"predictions": {
"idx": {
"answer": datasets.Value("""int64""" ),
"paragraph": datasets.Value("""int64""" ),
"question": datasets.Value("""int64""" ),
},
"prediction": datasets.Value("""int64""" ),
},
"references": datasets.Value("""int64""" ),
}
else:
return {
"predictions": datasets.Value("""int64""" ),
"references": datasets.Value("""int64""" ),
}
    def _compute(self, predictions, references):
        if self.config_name == "axb":
            return {"matthews_correlation": matthews_corrcoef(references, predictions)}
        elif self.config_name == "cb":
            return acc_and_f1(predictions, references, f1_avg="macro")
        elif self.config_name == "record":
            dataset = [
                {
                    "qas": [
                        {"id": ref["idx"]["query"], "answers": [{"text": ans} for ans in ref["answers"]]}
                        for ref in references
                    ]
                }
            ]
            predictions = {pred["idx"]["query"]: pred["prediction_text"] for pred in predictions}
            return evaluate_record(dataset, predictions)[0]
        elif self.config_name == "multirc":
            return evaluate_multirc(predictions, references)
        elif self.config_name in ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]:
            return {"accuracy": simple_accuracy(predictions, references)}
        else:
            raise KeyError(
                "You should supply a configuration name selected in "
                '["boolq", "cb", "copa", "multirc", "record", "rte", "wic", "wsc", "wsc.fixed", "axb", "axg",]'
            )
| 37 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'configuration_distilbert': [
'DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'DistilBertConfig',
'DistilBertOnnxConfig',
],
'tokenization_distilbert': ['DistilBertTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_distilbert_fast"] = ["DistilBertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_distilbert"] = [
'DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'DistilBertForMaskedLM',
'DistilBertForMultipleChoice',
'DistilBertForQuestionAnswering',
'DistilBertForSequenceClassification',
'DistilBertForTokenClassification',
'DistilBertModel',
'DistilBertPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_distilbert"] = [
'TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFDistilBertForMaskedLM',
'TFDistilBertForMultipleChoice',
'TFDistilBertForQuestionAnswering',
'TFDistilBertForSequenceClassification',
'TFDistilBertForTokenClassification',
'TFDistilBertMainLayer',
'TFDistilBertModel',
'TFDistilBertPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_distilbert"] = [
'FlaxDistilBertForMaskedLM',
'FlaxDistilBertForMultipleChoice',
'FlaxDistilBertForQuestionAnswering',
'FlaxDistilBertForSequenceClassification',
'FlaxDistilBertForTokenClassification',
'FlaxDistilBertModel',
'FlaxDistilBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_distilbert import (
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DistilBertConfig,
DistilBertOnnxConfig,
)
from .tokenization_distilbert import DistilBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_distilbert_fast import DistilBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_distilbert import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
DistilBertPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_distilbert import (
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDistilBertForMaskedLM,
TFDistilBertForMultipleChoice,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertMainLayer,
TFDistilBertModel,
TFDistilBertPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_distilbert import (
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertModel,
FlaxDistilBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
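# Minimal sketch of what the _LazyModule indirection buys (the module path is an
# assumption, e.g. transformers/models/distilbert/__init__.py): heavy backends are
# only imported when an attribute is first touched.
#
#   from transformers.models.distilbert import DistilBertConfig   # cheap: config only
#   from transformers.models.distilbert import DistilBertModel    # now the torch path loads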
| 361 |
from dataclasses import dataclass
from enum import Enum
from typing import List, Optional, Union
import numpy as np
import PIL
from PIL import Image
from ...utils import BaseOutput, is_torch_available, is_transformers_available
@dataclass
class SemanticStableDiffusionPipelineOutput(BaseOutput):
    """Output of the Semantic Stable Diffusion pipeline: generated images plus per-image NSFW flags."""

    images: Union[List[PIL.Image.Image], np.ndarray]
    nsfw_content_detected: Optional[List[bool]]
if is_transformers_available() and is_torch_available():
from .pipeline_semantic_stable_diffusion import SemanticStableDiffusionPipeline
| 258 | 0 |
import numpy as np
from sklearn.datasets import fetch_california_housing
from sklearn.metrics import mean_absolute_error, mean_squared_error
from sklearn.model_selection import train_test_split
from xgboost import XGBRegressor
def data_handling(data: dict) -> tuple:
    # Split the sklearn "bunch" into (features, target)
    return (data["data"], data["target"])


def xgboost(features: np.ndarray, target: np.ndarray, test_features: np.ndarray) -> np.ndarray:
    xgb = XGBRegressor(verbosity=0, random_state=42)
    xgb.fit(features, target)
    # Predict target for test data
    predictions = xgb.predict(test_features)
    predictions = predictions.reshape(len(predictions), 1)
    return predictions


def main() -> None:
    # Load California house price dataset
    california = fetch_california_housing()
    data, target = data_handling(california)
    x_train, x_test, y_train, y_test = train_test_split(
        data, target, test_size=0.25, random_state=1
    )
    predictions = xgboost(x_train, y_train, x_test)
    # Error printing
    print(f"Mean Absolute Error : {mean_absolute_error(y_test, predictions)}")
    print(f"Mean Square Error : {mean_squared_error(y_test, predictions)}")
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
main()
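# A natural next step (hyperparameters are illustrative, not tuned for this dataset):
#
#   xgb = XGBRegressor(n_estimators=500, learning_rate=0.05, max_depth=6, random_state=42)
#
# would usually improve on the all-defaults regressor above at the cost of a longer fit.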
| 283 |
import torch
from transformers import PreTrainedModel, XLMRobertaConfig, XLMRobertaModel
class MCLIPConfig(XLMRobertaConfig):
    model_type = "M-CLIP"

    def __init__(self, transformerDimSize=1024, imageDimSize=768, **kwargs):
        self.transformerDimensions = transformerDimSize
        self.numDims = imageDimSize
        super().__init__(**kwargs)


class MultilingualCLIP(PreTrainedModel):
    config_class = MCLIPConfig

    def __init__(self, config, *args, **kwargs):
        super().__init__(config, *args, **kwargs)
        self.transformer = XLMRobertaModel(config)
        self.LinearTransformation = torch.nn.Linear(
            in_features=config.transformerDimensions, out_features=config.numDims
        )

    def forward(self, txt, att):
        embs = self.transformer(input_ids=txt, attention_mask=att)[0]
        # mask-aware mean pooling over the token dimension
        embs = (embs * att.unsqueeze(2)).sum(dim=1) / att.sum(dim=1)[:, None]
        return self.LinearTransformation(embs), embs
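# Usage sketch (`model` is assumed to be a MultilingualCLIP loaded from a compatible
# checkpoint; the tokenizer choice is an assumption — any XLM-R tokenizer matches the
# expected input shapes):
#
#   from transformers import AutoTokenizer
#   tokenizer = AutoTokenizer.from_pretrained("xlm-roberta-large")
#   batch = tokenizer(["une photo d'un chien"], return_tensors="pt", padding=True)
#   projected, pooled = model(batch["input_ids"], batch["attention_mask"])
#
# forward() mean-pools the token embeddings under the attention mask, then projects
# them into the CLIP image-embedding space with the learned linear layer.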
| 283 | 1 |
import pytest
from datasets.utils.sharding import _distribute_shards, _number_of_shards_in_gen_kwargs, _split_gen_kwargs
@pytest.mark.parametrize(
"kwargs, expected" , [
({"num_shards": 0, "max_num_jobs": 1}, []),
({"num_shards": 1_0, "max_num_jobs": 1}, [range(1_0 )]),
({"num_shards": 1_0, "max_num_jobs": 1_0}, [range(__UpperCamelCase , i + 1 ) for i in range(1_0 )]),
({"num_shards": 1, "max_num_jobs": 1_0}, [range(1 )]),
({"num_shards": 1_0, "max_num_jobs": 3}, [range(0 , 4 ), range(4 , 7 ), range(7 , 1_0 )]),
({"num_shards": 3, "max_num_jobs": 1_0}, [range(0 , 1 ), range(1 , 2 ), range(2 , 3 )]),
] , )
def test_distribute_shards(kwargs, expected):
    out = _distribute_shards(**kwargs)
    assert out == expected
@pytest.mark.parametrize(
"gen_kwargs, max_num_jobs, expected" , [
({"foo": 0}, 1_0, [{"foo": 0}]),
({"shards": [0, 1, 2, 3]}, 1, [{"shards": [0, 1, 2, 3]}]),
({"shards": [0, 1, 2, 3]}, 4, [{"shards": [0]}, {"shards": [1]}, {"shards": [2]}, {"shards": [3]}]),
({"shards": [0, 1]}, 4, [{"shards": [0]}, {"shards": [1]}]),
({"shards": [0, 1, 2, 3]}, 2, [{"shards": [0, 1]}, {"shards": [2, 3]}]),
] , )
def test_split_gen_kwargs(gen_kwargs, max_num_jobs, expected):
    out = _split_gen_kwargs(gen_kwargs, max_num_jobs)
    assert out == expected
@pytest.mark.parametrize(
"gen_kwargs, expected" , [
({"foo": 0}, 1),
({"shards": [0]}, 1),
({"shards": [0, 1, 2, 3]}, 4),
({"shards": [0, 1, 2, 3], "foo": 0}, 4),
({"shards": [0, 1, 2, 3], "other": (0, 1)}, 4),
({"shards": [0, 1, 2, 3], "shards2": [0, 1]}, RuntimeError),
] , )
def test_number_of_shards_in_gen_kwargs(gen_kwargs, expected):
    if expected is RuntimeError:
        with pytest.raises(expected):
            _number_of_shards_in_gen_kwargs(gen_kwargs)
    else:
        out = _number_of_shards_in_gen_kwargs(gen_kwargs)
        assert out == expected
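# The helper under test at a glance (values mirror the parametrized cases above):
#
#   >>> _distribute_shards(num_shards=10, max_num_jobs=3)
#   [range(0, 4), range(4, 7), range(7, 10)]
#
# i.e. shards are split into contiguous, near-equal ranges, never more ranges than shards.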
| 305 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {"configuration_swin": ["SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP", "SwinConfig", "SwinOnnxConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_swin"] = [
"SWIN_PRETRAINED_MODEL_ARCHIVE_LIST",
"SwinForImageClassification",
"SwinForMaskedImageModeling",
"SwinModel",
"SwinPreTrainedModel",
"SwinBackbone",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_swin"] = [
"TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFSwinForImageClassification",
"TFSwinForMaskedImageModeling",
"TFSwinModel",
"TFSwinPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_swin import SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP, SwinConfig, SwinOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swin import (
SWIN_PRETRAINED_MODEL_ARCHIVE_LIST,
SwinBackbone,
SwinForImageClassification,
SwinForMaskedImageModeling,
SwinModel,
SwinPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_swin import (
TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST,
TFSwinForImageClassification,
TFSwinForMaskedImageModeling,
TFSwinModel,
TFSwinPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 305 | 1 |
'''simple docstring'''
import pytest
from datasets.splits import SplitDict, SplitInfo
from datasets.utils.py_utils import asdict
@pytest.mark.parametrize(
"split_dict" , [
SplitDict(),
SplitDict({"train": SplitInfo(name="train" , num_bytes=1337 , num_examples=42 , dataset_name="my_dataset" )} ),
SplitDict({"train": SplitInfo(name="train" , num_bytes=1337 , num_examples=42 )} ),
SplitDict({"train": SplitInfo()} ),
] , )
def UpperCAmelCase_ ( __lowercase : SplitDict ) -> int:
'''simple docstring'''
_UpperCAmelCase = split_dict._to_yaml_list()
assert len(__lowercase ) == len(__lowercase )
_UpperCAmelCase = SplitDict._from_yaml_list(__lowercase )
for split_name, split_info in split_dict.items():
# dataset_name field is deprecated, and is therefore not part of the YAML dump
_UpperCAmelCase = None
# the split name of split_dict takes over the name of the split info object
_UpperCAmelCase = split_name
assert split_dict == reloaded
@pytest.mark.parametrize(
"split_info" , [SplitInfo(), SplitInfo(dataset_name=__lowercase ), SplitInfo(dataset_name="my_dataset" )] )
def UpperCAmelCase_ ( __lowercase : List[Any] ) -> Dict:
'''simple docstring'''
_UpperCAmelCase = asdict(SplitDict({"train": split_info} ) )
assert "dataset_name" in split_dict_asdict["train"]
assert split_dict_asdict["train"]["dataset_name"] == split_info.dataset_name
| 22 |
'''simple docstring'''
import math
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import SchedulerMixin, SchedulerOutput
class IPNDMScheduler(SchedulerMixin, ConfigMixin):
    """A fourth-order Improved Pseudo Numerical Method for Diffusion Models (iPNDM) scheduler."""

    order = 1

    @register_to_config
    def __init__(self, num_train_timesteps: int = 1000, trained_betas: Optional[Union[np.ndarray, List[float]]] = None):
        # set `betas`, `alphas`, `timesteps`
        self.set_timesteps(num_train_timesteps)

        # standard deviation of the initial noise distribution
        self.init_noise_sigma = 1.0

        # For now we only support F-PNDM, i.e. the runge-kutta method
        # For more information on the algorithm please take a look at the paper: https://arxiv.org/pdf/2202.09778.pdf
        # mainly at formula (9), (12), (13) and the Algorithm 2.
        self.pndm_order = 4

        # running values
        self.ets = []

    def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None):
        self.num_inference_steps = num_inference_steps
        steps = torch.linspace(1, 0, num_inference_steps + 1)[:-1]
        steps = torch.cat([steps, torch.tensor([0.0])])

        if self.config.trained_betas is not None:
            self.betas = torch.tensor(self.config.trained_betas, dtype=torch.float32)
        else:
            self.betas = torch.sin(steps * math.pi / 2) ** 2
        self.alphas = (1.0 - self.betas**2) ** 0.5

        timesteps = (torch.atan2(self.betas, self.alphas) / math.pi * 2)[:-1]
        self.timesteps = timesteps.to(device)

        self.ets = []

    def step(self, model_output: torch.FloatTensor, timestep: int, sample: torch.FloatTensor, return_dict: bool = True):
        if self.num_inference_steps is None:
            raise ValueError(
                "Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler"
            )

        timestep_index = (self.timesteps == timestep).nonzero().item()
        prev_timestep_index = timestep_index + 1

        ets = sample * self.betas[timestep_index] + model_output * self.alphas[timestep_index]
        self.ets.append(ets)

        # linear multistep coefficients (Adams–Bashforth-style warm-up over the first steps)
        if len(self.ets) == 1:
            ets = self.ets[-1]
        elif len(self.ets) == 2:
            ets = (3 * self.ets[-1] - self.ets[-2]) / 2
        elif len(self.ets) == 3:
            ets = (23 * self.ets[-1] - 16 * self.ets[-2] + 5 * self.ets[-3]) / 12
        else:
            ets = (1 / 24) * (55 * self.ets[-1] - 59 * self.ets[-2] + 37 * self.ets[-3] - 9 * self.ets[-4])

        prev_sample = self._get_prev_sample(sample, timestep_index, prev_timestep_index, ets)

        if not return_dict:
            return (prev_sample,)

        return SchedulerOutput(prev_sample=prev_sample)

    def scale_model_input(self, sample: torch.FloatTensor, *args, **kwargs) -> torch.FloatTensor:
        return sample

    def _get_prev_sample(self, sample, timestep_index, prev_timestep_index, ets):
        alpha = self.alphas[timestep_index]
        sigma = self.betas[timestep_index]

        next_alpha = self.alphas[prev_timestep_index]
        next_sigma = self.betas[prev_timestep_index]

        pred = (sample - sigma * ets) / max(alpha, 1e-8)
        prev_sample = next_alpha * pred + ets * next_sigma

        return prev_sample

    def __len__(self):
        return self.config.num_train_timesteps
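# Denoising-loop sketch (`model` is a hypothetical noise-prediction network; the shapes
# are illustrative):
#
#   scheduler = IPNDMScheduler(num_train_timesteps=1000)
#   scheduler.set_timesteps(50)
#   sample = torch.randn(1, 3, 64, 64) * scheduler.init_noise_sigma
#   for t in scheduler.timesteps:
#       residual = model(sample, t)
#       sample = scheduler.step(residual, t, sample).prev_sample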
| 31 | 0 |
'''simple docstring'''
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .embeddings import GaussianFourierProjection, TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
from .unet_1d_blocks import get_down_block, get_mid_block, get_out_block, get_up_block
@dataclass
class UNet1DOutput(BaseOutput):
    """Output of `UNet1DModel`: the sample from the last layer of the model."""

    sample: torch.FloatTensor


class UNet1DModel(ModelMixin, ConfigMixin):
    """A 1D UNet that takes a noisy sample and a timestep and returns a sample-shaped output."""

    @register_to_config
    def __init__(
        self,
        sample_size: int = 65_536,
        sample_rate: Optional[int] = None,
        in_channels: int = 2,
        out_channels: int = 2,
        extra_in_channels: int = 0,
        time_embedding_type: str = "fourier",
        flip_sin_to_cos: bool = True,
        use_timestep_embedding: bool = False,
        freq_shift: float = 0.0,
        down_block_types: Tuple[str] = ("DownBlock1DNoSkip", "DownBlock1D", "AttnDownBlock1D"),
        up_block_types: Tuple[str] = ("AttnUpBlock1D", "UpBlock1D", "UpBlock1DNoSkip"),
        mid_block_type: Tuple[str] = "UNetMidBlock1D",
        out_block_type: str = None,
        block_out_channels: Tuple[int] = (32, 32, 64),
        act_fn: str = None,
        norm_num_groups: int = 8,
        layers_per_block: int = 1,
        downsample_each_block: bool = False,
    ):
        super().__init__()
        self.sample_size = sample_size

        # time
        if time_embedding_type == "fourier":
            self.time_proj = GaussianFourierProjection(
                embedding_size=8, set_W_to_weight=False, log=False, flip_sin_to_cos=flip_sin_to_cos
            )
            timestep_input_dim = 2 * block_out_channels[0]
        elif time_embedding_type == "positional":
            self.time_proj = Timesteps(
                block_out_channels[0], flip_sin_to_cos=flip_sin_to_cos, downscale_freq_shift=freq_shift
            )
            timestep_input_dim = block_out_channels[0]

        if use_timestep_embedding:
            time_embed_dim = block_out_channels[0] * 4
            self.time_mlp = TimestepEmbedding(
                in_channels=timestep_input_dim, time_embed_dim=time_embed_dim, act_fn=act_fn, out_dim=block_out_channels[0]
            )

        self.down_blocks = nn.ModuleList([])
        self.mid_block = None
        self.up_blocks = nn.ModuleList([])
        self.out_block = None

        # down
        output_channel = in_channels
        for i, down_block_type in enumerate(down_block_types):
            input_channel = output_channel
            output_channel = block_out_channels[i]

            if i == 0:
                input_channel += extra_in_channels

            is_final_block = i == len(block_out_channels) - 1

            down_block = get_down_block(
                down_block_type, num_layers=layers_per_block, in_channels=input_channel, out_channels=output_channel, temb_channels=block_out_channels[0], add_downsample=not is_final_block or downsample_each_block,
            )
            self.down_blocks.append(down_block)

        # mid
        self.mid_block = get_mid_block(
            mid_block_type, in_channels=block_out_channels[-1], mid_channels=block_out_channels[-1], out_channels=block_out_channels[-1], embed_dim=block_out_channels[0], num_layers=layers_per_block, add_downsample=downsample_each_block,
        )

        # up
        reversed_block_out_channels = list(reversed(block_out_channels))
        output_channel = reversed_block_out_channels[0]
        if out_block_type is None:
            final_upsample_channels = out_channels
        else:
            final_upsample_channels = block_out_channels[0]

        for i, up_block_type in enumerate(up_block_types):
            prev_output_channel = output_channel
            output_channel = (
                reversed_block_out_channels[i + 1] if i < len(up_block_types) - 1 else final_upsample_channels
            )

            is_final_block = i == len(block_out_channels) - 1

            up_block = get_up_block(
                up_block_type, num_layers=layers_per_block, in_channels=prev_output_channel, out_channels=output_channel, temb_channels=block_out_channels[0], add_upsample=not is_final_block,
            )
            self.up_blocks.append(up_block)
            prev_output_channel = output_channel

        # out
        num_groups_out = norm_num_groups if norm_num_groups is not None else min(block_out_channels[0] // 4, 32)
        self.out_block = get_out_block(
            out_block_type=out_block_type, num_groups_out=num_groups_out, embed_dim=block_out_channels[0], out_channels=out_channels, act_fn=act_fn, fc_dim=block_out_channels[-1] // 4,
        )

    def forward(self, sample: torch.FloatTensor, timestep: Union[torch.Tensor, float, int], return_dict: bool = True):
        # 1. time
        timesteps = timestep
        if not torch.is_tensor(timesteps):
            timesteps = torch.tensor([timesteps], dtype=torch.long, device=sample.device)
        elif torch.is_tensor(timesteps) and len(timesteps.shape) == 0:
            timesteps = timesteps[None].to(sample.device)

        timestep_embed = self.time_proj(timesteps)
        if self.config.use_timestep_embedding:
            timestep_embed = self.time_mlp(timestep_embed)
        else:
            timestep_embed = timestep_embed[..., None]
            timestep_embed = timestep_embed.repeat([1, 1, sample.shape[2]]).to(sample.dtype)
            timestep_embed = timestep_embed.broadcast_to((sample.shape[:1] + timestep_embed.shape[1:]))

        # 2. down
        down_block_res_samples = ()
        for downsample_block in self.down_blocks:
            sample, res_samples = downsample_block(hidden_states=sample, temb=timestep_embed)
            down_block_res_samples += res_samples

        # 3. mid
        if self.mid_block:
            sample = self.mid_block(sample, timestep_embed)

        # 4. up
        for i, upsample_block in enumerate(self.up_blocks):
            res_samples = down_block_res_samples[-1:]
            down_block_res_samples = down_block_res_samples[:-1]
            sample = upsample_block(sample, res_hidden_states_tuple=res_samples, temb=timestep_embed)

        # 5. post-process
        if self.out_block:
            sample = self.out_block(sample, timestep_embed)

        if not return_dict:
            return (sample,)

        return UNet1DOutput(sample=sample)
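# Shape sketch (sizes are illustrative): the 1D UNet maps (batch, channels, length) to
# (batch, channels, length), conditioned on a timestep.
#
#   unet = UNet1DModel(in_channels=2, out_channels=2, block_out_channels=(32, 32, 64))
#   x = torch.randn(1, 2, 65536)
#   out = unet(x, timestep=10).sample   # same shape as x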
| 37 |
'''simple docstring'''
def decimal_to_fraction(decimal):
    """Return (numerator, denominator) for the given decimal (numeric strings accepted)."""
    try:
        decimal = float(decimal)
    except ValueError:
        raise ValueError("Please enter a valid number")
    fractional_part = decimal - int(decimal)
    if fractional_part == 0:
        return int(decimal), 1
    else:
        number_of_frac_digits = len(str(decimal).split(".")[1])
        numerator = int(decimal * (10**number_of_frac_digits))
        denominator = 10**number_of_frac_digits
        # reduce the fraction: find gcd(numerator, denominator) with Euclid's algorithm
        divisor, dividend = denominator, numerator
        while True:
            remainder = dividend % divisor
            if remainder == 0:
                break
            dividend, divisor = divisor, remainder
        numerator, denominator = numerator / divisor, denominator / divisor
        return int(numerator), int(denominator)
if __name__ == "__main__":
print(f"{decimal_to_fraction(2) = }")
print(f"{decimal_to_fraction(89.0) = }")
print(f"{decimal_to_fraction('67') = }")
print(f"{decimal_to_fraction('45.0') = }")
print(f"{decimal_to_fraction(1.5) = }")
print(f"{decimal_to_fraction('6.25') = }")
print(f"{decimal_to_fraction('78td') = }")
| 37 | 1 |
'''simple docstring'''
import inspect
import os
import torch
from transformers import AutoModel
from transformers.testing_utils import mockenv_context
from transformers.trainer_utils import set_seed
import accelerate
from accelerate.accelerator import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils.testing import (
AccelerateTestCase,
TempDirTestCase,
execute_subprocess_async,
require_cuda,
require_fsdp,
require_multi_gpu,
slow,
)
from accelerate.utils.constants import (
FSDP_AUTO_WRAP_POLICY,
FSDP_BACKWARD_PREFETCH,
FSDP_SHARDING_STRATEGY,
FSDP_STATE_DICT_TYPE,
)
from accelerate.utils.dataclasses import FullyShardedDataParallelPlugin
from accelerate.utils.other import patch_environment
set_seed(4_2)
a_ = 'bert-base-cased'
a_ = 'fp16'
a_ = 'bf16'
a_ = [FPaa, BFaa]
@require_fsdp
@require_cuda
class __SCREAMING_SNAKE_CASE ( lowerCamelCase ):
def __magic_name__ ( self : List[Any] ) -> Dict:
super().setUp()
SCREAMING_SNAKE_CASE__ : List[Any] =dict(
ACCELERATE_USE_FSDP='''true''' , MASTER_ADDR='''localhost''' , MASTER_PORT='''10999''' , RANK='''0''' , LOCAL_RANK='''0''' , WORLD_SIZE='''1''' , )
def __magic_name__ ( self : List[str] ) -> Optional[int]:
from torch.distributed.fsdp.fully_sharded_data_parallel import ShardingStrategy
for i, strategy in enumerate(__lowercase ):
SCREAMING_SNAKE_CASE__ : Optional[int] =self.dist_env.copy()
SCREAMING_SNAKE_CASE__ : List[Any] =F"{i + 1}"
SCREAMING_SNAKE_CASE__ : Optional[int] =strategy
with mockenv_context(**__lowercase ):
SCREAMING_SNAKE_CASE__ : Dict =FullyShardedDataParallelPlugin()
self.assertEqual(fsdp_plugin.sharding_strategy , ShardingStrategy(i + 1 ) )
def __magic_name__ ( self : List[Any] ) -> str:
from torch.distributed.fsdp.fully_sharded_data_parallel import BackwardPrefetch
for i, prefetch_policy in enumerate(__lowercase ):
SCREAMING_SNAKE_CASE__ : Any =self.dist_env.copy()
SCREAMING_SNAKE_CASE__ : str =prefetch_policy
with mockenv_context(**__lowercase ):
SCREAMING_SNAKE_CASE__ : Tuple =FullyShardedDataParallelPlugin()
if prefetch_policy == "NO_PREFETCH":
self.assertIsNone(fsdp_plugin.backward_prefetch )
else:
self.assertEqual(fsdp_plugin.backward_prefetch , BackwardPrefetch(i + 1 ) )
def __magic_name__ ( self : List[Any] ) -> List[str]:
from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType
for i, state_dict_type in enumerate(__lowercase ):
SCREAMING_SNAKE_CASE__ : Tuple =self.dist_env.copy()
SCREAMING_SNAKE_CASE__ : Optional[int] =state_dict_type
with mockenv_context(**__lowercase ):
SCREAMING_SNAKE_CASE__ : int =FullyShardedDataParallelPlugin()
self.assertEqual(fsdp_plugin.state_dict_type , StateDictType(i + 1 ) )
if state_dict_type == "FULL_STATE_DICT":
self.assertTrue(fsdp_plugin.state_dict_config.offload_to_cpu )
self.assertTrue(fsdp_plugin.state_dict_config.ranka_only )
def __magic_name__ ( self : str ) -> Tuple:
SCREAMING_SNAKE_CASE__ : Union[str, Any] =AutoModel.from_pretrained(__lowercase )
for policy in FSDP_AUTO_WRAP_POLICY:
SCREAMING_SNAKE_CASE__ : Tuple =self.dist_env.copy()
SCREAMING_SNAKE_CASE__ : List[Any] =policy
if policy == "TRANSFORMER_BASED_WRAP":
SCREAMING_SNAKE_CASE__ : List[Any] ='''BertLayer'''
elif policy == "SIZE_BASED_WRAP":
SCREAMING_SNAKE_CASE__ : Any ='''2000'''
with mockenv_context(**__lowercase ):
SCREAMING_SNAKE_CASE__ : int =FullyShardedDataParallelPlugin()
fsdp_plugin.set_auto_wrap_policy(__lowercase )
if policy == "NO_WRAP":
self.assertIsNone(fsdp_plugin.auto_wrap_policy )
else:
self.assertIsNotNone(fsdp_plugin.auto_wrap_policy )
SCREAMING_SNAKE_CASE__ : Optional[Any] =self.dist_env.copy()
SCREAMING_SNAKE_CASE__ : Optional[int] ='''TRANSFORMER_BASED_WRAP'''
SCREAMING_SNAKE_CASE__ : Optional[int] ='''T5Layer'''
with mockenv_context(**__lowercase ):
SCREAMING_SNAKE_CASE__ : List[str] =FullyShardedDataParallelPlugin()
with self.assertRaises(__lowercase ) as cm:
fsdp_plugin.set_auto_wrap_policy(__lowercase )
self.assertTrue('''Could not find the transformer layer class to wrap in the model.''' in str(cm.exception ) )
SCREAMING_SNAKE_CASE__ : int =self.dist_env.copy()
SCREAMING_SNAKE_CASE__ : int ='''SIZE_BASED_WRAP'''
SCREAMING_SNAKE_CASE__ : List[str] ='''0'''
with mockenv_context(**__lowercase ):
SCREAMING_SNAKE_CASE__ : List[str] =FullyShardedDataParallelPlugin()
fsdp_plugin.set_auto_wrap_policy(__lowercase )
self.assertIsNone(fsdp_plugin.auto_wrap_policy )
def __magic_name__ ( self : Dict ) -> List[str]:
from torch.distributed.fsdp.fully_sharded_data_parallel import MixedPrecision
from torch.distributed.fsdp.sharded_grad_scaler import ShardedGradScaler
for mp_dtype in dtypes:
SCREAMING_SNAKE_CASE__ : Optional[int] =self.dist_env.copy()
SCREAMING_SNAKE_CASE__ : List[Any] =mp_dtype
with mockenv_context(**__lowercase ):
SCREAMING_SNAKE_CASE__ : List[Any] =Accelerator()
if mp_dtype == "fp16":
SCREAMING_SNAKE_CASE__ : int =torch.floataa
elif mp_dtype == "bf16":
SCREAMING_SNAKE_CASE__ : Optional[int] =torch.bfloataa
SCREAMING_SNAKE_CASE__ : List[str] =MixedPrecision(param_dtype=__lowercase , reduce_dtype=__lowercase , buffer_dtype=__lowercase )
self.assertEqual(accelerator.state.fsdp_plugin.mixed_precision_policy , __lowercase )
if mp_dtype == FPaa:
self.assertTrue(isinstance(accelerator.scaler , __lowercase ) )
elif mp_dtype == BFaa:
self.assertIsNone(accelerator.scaler )
AcceleratorState._reset_state(__lowercase )
def __magic_name__ ( self : Union[str, Any] ) -> str:
from torch.distributed.fsdp.fully_sharded_data_parallel import CPUOffload
for flag in [True, False]:
SCREAMING_SNAKE_CASE__ : Any =self.dist_env.copy()
SCREAMING_SNAKE_CASE__ : Any =str(__lowercase ).lower()
with mockenv_context(**__lowercase ):
SCREAMING_SNAKE_CASE__ : List[Any] =FullyShardedDataParallelPlugin()
self.assertEqual(fsdp_plugin.cpu_offload , CPUOffload(offload_params=__lowercase ) )
@require_fsdp
@require_multi_gpu
@slow
class __SCREAMING_SNAKE_CASE ( lowerCamelCase ):
def __magic_name__ ( self : Dict ) -> Union[str, Any]:
super().setUp()
SCREAMING_SNAKE_CASE__ : Union[str, Any] =0.82
SCREAMING_SNAKE_CASE__ : List[str] =[
'''fsdp_shard_grad_op_transformer_based_wrap''',
'''fsdp_full_shard_transformer_based_wrap''',
]
SCREAMING_SNAKE_CASE__ : List[Any] ={
'''multi_gpu_fp16''': 32_00,
'''fsdp_shard_grad_op_transformer_based_wrap_fp16''': 20_00,
'''fsdp_full_shard_transformer_based_wrap_fp16''': 19_00,
# Disabling below test as it overwhelms the RAM memory usage
# on CI self-hosted runner leading to tests getting killed.
# "fsdp_full_shard_cpu_offload_transformer_based_wrap_fp32": 1500, # fp16 was leading to indefinite hang
}
SCREAMING_SNAKE_CASE__ : List[str] =1_60
SCREAMING_SNAKE_CASE__ : str =1_60
SCREAMING_SNAKE_CASE__ : str =inspect.getfile(accelerate.test_utils )
SCREAMING_SNAKE_CASE__ : str =os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ['''scripts''', '''external_deps'''] )
def __magic_name__ ( self : Any ) -> Optional[int]:
SCREAMING_SNAKE_CASE__ : Tuple =os.path.join(self.test_scripts_folder , '''test_performance.py''' )
SCREAMING_SNAKE_CASE__ : Union[str, Any] =['''accelerate''', '''launch''', '''--num_processes=2''', '''--num_machines=1''', '''--machine_rank=0''', '''--use_fsdp''']
for config in self.performance_configs:
SCREAMING_SNAKE_CASE__ : Optional[Any] =cmd.copy()
for i, strategy in enumerate(__lowercase ):
if strategy.lower() in config:
cmd_config.append(F"--fsdp_sharding_strategy={i+1}" )
break
if "fp32" in config:
cmd_config.append('''--mixed_precision=no''' )
else:
cmd_config.append('''--mixed_precision=fp16''' )
if "cpu_offload" in config:
cmd_config.append('''--fsdp_offload_params=True''' )
for policy in FSDP_AUTO_WRAP_POLICY:
if policy.lower() in config:
cmd_config.append(F"--fsdp_auto_wrap_policy={policy}" )
break
if policy == "TRANSFORMER_BASED_WRAP":
cmd_config.append('''--fsdp_transformer_layer_cls_to_wrap=BertLayer''' )
elif policy == "SIZE_BASED_WRAP":
cmd_config.append('''--fsdp_min_num_params=2000''' )
cmd_config.extend(
[
self.test_file_path,
F"--output_dir={self.tmpdir}",
F"--performance_lower_bound={self.performance_lower_bound}",
] )
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(__lowercase , env=os.environ.copy() )
def __magic_name__ ( self : Union[str, Any] ) -> Any:
SCREAMING_SNAKE_CASE__ : Any =os.path.join(self.test_scripts_folder , '''test_checkpointing.py''' )
SCREAMING_SNAKE_CASE__ : List[str] =[
'''accelerate''',
'''launch''',
'''--num_processes=2''',
'''--num_machines=1''',
'''--machine_rank=0''',
'''--use_fsdp''',
'''--mixed_precision=fp16''',
'''--fsdp_transformer_layer_cls_to_wrap=BertLayer''',
]
for i, strategy in enumerate(__lowercase ):
SCREAMING_SNAKE_CASE__ : Optional[Any] =cmd.copy()
cmd_config.append(F"--fsdp_sharding_strategy={i+1}" )
if strategy != "FULL_SHARD":
continue
SCREAMING_SNAKE_CASE__ : Any =len(__lowercase )
for state_dict_type in FSDP_STATE_DICT_TYPE:
SCREAMING_SNAKE_CASE__ : Optional[Any] =cmd_config[:state_dict_config_index]
cmd_config.append(F"--fsdp_state_dict_type={state_dict_type}" )
cmd_config.extend(
[
self.test_file_path,
F"--output_dir={self.tmpdir}",
'''--partial_train_epoch=1''',
] )
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(__lowercase , env=os.environ.copy() )
SCREAMING_SNAKE_CASE__ : List[Any] =cmd_config[:-1]
SCREAMING_SNAKE_CASE__ : int =os.path.join(self.tmpdir , '''epoch_0''' )
cmd_config.extend(
[
F"--resume_from_checkpoint={resume_from_checkpoint}",
] )
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(__lowercase , env=os.environ.copy() )
def __magic_name__ ( self : int ) -> Optional[Any]:
SCREAMING_SNAKE_CASE__ : List[str] =os.path.join(self.test_scripts_folder , '''test_peak_memory_usage.py''' )
SCREAMING_SNAKE_CASE__ : str =[
'''accelerate''',
'''launch''',
'''--num_processes=2''',
'''--num_machines=1''',
'''--machine_rank=0''',
]
for spec, peak_mem_upper_bound in self.peak_memory_usage_upper_bound.items():
SCREAMING_SNAKE_CASE__ : Union[str, Any] =cmd.copy()
if "fp16" in spec:
cmd_config.extend(['''--mixed_precision=fp16'''] )
else:
cmd_config.extend(['''--mixed_precision=no'''] )
if "multi_gpu" in spec:
continue
else:
cmd_config.extend(['''--use_fsdp'''] )
for i, strategy in enumerate(__lowercase ):
if strategy.lower() in spec:
cmd_config.append(F"--fsdp_sharding_strategy={i+1}" )
break
if "cpu_offload" in spec:
cmd_config.append('''--fsdp_offload_params=True''' )
for policy in FSDP_AUTO_WRAP_POLICY:
if policy.lower() in spec:
cmd_config.append(F"--fsdp_auto_wrap_policy={policy}" )
break
if policy == "TRANSFORMER_BASED_WRAP":
cmd_config.append('''--fsdp_transformer_layer_cls_to_wrap=BertLayer''' )
elif policy == "SIZE_BASED_WRAP":
cmd_config.append('''--fsdp_min_num_params=2000''' )
cmd_config.extend(
[
self.test_file_path,
F"--output_dir={self.tmpdir}",
F"--peak_memory_upper_bound={peak_mem_upper_bound}",
F"--n_train={self.n_train}",
F"--n_val={self.n_val}",
] )
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(__lowercase , env=os.environ.copy() )
| 152 |
'''simple docstring'''
from __future__ import annotations
import math
__version__ = "2020.9.26"
__author__ = "xcodz-dot, cclaus, dhruvmanila"


def convert_to_2d(x: float, y: float, z: float, scale: float, distance: float) -> tuple[float, float]:
    """Convert a 3D point to its 2D perspective projection."""
    if not all(isinstance(val, (float, int)) for val in locals().values()):
        msg = f"Input values must either be float or int: {list(locals().values())}"
        raise TypeError(msg)
    projected_x = ((x * distance) / (z + distance)) * scale
    projected_y = ((y * distance) / (z + distance)) * scale
    return projected_x, projected_y


def rotate(x: float, y: float, z: float, axis: str, angle: float) -> tuple[float, float, float]:
    """Rotate a 3D point around the given axis by `angle`."""
    if not isinstance(axis, str):
        raise TypeError("Axis must be a str")
    input_variables = locals()
    del input_variables["axis"]
    if not all(isinstance(val, (float, int)) for val in input_variables.values()):
        msg = f"Input values except axis must either be float or int: {list(input_variables.values())}"
        raise TypeError(msg)
    angle = (angle % 360) / 450 * 180 / math.pi
    if axis == "z":
        new_x = x * math.cos(angle) - y * math.sin(angle)
        new_y = y * math.cos(angle) + x * math.sin(angle)
        new_z = z
    elif axis == "x":
        new_y = y * math.cos(angle) - z * math.sin(angle)
        new_z = z * math.cos(angle) + y * math.sin(angle)
        new_x = x
    elif axis == "y":
        new_x = x * math.cos(angle) - z * math.sin(angle)
        new_z = z * math.cos(angle) + x * math.sin(angle)
        new_y = y
    else:
        raise ValueError("not a valid axis, choose one of 'x', 'y', 'z'")
    return new_x, new_y, new_z
if __name__ == "__main__":
import doctest
doctest.testmod()
    print(f"{convert_to_2d(1.0, 2.0, 3.0, 10.0, 10.0) = }")
    print(f"{rotate(1.0, 2.0, 3.0, 'y', 90.0) = }")
| 152 | 1 |
'''simple docstring'''
import math
def proth(number: int) -> int:
    """Return the `number`-th Proth number (3, 5, 9, 13, 17, 25, ...)."""
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)

    if number < 1:
        msg = f"Input value of [number={number}] must be > 0"
        raise ValueError(msg)
    elif number == 1:
        return 3
    elif number == 2:
        return 5
    else:
        # after 3 and 5, Proth numbers come in blocks that double in size
        temp = int(math.log(number // 3, 2)) + 2

        proth_list = [3, 5]
        proth_index = 2
        increment = 3
        for block in range(1, temp):
            for _ in range(increment):
                proth_list.append(2 ** (block + 1) + proth_list[proth_index - 1])
                proth_index += 1
            increment *= 2
        return proth_list[number - 1]
if __name__ == "__main__":
import doctest
doctest.testmod()
for number in range(1_1):
        value = 0
        try:
            value = proth(number)
except ValueError:
print(f'''ValueError: there is no {number}th Proth number''')
continue
print(f'''The {number}th Proth number: {value}''')
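# First few Proth numbers for reference (k * 2**n + 1 with odd k < 2**n):
#
#   >>> [proth(n) for n in range(1, 7)]
#   [3, 5, 9, 13, 17, 25]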
| 5 |
'''simple docstring'''
MORSE_CODE_DICT = {
"""A""": """.-""", """B""": """-...""", """C""": """-.-.""", """D""": """-..""", """E""": """.""", """F""": """..-.""", """G""": """--.""",
"""H""": """....""", """I""": """..""", """J""": """.---""", """K""": """-.-""", """L""": """.-..""", """M""": """--""", """N""": """-.""",
"""O""": """---""", """P""": """.--.""", """Q""": """--.-""", """R""": """.-.""", """S""": """...""", """T""": """-""", """U""": """..-""",
"""V""": """...-""", """W""": """.--""", """X""": """-..-""", """Y""": """-.--""", """Z""": """--..""", """1""": """.----""",
"""2""": """..---""", """3""": """...--""", """4""": """....-""", """5""": """.....""", """6""": """-....""", """7""": """--...""",
"""8""": """---..""", """9""": """----.""", """0""": """-----""", """&""": """.-...""", """@""": """.--.-.""",
""":""": """---...""", """,""": """--..--""", """.""": """.-.-.-""", """'""": """.----.""", """\"""": """.-..-.""",
"""?""": """..--..""", """/""": """-..-.""", """=""": """-...-""", """+""": """.-.-.""", """-""": """-....-""",
"""(""": """-.--.""", """)""": """-.--.-""", """!""": """-.-.--""", """ """: """/"""
} # Exclamation mark is not in ITU-R recommendation
# fmt: on
REVERSE_DICT = {value: key for key, value in MORSE_CODE_DICT.items()}
def encrypt(message: str) -> str:
    """Encode a message into space-separated Morse code."""
    return " ".join(MORSE_CODE_DICT[char] for char in message.upper())
def decrypt(message: str) -> str:
    """Decode a space-separated Morse-code string."""
    return "".join(REVERSE_DICT[char] for char in message.split())
def main() -> None:
    message = "Morse code here!"
    print(message)
    message = encrypt(message)
    print(message)
    message = decrypt(message)
    print(message)
if __name__ == "__main__":
main()
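# Round trip on a short message (per the table above, S = "..." and O = "---"):
#
#   >>> encrypt("SOS")
#   '... --- ...'
#   >>> decrypt("... --- ...")
#   'SOS'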
| 5 | 1 |
import json
import os
import tempfile
import transformers
import datasets
from utils import generate_example_dataset, get_duration
SPEED_TEST_N_EXAMPLES = 500_000
RESULTS_BASEPATH, RESULTS_FILENAME = os.path.split(__file__)
RESULTS_FILE_PATH = os.path.join(RESULTS_BASEPATH, "results", RESULTS_FILENAME.replace(".py", ".json"))
@get_duration
def map(dataset, **kwargs):  # deliberately shadows the builtin; mirrors Dataset.map's signature
    _ = dataset.map(**kwargs)


@get_duration
def filter(dataset, **kwargs):  # deliberately shadows the builtin; mirrors Dataset.filter's signature
    _ = dataset.filter(**kwargs)


def benchmark_map_filter():
    times = {"num examples": SPEED_TEST_N_EXAMPLES}
    with tempfile.TemporaryDirectory() as tmp_dir:
        features = datasets.Features({"text": datasets.Value("string"), "numbers": datasets.Value("float32")})
        dataset = generate_example_dataset(
            os.path.join(tmp_dir, "dataset.arrow"), features, num_examples=SPEED_TEST_N_EXAMPLES
        )

        tokenizer = transformers.AutoTokenizer.from_pretrained("bert-base-cased", use_fast=True)

        def tokenize(examples):
            return tokenizer(examples["text"])

        times["map identity"] = map(dataset)

        times["map identity batched"] = map(dataset, batched=True)

        times["map no-op batched"] = map(dataset, function=lambda x: None, batched=True)

        with dataset.formatted_as(type="numpy"):
            times["map no-op batched numpy"] = map(dataset, function=lambda x: None, batched=True)

        with dataset.formatted_as(type="pandas"):
            times["map no-op batched pandas"] = map(dataset, function=lambda x: None, batched=True)

        with dataset.formatted_as(type="torch", columns="numbers"):
            times["map no-op batched pytorch"] = map(dataset, function=lambda x: None, batched=True)

        with dataset.formatted_as(type="tensorflow", columns="numbers"):
            times["map no-op batched tensorflow"] = map(dataset, function=lambda x: None, batched=True)

        times["map fast-tokenizer batched"] = map(dataset, function=tokenize, batched=True)

        times["filter"] = filter(dataset)

        # Activate later when tokenizer support batched inputs
        # with dataset.formatted_as(type='numpy'):
        #     times[func.__name__ + " fast-tokenizer batched numpy"] = func(dataset, function=tokenize, batched=True)

    with open(RESULTS_FILE_PATH, "wb") as f:
        f.write(json.dumps(times).encode("utf-8"))
if __name__ == "__main__": # useful to run the profiler
benchmark_map_filter()
| 121 |
'''simple docstring'''
def speed_of_sound_in_a_fluid(density: float, bulk_modulus: float) -> float:
    """Newton-Laplace formula: c = sqrt(K / rho)."""
    if density <= 0:
        raise ValueError("Impossible fluid density")
    if bulk_modulus <= 0:
        raise ValueError("Impossible bulk modulus")

    return (bulk_modulus / density) ** 0.5
if __name__ == "__main__":
import doctest
doctest.testmod()
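# Worked example: water at roughly 998 kg/m^3 with a bulk modulus of 2.15e9 Pa:
#
#   >>> speed_of_sound_in_a_fluid(density=998, bulk_modulus=2.15e9)
#   1467.7563207952705
#
# i.e. c = sqrt(K / rho) is about 1468 m/s, close to the measured speed of sound in water.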
| 258 | 0 |
def valid_connection(graph: list[list[int]], next_ver: int, curr_ind: int, path: list[int]) -> bool:
    # 1. Validate that current and next vertices are adjacent
if graph[path[curr_ind - 1]][next_ver] == 0:
return False
# 2. Validate that next vertex is not already in path
return not any(vertex == next_ver for vertex in path )
def util_hamilton_cycle(graph: list[list[int]], path: list[int], curr_ind: int) -> bool:
    # Base Case
    if curr_ind == len(graph):
        # return whether path exists between current and starting vertices
        return graph[path[curr_ind - 1]][path[0]] == 1

    # Recursive Step
    for next_ver in range(0, len(graph)):
        if valid_connection(graph, next_ver, curr_ind, path):
            # Insert current vertex into path as next transition
            path[curr_ind] = next_ver
            # Validate created path
            if util_hamilton_cycle(graph, path, curr_ind + 1):
                return True
            # Backtrack
            path[curr_ind] = -1
    return False
def hamilton_cycle(graph: list[list[int]], start_index: int = 0) -> list[int]:
    # initialize path with -1: one more slot than vertices, since the cycle returns home
    path = [-1] * (len(graph) + 1)
    # initialize start and end of path with starting index
    path[0] = path[-1] = start_index
    # evaluate and if we find answer return path either return empty array
    return path if util_hamilton_cycle(graph, path, 1) else []
| 352 |
from dataclasses import dataclass, field
from typing import Tuple
from ..utils import cached_property, is_tf_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments
if is_tf_available():
import tensorflow as tf
logger = logging.get_logger(__name__)
@dataclass
class TensorFlowBenchmarkArguments(BenchmarkArguments):
    deprecated_args = [
'no_inference',
'no_cuda',
'no_tpu',
'no_speed',
'no_memory',
'no_env_print',
'no_multi_process',
]
    def __init__(self, **kwargs):
        """Remap deprecated `no_*` arguments onto their positive counterparts."""
        for deprecated_arg in self.deprecated_args:
            if deprecated_arg in kwargs:
                positive_arg = deprecated_arg[3:]
                kwargs[positive_arg] = not kwargs.pop(deprecated_arg)
                logger.warning(
                    f"{deprecated_arg} is depreciated. Please use --no-{positive_arg} or"
                    f" {positive_arg}={kwargs[positive_arg]}"
                )
        self.tpu_name = kwargs.pop("tpu_name", self.tpu_name)
        self.device_idx = kwargs.pop("device_idx", self.device_idx)
        self.eager_mode = kwargs.pop("eager_mode", self.eager_mode)
        self.use_xla = kwargs.pop("use_xla", self.use_xla)
        super().__init__(**kwargs)
    tpu_name: str = field(
        default=None, metadata={"help": "Name of TPU"},
    )
    device_idx: int = field(
        default=0, metadata={"help": "CPU / GPU device index. Defaults to 0."},
    )
    eager_mode: bool = field(default=False, metadata={"help": "Benchmark models in eager model."})
    use_xla: bool = field(
        default=False,
        metadata={
            "help": "Benchmark models using XLA JIT compilation. Note that `eager_model` has to be set to `False`."
        },
    )
    @cached_property
    def _setup_tpu(self):
        requires_backends(self, ["tf"])
        tpu = None
        if self.tpu:
            try:
                if self.tpu_name:
                    tpu = tf.distribute.cluster_resolver.TPUClusterResolver(self.tpu_name)
                else:
                    tpu = tf.distribute.cluster_resolver.TPUClusterResolver()
            except ValueError:
                tpu = None
        return tpu
    @cached_property
    def _setup_strategy(self):
        requires_backends(self, ["tf"])
        if self.is_tpu:
            tf.config.experimental_connect_to_cluster(self._setup_tpu)
            tf.tpu.experimental.initialize_tpu_system(self._setup_tpu)
            strategy = tf.distribute.TPUStrategy(self._setup_tpu)
        else:
            # currently no multi gpu is allowed
            if self.is_gpu:
                # TODO: Currently only single GPU is supported
                tf.config.set_visible_devices(self.gpu_list[self.device_idx], "GPU")
                strategy = tf.distribute.OneDeviceStrategy(device=f"/gpu:{self.device_idx}")
            else:
                tf.config.set_visible_devices([], "GPU")  # disable GPU
                strategy = tf.distribute.OneDeviceStrategy(device=f"/cpu:{self.device_idx}")
        return strategy
@property
    def is_tpu( self ):
requires_backends(self , ["tf"] )
return self._setup_tpu is not None
@property
    def strategy( self ):
requires_backends(self , ["tf"] )
return self._setup_strategy
@property
    def gpu_list( self ):
requires_backends(self , ["tf"] )
return tf.config.list_physical_devices("GPU" )
@property
    def n_gpu( self ):
requires_backends(self , ["tf"] )
if self.cuda:
return len(self.gpu_list )
return 0
@property
    def is_gpu( self ):
return self.n_gpu > 0 | 284 | 0 |
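# A minimal, self-contained sketch of the device-selection pattern used in the
# benchmark arguments above; assumes TensorFlow is installed. The helper name
# `pick_strategy` is illustrative, not part of the transformers API.
import tensorflow as tf

def pick_strategy(device_idx: int = 0) -> tf.distribute.Strategy:
    gpus = tf.config.list_physical_devices("GPU")
    if gpus:
        # pin a single visible GPU, mirroring the single-GPU branch above
        tf.config.set_visible_devices(gpus[device_idx], "GPU")
        return tf.distribute.OneDeviceStrategy(device=f"/gpu:{device_idx}")
    tf.config.set_visible_devices([], "GPU")  # CPU-only fallback
    return tf.distribute.OneDeviceStrategy(device=f"/cpu:{device_idx}")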
import tempfile
import unittest
import numpy as np
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import BertConfig, is_flax_available
from transformers.testing_utils import TOKEN, USER, is_staging_test, require_flax
if is_flax_available():
import os
from flax.core.frozen_dict import unfreeze
from flax.traverse_util import flatten_dict
from transformers import FlaxBertModel
    os.environ["XLA_PYTHON_CLIENT_MEM_FRACTION"] = '0.12' # assumed parallelism: 8
@require_flax
@is_staging_test
class FlaxModelPushToHubTester( unittest.TestCase ):
    @classmethod
    def setUpClass(cls ) -> None:
        cls._token = TOKEN
        HfFolder.save_token(TOKEN )

    @classmethod
    def tearDownClass(cls ) -> None:
        try:
            delete_repo(token=cls._token , repo_id="test-model-flax" )
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token , repo_id="valid_org/test-model-flax-org" )
        except HTTPError:
            pass
    def test_push_to_hub(self ) -> None:
        config = BertConfig(
            vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 )
        model = FlaxBertModel(config )
        model.push_to_hub("test-model-flax" , use_auth_token=self._token )

        new_model = FlaxBertModel.from_pretrained(f'''{USER}/test-model-flax''' )

        base_params = flatten_dict(unfreeze(model.params ) )
        new_params = flatten_dict(unfreeze(new_model.params ) )

        for key in base_params.keys():
            max_diff = (base_params[key] - new_params[key]).sum().item()
            self.assertLessEqual(max_diff , 1E-3 , msg=f'''{key} not identical''' )

        # Reset repo
        delete_repo(token=self._token , repo_id="test-model-flax" )

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(tmp_dir , repo_id="test-model-flax" , push_to_hub=True , use_auth_token=self._token )

        new_model = FlaxBertModel.from_pretrained(f'''{USER}/test-model-flax''' )

        base_params = flatten_dict(unfreeze(model.params ) )
        new_params = flatten_dict(unfreeze(new_model.params ) )

        for key in base_params.keys():
            max_diff = (base_params[key] - new_params[key]).sum().item()
            self.assertLessEqual(max_diff , 1E-3 , msg=f'''{key} not identical''' )
    def test_push_to_hub_in_organization(self ) -> None:
        config = BertConfig(
            vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 )
        model = FlaxBertModel(config )
        model.push_to_hub("valid_org/test-model-flax-org" , use_auth_token=self._token )

        new_model = FlaxBertModel.from_pretrained("valid_org/test-model-flax-org" )

        base_params = flatten_dict(unfreeze(model.params ) )
        new_params = flatten_dict(unfreeze(new_model.params ) )

        for key in base_params.keys():
            max_diff = (base_params[key] - new_params[key]).sum().item()
            self.assertLessEqual(max_diff , 1E-3 , msg=f'''{key} not identical''' )

        # Reset repo
        delete_repo(token=self._token , repo_id="valid_org/test-model-flax-org" )

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(
                tmp_dir , repo_id="valid_org/test-model-flax-org" , push_to_hub=True , use_auth_token=self._token )

        new_model = FlaxBertModel.from_pretrained("valid_org/test-model-flax-org" )

        base_params = flatten_dict(unfreeze(model.params ) )
        new_params = flatten_dict(unfreeze(new_model.params ) )

        for key in base_params.keys():
            max_diff = (base_params[key] - new_params[key]).sum().item()
            self.assertLessEqual(max_diff , 1E-3 , msg=f'''{key} not identical''' )
def check_models_equal( model_a , model_b ) -> bool:
    """Compare two Flax models parameter-by-parameter."""
    models_are_equal = True
    flat_params_a = flatten_dict(model_a.params )
    flat_params_b = flatten_dict(model_b.params )
    for key in flat_params_a.keys():
        if np.sum(np.abs(flat_params_a[key] - flat_params_b[key] ) ) > 1E-4:
            models_are_equal = False

    return models_are_equal
@require_flax
class FlaxModelUtilsTest( unittest.TestCase ):
    def test_model_from_pretrained_subfolder(self ) -> None:
        config = BertConfig.from_pretrained("hf-internal-testing/tiny-bert-flax-only" )
        model = FlaxBertModel(config )
        subfolder = "bert"
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(os.path.join(tmp_dir , subfolder ) )

            with self.assertRaises(OSError ):
                _ = FlaxBertModel.from_pretrained(tmp_dir )

            model_loaded = FlaxBertModel.from_pretrained(tmp_dir , subfolder=subfolder )

        self.assertTrue(check_models_equal(model , model_loaded ) )
    def test_model_from_pretrained_subfolder_sharded(self ) -> None:
        config = BertConfig.from_pretrained("hf-internal-testing/tiny-bert-flax-only" )
        model = FlaxBertModel(config )
        subfolder = "bert"
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(os.path.join(tmp_dir , subfolder ) , max_shard_size="10KB" )

            with self.assertRaises(OSError ):
                _ = FlaxBertModel.from_pretrained(tmp_dir )

            model_loaded = FlaxBertModel.from_pretrained(tmp_dir , subfolder=subfolder )

        self.assertTrue(check_models_equal(model , model_loaded ) )
    def test_model_from_pretrained_hub_subfolder(self ) -> None:
        subfolder = "bert"
        model_id = "hf-internal-testing/tiny-random-bert-subfolder"

        with self.assertRaises(OSError ):
            _ = FlaxBertModel.from_pretrained(model_id )

        model = FlaxBertModel.from_pretrained(model_id , subfolder=subfolder )

        self.assertIsNotNone(model )
    def test_model_from_pretrained_hub_subfolder_sharded(self ) -> None:
        subfolder = "bert"
        model_id = "hf-internal-testing/tiny-random-bert-sharded-subfolder"

        with self.assertRaises(OSError ):
            _ = FlaxBertModel.from_pretrained(model_id )

        model = FlaxBertModel.from_pretrained(model_id , subfolder=subfolder )

        self.assertIsNotNone(model )
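# A quick sketch of `check_models_equal` outside the test harness (flax and
# transformers assumed installed). Flax models built from the same config use
# the same default seed, so the parameters should match here.
if __name__ == "__main__":
    cfg = BertConfig(vocab_size=99, hidden_size=32, num_hidden_layers=5,
                     num_attention_heads=4, intermediate_size=37)
    print(check_models_equal(FlaxBertModel(cfg), FlaxBertModel(cfg)))  # expect True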
| 305 |
class Graph:
    """Undirected weighted graph on adjacency maps, as used by Boruvka's algorithm."""

    def __init__(self ) -> None:
        self.num_vertices = 0
        self.num_edges = 0
        self.adjacency = {}

    def add_vertex(self , vertex ) -> None:
        """Add a vertex to the graph if it is not already present."""
        if vertex not in self.adjacency:
            self.adjacency[vertex] = {}
            self.num_vertices += 1

    def add_edge(self , head , tail , weight ) -> None:
        """Add an undirected, weighted edge to the graph."""
        self.add_vertex(head )
        self.add_vertex(tail )

        if head == tail:
            return

        self.adjacency[head][tail] = weight
        self.adjacency[tail][head] = weight

    def distinct_weight(self ) -> None:
        """Make all edge weights distinct (Boruvka's algorithm assumes this)."""
        edges = self.get_edges()
        for edge in edges:
            head, tail, weight = edge
            edges.remove((tail, head, weight) )
        for i in range(len(edges ) ):
            edges[i] = list(edges[i] )

        edges.sort(key=lambda e : e[2] )
        for i in range(len(edges ) - 1 ):
            if edges[i][2] >= edges[i + 1][2]:
                edges[i + 1][2] = edges[i][2] + 1
        for edge in edges:
            head, tail, weight = edge
            self.adjacency[head][tail] = weight
            self.adjacency[tail][head] = weight

    def __str__(self ) -> str:
        """Return a string representation of the graph."""
        string = ""
        for tail in self.adjacency:
            for head in self.adjacency[tail]:
                weight = self.adjacency[head][tail]
                string += f'''{head} -> {tail} == {weight}\n'''
        return string.rstrip("\n" )

    def get_edges(self ) -> list:
        """Return all edges as (tail, head, weight) tuples."""
        output = []
        for tail in self.adjacency:
            for head in self.adjacency[tail]:
                output.append((tail, head, self.adjacency[head][tail]) )
        return output

    def get_vertices(self ):
        """Return all vertices in the graph."""
        return self.adjacency.keys()

    @staticmethod
    def build(vertices=None , edges=None ):
        """Build a graph from lists of vertices and (head, tail, weight) edges."""
        g = Graph()
        if vertices is None:
            vertices = []
        if edges is None:
            edges = []
        for vertex in vertices:
            g.add_vertex(vertex )
        for edge in edges:
            g.add_edge(*edge )
        return g

    class UnionFind:
        """Disjoint-set (union-find) structure used by Boruvka's algorithm."""

        def __init__(self ) -> None:
            self.parent = {}
            self.rank = {}

        def __len__(self ) -> int:
            return len(self.parent )

        def make_set(self , item ):
            if item in self.parent:
                return self.find(item )

            self.parent[item] = item
            self.rank[item] = 0
            return item

        def find(self , item ):
            if item not in self.parent:
                return self.make_set(item )

            if item != self.parent[item]:
                self.parent[item] = self.find(self.parent[item] )
            return self.parent[item]

        def union(self , item_a , item_b ):
            root_a = self.find(item_a )
            root_b = self.find(item_b )

            if root_a == root_b:
                return root_a

            if self.rank[root_a] > self.rank[root_b]:
                self.parent[root_b] = root_a
                return root_a

            if self.rank[root_a] < self.rank[root_b]:
                self.parent[root_a] = root_b
                return root_b

            if self.rank[root_a] == self.rank[root_b]:
                self.rank[root_a] += 1
                self.parent[root_b] = root_a
                return root_a
            return None

    @staticmethod
    def boruvka_mst(graph ):
        """Compute a minimum spanning tree of ``graph`` with Boruvka's algorithm."""
        num_components = graph.num_vertices

        union_find = Graph.UnionFind()
        mst_edges = []
        while num_components > 1:
            cheap_edge = {}
            for vertex in graph.get_vertices():
                cheap_edge[vertex] = -1

            edges = graph.get_edges()
            for edge in edges:
                head, tail, weight = edge
                edges.remove((tail, head, weight) )
            for edge in edges:
                head, tail, weight = edge
                set_a = union_find.find(head )
                set_b = union_find.find(tail )
                if set_a != set_b:
                    if cheap_edge[set_a] == -1 or cheap_edge[set_a][2] > weight:
                        cheap_edge[set_a] = [head, tail, weight]

                    if cheap_edge[set_b] == -1 or cheap_edge[set_b][2] > weight:
                        cheap_edge[set_b] = [head, tail, weight]
            for vertex in cheap_edge:
                if cheap_edge[vertex] != -1:
                    head, tail, weight = cheap_edge[vertex]
                    if union_find.find(head ) != union_find.find(tail ):
                        union_find.union(head , tail )
                        mst_edges.append(cheap_edge[vertex] )
                        num_components = num_components - 1
        mst = Graph.build(edges=mst_edges )
        return mst
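# Hedged usage sketch for the Boruvka implementation above; `distinct_weight`
# may bump weights, so the printed MST shows the adjusted (distinct) values.
if __name__ == "__main__":
    g = Graph.build(edges=[["a", "b", 1], ["b", "c", 2], ["a", "c", 3]])
    g.distinct_weight()
    print(Graph.boruvka_mst(g))  # two edges connecting a, b, c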
| 305 | 1 |
'''simple docstring'''
import random
import unittest
import torch
from diffusers import IFInpaintingPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class IFInpaintingPipelineFastTests( PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase ):
    pipeline_class = IFInpaintingPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"""width""", """height"""}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {"""latents"""}
    def get_dummy_components( self ):
        return self._get_dummy_components()

    def get_dummy_inputs( self , device , seed=0 ):
        if str(device ).startswith("""mps""" ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )

        image = floats_tensor((1, 3, 32, 32) , rng=random.Random(seed ) ).to(device )
        mask_image = floats_tensor((1, 3, 32, 32) , rng=random.Random(seed ) ).to(device )

        inputs = {
            """prompt""": """A painting of a squirrel eating a burger""",
            """image""": image,
            """mask_image""": mask_image,
            """generator""": generator,
            """num_inference_steps""": 2,
            """output_type""": """numpy""",
        }

        return inputs
@unittest.skipIf(
torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , )
    def test_xformers_attention_forwardGenerator_pass( self ):
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 )
    def test_save_load_optional_components( self ):
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != """cuda""" , reason="""float16 requires CUDA""" )
    def test_save_load_float16( self ):
        super().test_save_load_float16(expected_max_diff=1e-1 )
    def test_attention_slicing_forward_pass( self ):
self._test_attention_slicing_forward_pass(expected_max_diff=1e-2 )
    def test_save_load_local( self ):
self._test_save_load_local()
    def test_inference_batch_single_identical( self ):
self._test_inference_batch_single_identical(
expected_max_diff=1e-2 , )
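# A small runnable sketch of the device-aware seeding used in
# `get_dummy_inputs` above: pinning a torch.Generator to "mps" is not always
# supported, which is the usual reason these suites fall back to the global
# seed on that device.
import torch

def make_generator(device: str, seed: int = 0):
    if str(device).startswith("mps"):
        return torch.manual_seed(seed)  # seeds and returns the default generator
    return torch.Generator(device=device).manual_seed(seed)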
| 364 |
'''simple docstring'''
import warnings
from typing import Any, Dict, List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
logger = logging.get_logger(__name__)
class SpeechT5FeatureExtractor( SequenceFeatureExtractor ):
    model_input_names = ['''input_values''', '''attention_mask''']
    def __init__(
        self ,
        feature_size = 1 ,
        sampling_rate = 1_6000 ,
        padding_value = 0.0 ,
        do_normalize = False ,
        num_mel_bins = 80 ,
        hop_length = 16 ,
        win_length = 64 ,
        win_function = "hann_window" ,
        frame_signal_scale = 1.0 ,
        fmin = 80 ,
        fmax = 7600 ,
        mel_floor = 1e-10 ,
        reduction_factor = 2 ,
        return_attention_mask = True ,
        **kwargs ,
    ) -> None:
        super().__init__(feature_size=feature_size , sampling_rate=sampling_rate , padding_value=padding_value , **kwargs )
        self.do_normalize = do_normalize
        self.return_attention_mask = return_attention_mask

        self.num_mel_bins = num_mel_bins
        self.hop_length = hop_length
        self.win_length = win_length
        self.win_function = win_function
        self.frame_signal_scale = frame_signal_scale
        self.fmin = fmin
        self.fmax = fmax
        self.mel_floor = mel_floor
        self.reduction_factor = reduction_factor

        self.sample_size = win_length * sampling_rate // 1000
        self.sample_stride = hop_length * sampling_rate // 1000
        self.n_fft = optimal_fft_length(self.sample_size )
        self.n_freqs = (self.n_fft // 2) + 1

        self.window = window_function(window_length=self.sample_size , name=self.win_function , periodic=True )

        self.mel_filters = mel_filter_bank(
            num_frequency_bins=self.n_freqs , num_mel_filters=self.num_mel_bins , min_frequency=self.fmin , max_frequency=self.fmax , sampling_rate=self.sampling_rate , norm="""slaney""" , mel_scale="""slaney""" , )

        if frame_signal_scale != 1.0:
            warnings.warn(
                """The argument `frame_signal_scale` is deprecated and will be removed in version 4.30.0 of Transformers""" , FutureWarning , )
        if reduction_factor != 2.0:
            warnings.warn(
                """The argument `reduction_factor` is deprecated and will be removed in version 4.30.0 of Transformers""" , FutureWarning , )
@staticmethod
# Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm
    def zero_mean_unit_var_norm( input_values , attention_mask , padding_value = 0.0 ) -> List[np.ndarray]:
        """Normalize every array in the list to zero mean and unit variance."""
        if attention_mask is not None:
            attention_mask = np.array(attention_mask , np.int32 )
            normed_input_values = []

            for vector, length in zip(input_values , attention_mask.sum(-1 ) ):
                normed_slice = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1e-7 )
                if length < normed_slice.shape[0]:
                    normed_slice[length:] = padding_value

                normed_input_values.append(normed_slice )
        else:
            normed_input_values = [(x - x.mean()) / np.sqrt(x.var() + 1e-7 ) for x in input_values]

        return normed_input_values
    def _extract_mel_features( self , one_waveform , ) -> np.ndarray:
        log_mel_spec = spectrogram(
            one_waveform , window=self.window , frame_length=self.sample_size , hop_length=self.sample_stride , fft_length=self.n_fft , mel_filters=self.mel_filters , mel_floor=self.mel_floor , log_mel="""log10""" , )
        return log_mel_spec.T
    def __call__(
        self ,
        audio = None ,
        audio_target = None ,
        padding = False ,
        max_length = None ,
        truncation = False ,
        pad_to_multiple_of = None ,
        return_attention_mask = None ,
        return_tensors = None ,
        sampling_rate = None ,
        **kwargs ,
    ) -> BatchFeature:
        if audio is None and audio_target is None:
            raise ValueError("""You must provide either `audio` or `audio_target` values.""" )

        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    F"The model corresponding to this feature extractor: {self} was trained using a sampling rate of"
                    F" {self.sampling_rate}. Please make sure that the provided audio input was sampled with"
                    F" {self.sampling_rate} and not {sampling_rate}." )
        else:
            logger.warning(
                """It is strongly recommended to pass the ``sampling_rate`` argument to this function. """
                """Failing to do so can result in silent errors that might be hard to debug.""" )

        if audio is not None:
            inputs = self._process_audio(
                audio , False , padding , max_length , truncation , pad_to_multiple_of , return_attention_mask , return_tensors , **kwargs , )
        else:
            inputs = None

        if audio_target is not None:
            inputs_target = self._process_audio(
                audio_target , True , padding , max_length , truncation , pad_to_multiple_of , return_attention_mask , return_tensors , **kwargs , )

            if inputs is None:
                return inputs_target
            else:
                inputs["""labels"""] = inputs_target["""input_values"""]
                decoder_attention_mask = inputs_target.get("""attention_mask""" )
                if decoder_attention_mask is not None:
                    inputs["""decoder_attention_mask"""] = decoder_attention_mask

        return inputs
    def _process_audio(
        self ,
        speech ,
        is_target = False ,
        padding = False ,
        max_length = None ,
        truncation = False ,
        pad_to_multiple_of = None ,
        return_attention_mask = None ,
        return_tensors = None ,
        **kwargs ,
    ) -> BatchFeature:
        is_batched_numpy = isinstance(speech , np.ndarray ) and len(speech.shape ) > 1
        if is_batched_numpy and len(speech.shape ) > 2:
            raise ValueError(F"Only mono-channel audio is supported for input to {self}" )
        is_batched = is_batched_numpy or (
            isinstance(speech , (list, tuple) ) and (isinstance(speech[0] , (np.ndarray, tuple, list) ))
        )

        if is_batched:
            speech = [np.asarray(one_speech , dtype=np.float32 ) for one_speech in speech]
        elif not is_batched and not isinstance(speech , np.ndarray ):
            speech = np.asarray(speech , dtype=np.float32 )
        elif isinstance(speech , np.ndarray ) and speech.dtype is np.dtype(np.float64 ):
            speech = speech.astype(np.float32 )

        # always return batch
        if not is_batched:
            speech = [speech]

        # needed to make pad() work on spectrogram inputs
        feature_size_hack = self.feature_size

        # convert into correct format for padding
        if is_target:
            features = [self._extract_mel_features(waveform ) for waveform in speech]
            inputs = BatchFeature({"""input_values""": features} )
            self.feature_size = self.num_mel_bins
        else:
            inputs = BatchFeature({"""input_values""": speech} )

        padded_inputs = self.pad(
            inputs , padding=padding , max_length=max_length , truncation=truncation , pad_to_multiple_of=pad_to_multiple_of , return_attention_mask=return_attention_mask , **kwargs , )

        self.feature_size = feature_size_hack

        # convert input values to correct format
        input_values = padded_inputs["""input_values"""]
        if not isinstance(input_values[0] , np.ndarray ):
            padded_inputs["""input_values"""] = [np.asarray(array , dtype=np.float32 ) for array in input_values]
        elif (
            not isinstance(input_values , np.ndarray )
            and isinstance(input_values[0] , np.ndarray )
            and input_values[0].dtype is np.dtype(np.float64 )
        ):
            padded_inputs["""input_values"""] = [array.astype(np.float32 ) for array in input_values]
        elif isinstance(input_values , np.ndarray ) and input_values.dtype is np.dtype(np.float64 ):
            padded_inputs["""input_values"""] = input_values.astype(np.float32 )

        # convert attention_mask to correct format
        attention_mask = padded_inputs.get("""attention_mask""" )
        if attention_mask is not None:
            padded_inputs["""attention_mask"""] = [np.asarray(array , dtype=np.int32 ) for array in attention_mask]

        # zero-mean and unit-variance normalization
        if not is_target and self.do_normalize:
            attention_mask = (
                attention_mask
                if self._get_padding_strategies(padding , max_length=max_length ) is not PaddingStrategy.DO_NOT_PAD
                else None
            )
            padded_inputs["""input_values"""] = self.zero_mean_unit_var_norm(
                padded_inputs["""input_values"""] , attention_mask=attention_mask , padding_value=self.padding_value )

        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors )

        return padded_inputs
    def to_dict( self ) -> Dict[str, Any]:
        output = super().to_dict()

        # Don't serialize these as they are derived from the other properties.
        names = ["""window""", """mel_filters""", """sample_size""", """sample_stride""", """n_fft""", """n_freqs"""]
        for name in names:
            if name in output:
                del output[name]

        return output
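# A standalone numpy check of the zero-mean/unit-variance normalization
# implemented above (no transformers objects involved):
if __name__ == "__main__":
    wave = np.random.randn(400) * 3.0 + 1.5
    normed = (wave - wave.mean()) / np.sqrt(wave.var() + 1e-7)
    print(round(float(normed.mean()), 6), round(float(normed.std()), 6))  # ~0.0, ~1.0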
| 164 | 0 |
'''simple docstring'''
import unittest
from transformers import SPIECE_UNDERLINE, XLNetTokenizer, XLNetTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir('''fixtures/test_sentencepiece.model''')
@require_sentencepiece
@require_tokenizers
class XLNetTokenizationTest( TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = XLNetTokenizer
    rust_tokenizer_class = XLNetTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp( self ) -> None:
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = XLNetTokenizer(SAMPLE_VOCAB ,keep_accents=True )
        tokenizer.sanitize_special_tokens()
        tokenizer.save_pretrained(self.tmpdirname )
    def test_convert_token_and_id( self ) -> None:
        """Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
        token = """<s>"""
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token ) ,token_id )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id ) ,token )
    def test_get_vocab( self ) -> None:
        vocab_keys = list(self.get_tokenizer().get_vocab().keys() )

        self.assertEqual(vocab_keys[0] ,"""<unk>""" )
        self.assertEqual(vocab_keys[1] ,"""<s>""" )
        self.assertEqual(vocab_keys[-1] ,"""<eod>""" )
        self.assertEqual(len(vocab_keys ) ,1006 )
    def test_vocab_size( self ) -> None:
        self.assertEqual(self.get_tokenizer().vocab_size ,1000 )
    def test_full_tokenizer( self ) -> None:
        tokenizer = XLNetTokenizer(SAMPLE_VOCAB ,keep_accents=True )

        tokens = tokenizer.tokenize("""This is a test""" )
        self.assertListEqual(tokens ,["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )

        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens ) ,[285, 46, 10, 170, 382] )

        tokens = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
        self.assertListEqual(
            tokens ,[
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
] ,)
        ids = tokenizer.convert_tokens_to_ids(tokens )
        self.assertListEqual(ids ,[8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4] )

        back_tokens = tokenizer.convert_ids_to_tokens(ids )
        self.assertListEqual(
            back_tokens ,[
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""<unk>""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""<unk>""",
""".""",
] ,)
    def test_tokenizer_lower( self ) -> None:
        tokenizer = XLNetTokenizer(SAMPLE_VOCAB ,do_lower_case=True )
        tokens = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
        self.assertListEqual(
            tokens ,[
SPIECE_UNDERLINE + """""",
"""i""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""se""",
""".""",
] ,)
self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) ,["""▁he""", """ll""", """o"""] )
    def test_tokenizer_no_lower( self ) -> None:
        tokenizer = XLNetTokenizer(SAMPLE_VOCAB ,do_lower_case=False )
        tokens = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
        self.assertListEqual(
            tokens ,[
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""se""",
""".""",
] ,)
    @slow
    def test_sequence_builders( self ) -> None:
        tokenizer = XLNetTokenizer.from_pretrained("""xlnet-base-cased""" )

        text = tokenizer.encode("""sequence builders""" ,add_special_tokens=False )
        text_a = tokenizer.encode("""multi-sequence build""" ,add_special_tokens=False )

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text )
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text ,text_a )
assert encoded_sentence == text + [4, 3]
assert encoded_pair == text + [4] + text_a + [4, 3]
@slow
    def test_tokenizer_integration( self ) -> None:
# fmt: off
lowerCAmelCase__ : Optional[Any] = {"""input_ids""": [[17, 2_1442, 270, 17, 10, 1_4645, 318, 34, 17, 4546, 3145, 787, 13, 7752, 2_2018, 23, 21, 17, 4546, 3145, 787, 13, 3352, 1_4431, 13, 5500, 11, 1176, 580, 13, 1_6819, 4797, 23, 17, 10, 1_7135, 658, 19, 457, 7932, 13, 184, 19, 3154, 1_7135, 6468, 19, 1404, 1_2269, 19, 4229, 5356, 1_6264, 46, 19, 17, 2_0545, 1_0395, 9, 9, 9, 11, 28, 6421, 9531, 2_0729, 17, 10, 353, 1_7022, 11, 21, 6421, 9531, 1_6949, 17, 10, 1_1509, 753, 11, 33, 95, 2421, 7385, 956, 1_4431, 2626, 25, 842, 7385, 4836, 21, 1429, 2272, 9855, 3120, 161, 2_4738, 19, 1_3203, 658, 218, 787, 21, 430, 1_8482, 847, 2637, 9, 4, 3], [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 322, 2_2178, 27, 1064, 22, 956, 13, 1_1101, 1429, 5854, 2_4313, 1_8953, 40, 422, 2_4366, 68, 1758, 37, 1_0483, 1_4257, 31, 207, 263, 21, 203, 3773, 25, 71, 9735, 9, 4, 3], [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 32, 2049, 3442, 17, 1_3894, 3380, 23, 95, 18, 1_7634, 2288, 9, 4, 3]], """token_type_ids""": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2], [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2], [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=__UpperCAmelCase ,model_name="""xlnet-base-cased""" ,revision="""c841166438c31ec7ca9a106dee7bb312b73ae511""" ,)
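# How the "▁" (SPIECE_UNDERLINE) marker in the expected tokens above maps back
# to text — plain Python, no tokenizer needed:
pieces = ["▁This", "▁is", "▁a", "▁t", "est"]
assert "".join(pieces).replace("▁", " ").strip() == "This is a test"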
| 37 |
'''simple docstring'''
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
    literal_eval(os.getenv('''TEST_SAGEMAKER''' , '''False''' ) ) is not True , reason='''Skipping test because it should only be run when releasing a minor transformers version''' , )
@pytest.mark.usefixtures('''sm_env''' )
@parameterized_class(
[
{
'''framework''': '''pytorch''',
'''script''': '''run_glue.py''',
'''model_name_or_path''': '''distilbert-base-cased''',
'''instance_type''': '''ml.p3.16xlarge''',
'''results''': {'''train_runtime''': 6_5_0, '''eval_accuracy''': 0.7, '''eval_loss''': 0.6},
},
{
'''framework''': '''pytorch''',
'''script''': '''run_ddp.py''',
'''model_name_or_path''': '''distilbert-base-cased''',
'''instance_type''': '''ml.p3.16xlarge''',
'''results''': {'''train_runtime''': 6_0_0, '''eval_accuracy''': 0.7, '''eval_loss''': 0.6},
},
{
'''framework''': '''tensorflow''',
'''script''': '''run_tf_dist.py''',
'''model_name_or_path''': '''distilbert-base-cased''',
'''instance_type''': '''ml.p3.16xlarge''',
'''results''': {'''train_runtime''': 6_0_0, '''eval_accuracy''': 0.6, '''eval_loss''': 0.7},
},
] )
class MultiNodeTest( unittest.TestCase ):
    def setUp( self ) -> None:
        if self.framework == "pytorch":
            subprocess.run(
                F"""cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py""".split() ,encoding="""utf-8""" ,check=True ,)
        assert hasattr(self ,"""env""" )
    def create_estimator( self ,instance_count ):
        job_name = F"""{self.env.base_job_name}-{instance_count}-{'ddp' if 'ddp' in self.script else 'smd'}"""
        # distributed data settings
        distribution = {"""smdistributed""": {"""dataparallel""": {"""enabled""": True}}} if self.script != """run_ddp.py""" else None

        # creates estimator
        return HuggingFace(
            entry_point=self.script ,source_dir=self.env.test_path ,role=self.env.role ,image_uri=self.env.image_uri ,base_job_name=job_name ,instance_count=instance_count ,instance_type=self.instance_type ,debugger_hook_config=False ,hyperparameters={**self.env.distributed_hyperparameters, """model_name_or_path""": self.model_name_or_path} ,metric_definitions=self.env.metric_definitions ,distribution=distribution ,py_version="""py36""" ,)
    def save_results_as_csv( self ,job_name ):
        TrainingJobAnalytics(job_name ).export_csv(F"""{self.env.test_path}/{job_name}_metrics.csv""" )
    @parameterized.expand([(2,)] )
    def test_script( self ,instance_count ):
        # create estimator
        estimator = self.create_estimator(instance_count )

        # run training
        estimator.fit()

        # result dataframe
        result_metrics_df = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe()

        # extract kpis
        eval_accuracy = list(result_metrics_df[result_metrics_df.metric_name == """eval_accuracy"""]["""value"""] )
        eval_loss = list(result_metrics_df[result_metrics_df.metric_name == """eval_loss"""]["""value"""] )
        # get train time from SageMaker job, this includes starting, preprocessing, stopping
        train_runtime = (
            Session().describe_training_job(estimator.latest_training_job.name ).get("""TrainingTimeInSeconds""" ,99_9999 )
        )

        # assert kpis
        assert train_runtime <= self.results["train_runtime"]
        assert all(t >= self.results["""eval_accuracy"""] for t in eval_accuracy )
        assert all(t <= self.results["""eval_loss"""] for t in eval_loss )

        # dump tests result into json file to share in PR
        with open(F"""{estimator.latest_training_job.name}.json""" ,"""w""" ) as outfile:
            json.dump({"""train_time""": train_runtime, """eval_accuracy""": eval_accuracy, """eval_loss""": eval_loss} ,outfile )
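# The KPI extraction used in `test_script`, shown on a plain DataFrame and
# isolated from SageMaker (pandas assumed available):
import pandas as pd

df = pd.DataFrame({"metric_name": ["eval_accuracy", "eval_loss"], "value": [0.8, 0.5]})
eval_accuracy = list(df[df.metric_name == "eval_accuracy"]["value"])  # -> [0.8]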
| 37 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_bigbird_pegasus''': [
'''BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''BigBirdPegasusConfig''',
'''BigBirdPegasusOnnxConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_bigbird_pegasus'''] = [
'''BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''BigBirdPegasusForCausalLM''',
'''BigBirdPegasusForConditionalGeneration''',
'''BigBirdPegasusForQuestionAnswering''',
'''BigBirdPegasusForSequenceClassification''',
'''BigBirdPegasusModel''',
'''BigBirdPegasusPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_bigbird_pegasus import (
BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP,
BigBirdPegasusConfig,
BigBirdPegasusOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bigbird_pegasus import (
BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST,
BigBirdPegasusForCausalLM,
BigBirdPegasusForConditionalGeneration,
BigBirdPegasusForQuestionAnswering,
BigBirdPegasusForSequenceClassification,
BigBirdPegasusModel,
BigBirdPegasusPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
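# The lazy-import idea above in miniature (a sketch, not the transformers
# implementation): attributes resolve on first access instead of at import time.
import importlib
import types

class TinyLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._import_structure = import_structure

    def __getattr__(self, attr):
        for submodule, names in self._import_structure.items():
            if attr in names:
                module = importlib.import_module(f"{self.__name__}.{submodule}")
                return getattr(module, attr)
        raise AttributeError(f"module {self.__name__} has no attribute {attr}")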
| 354 |
'''simple docstring'''
from typing import Optional, Tuple, Union
import torch
from einops import rearrange, reduce
from diffusers import DDIMScheduler, DDPMScheduler, DiffusionPipeline, ImagePipelineOutput, UNet2DConditionModel
from diffusers.schedulers.scheduling_ddim import DDIMSchedulerOutput
from diffusers.schedulers.scheduling_ddpm import DDPMSchedulerOutput
BITS = 8
def decimal_to_bits(x , bits=BITS ) -> torch.Tensor:
    """Expand an image tensor in [0, 1] to a bit representation in {-1, 1}."""
    device = x.device

    x = (x * 255).int().clamp(0 , 255 )

    mask = 2 ** torch.arange(bits - 1 , -1 , -1 , device=device )
    mask = rearrange(mask , """d -> d 1 1""" )
    x = rearrange(x , """b c h w -> b c 1 h w""" )

    bits = ((x & mask) != 0).float()
    bits = rearrange(bits , """b c d h w -> b (c d) h w""" )
    bits = bits * 2 - 1
    return bits
def bits_to_decimal(x , bits=BITS ) -> torch.Tensor:
    """Collapse a bit tensor in {-1, 1} back to an image tensor in [0, 1]."""
    device = x.device

    x = (x > 0).int()
    mask = 2 ** torch.arange(bits - 1 , -1 , -1 , device=device , dtype=torch.int32 )

    mask = rearrange(mask , """d -> d 1 1""" )
    x = rearrange(x , """b (c d) h w -> b c d h w""" , d=8 )
    dec = reduce(x * mask , """b c d h w -> b c h w""" , """sum""" )
    return (dec / 255).clamp(0.0 , 1.0 )
def ddim_bit_scheduler_step(self , model_output , timestep , sample , eta = 0.0 , use_clipped_model_output = True , generator=None , return_dict = True , ) -> Union[DDIMSchedulerOutput, Tuple]:
    if self.num_inference_steps is None:
        raise ValueError(
            """Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler""" )

    # See formulas (12) and (16) of DDIM paper https://arxiv.org/pdf/2010.02502.pdf
    # Ideally, read DDIM paper in-detail understanding

    # Notation (<variable name> -> <name in paper>
    # - pred_noise_t -> e_theta(x_t, t)
    # - pred_original_sample -> f_theta(x_t, t) or x_0
    # - std_dev_t -> sigma_t
    # - eta -> η
    # - pred_sample_direction -> "direction pointing to x_t"
    # - pred_prev_sample -> "x_t-1"

    # 1. get previous step value (=t-1)
    prev_timestep = timestep - self.config.num_train_timesteps // self.num_inference_steps

    # 2. compute alphas, betas
    alpha_prod_t = self.alphas_cumprod[timestep]
    alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.final_alpha_cumprod

    beta_prod_t = 1 - alpha_prod_t

    # 3. compute predicted original sample from predicted noise also called
    # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
    pred_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5

    # 4. Clip "predicted x_0"
    scale = self.bit_scale
    if self.config.clip_sample:
        pred_original_sample = torch.clamp(pred_original_sample , -scale , scale )

    # 5. compute variance: "sigma_t(η)" -> see formula (16)
    # σ_t = sqrt((1 − α_t−1)/(1 − α_t)) * sqrt(1 − α_t/α_t−1)
    variance = self._get_variance(timestep , prev_timestep )
    std_dev_t = eta * variance ** 0.5

    if use_clipped_model_output:
        # the model_output is always re-derived from the clipped x_0 in Glide
        model_output = (sample - alpha_prod_t ** 0.5 * pred_original_sample) / beta_prod_t ** 0.5

    # 6. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
    pred_sample_direction = (1 - alpha_prod_t_prev - std_dev_t**2) ** 0.5 * model_output

    # 7. compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
    prev_sample = alpha_prod_t_prev ** 0.5 * pred_original_sample + pred_sample_direction

    if eta > 0:
        # randn_like does not support generator https://github.com/pytorch/pytorch/issues/27072
        device = model_output.device if torch.is_tensor(model_output ) else """cpu"""
        noise = torch.randn(model_output.shape , dtype=model_output.dtype , generator=generator ).to(device )
        variance = self._get_variance(timestep , prev_timestep ) ** 0.5 * eta * noise

        prev_sample = prev_sample + variance

    if not return_dict:
        return (prev_sample,)

    return DDIMSchedulerOutput(prev_sample=prev_sample , pred_original_sample=pred_original_sample )
def ddpm_bit_scheduler_step(self , model_output , timestep , sample , prediction_type="epsilon" , generator=None , return_dict = True , ) -> Union[DDPMSchedulerOutput, Tuple]:
    t = timestep

    if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type in ["learned", "learned_range"]:
        model_output, predicted_variance = torch.split(model_output , sample.shape[1] , dim=1 )
    else:
        predicted_variance = None

    # 1. compute alphas, betas
    alpha_prod_t = self.alphas_cumprod[t]
    alpha_prod_t_prev = self.alphas_cumprod[t - 1] if t > 0 else self.one

    beta_prod_t = 1 - alpha_prod_t
    beta_prod_t_prev = 1 - alpha_prod_t_prev

    # 2. compute predicted original sample from predicted noise also called
    # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
    if prediction_type == "epsilon":
        pred_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
    elif prediction_type == "sample":
        pred_original_sample = model_output
    else:
        raise ValueError(f'''Unsupported prediction_type {prediction_type}.''' )

    # 3. Clip "predicted x_0"
    scale = self.bit_scale
    if self.config.clip_sample:
        pred_original_sample = torch.clamp(pred_original_sample , -scale , scale )

    # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
    # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
    pred_original_sample_coeff = (alpha_prod_t_prev ** 0.5 * self.betas[t]) / beta_prod_t
    current_sample_coeff = self.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t

    # 5. Compute predicted previous sample µ_t
    # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
    pred_prev_sample = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample

    # 6. Add noise
    variance = 0
    if t > 0:
        noise = torch.randn(
            model_output.size() , dtype=model_output.dtype , layout=model_output.layout , generator=generator ).to(model_output.device )
        variance = (self._get_variance(t , predicted_variance=predicted_variance ) ** 0.5) * noise

    pred_prev_sample = pred_prev_sample + variance

    if not return_dict:
        return (pred_prev_sample,)

    return DDPMSchedulerOutput(prev_sample=pred_prev_sample , pred_original_sample=pred_original_sample )
class BitDiffusion( DiffusionPipeline ):
    def __init__( self , unet: UNet2DConditionModel , scheduler: Union[DDIMScheduler, DDPMScheduler] , bit_scale: Optional[float] = 1.0 , ):
        super().__init__()
        self.bit_scale = bit_scale
        # route the scheduler's `step` through the bit-aware functions above;
        # `__get__` binds the plain function to the scheduler instance
        scheduler.step = (
            ddim_bit_scheduler_step if isinstance(scheduler , DDIMScheduler ) else ddpm_bit_scheduler_step
        ).__get__(scheduler )
        self.register_modules(unet=unet , scheduler=scheduler )
    @torch.no_grad()
    def __call__( self , height: Optional[int] = 256 , width: Optional[int] = 256 , num_inference_steps: Optional[int] = 50 , generator: Optional[torch.Generator] = None , batch_size: Optional[int] = 1 , output_type: Optional[str] = "pil" , return_dict: bool = True , **kwargs , ):
        latents = torch.randn(
            (batch_size, self.unet.config.in_channels, height, width) , generator=generator , )
        latents = decimal_to_bits(latents ) * self.bit_scale
        latents = latents.to(self.device )

        self.scheduler.set_timesteps(num_inference_steps )

        for t in self.progress_bar(self.scheduler.timesteps ):
            # predict the noise residual
            noise_pred = self.unet(latents , t ).sample

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred , t , latents ).prev_sample

        image = bits_to_decimal(latents )

        if output_type == "pil":
            image = self.numpy_to_pil(image )

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image )
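# Round-trip sanity check for the bit codec above (torch and einops assumed):
# pixels in [0, 1] quantize to 8 bits, map to {-1, +1}, and recover exactly up
# to the 1/255 quantization.
if __name__ == "__main__":
    x = torch.rand(2, 3, 8, 8)
    bits = decimal_to_bits(x)  # shape (2, 24, 8, 8), values in {-1., 1.}
    assert (bits.abs() == 1).all()
    recovered = bits_to_decimal(bits)
    assert torch.allclose(recovered, (x * 255).int().float() / 255)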
| 129 | 0 |
import math
def proth(number: int ) -> int:
    """Return the ``number``-th Proth number (1-indexed: 3, 5, 9, 13, ...)."""
    if not isinstance(number , int ):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg )

    if number < 1:
        msg = f"Input value of [number={number}] must be > 0"
        raise ValueError(msg )
    elif number == 1:
        return 3
    elif number == 2:
        return 5
    else:
        # +1 for binary starting at 0 i.e. 2^0, 2^1, etc.
        # +1 to start the sequence at the 3rd prime number
        block_index = int(math.log(number // 3 , 2 ) ) + 2

        proth_list = [3, 5]
        proth_index = 2
        increment = 3
        for block in range(1 , block_index ):
            for _ in range(increment ):
                proth_list.append(2 ** (block + 1) + proth_list[proth_index - 1] )
                proth_index += 1
            increment *= 2

    return proth_list[number - 1]
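# Quick property check (an illustrative addition): every Proth number has the
# form k * 2^n + 1 with k odd and k < 2^n.
def is_proth_form(value: int) -> bool:
    m = value - 1
    n = (m & -m).bit_length() - 1  # exponent of the largest power of two dividing m
    k = m >> n
    return k % 2 == 1 and k < 2 ** n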
if __name__ == "__main__":
import doctest
doctest.testmod()
for number in range(11):
        value = 0
        try:
            value = proth(number)
except ValueError:
print(f'''ValueError: there is no {number}th Proth number''')
continue
print(f'''The {number}th Proth number: {value}''')
| 5 |
import comet # From: unbabel-comet
import torch
import datasets
logger = datasets.logging.get_logger(__name__)
_CITATION = '''\
@inproceedings{rei-EtAl:2020:WMT,
author = {Rei, Ricardo and Stewart, Craig and Farinha, Ana C and Lavie, Alon},
title = {Unbabel\'s Participation in the WMT20 Metrics Shared Task},
booktitle = {Proceedings of the Fifth Conference on Machine Translation},
month = {November},
year = {2020},
address = {Online},
publisher = {Association for Computational Linguistics},
pages = {909--918},
}
@inproceedings{rei-etal-2020-comet,
title = "{COMET}: A Neural Framework for {MT} Evaluation",
author = "Rei, Ricardo and
Stewart, Craig and
Farinha, Ana C and
Lavie, Alon",
booktitle = "Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP)",
month = nov,
year = "2020",
address = "Online",
publisher = "Association for Computational Linguistics",
url = "https://www.aclweb.org/anthology/2020.emnlp-main.213",
pages = "2685--2702",
}
'''
_DESCRIPTION = '''\
Crosslingual Optimized Metric for Evaluation of Translation (COMET) is an open-source framework used to train Machine Translation metrics that achieve high levels of correlation with different types of human judgments (HTER, DA\'s or MQM).
With the release of the framework the authors also released fully trained models that were used to compete in the WMT20 Metrics Shared Task achieving SOTA in that years competition.
See the [README.md] file at https://unbabel.github.io/COMET/html/models.html for more information.
'''
_KWARGS_DESCRIPTION = '''
COMET score.
Args:
`sources` (list of str): Source sentences
`predictions` (list of str): candidate translations
`references` (list of str): reference translations
`cuda` (bool): If set to True, runs COMET using GPU
`show_progress` (bool): Shows progress
`model`: COMET model to be used. Will default to `wmt-large-da-estimator-1719` if None.
Returns:
`samples`: List of dictionaries with `src`, `mt`, `ref` and `score`.
`scores`: List of scores.
Examples:
>>> comet_metric = datasets.load_metric(\'comet\')
>>> # comet_metric = load_metric(\'comet\', \'wmt20-comet-da\') # you can also choose which model to use
>>> source = ["Dem Feuer konnte Einhalt geboten werden", "Schulen und Kindergärten wurden eröffnet."]
>>> hypothesis = ["The fire could be stopped", "Schools and kindergartens were open"]
>>> reference = ["They were able to control the fire.", "Schools and kindergartens opened"]
>>> results = comet_metric.compute(predictions=hypothesis, references=reference, sources=source)
>>> print([round(v, 2) for v in results["scores"]])
[0.19, 0.92]
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION)
class COMET(datasets.Metric):
    def _info(self ) -> datasets.MetricInfo:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage='''https://unbabel.github.io/COMET/html/index.html''' , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''sources''': datasets.Value('''string''' , id='''sequence''' ),
'''predictions''': datasets.Value('''string''' , id='''sequence''' ),
'''references''': datasets.Value('''string''' , id='''sequence''' ),
} ) , codebase_urls=['''https://github.com/Unbabel/COMET'''] , reference_urls=[
'''https://github.com/Unbabel/COMET''',
'''https://www.aclweb.org/anthology/2020.emnlp-main.213/''',
'''http://www.statmt.org/wmt20/pdf/2020.wmt-1.101.pdf6''',
] , )
    def _download_and_prepare(self , dl_manager ) -> None:
        if self.config_name == "default":
            self.scorer = comet.load_from_checkpoint(comet.download_model('''wmt20-comet-da''' ) )
        else:
            self.scorer = comet.load_from_checkpoint(comet.download_model(self.config_name ) )
    def _compute(self , sources , predictions , references , gpus=None , progress_bar=False ) -> dict:
        if gpus is None:
            gpus = 1 if torch.cuda.is_available() else 0
        data = {'''src''': sources, '''mt''': predictions, '''ref''': references}
        data = [dict(zip(data , t ) ) for t in zip(*data.values() )]
        mean_score, scores = self.scorer.predict(data , gpus=gpus , progress_bar=progress_bar )
        return {"mean_score": mean_score, "scores": scores}
| 5 | 1 |
import argparse
import logging
import pickle
import random
import time
import numpy as np
from transformers import BertTokenizer, GPT2Tokenizer, RobertaTokenizer
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO
)
logger = logging.getLogger(__name__)
def main():
    parser = argparse.ArgumentParser(
        description='Preprocess the data to avoid re-doing it several times by (tokenization + token_to_ids).' )
    parser.add_argument('--file_path', type=str, default='data/dump.txt', help='The path to the data.' )
    parser.add_argument('--tokenizer_type', type=str, default='bert', choices=['bert', 'roberta', 'gpt2'] )
    parser.add_argument('--tokenizer_name', type=str, default='bert-base-uncased', help='The tokenizer to use.' )
    parser.add_argument('--dump_file', type=str, default='data/dump', help='The dump file prefix.' )
    args = parser.parse_args()

    logger.info(f"Loading Tokenizer ({args.tokenizer_name})" )
    if args.tokenizer_type == "bert":
        tokenizer = BertTokenizer.from_pretrained(args.tokenizer_name )
        bos = tokenizer.special_tokens_map['cls_token']  # `[CLS]`
        sep = tokenizer.special_tokens_map['sep_token']  # `[SEP]`
    elif args.tokenizer_type == "roberta":
        tokenizer = RobertaTokenizer.from_pretrained(args.tokenizer_name )
        bos = tokenizer.special_tokens_map['cls_token']  # `<s>`
        sep = tokenizer.special_tokens_map['sep_token']  # `</s>`
    elif args.tokenizer_type == "gpt2":
        tokenizer = GPT2Tokenizer.from_pretrained(args.tokenizer_name )
        bos = tokenizer.special_tokens_map['bos_token']  # `<|endoftext|>`
        sep = tokenizer.special_tokens_map['eos_token']  # `<|endoftext|>`

    logger.info(f"Loading text from {args.file_path}" )
    with open(args.file_path , 'r' , encoding='utf8' ) as fp:
        data = fp.readlines()

    logger.info('Start encoding' )
    logger.info(f"{len(data )} examples to process." )

    rslt = []
    iter = 0
    interval = 1_0000
    start = time.time()
    for text in data:
        text = f"{bos} {text.strip()} {sep}"
        token_ids = tokenizer.encode(text , add_special_tokens=False )
        rslt.append(token_ids )

        iter += 1
        if iter % interval == 0:
            end = time.time()
            logger.info(f"{iter} examples processed. - {(end-start):.2f}s/{interval}expl" )
            start = time.time()
    logger.info('Finished binarization' )
    logger.info(f"{len(data )} examples processed." )

    dp_file = f"{args.dump_file}.{args.tokenizer_name}.pickle"
    vocab_size = tokenizer.vocab_size
    if vocab_size < (1 << 1_6):
        rslt_ = [np.uint16(d ) for d in rslt]
    else:
        rslt_ = [np.int32(d ) for d in rslt]
    random.shuffle(rslt_ )
    logger.info(f"Dump to {dp_file}" )
    with open(dp_file , 'wb' ) as handle:
        pickle.dump(rslt_ , handle , protocol=pickle.HIGHEST_PROTOCOL )
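# Why uint16 above: token ids fit in two bytes whenever the vocabulary has
# fewer than 2**16 entries, halving the pickle size. A self-contained check:
_ids = np.array([321, 14645, 318])
assert np.uint16(_ids).nbytes * 2 == np.int32(_ids).nbytes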
if __name__ == "__main__":
main()
| 90 |
import argparse
import os
import re
import packaging.version
PATH_TO_EXAMPLES = 'examples/'
REPLACE_PATTERNS = {
'examples': (re.compile(R'^check_min_version\("[^"]+"\)\s*$', re.MULTILINE), 'check_min_version("VERSION")\n'),
'init': (re.compile(R'^__version__\s+=\s+"([^"]+)"\s*$', re.MULTILINE), '__version__ = "VERSION"\n'),
'setup': (re.compile(R'^(\s*)version\s*=\s*"[^"]+",', re.MULTILINE), R'\1version="VERSION",'),
'doc': (re.compile(R'^(\s*)release\s*=\s*"[^"]+"$', re.MULTILINE), 'release = "VERSION"\n'),
}
REPLACE_FILES = {
'init': 'src/transformers/__init__.py',
'setup': 'setup.py',
}
README_FILE = 'README.md'
def update_version_in_file( fname , version , pattern ):
    """Update the version in one file using a specific pattern."""
    with open(fname , 'r' , encoding='utf-8' , newline='\n' ) as f:
        code = f.read()
    re_pattern, replace = REPLACE_PATTERNS[pattern]
    replace = replace.replace('VERSION' , version )
    code = re_pattern.sub(replace , code )
    with open(fname , 'w' , encoding='utf-8' , newline='\n' ) as f:
        f.write(code )
def update_version_in_examples( version ):
    """Update the version in all examples files."""
    for folder, directories, fnames in os.walk(PATH_TO_EXAMPLES ):
        # Removing some of the folders with non-actively maintained examples from the walk
        if "research_projects" in directories:
            directories.remove('research_projects' )
        if "legacy" in directories:
            directories.remove('legacy' )
        for fname in fnames:
            if fname.endswith('.py' ):
                update_version_in_file(os.path.join(folder , fname ) , version , pattern='examples' )
def global_version_update( version , patch=False ):
    """Update the version in all needed files."""
    for pattern, fname in REPLACE_FILES.items():
        update_version_in_file(fname , version , pattern )
    if not patch:
        update_version_in_examples(version )
def clean_main_ref_in_model_list():
    """Replace the links from main doc to stable doc in the model list of the README."""
    _start_prompt = '🤗 Transformers currently provides the following architectures'
    _end_prompt = '1. Want to contribute a new model?'
    with open(README_FILE , 'r' , encoding='utf-8' , newline='\n' ) as f:
        lines = f.readlines()

    # Find the start of the list.
    start_index = 0
    while not lines[start_index].startswith(_start_prompt ):
        start_index += 1
    start_index += 1

    index = start_index
    # Update the lines in the model list.
    while not lines[index].startswith(_end_prompt ):
        if lines[index].startswith('1.' ):
            lines[index] = lines[index].replace(
                'https://huggingface.co/docs/transformers/main/model_doc' , 'https://huggingface.co/docs/transformers/model_doc' , )
        index += 1

    with open(README_FILE , 'w' , encoding='utf-8' , newline='\n' ) as f:
        f.writelines(lines )
def get_version():
    """Read the current version in the __init__."""
    with open(REPLACE_FILES['init'] , 'r' ) as f:
        code = f.read()
    default_version = REPLACE_PATTERNS['init'][0].search(code ).groups()[0]
    return packaging.version.parse(default_version )
def pre_release_work(patch: bool = False):
    default_version = get_version()
    if patch and default_version.is_devrelease:
        raise ValueError("Can't create a patch version from the dev branch, checkout a released version!")
    if default_version.is_devrelease:
        default_version = default_version.base_version
    elif patch:
        default_version = f"{default_version.major}.{default_version.minor}.{default_version.micro + 1}"
    else:
        default_version = f"{default_version.major}.{default_version.minor + 1}.0"
    # Now let's ask nicely if that's the right one.
    version = input(f"Which version are you releasing? [{default_version}]")
    if len(version) == 0:
        version = default_version
    print(f"Updating version to {version}.")
    global_version_update(version, patch=patch)
    if not patch:
        print("Cleaning main README, don't forget to run `make fix-copies`.")
        clean_main_ref_in_model_list()
def post_release_work():
    current_version = get_version()
    dev_version = f"{current_version.major}.{current_version.minor + 1}.0.dev0"
    current_version = current_version.base_version
    # Check with the user we got that right.
    version = input(f"Which version are we developing now? [{dev_version}]")
    if len(version) == 0:
        version = dev_version
    print(f"Updating version to {version}.")
    global_version_update(version)
    print("Cleaning main README, don't forget to run `make fix-copies`.")
    clean_main_ref_in_model_list()
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--post_release", action="store_true", help="Whether this is pre or post release.")
    parser.add_argument("--patch", action="store_true", help="Whether or not this is a patch release.")
    args = parser.parse_args()
    if not args.post_release:
        pre_release_work(patch=args.patch)
    elif args.patch:
        print("Nothing to do after a patch :-)")
    else:
        post_release_work()
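# Typical invocations, sketched from the flags defined above (illustrative, not exhaustive):
#   python release.py                 # prepare the next minor release and clean the README
#   python release.py --patch         # bump only the micro version
#   python release.py --post_release  # move the repo back to a ".dev0" version afterwards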
| 90 | 1 |
"""simple docstring"""
import logging
import os
from dataclasses import dataclass
from typing import List, Optional, Union
import tqdm
from filelock import FileLock
from transformers import (
BartTokenizer,
BartTokenizerFast,
DataProcessor,
PreTrainedTokenizer,
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
is_tf_available,
is_torch_available,
)
logger = logging.getLogger(__name__)


@dataclass(frozen=True)
class InputExample:
    """A single training/test example for the HANS dataset."""

    guid: str
    text_a: str
    text_b: Optional[str] = None
    label: Optional[str] = None
    pairID: Optional[str] = None


@dataclass(frozen=True)
class InputFeatures:
    """A single set of features of data."""

    input_ids: List[int]
    attention_mask: Optional[List[int]] = None
    token_type_ids: Optional[List[int]] = None
    label: Optional[Union[int, float]] = None
    pairID: Optional[int] = None
if is_torch_available():
import torch
from torch.utils.data import Dataset
    class HansDataset(Dataset):
        features: List[InputFeatures]

        def __init__(self, data_dir: str, tokenizer: PreTrainedTokenizer, task: str, max_seq_length: Optional[int] = None, overwrite_cache=False, evaluate: bool = False):
            processor = hans_processors[task]()
            cached_features_file = os.path.join(
                data_dir,
                "cached_{}_{}_{}_{}".format(
                    "dev" if evaluate else "train", tokenizer.__class__.__name__, str(max_seq_length), task
                ),
            )
            label_list = processor.get_labels()
if tokenizer.__class__ in (
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
BartTokenizer,
BartTokenizerFast,
):
                # HACK(label indices are swapped in RoBERTa pretrained model)
                label_list[1], label_list[2] = label_list[2], label_list[1]
            self.label_list = label_list
            # Make sure only the first process in distributed training processes the dataset,
            # and the others will use the cache.
            lock_path = cached_features_file + ".lock"
            with FileLock(lock_path):
                if os.path.exists(cached_features_file) and not overwrite_cache:
                    logger.info(f"Loading features from cached file {cached_features_file}")
                    self.features = torch.load(cached_features_file)
                else:
                    logger.info(f"Creating features from dataset file at {data_dir}")
                    examples = (
                        processor.get_dev_examples(data_dir) if evaluate else processor.get_train_examples(data_dir)
                    )
                    logger.info("Training examples: %s", len(examples))
                    self.features = hans_convert_examples_to_features(examples, label_list, max_seq_length, tokenizer)
                    logger.info("Saving features into cached file %s", cached_features_file)
                    torch.save(self.features, cached_features_file)
        def __len__(self) -> int:
            return len(self.features)

        def __getitem__(self, i) -> InputFeatures:
            return self.features[i]

        def get_labels(self):
            return self.label_list
if is_tf_available():
import tensorflow as tf
    class TFHansDataset:
        features: List[InputFeatures]

        def __init__(self, data_dir: str, tokenizer: PreTrainedTokenizer, task: str, max_seq_length: Optional[int] = 128, overwrite_cache=False, evaluate: bool = False):
            processor = hans_processors[task]()
            label_list = processor.get_labels()
            if tokenizer.__class__ in (
                RobertaTokenizer,
                RobertaTokenizerFast,
                XLMRobertaTokenizer,
                BartTokenizer,
                BartTokenizerFast,
            ):
                # HACK(label indices are swapped in RoBERTa pretrained model)
                label_list[1], label_list[2] = label_list[2], label_list[1]
            self.label_list = label_list
            examples = processor.get_dev_examples(data_dir) if evaluate else processor.get_train_examples(data_dir)
            self.features = hans_convert_examples_to_features(examples, label_list, max_seq_length, tokenizer)
            def gen():
                for ex_index, ex in tqdm.tqdm(enumerate(self.features), desc="convert examples to features"):
                    if ex_index % 10000 == 0:
                        logger.info("Writing example %d of %d" % (ex_index, len(examples)))
yield (
{
"example_id": 0,
"input_ids": ex.input_ids,
"attention_mask": ex.attention_mask,
"token_type_ids": ex.token_type_ids,
},
ex.label,
)
            self.dataset = tf.data.Dataset.from_generator(
                gen,
                (
                    {
                        "example_id": tf.int32,
                        "input_ids": tf.int32,
                        "attention_mask": tf.int32,
                        "token_type_ids": tf.int32,
                    },
                    tf.int64,
                ),
                (
                    {
                        "example_id": tf.TensorShape([]),
                        "input_ids": tf.TensorShape([None, None]),
                        "attention_mask": tf.TensorShape([None, None]),
                        "token_type_ids": tf.TensorShape([None, None]),
                    },
                    tf.TensorShape([]),
                ),
            )
        def get_dataset(self):
            return self.dataset

        def __len__(self):
            return len(self.features)

        def __getitem__(self, i) -> InputFeatures:
            return self.features[i]

        def get_labels(self):
            return self.label_list
class HansProcessor(DataProcessor):
    """Processor for the HANS data set."""

    def get_train_examples(self, data_dir):
        """See base class."""
        return self._create_examples(self._read_tsv(os.path.join(data_dir, "heuristics_train_set.txt")), "train")

    def get_dev_examples(self, data_dir):
        """See base class."""
        return self._create_examples(self._read_tsv(os.path.join(data_dir, "heuristics_evaluation_set.txt")), "dev")

    def get_labels(self):
        """See base class."""
        return ["contradiction", "entailment", "neutral"]

    def _create_examples(self, lines, set_type):
        """Creates examples for the training and dev sets."""
        examples = []
        for i, line in enumerate(lines):
            if i == 0:
                continue
            guid = "%s-%s" % (set_type, line[0])
            text_a = line[5]
            text_b = line[6]
            label = line[7][2:] if line[7].startswith("ex") else line[7]
            pairID = line[0]
            examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label, pairID=pairID))
        return examples
def hans_convert_examples_to_features(
    examples: List[InputExample],
    label_list: List[str],
    max_length: int,
    tokenizer: PreTrainedTokenizer,
):
    """Loads a data file into a list of ``InputFeatures``."""
    label_map = {label: i for i, label in enumerate(label_list)}

    features = []
    for ex_index, example in tqdm.tqdm(enumerate(examples), desc="convert examples to features"):
        if ex_index % 10000 == 0:
            logger.info("Writing example %d" % (ex_index))
        inputs = tokenizer(
            example.text_a,
            example.text_b,
            add_special_tokens=True,
            max_length=max_length,
            padding="max_length",
            truncation=True,
            return_overflowing_tokens=True,
        )
        label = label_map[example.label] if example.label in label_map else 0
        pairID = int(example.pairID)
        features.append(InputFeatures(**inputs, label=label, pairID=pairID))

    for i, example in enumerate(examples[:5]):
        logger.info("*** Example ***")
        logger.info(f"guid: {example}")
        logger.info(f"features: {features[i]}")

    return features
hans_tasks_num_labels = {
    "hans": 3,
}

hans_processors = {
    "hans": HansProcessor,
}
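# Minimal usage sketch (the checkpoint name and path are illustrative, not part of this module):
#   tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
#   train_set = HansDataset("path/to/hans", tokenizer, task="hans", max_seq_length=128)
#   print(train_set.get_labels(), len(train_set))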
| 105 |
from __future__ import annotations
import math
def minimax(depth: int, node_index: int, is_max: bool, scores: list[int], height: float) -> int:
    """Return the optimal value for the player at `depth`, alternating max and min turns."""
    if depth < 0:
        raise ValueError("Depth cannot be less than 0")
    if len(scores) == 0:
        raise ValueError("Scores cannot be empty")
    if depth == height:
        return scores[node_index]
    if is_max:
        return max(
            minimax(depth + 1, node_index * 2, False, scores, height),
            minimax(depth + 1, node_index * 2 + 1, False, scores, height),
        )
    return min(
        minimax(depth + 1, node_index * 2, True, scores, height),
        minimax(depth + 1, node_index * 2 + 1, True, scores, height),
    )


def main() -> None:
    scores = [90, 23, 6, 33, 21, 65, 123, 34423]
    height = math.log(len(scores), 2)
    print("Optimal value : ", end="")
    print(minimax(0, 0, True, scores, height))
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 284 | 0 |
'''simple docstring'''
import warnings
from typing import Dict
import numpy as np
from ..utils import ExplicitEnum, add_end_docstrings, is_tf_available, is_torch_available
from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
def sigmoid(_outputs):
    return 1.0 / (1.0 + np.exp(-_outputs))


def softmax(_outputs):
    maxes = np.max(_outputs, axis=-1, keepdims=True)
    shifted_exp = np.exp(_outputs - maxes)
    return shifted_exp / shifted_exp.sum(axis=-1, keepdims=True)
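# Subtracting the row-wise max before exponentiating keeps the softmax numerically
# stable for large logits; the constant cancels out, so the probabilities are unchanged.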
class ClassificationFunction(ExplicitEnum):
    SIGMOID = "sigmoid"
    SOFTMAX = "softmax"
    NONE = "none"
@add_end_docstrings(
    PIPELINE_INIT_ARGS,
    r"""
        return_all_scores (`bool`, *optional*, defaults to `False`):
            Whether to return all prediction scores or just the one of the predicted class.
        function_to_apply (`str`, *optional*, defaults to `"default"`):
            The function to apply to the model outputs in order to retrieve the scores. Accepts four different values:

            - `"default"`: if the model has a single label, will apply the sigmoid function on the output. If the model
              has several labels, will apply the softmax function on the output.
            - `"sigmoid"`: Applies the sigmoid function on the output.
            - `"softmax"`: Applies the softmax function on the output.
            - `"none"`: Does not apply any function on the output.
    """,
)
class TextClassificationPipeline(Pipeline):
    """Text classification pipeline using any model with a sequence-classification head."""

    return_all_scores = False
    function_to_apply = ClassificationFunction.NONE
    def __init__(self, **kwargs):
        super().__init__(**kwargs)

        self.check_model_type(
            TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
            if self.framework == "tf"
            else MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
        )
    def _sanitize_parameters(self, return_all_scores=None, function_to_apply=None, top_k="", **tokenizer_kwargs):
        preprocess_params = tokenizer_kwargs

        postprocess_params = {}
        if hasattr(self.model.config, "return_all_scores") and return_all_scores is None:
            return_all_scores = self.model.config.return_all_scores

        if isinstance(top_k, int) or top_k is None:
            postprocess_params["top_k"] = top_k
            postprocess_params["_legacy"] = False
        elif return_all_scores is not None:
            warnings.warn(
                "`return_all_scores` is now deprecated, if want a similar functionality use `top_k=None` instead of"
                " `return_all_scores=True` or `top_k=1` instead of `return_all_scores=False`.",
                UserWarning,
            )
            if return_all_scores:
                postprocess_params["top_k"] = None
            else:
                postprocess_params["top_k"] = 1

        if isinstance(function_to_apply, str):
            function_to_apply = ClassificationFunction[function_to_apply.upper()]

        if function_to_apply is not None:
            postprocess_params["function_to_apply"] = function_to_apply
        return preprocess_params, {}, postprocess_params
    def __call__(self, *args, **kwargs):
        result = super().__call__(*args, **kwargs)
        # TODO try and retrieve it in a nicer way from _sanitize_parameters.
        _legacy = "top_k" not in kwargs
        if isinstance(args[0], str) and _legacy:
            # This pipeline is odd, and return a list when single item is run
            return [result]
        else:
            return result
    def preprocess(self, inputs, **tokenizer_kwargs) -> Dict[str, GenericTensor]:
        return_tensors = self.framework
        if isinstance(inputs, dict):
            return self.tokenizer(**inputs, return_tensors=return_tensors, **tokenizer_kwargs)
        elif isinstance(inputs, list) and len(inputs) == 1 and isinstance(inputs[0], list) and len(inputs[0]) == 2:
            # It used to be valid to use a list of list of list for text pairs, keeping this path for BC
            return self.tokenizer(
                text=inputs[0][0], text_pair=inputs[0][1], return_tensors=return_tensors, **tokenizer_kwargs
            )
        elif isinstance(inputs, list):
            # This is likely an invalid usage of the pipeline attempting to pass text pairs.
            raise ValueError(
                "The pipeline received invalid inputs, if you are trying to send text pairs, you can try to send a"
                ' dictionary `{"text": "My text", "text_pair": "My pair"}` in order to send a text pair.'
            )
        return self.tokenizer(inputs, return_tensors=return_tensors, **tokenizer_kwargs)
    def _forward(self, model_inputs):
        return self.model(**model_inputs)
    def postprocess(self, model_outputs, function_to_apply=None, top_k=1, _legacy=True):
        if function_to_apply is None:
            if self.model.config.problem_type == "multi_label_classification" or self.model.config.num_labels == 1:
                function_to_apply = ClassificationFunction.SIGMOID
            elif self.model.config.problem_type == "single_label_classification" or self.model.config.num_labels > 1:
                function_to_apply = ClassificationFunction.SOFTMAX
            elif hasattr(self.model.config, "function_to_apply") and function_to_apply is None:
                function_to_apply = self.model.config.function_to_apply
            else:
                function_to_apply = ClassificationFunction.NONE

        outputs = model_outputs["logits"][0]
        outputs = outputs.numpy()

        if function_to_apply == ClassificationFunction.SIGMOID:
            scores = sigmoid(outputs)
        elif function_to_apply == ClassificationFunction.SOFTMAX:
            scores = softmax(outputs)
        elif function_to_apply == ClassificationFunction.NONE:
            scores = outputs
        else:
            raise ValueError(f"Unrecognized `function_to_apply` argument: {function_to_apply}")

        if top_k == 1 and _legacy:
            return {"label": self.model.config.id2label[scores.argmax().item()], "score": scores.max().item()}

        dict_scores = [
            {"label": self.model.config.id2label[i], "score": score.item()} for i, score in enumerate(scores)
        ]
        if not _legacy:
            dict_scores.sort(key=lambda x: x["score"], reverse=True)
            if top_k is not None:
                dict_scores = dict_scores[:top_k]
        return dict_scores
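# Minimal usage sketch (checkpoint resolution is illustrative; `pipeline` dispatches to
# this class for the "text-classification" task):
#   from transformers import pipeline
#   classifier = pipeline("text-classification")
#   classifier("This restaurant is awesome", top_k=None)  # scores for every label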
| 331 |
'''simple docstring'''
from __future__ import annotations
from sys import maxsize
from typing import Generic, TypeVar
T = TypeVar("T")


def get_parent_position(position: int) -> int:
    # Index of the parent of `position` in an array-backed binary heap.
    return (position - 1) // 2


def get_child_left_position(position: int) -> int:
    return (2 * position) + 1


def get_child_right_position(position: int) -> int:
    return (2 * position) + 2
class MinPriorityQueue(Generic[T]):
    """Minimum priority queue keyed by weight, with position tracking to support update_key."""
    def __init__(self) -> None:
        self.heap: list[tuple[T, int]] = []
        self.position_map: dict[T, int] = {}
        self.elements: int = 0
def __len__( self : Optional[Any] ) -> int:
"""simple docstring"""
return self.elements
def __repr__( self : List[str] ) -> str:
"""simple docstring"""
return str(self.heap )
    def is_empty(self) -> bool:
        return self.elements == 0

    def push(self, elem: T, weight: int) -> None:
        # Append at the end, then bubble up to restore the heap property.
        self.heap.append((elem, weight))
        self.position_map[elem] = self.elements
        self.elements += 1
        self._bubble_up(elem)
    def extract_min(self) -> T:
        # Move the minimum to the back, pop it, then fix the heap from the root.
        if self.elements > 1:
            self._swap_nodes(0, self.elements - 1)
        elem, _ = self.heap.pop()
        del self.position_map[elem]
        self.elements -= 1
        if self.elements > 0:
            bubble_down_elem, _ = self.heap[0]
            self._bubble_down(bubble_down_elem)
        return elem
    def update_key(self, elem: T, weight: int) -> None:
        position = self.position_map[elem]
        self.heap[position] = (elem, weight)
        if position > 0:
            parent_position = get_parent_position(position)
            _, parent_weight = self.heap[parent_position]
            if parent_weight > weight:
                self._bubble_up(elem)
            else:
                self._bubble_down(elem)
        else:
            self._bubble_down(elem)
    def _bubble_up(self, elem: T) -> None:
        # Recursively move a node up until its parent is no heavier.
        curr_pos = self.position_map[elem]
        if curr_pos == 0:
            return None
        parent_position = get_parent_position(curr_pos)
        _, weight = self.heap[curr_pos]
        _, parent_weight = self.heap[parent_position]
        if parent_weight > weight:
            self._swap_nodes(parent_position, curr_pos)
            return self._bubble_up(elem)
        return None
    def _bubble_down(self, elem: T) -> None:
        # Recursively move a node down toward the lighter child.
        curr_pos = self.position_map[elem]
        _, weight = self.heap[curr_pos]
        child_left_position = get_child_left_position(curr_pos)
        child_right_position = get_child_right_position(curr_pos)
        if child_left_position < self.elements and child_right_position < self.elements:
            _, child_left_weight = self.heap[child_left_position]
            _, child_right_weight = self.heap[child_right_position]
            if child_right_weight < child_left_weight and child_right_weight < weight:
                self._swap_nodes(child_right_position, curr_pos)
                return self._bubble_down(elem)
        if child_left_position < self.elements:
            _, child_left_weight = self.heap[child_left_position]
            if child_left_weight < weight:
                self._swap_nodes(child_left_position, curr_pos)
                return self._bubble_down(elem)
        else:
            return None
        if child_right_position < self.elements:
            _, child_right_weight = self.heap[child_right_position]
            if child_right_weight < weight:
                self._swap_nodes(child_right_position, curr_pos)
                return self._bubble_down(elem)
        return None
    def _swap_nodes(self, node1_pos: int, node2_pos: int) -> None:
        # Swap the nodes at the given positions and keep position_map in sync.
        node1_elem = self.heap[node1_pos][0]
        node2_elem = self.heap[node2_pos][0]
        self.heap[node1_pos], self.heap[node2_pos] = (
            self.heap[node2_pos],
            self.heap[node1_pos],
        )
        self.position_map[node1_elem] = node2_pos
        self.position_map[node2_elem] = node1_pos
class GraphUndirectedWeighted(Generic[T]):
    """Undirected weighted graph stored as adjacency maps."""

    def __init__(self) -> None:
        self.connections: dict[T, dict[T, int]] = {}
        self.nodes: int = 0
def __repr__( self : Dict ) -> str:
"""simple docstring"""
return str(self.connections )
def __len__( self : Dict ) -> int:
"""simple docstring"""
return self.nodes
    def add_node(self, node: T) -> None:
        # Add a node in the graph if it is not in the graph.
        if node not in self.connections:
            self.connections[node] = {}
            self.nodes += 1

    def add_edge(self, node1: T, node2: T, weight: int) -> None:
        # Add an undirected edge with the given weight between two nodes.
        self.add_node(node1)
        self.add_node(node2)
        self.connections[node1][node2] = weight
        self.connections[node2][node1] = weight
def prims_algo(
    graph: GraphUndirectedWeighted[T],
) -> tuple[dict[T, int], dict[T, T | None]]:
    """Run Prim's algorithm and return (distance, parent) maps describing the MST."""
    dist: dict[T, int] = {node: maxsize for node in graph.connections}
    parent: dict[T, T | None] = {node: None for node in graph.connections}

    priority_queue: MinPriorityQueue[T] = MinPriorityQueue()
    for node, weight in dist.items():
        priority_queue.push(node, weight)

    if priority_queue.is_empty():
        return dist, parent

    # initialization
    node = priority_queue.extract_min()
    dist[node] = 0
    for neighbour in graph.connections[node]:
        if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
            dist[neighbour] = dist[node] + graph.connections[node][neighbour]
            priority_queue.update_key(neighbour, dist[neighbour])
            parent[neighbour] = node

    # running prim's algorithm
    while not priority_queue.is_empty():
        node = priority_queue.extract_min()
        for neighbour in graph.connections[node]:
            if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
                dist[neighbour] = dist[node] + graph.connections[node][neighbour]
                priority_queue.update_key(neighbour, dist[neighbour])
                parent[neighbour] = node
    return dist, parent
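# Example usage (a minimal sketch; node labels and weights are illustrative):
#   graph = GraphUndirectedWeighted[int]()
#   graph.add_edge(1, 2, 3)
#   graph.add_edge(2, 3, 1)
#   dist, parent = prims_algo(graph)
#   # `parent` encodes the minimum spanning tree as child -> parent links.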
| 331 | 1 |
import logging
from pathlib import Path
import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only
from utils_rag import save_json
def count_trainable_parameters(model) -> int:
    model_parameters = filter(lambda p: p.requires_grad, model.parameters())
    params = sum([np.prod(p.size()) for p in model_parameters])
    return params
logger = logging.getLogger(__name__)
def get_checkpoint_callback(output_dir, metric):
    """Save the model with the best validation metric."""
    if metric == "rouge2":
        exp = "{val_avg_rouge2:.4f}-{step_count}"
    elif metric == "bleu":
        exp = "{val_avg_bleu:.4f}-{step_count}"
    elif metric == "em":
        exp = "{val_avg_em:.4f}-{step_count}"
    elif metric == "loss":
        exp = "{val_avg_loss:.4f}-{step_count}"
    else:
        raise NotImplementedError(
            f"seq2seq callbacks only support rouge2 and bleu, got {metric}, You can make your own by adding to this"
            " function."
        )

    checkpoint_callback = ModelCheckpoint(
        dirpath=output_dir,
        filename=exp,
        monitor=f"val_{metric}",
        mode="max",
        save_top_k=1,
        every_n_epochs=1,
    )
    return checkpoint_callback
def get_early_stopping_callback(metric, patience):
    return EarlyStopping(
        monitor=f"val_{metric}",
        mode="min" if "loss" in metric else "max",
        patience=patience,
        verbose=True,
    )
class Seq2SeqLoggingCallback(pl.Callback):
    def on_batch_end(self, trainer, pl_module):
        lrs = {f"lr_group_{i}": param["lr"] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups)}
        pl_module.logger.log_metrics(lrs)
    @rank_zero_only
    def _write_logs(self, trainer: pl.Trainer, pl_module: pl.LightningModule, type_path: str, save_generations=True) -> None:
        logger.info(f"***** {type_path} results at step {trainer.global_step:05d} *****")
        metrics = trainer.callback_metrics
        trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ["log", "progress_bar", "preds"]})
        # Log results
        od = Path(pl_module.hparams.output_dir)
        if type_path == "test":
            results_file = od / "test_results.txt"
            generations_file = od / "test_generations.txt"
        else:
            # this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
            # If people want this it will be easy enough to add back.
            results_file = od / f"{type_path}_results/{trainer.global_step:05d}.txt"
            generations_file = od / f"{type_path}_generations/{trainer.global_step:05d}.txt"
            results_file.parent.mkdir(exist_ok=True)
            generations_file.parent.mkdir(exist_ok=True)
        with open(results_file, "a+") as writer:
            for key in sorted(metrics):
                if key in ["log", "progress_bar", "preds"]:
                    continue
                val = metrics[key]
                if isinstance(val, torch.Tensor):
                    val = val.item()
                msg = f"{key}: {val:.6f}\n"
                writer.write(msg)

        if not save_generations:
            return

        if "preds" in metrics:
            content = "\n".join(metrics["preds"])
            generations_file.open("w+").write(content)
    @rank_zero_only
    def on_train_start(self, trainer, pl_module):
        try:
            npars = pl_module.model.model.num_parameters()
        except AttributeError:
            npars = pl_module.model.num_parameters()

        n_trainable_pars = count_trainable_parameters(pl_module)
        # mp stands for million parameters
        trainer.logger.log_metrics({"n_params": npars, "mp": npars / 1e6, "grad_mp": n_trainable_pars / 1e6})
    @rank_zero_only
    def on_test_end(self, trainer: pl.Trainer, pl_module: pl.LightningModule):
        save_json(pl_module.metrics, pl_module.metrics_save_path)
        return self._write_logs(trainer, pl_module, "test")

    @rank_zero_only
    def on_validation_end(self, trainer: pl.Trainer, pl_module):
        save_json(pl_module.metrics, pl_module.metrics_save_path)
# Uncommenting this will save val generations
# return self._write_logs(trainer, pl_module, "valid")
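# Sketch of how these callbacks are typically wired into a trainer (values illustrative):
#   trainer = pl.Trainer(
#       callbacks=[
#           Seq2SeqLoggingCallback(),
#           get_checkpoint_callback(output_dir, "bleu"),
#           get_early_stopping_callback("bleu", patience=3),
#       ],
#   )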
| 99 |
'''simple docstring'''
import argparse
import os
import sys
from unittest.mock import patch
import pytorch_lightning as pl
import timeout_decorator
import torch
from distillation import SummarizationDistiller, distill_main
from finetune import SummarizationModule, main
from transformers import MarianMTModel
from transformers.file_utils import cached_path
from transformers.testing_utils import TestCasePlus, require_torch_gpu, slow
from utils import load_json
__A = "sshleifer/mar_enro_6_3_student"
class TestMbartCc25Enro(TestCasePlus):
    def setUp(self):
        super().setUp()

        data_cached = cached_path(
            "https://cdn-datasets.huggingface.co/translation/wmt_en_ro-tr40k-va0.5k-te0.5k.tar.gz",
            extract_compressed_file=True,
        )
        self.data_dir = f"{data_cached}/wmt_en_ro-tr40k-va0.5k-te0.5k"
@slow
@require_torch_gpu
    def test_model_download(self):
        """This warms up the cache so that the next test doesn't include download time."""
        MarianMTModel.from_pretrained(MARIAN_MODEL)
@slow
@require_torch_gpu
    def test_train_mbart_cc25_enro_script(self):
        env_vars_to_replace = {
"""$MAX_LEN""": 64,
"""$BS""": 64,
"""$GAS""": 1,
"""$ENRO_DIR""": self.data_dir,
"""facebook/mbart-large-cc25""": MARIAN_MODEL,
# "val_check_interval=0.25": "val_check_interval=1.0",
"""--learning_rate=3e-5""": """--learning_rate 3e-4""",
"""--num_train_epochs 6""": """--num_train_epochs 1""",
}
        # Clean up bash script
        bash_script = (self.test_file_dir / "train_mbart_cc25_enro.sh").open().read().split("finetune.py")[1].strip()
        bash_script = bash_script.replace("\\\n", "").strip().replace('"$@"', "")
        for k, v in env_vars_to_replace.items():
            bash_script = bash_script.replace(k, str(v))
        output_dir = self.get_auto_remove_tmp_dir()
        # bash_script = bash_script.replace("--fp16 ", "")
        args = f"""
            --output_dir {output_dir}
            --tokenizer_name Helsinki-NLP/opus-mt-en-ro
            --sortish_sampler
            --do_predict
            --gpus 1
            --freeze_encoder
            --n_train 40000
            --n_val 500
            --n_test 500
            --fp16_opt_level O1
            --num_sanity_val_steps 0
            --eval_beams 2
        """.split()
        # XXX: args.gpus > 1 : handle multi_gpu in the future
        testargs = ["finetune.py"] + bash_script.split() + args
        with patch.object(sys, "argv", testargs):
            parser = argparse.ArgumentParser()
            parser = pl.Trainer.add_argparse_args(parser)
            parser = SummarizationModule.add_model_specific_args(parser, os.getcwd())
            args = parser.parse_args()
            model = main(args)
# Check metrics
        metrics = load_json(model.metrics_save_path)
        first_step_stats = metrics["val"][0]
        last_step_stats = metrics["val"][-1]
        self.assertEqual(len(metrics["val"]), (args.max_epochs / args.val_check_interval))
        assert isinstance(last_step_stats[f"val_avg_{model.val_metric}"], float)
self.assertGreater(last_step_stats["""val_avg_gen_time"""] , 0.01 )
# model hanging on generate. Maybe bad config was saved. (XXX: old comment/assert?)
self.assertLessEqual(last_step_stats["""val_avg_gen_time"""] , 1.0 )
# test learning requirements:
# 1. BLEU improves over the course of training by more than 2 pts
self.assertGreater(last_step_stats["""val_avg_bleu"""] - first_step_stats["""val_avg_bleu"""] , 2 )
# 2. BLEU finishes above 17
self.assertGreater(last_step_stats["""val_avg_bleu"""] , 17 )
# 3. test BLEU and val BLEU within ~1.1 pt.
self.assertLess(abs(metrics["""val"""][-1]["""val_avg_bleu"""] - metrics["""test"""][-1]["""test_avg_bleu"""] ) , 1.1 )
# check lightning ckpt can be loaded and has a reasonable statedict
        contents = os.listdir(output_dir)
        ckpt_path = [x for x in contents if x.endswith(".ckpt")][0]
        full_path = os.path.join(args.output_dir, ckpt_path)
        ckpt = torch.load(full_path, map_location="cpu")
        expected_key = "model.model.decoder.layers.0.encoder_attn_layer_norm.weight"
        assert expected_key in ckpt["state_dict"]
        assert ckpt["state_dict"][expected_key].dtype == torch.float32
# TODO: turn on args.do_predict when PL bug fixed.
if args.do_predict:
            contents = {os.path.basename(p) for p in contents}
assert "test_generations.txt" in contents
assert "test_results.txt" in contents
# assert len(metrics["val"]) == desired_n_evals
assert len(metrics["""test"""] ) == 1
class TestDistilMarianNoTeacher(TestCasePlus):
@timeout_decorator.timeout(600 )
@slow
@require_torch_gpu
    def test_opus_mt_distill_script(self):
        data_dir = f"{self.test_file_dir_str}/test_data/wmt_en_ro"
        env_vars_to_replace = {
"""--fp16_opt_level=O1""": """""",
"""$MAX_LEN""": 128,
"""$BS""": 16,
"""$GAS""": 1,
"""$ENRO_DIR""": data_dir,
"""$m""": """sshleifer/student_marian_en_ro_6_1""",
"""val_check_interval=0.25""": """val_check_interval=1.0""",
}
# Clean up bash script
        bash_script = (
            (self.test_file_dir / "distil_marian_no_teacher.sh").open().read().split("distillation.py")[1].strip()
        )
        bash_script = bash_script.replace("\\\n", "").strip().replace('"$@"', "")
        bash_script = bash_script.replace("--fp16 ", " ")

        for k, v in env_vars_to_replace.items():
            bash_script = bash_script.replace(k, str(v))
        output_dir = self.get_auto_remove_tmp_dir()
        bash_script = bash_script.replace("--fp16", "")
        epochs = 6
        testargs = (
            ["distillation.py"]
            + bash_script.split()
            + [
F'''--output_dir={output_dir}''',
"""--gpus=1""",
"""--learning_rate=1e-3""",
F'''--num_train_epochs={epochs}''',
"""--warmup_steps=10""",
"""--val_check_interval=1.0""",
"""--do_predict""",
]
)
        with patch.object(sys, "argv", testargs):
            parser = argparse.ArgumentParser()
            parser = pl.Trainer.add_argparse_args(parser)
            parser = SummarizationDistiller.add_model_specific_args(parser, os.getcwd())
            args = parser.parse_args()
            # assert args.gpus == gpus THIS BREAKS for multi_gpu
            model = distill_main(args)
# Check metrics
        metrics = load_json(model.metrics_save_path)
        first_step_stats = metrics["val"][0]
        last_step_stats = metrics["val"][-1]
assert len(metrics["""val"""] ) >= (args.max_epochs / args.val_check_interval) # +1 accounts for val_sanity_check
assert last_step_stats["val_avg_gen_time"] >= 0.01
assert first_step_stats["val_avg_bleu"] < last_step_stats["val_avg_bleu"] # model learned nothing
assert 1.0 >= last_step_stats["val_avg_gen_time"] # model hanging on generate. Maybe bad config was saved.
        assert isinstance(last_step_stats[f"val_avg_{model.val_metric}"], float)
        # check lightning ckpt can be loaded and has a reasonable statedict
        contents = os.listdir(output_dir)
        ckpt_path = [x for x in contents if x.endswith(".ckpt")][0]
        full_path = os.path.join(args.output_dir, ckpt_path)
        ckpt = torch.load(full_path, map_location="cpu")
        expected_key = "model.model.decoder.layers.0.encoder_attn_layer_norm.weight"
        assert expected_key in ckpt["state_dict"]
        assert ckpt["state_dict"][expected_key].dtype == torch.float32
# TODO: turn on args.do_predict when PL bug fixed.
if args.do_predict:
            contents = {os.path.basename(p) for p in contents}
assert "test_generations.txt" in contents
assert "test_results.txt" in contents
# assert len(metrics["val"]) == desired_n_evals
assert len(metrics["""test"""] ) == 1
| 164 | 0 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_nllb import NllbTokenizer
else:
    NllbTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'facebook/nllb-200-distilled-600M': (
'https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/sentencepiece.bpe.model'
),
},
'tokenizer_file': {
'facebook/nllb-200-distilled-600M': (
'https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/tokenizer.json'
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'facebook/nllb-large-en-ro': 1_024,
'facebook/nllb-200-distilled-600M': 1_024,
}
# fmt: off
FAIRSEQ_LANGUAGE_CODES = ['ace_Arab', 'ace_Latn', 'acm_Arab', 'acq_Arab', 'aeb_Arab', 'afr_Latn', 'ajp_Arab', 'aka_Latn', 'amh_Ethi', 'apc_Arab', 'arb_Arab', 'ars_Arab', 'ary_Arab', 'arz_Arab', 'asm_Beng', 'ast_Latn', 'awa_Deva', 'ayr_Latn', 'azb_Arab', 'azj_Latn', 'bak_Cyrl', 'bam_Latn', 'ban_Latn', 'bel_Cyrl', 'bem_Latn', 'ben_Beng', 'bho_Deva', 'bjn_Arab', 'bjn_Latn', 'bod_Tibt', 'bos_Latn', 'bug_Latn', 'bul_Cyrl', 'cat_Latn', 'ceb_Latn', 'ces_Latn', 'cjk_Latn', 'ckb_Arab', 'crh_Latn', 'cym_Latn', 'dan_Latn', 'deu_Latn', 'dik_Latn', 'dyu_Latn', 'dzo_Tibt', 'ell_Grek', 'eng_Latn', 'epo_Latn', 'est_Latn', 'eus_Latn', 'ewe_Latn', 'fao_Latn', 'pes_Arab', 'fij_Latn', 'fin_Latn', 'fon_Latn', 'fra_Latn', 'fur_Latn', 'fuv_Latn', 'gla_Latn', 'gle_Latn', 'glg_Latn', 'grn_Latn', 'guj_Gujr', 'hat_Latn', 'hau_Latn', 'heb_Hebr', 'hin_Deva', 'hne_Deva', 'hrv_Latn', 'hun_Latn', 'hye_Armn', 'ibo_Latn', 'ilo_Latn', 'ind_Latn', 'isl_Latn', 'ita_Latn', 'jav_Latn', 'jpn_Jpan', 'kab_Latn', 'kac_Latn', 'kam_Latn', 'kan_Knda', 'kas_Arab', 'kas_Deva', 'kat_Geor', 'knc_Arab', 'knc_Latn', 'kaz_Cyrl', 'kbp_Latn', 'kea_Latn', 'khm_Khmr', 'kik_Latn', 'kin_Latn', 'kir_Cyrl', 'kmb_Latn', 'kon_Latn', 'kor_Hang', 'kmr_Latn', 'lao_Laoo', 'lvs_Latn', 'lij_Latn', 'lim_Latn', 'lin_Latn', 'lit_Latn', 'lmo_Latn', 'ltg_Latn', 'ltz_Latn', 'lua_Latn', 'lug_Latn', 'luo_Latn', 'lus_Latn', 'mag_Deva', 'mai_Deva', 'mal_Mlym', 'mar_Deva', 'min_Latn', 'mkd_Cyrl', 'plt_Latn', 'mlt_Latn', 'mni_Beng', 'khk_Cyrl', 'mos_Latn', 'mri_Latn', 'zsm_Latn', 'mya_Mymr', 'nld_Latn', 'nno_Latn', 'nob_Latn', 'npi_Deva', 'nso_Latn', 'nus_Latn', 'nya_Latn', 'oci_Latn', 'gaz_Latn', 'ory_Orya', 'pag_Latn', 'pan_Guru', 'pap_Latn', 'pol_Latn', 'por_Latn', 'prs_Arab', 'pbt_Arab', 'quy_Latn', 'ron_Latn', 'run_Latn', 'rus_Cyrl', 'sag_Latn', 'san_Deva', 'sat_Beng', 'scn_Latn', 'shn_Mymr', 'sin_Sinh', 'slk_Latn', 'slv_Latn', 'smo_Latn', 'sna_Latn', 'snd_Arab', 'som_Latn', 'sot_Latn', 'spa_Latn', 'als_Latn', 'srd_Latn', 'srp_Cyrl', 'ssw_Latn', 'sun_Latn', 'swe_Latn', 'swh_Latn', 'szl_Latn', 'tam_Taml', 'tat_Cyrl', 'tel_Telu', 'tgk_Cyrl', 'tgl_Latn', 'tha_Thai', 'tir_Ethi', 'taq_Latn', 'taq_Tfng', 'tpi_Latn', 'tsn_Latn', 'tso_Latn', 'tuk_Latn', 'tum_Latn', 'tur_Latn', 'twi_Latn', 'tzm_Tfng', 'uig_Arab', 'ukr_Cyrl', 'umb_Latn', 'urd_Arab', 'uzn_Latn', 'vec_Latn', 'vie_Latn', 'war_Latn', 'wol_Latn', 'xho_Latn', 'ydd_Hebr', 'yor_Latn', 'yue_Hant', 'zho_Hans', 'zho_Hant', 'zul_Latn']
class NllbTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = NllbTokenizer

    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []
    def __init__(self, vocab_file=None, tokenizer_file=None, bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", src_lang=None, tgt_lang=None, additional_special_tokens=None, legacy_behaviour=False, **kwargs):
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        self.legacy_behaviour = legacy_behaviour
        super().__init__(
            vocab_file=vocab_file, tokenizer_file=tokenizer_file, bos_token=bos_token, eos_token=eos_token,
            sep_token=sep_token, cls_token=cls_token, unk_token=unk_token, pad_token=pad_token,
            mask_token=mask_token, src_lang=src_lang, tgt_lang=tgt_lang,
            additional_special_tokens=additional_special_tokens, legacy_behaviour=legacy_behaviour, **kwargs,
        )
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

        _additional_special_tokens = FAIRSEQ_LANGUAGE_CODES.copy()
        if additional_special_tokens is not None:
            # Only add those special tokens if they are not already there.
            _additional_special_tokens.extend(
                [t for t in additional_special_tokens if t not in _additional_special_tokens]
            )
        self.add_special_tokens({"additional_special_tokens": _additional_special_tokens})
        self.lang_code_to_id = {
            lang_code: self.convert_tokens_to_ids(lang_code) for lang_code in FAIRSEQ_LANGUAGE_CODES
        }
        self._src_lang = src_lang if src_lang is not None else "eng_Latn"
        self.cur_lang_code_id = self.convert_tokens_to_ids(self._src_lang)
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang)
    @property
    def src_lang(self) -> str:
        return self._src_lang

    @src_lang.setter
    def src_lang(self, new_src_lang: str) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)
    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
    def _build_translation_inputs(self, raw_inputs, return_tensors: str, src_lang: Optional[str], tgt_lang: Optional[str], **extra_kwargs):
        """Used by the translation pipeline to prepare inputs for the generate function."""
        if src_lang is None or tgt_lang is None:
            raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
        self.src_lang = src_lang
        inputs = self(raw_inputs, add_special_tokens=True, return_tensors=return_tensors, **extra_kwargs)
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang)
        inputs["forced_bos_token_id"] = tgt_lang_id
        return inputs
    def prepare_seq2seq_batch(self, src_texts: List[str], src_lang: str = "eng_Latn", tgt_texts: Optional[List[str]] = None, tgt_lang: str = "fra_Latn", **kwargs) -> BatchEncoding:
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)
    def _switch_to_input_mode(self):
        return self.set_src_lang_special_tokens(self.src_lang)

    def _switch_to_target_mode(self):
        return self.set_tgt_lang_special_tokens(self.tgt_lang)
    def set_src_lang_special_tokens(self, src_lang) -> None:
        """Reset the special tokens to the source lang setting.
        - In legacy mode: no prefix, suffix = [eos, src_lang_code].
        - In default mode: prefix = [src_lang_code], suffix = [eos]."""
        self.cur_lang_code = self.convert_tokens_to_ids(src_lang)
        if self.legacy_behaviour:
            self.prefix_tokens = []
            self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        else:
            self.prefix_tokens = [self.cur_lang_code]
            self.suffix_tokens = [self.eos_token_id]

        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)

        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ["$A"] + suffix_tokens_str,
            pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str,
            special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)),
        )
    def set_tgt_lang_special_tokens(self, lang: str) -> None:
        """Reset the special tokens to the target lang setting.
        - In legacy mode: no prefix, suffix = [eos, tgt_lang_code].
        - In default mode: prefix = [tgt_lang_code], suffix = [eos]."""
        self.cur_lang_code = self.convert_tokens_to_ids(lang)
        if self.legacy_behaviour:
            self.prefix_tokens = []
            self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        else:
            self.prefix_tokens = [self.cur_lang_code]
            self.suffix_tokens = [self.eos_token_id]

        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)

        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ["$A"] + suffix_tokens_str,
            pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str,
            special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)),
        )
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )

        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory.")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
| 126 | """simple docstring"""
import json
import os
import tempfile
from unittest.mock import patch
import torch
from torch.utils.data import DataLoader, TensorDataset
from accelerate import DistributedType, infer_auto_device_map, init_empty_weights
from accelerate.accelerator import Accelerator
from accelerate.state import GradientState, PartialState
from accelerate.test_utils import require_bnb, require_multi_gpu, slow
from accelerate.test_utils.testing import AccelerateTestCase, require_cuda
from accelerate.utils import patch_environment
def create_components():
    model = torch.nn.Linear(2, 4)
    optimizer = torch.optim.AdamW(model.parameters(), lr=1.0)
    scheduler = torch.optim.lr_scheduler.OneCycleLR(optimizer, max_lr=0.01, steps_per_epoch=2, epochs=1)
    train_dl = DataLoader(TensorDataset(torch.tensor([1, 2, 3])))
    valid_dl = DataLoader(TensorDataset(torch.tensor([4, 5, 6])))

    return model, optimizer, scheduler, train_dl, valid_dl


def get_signature(model):
    return (model.weight.abs().sum() + model.bias.abs().sum()).item()


def load_random_weights(model):
    state = torch.nn.Linear(*tuple(model.weight.T.shape)).state_dict()
    model.load_state_dict(state)
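# `get_signature` collapses the model weights into one scalar fingerprint so the tests
# below can assert "weights changed" / "weights restored" around save_state/load_state
# without comparing full tensors.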
class AcceleratorTester(AccelerateTestCase):
@require_cuda
    def test_accelerator_can_be_reinstantiated(self):
        accelerator = Accelerator()
        assert PartialState._shared_state["_cpu"] is False
        assert PartialState._shared_state["device"].type == "cuda"
        with self.assertRaises(ValueError):
            _ = Accelerator(cpu=True)
    def test_mutable_states(self):
        accelerator = Accelerator()
        state = GradientState()
        assert state.num_steps == 1
        accelerator.gradient_accumulation_steps = 4
        assert state.num_steps == 4

        assert state.sync_gradients is True
        accelerator.sync_gradients = False
        assert state.sync_gradients is False
        GradientState._reset_state()
    def test_prepared_objects_are_referenced(self):
        accelerator = Accelerator()
        model, optimizer, scheduler, train_dl, valid_dl = create_components()

        (
            prepared_model,
            prepared_optimizer,
            prepared_scheduler,
            prepared_train_dl,
            prepared_valid_dl,
        ) = accelerator.prepare(model, optimizer, scheduler, train_dl, valid_dl)

        self.assertTrue(prepared_model in accelerator._models)
        self.assertTrue(prepared_optimizer in accelerator._optimizers)
        self.assertTrue(prepared_scheduler in accelerator._schedulers)
        self.assertTrue(prepared_train_dl in accelerator._dataloaders)
        self.assertTrue(prepared_valid_dl in accelerator._dataloaders)
    def test_free_memory_dereferences_prepared_components(self):
        accelerator = Accelerator()
        model, optimizer, scheduler, train_dl, valid_dl = create_components()
        accelerator.prepare(model, optimizer, scheduler, train_dl, valid_dl)
        accelerator.free_memory()

        self.assertTrue(len(accelerator._models) == 0)
        self.assertTrue(len(accelerator._optimizers) == 0)
        self.assertTrue(len(accelerator._schedulers) == 0)
        self.assertTrue(len(accelerator._dataloaders) == 0)
    def test_env_var_device(self):
        PartialState._reset_state()

        # Mock torch.cuda.set_device to avoid an exception as the device doesn't exist
        def noop(*args, **kwargs):
            pass

        with patch("torch.cuda.set_device", noop), patch_environment(ACCELERATE_TORCH_DEVICE="cuda:64"):
            accelerator = Accelerator()
            self.assertEqual(str(accelerator.state.device), "cuda:64")
    def test_save_load_model(self):
        accelerator = Accelerator()
        model, optimizer, scheduler, train_dl, valid_dl = create_components()
        accelerator.prepare(model, optimizer, scheduler, train_dl, valid_dl)

        model_signature = get_signature(model)

        with tempfile.TemporaryDirectory() as tmpdirname:
            accelerator.save_state(tmpdirname)

            # make sure random weights don't match
            load_random_weights(model)
            self.assertTrue(abs(model_signature - get_signature(model)) > 1e-3)

            # make sure loaded weights match
            accelerator.load_state(tmpdirname)
            self.assertTrue(abs(model_signature - get_signature(model)) < 1e-3)
    def test_save_load_model_with_hooks(self):
        accelerator = Accelerator()
        model, optimizer, scheduler, train_dl, valid_dl = create_components()
        accelerator.prepare(model, optimizer, scheduler, train_dl, valid_dl)

        model_signature = get_signature(model)

        # saving hook
        def save_config(models, weights, output_dir):
            config = {"class_name": models[0].__class__.__name__}
            with open(os.path.join(output_dir, "data.json"), "w") as f:
                json.dump(config, f)

        # loading hook
        def load_config(models, input_dir):
            with open(os.path.join(input_dir, "data.json"), "r") as f:
                config = json.load(f)
            models[0].class_name = config["class_name"]

        save_hook = accelerator.register_save_state_pre_hook(save_config)
        load_hook = accelerator.register_load_state_pre_hook(load_config)

        with tempfile.TemporaryDirectory() as tmpdirname:
            accelerator.save_state(tmpdirname)

            # make sure random weights don't match with hooks
            load_random_weights(model)
            self.assertTrue(abs(model_signature - get_signature(model)) > 1e-3)

            # random class name to verify correct one is loaded
            model.class_name = "random"
            # make sure loaded weights match with hooks
            accelerator.load_state(tmpdirname)
            self.assertTrue(abs(model_signature - get_signature(model)) < 1e-3)

            # mode.class_name is loaded from config
            self.assertTrue(model.class_name == model.__class__.__name__)

        # remove hooks
        save_hook.remove()
        load_hook.remove()

        with tempfile.TemporaryDirectory() as tmpdirname:
            accelerator.save_state(tmpdirname)

            # make sure random weights don't match with hooks removed
            load_random_weights(model)
            self.assertTrue(abs(model_signature - get_signature(model)) > 1e-3)

            # random class name to verify correct one is loaded
            model.class_name = "random"
            # make sure loaded weights match with hooks removed
            accelerator.load_state(tmpdirname)
            self.assertTrue(abs(model_signature - get_signature(model)) < 1e-3)

            # mode.class_name is NOT loaded from config
            self.assertTrue(model.class_name != model.__class__.__name__)
    def test_accelerator_none(self):
        accelerator = Accelerator()
        model, optimizer, scheduler, train_dl, valid_dl = create_components()
        dummy_obj = None

        # This should work
        model, optimizer, scheduler, train_dl, valid_dl, dummy_obj = accelerator.prepare(
            model, optimizer, scheduler, train_dl, valid_dl, dummy_obj
        )
        self.assertTrue(dummy_obj is None)
    def test_is_accelerate_prepared_flag(self):
        accelerator = Accelerator()
        model, optimizer, scheduler, train_dl, valid_dl = create_components()
        dummy_obj = [1, 2, 3]

        # This should work
        model, optimizer, scheduler, train_dl, valid_dl, dummy_obj = accelerator.prepare(
            model, optimizer, scheduler, train_dl, valid_dl, dummy_obj
        )
        self.assertEqual(
            getattr(dummy_obj, "_is_accelerate_prepared", False), False,
            "Dummy object should have `_is_accelerate_prepared` set to `True`",
        )
        self.assertEqual(
            getattr(model, "_is_accelerate_prepared", False), True,
            "Model is missing `_is_accelerator_prepared` or is set to `False`",
        )
        self.assertEqual(
            getattr(optimizer, "_is_accelerate_prepared", False), True,
            "Optimizer is missing `_is_accelerator_prepared` or is set to `False`",
        )
        self.assertEqual(
            getattr(scheduler, "_is_accelerate_prepared", False), True,
            "Scheduler is missing `_is_accelerator_prepared` or is set to `False`",
        )
        self.assertEqual(
            getattr(train_dl, "_is_accelerate_prepared", False), True,
            "Train Dataloader is missing `_is_accelerator_prepared` or is set to `False`",
        )
        self.assertEqual(
            getattr(valid_dl, "_is_accelerate_prepared", False), True,
            "Valid Dataloader is missing `_is_accelerator_prepared` or is set to `False`",
        )
@slow
@require_bnb
    def test_accelerator_bnb(self):
        from transformers import AutoModelForCausalLM

        model = AutoModelForCausalLM.from_pretrained(
            "EleutherAI/gpt-neo-125m",
            load_in_8bit=True,
            device_map={"": 0},
        )
        accelerator = Accelerator()

        # This should work
        model = accelerator.prepare(model)
@slow
@require_bnb
    def test_accelerator_bnb_cpu_error(self):
        from transformers import AutoModelForCausalLM

        accelerator = Accelerator()
        with init_empty_weights():
            model = AutoModelForCausalLM.from_pretrained(
                "EleutherAI/gpt-neo-125m",
            )
        model.tie_weights()
        device_map = infer_auto_device_map(model)
        device_map["lm_head"] = "cpu"
        model = AutoModelForCausalLM.from_pretrained(
            "EleutherAI/gpt-neo-125m",
            device_map=device_map,
            load_in_8bit=True,
            llm_int8_enable_fp32_cpu_offload=True,
        )

        # This should not work and get value error
        with self.assertRaises(ValueError):
            model = accelerator.prepare(model)
@slow
@require_bnb
@require_multi_gpu
    def test_accelerator_bnb_multi_gpu(self):
        from transformers import AutoModelForCausalLM

        PartialState._shared_state = {"distributed_type": DistributedType.MULTI_GPU}
        with init_empty_weights():
            model = AutoModelForCausalLM.from_pretrained(
                "EleutherAI/gpt-neo-125m",
            )
        model.tie_weights()
        device_map = infer_auto_device_map(model)
        device_map["lm_head"] = 1
        model = AutoModelForCausalLM.from_pretrained(
            "EleutherAI/gpt-neo-125m",
            load_in_8bit=True,
            device_map=device_map,
        )
        accelerator = Accelerator()

        # This should not work and get value error
        with self.assertRaises(ValueError):
            _ = accelerator.prepare(model)

        PartialState._reset_state()
@slow
@require_bnb
@require_multi_gpu
    def test_accelerator_bnb_multi_gpu_gpu_map(self):
        from transformers import AutoModelForCausalLM

        with init_empty_weights():
            model = AutoModelForCausalLM.from_pretrained(
                "EleutherAI/gpt-neo-125m",
            )
        model.tie_weights()
        device_map = infer_auto_device_map(model)
        device_map["lm_head"] = 1
        model = AutoModelForCausalLM.from_pretrained(
            "EleutherAI/gpt-neo-125m",
            load_in_8bit=True,
            device_map=device_map,
        )
        accelerator = Accelerator()

        # This should work
        model = accelerator.prepare(model)
@require_cuda
    def test_cpu_accelerator_prepare(self):
        model = torch.nn.Linear(10, 10)
        optimizer = torch.optim.SGD(model.parameters(), lr=0.01)
        accelerator = Accelerator(cpu=True)
        _ = accelerator.prepare(model)
| 126 | 1 |
from __future__ import annotations
from collections.abc import Generator
def sieve() -> Generator[int, None, None]:
    """Incrementally yield primes using a lazy Sieve of Eratosthenes."""
    factor_map: dict[int, int] = {}
    prime = 2
    while True:
        factor = factor_map.pop(prime, None)
        if factor:
            x = factor + prime
            while x in factor_map:
                x += factor
            factor_map[x] = factor
        else:
            factor_map[prime * prime] = prime
            yield prime
        prime += 1


def solution(limit: float = 1e10) -> int:
    primes = sieve()
    n = 1
    while True:
        prime = next(primes)
        if (2 * prime * n) > limit:
            return n
        # Ignore the next prime as the reminder will be 2.
        next(primes)
        n += 2
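# The check `2 * prime * n > limit` relies on the closed form of the remainder
# (Project Euler 123): for odd n, ((p_n - 1)^n + (p_n + 1)^n) mod p_n^2 equals 2 * n * p_n,
# while for even n the remainder is just 2, which is why even indices are skipped.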
if __name__ == "__main__":
print(solution())
| 278 |
import gc
import math
import unittest
import torch
from diffusers import UNet2DModel
from diffusers.utils import floats_tensor, logging, slow, torch_all_close, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
__snake_case : int =logging.get_logger(__name__)
enable_full_determinism()
class Unet2DModelTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase):
    model_class = UNet2DModel
    main_input_name = "sample"
@property
    def dummy_input(self):
        batch_size = 4
        num_channels = 3
        sizes = (32, 32)

        noise = floats_tensor((batch_size, num_channels) + sizes).to(torch_device)
        time_step = torch.tensor([10]).to(torch_device)

        return {"sample": noise, "timestep": time_step}
@property
    def input_shape(self):
        return (3, 32, 32)
@property
    def output_shape(self):
        return (3, 32, 32)
    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "block_out_channels": (32, 64),
            "down_block_types": ("DownBlock2D", "AttnDownBlock2D"),
            "up_block_types": ("AttnUpBlock2D", "UpBlock2D"),
            "attention_head_dim": 3,
            "out_channels": 3,
            "in_channels": 3,
            "layers_per_block": 2,
            "sample_size": 32,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict
class UNetLDMModelTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase):
    model_class = UNet2DModel
    main_input_name = "sample"
@property
    def dummy_input(self):
        batch_size = 4
        num_channels = 4
        sizes = (32, 32)

        noise = floats_tensor((batch_size, num_channels) + sizes).to(torch_device)
        time_step = torch.tensor([10]).to(torch_device)

        return {"sample": noise, "timestep": time_step}
@property
    def input_shape(self):
        return (4, 32, 32)
@property
    def output_shape(self):
        return (4, 32, 32)
def lowerCAmelCase__ (self ) -> Dict:
"""simple docstring"""
lowerCAmelCase__ : Tuple = {
'''sample_size''': 32,
'''in_channels''': 4,
'''out_channels''': 4,
'''layers_per_block''': 2,
'''block_out_channels''': (32, 64),
'''attention_head_dim''': 32,
'''down_block_types''': ('''DownBlock2D''', '''DownBlock2D'''),
'''up_block_types''': ('''UpBlock2D''', '''UpBlock2D'''),
}
        inputs_dict = self.dummy_input
return init_dict, inputs_dict
    def test_from_pretrained_hub(self):
        model, loading_info = UNet2DModel.from_pretrained("fusing/unet-ldm-dummy-update", output_loading_info=True)
        self.assertIsNotNone(model)
        self.assertEqual(len(loading_info["missing_keys"]), 0)
        model.to(torch_device)
        image = model(**self.dummy_input).sample
assert image is not None, "Make sure output is not None"
    @unittest.skipIf(torch_device != "cuda", "This test is supposed to run on GPU")
    def test_from_pretrained_accelerate(self):
        model, _ = UNet2DModel.from_pretrained("fusing/unet-ldm-dummy-update", output_loading_info=True)
        model.to(torch_device)
        image = model(**self.dummy_input).sample
assert image is not None, "Make sure output is not None"
    @unittest.skipIf(torch_device != "cuda", "This test is supposed to run on GPU")
    def test_from_pretrained_accelerate_wont_change_results(self):
        # by default, model loading uses accelerate (`low_cpu_mem_usage=True`)
        model_accelerate, _ = UNet2DModel.from_pretrained("fusing/unet-ldm-dummy-update", output_loading_info=True)
        model_accelerate.to(torch_device)
        model_accelerate.eval()
        noise = torch.randn(
            1, model_accelerate.config.in_channels, model_accelerate.config.sample_size, model_accelerate.config.sample_size, generator=torch.manual_seed(0))
        noise = noise.to(torch_device)
        time_step = torch.tensor([10] * noise.shape[0]).to(torch_device)
        arr_accelerate = model_accelerate(noise, time_step)["sample"]
        # two models don't need to stay in the device at the same time
        del model_accelerate
        torch.cuda.empty_cache()
        gc.collect()
        model_normal_load, _ = UNet2DModel.from_pretrained(
            "fusing/unet-ldm-dummy-update", output_loading_info=True, low_cpu_mem_usage=False)
        model_normal_load.to(torch_device)
        model_normal_load.eval()
        arr_normal_load = model_normal_load(noise, time_step)["sample"]
        assert torch_all_close(arr_accelerate, arr_normal_load, rtol=1e-3)
    def test_output_pretrained(self):
        model = UNet2DModel.from_pretrained("fusing/unet-ldm-dummy-update")
        model.eval()
        model.to(torch_device)
        noise = torch.randn(
            1, model.config.in_channels, model.config.sample_size, model.config.sample_size, generator=torch.manual_seed(0))
        noise = noise.to(torch_device)
        time_step = torch.tensor([10] * noise.shape[0]).to(torch_device)
        with torch.no_grad():
            output = model(noise, time_step).sample
        output_slice = output[0, -1, -3:, -3:].flatten().cpu()
        # fmt: off
        expected_output_slice = torch.tensor([-13.3258, -20.1100, -15.9873, -17.6617, -23.0596, -17.9419, -13.3675, -16.1889, -12.3800])
        # fmt: on
        self.assertTrue(torch_all_close(output_slice, expected_output_slice, rtol=1e-3))
class NCSNppModelTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase):
    model_class = UNet2DModel
    main_input_name = "sample"
    @property
    def dummy_input(self, sizes=(32, 32)):
        batch_size = 4
        num_channels = 3
        noise = floats_tensor((batch_size, num_channels) + sizes).to(torch_device)
        time_step = torch.tensor(batch_size * [10]).to(dtype=torch.int32, device=torch_device)
        return {"sample": noise, "timestep": time_step}
    @property
    def input_shape(self):
        return (3, 32, 32)
    @property
    def output_shape(self):
        return (3, 32, 32)
    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
'''block_out_channels''': [32, 64, 64, 64],
'''in_channels''': 3,
'''layers_per_block''': 1,
'''out_channels''': 3,
'''time_embedding_type''': '''fourier''',
'''norm_eps''': 1e-6,
'''mid_block_scale_factor''': math.sqrt(2.0 ),
'''norm_num_groups''': None,
'''down_block_types''': [
'''SkipDownBlock2D''',
'''AttnSkipDownBlock2D''',
'''SkipDownBlock2D''',
'''SkipDownBlock2D''',
],
'''up_block_types''': [
'''SkipUpBlock2D''',
'''SkipUpBlock2D''',
'''AttnSkipUpBlock2D''',
'''SkipUpBlock2D''',
],
}
        inputs_dict = self.dummy_input
return init_dict, inputs_dict
    @slow
    def test_from_pretrained_hub(self):
        model, loading_info = UNet2DModel.from_pretrained("google/ncsnpp-celebahq-256", output_loading_info=True)
        self.assertIsNotNone(model)
        self.assertEqual(len(loading_info["missing_keys"]), 0)
        model.to(torch_device)
        inputs = self.dummy_input
        noise = floats_tensor((4, 3) + (256, 256)).to(torch_device)
        inputs["sample"] = noise
        image = model(**inputs)
assert image is not None, "Make sure output is not None"
    @slow
    def test_output_pretrained_ve_mid(self):
        model = UNet2DModel.from_pretrained("google/ncsnpp-celebahq-256")
        model.to(torch_device)
        batch_size = 4
        num_channels = 3
        sizes = (256, 256)
        noise = torch.ones((batch_size, num_channels) + sizes).to(torch_device)
        time_step = torch.tensor(batch_size * [1e-4]).to(torch_device)
        with torch.no_grad():
            output = model(noise, time_step).sample
        output_slice = output[0, -3:, -3:, -1].flatten().cpu()
        # fmt: off
        expected_output_slice = torch.tensor([-4842.8691, -6499.6631, -3800.1953, -7978.2686, -10980.7129, -20028.8535, 8148.2822, 2342.2905, 567.7608])
        # fmt: on
        self.assertTrue(torch_all_close(output_slice, expected_output_slice, rtol=1e-2))
    def test_output_pretrained_ve_large(self):
        model = UNet2DModel.from_pretrained("fusing/ncsnpp-ffhq-ve-dummy-update")
        model.to(torch_device)
        batch_size = 4
        num_channels = 3
        sizes = (32, 32)
        noise = torch.ones((batch_size, num_channels) + sizes).to(torch_device)
        time_step = torch.tensor(batch_size * [1e-4]).to(torch_device)
        with torch.no_grad():
            output = model(noise, time_step).sample
        output_slice = output[0, -3:, -3:, -1].flatten().cpu()
        # fmt: off
        expected_output_slice = torch.tensor([-0.0325, -0.0900, -0.0869, -0.0332, -0.0725, -0.0270, -0.0101, 0.0227, 0.0256])
        # fmt: on
        self.assertTrue(torch_all_close(output_slice, expected_output_slice, rtol=1e-2))
    def test_forward_with_norm_groups(self):
        # not required for this model
        pass
| 129 | 0 |
'''simple docstring'''
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import BatchEncoding, MarianTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow
from transformers.utils import is_sentencepiece_available, is_tf_available, is_torch_available
if is_sentencepiece_available():
from transformers.models.marian.tokenization_marian import VOCAB_FILES_NAMES, save_json
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_SP = get_tests_dir("fixtures/test_sentencepiece.model")
mock_tokenizer_config = {"target_lang": "fi", "source_lang": "en"}
zh_code = ">>zh<<"
ORG_NAME = "Helsinki-NLP/"
if is_torch_available():
    FRAMEWORK = "pt"
elif is_tf_available():
    FRAMEWORK = "tf"
else:
    FRAMEWORK = "jax"
@require_sentencepiece
class MarianTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MarianTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True
    def setUp(self):
        super().setUp()
        vocab = ["</s>", "<unk>", "▁This", "▁is", "▁a", "▁t", "est", "\u0120", "<pad>"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        save_dir = Path(self.tmpdirname)
        save_json(vocab_tokens, save_dir / VOCAB_FILES_NAMES["vocab"])
        save_json(mock_tokenizer_config, save_dir / VOCAB_FILES_NAMES["tokenizer_config_file"])
        if not (save_dir / VOCAB_FILES_NAMES["source_spm"]).exists():
            copyfile(SAMPLE_SP, save_dir / VOCAB_FILES_NAMES["source_spm"])
            copyfile(SAMPLE_SP, save_dir / VOCAB_FILES_NAMES["target_spm"])
        tokenizer = MarianTokenizer.from_pretrained(self.tmpdirname)
        tokenizer.save_pretrained(self.tmpdirname)
    def get_tokenizer(self, **kwargs) -> MarianTokenizer:
        return MarianTokenizer.from_pretrained(self.tmpdirname, **kwargs)
    def get_input_output_texts(self, tokenizer):
        return (
            "This is a test",
            "This is a test",
        )
    def test_convert_token_and_id(self):
        token = "</s>"
        token_id = 0
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())
        self.assertEqual(vocab_keys[0], "</s>")
        self.assertEqual(vocab_keys[1], "<unk>")
        self.assertEqual(vocab_keys[-1], "<pad>")
        self.assertEqual(len(vocab_keys), 9)
    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 9)
    def test_tokenizer_equivalence_en_de(self):
        en_de_tokenizer = MarianTokenizer.from_pretrained(f"{ORG_NAME}opus-mt-en-de")
        batch = en_de_tokenizer(["I am a small frog"], return_tensors=None)
        self.assertIsInstance(batch, BatchEncoding)
        expected = [38, 121, 14, 697, 38848, 0]
        self.assertListEqual(expected, batch.input_ids[0])
        save_dir = tempfile.mkdtemp()
        en_de_tokenizer.save_pretrained(save_dir)
        contents = [x.name for x in Path(save_dir).glob("*")]
        self.assertIn("source.spm", contents)
        MarianTokenizer.from_pretrained(save_dir)
    def test_outputs_not_longer_than_maxlen(self):
        tok = self.get_tokenizer()
        batch = tok(
            ["I am a small frog" * 1000, "I am a small frog"], padding=True, truncation=True, return_tensors=FRAMEWORK)
        self.assertIsInstance(batch, BatchEncoding)
        self.assertEqual(batch.input_ids.shape, (2, 512))
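    # Note (illustrative addition): the (2, 512) shape relies on the tokenizer's
    # default model_max_length of 512; the 1000x-repeated sentence is truncated
    # down to that limit while the short one is padded up to it.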
    def test_outputs_can_be_shorter(self):
        tok = self.get_tokenizer()
        batch_smaller = tok(["I am a tiny frog", "I am a small frog"], padding=True, return_tensors=FRAMEWORK)
        self.assertIsInstance(batch_smaller, BatchEncoding)
        self.assertEqual(batch_smaller.input_ids.shape, (2, 10))
@slow
    def test_tokenizer_integration(self):
        # fmt: off
UpperCamelCase__ :Dict = {'''input_ids''': [[43495, 462, 20, 42164, 1369, 52, 464, 132, 1703, 492, 13, 7491, 38999, 6, 8, 464, 132, 1703, 492, 13, 4669, 37867, 13, 7525, 27, 1593, 988, 13, 33972, 7029, 6, 20, 8251, 383, 2, 270, 5866, 3788, 2, 2353, 8251, 12338, 2, 13958, 387, 2, 3629, 6953, 188, 2900, 2, 13958, 8011, 11501, 23, 8460, 4073, 34009, 20, 435, 11439, 27, 8, 8460, 4073, 6004, 20, 9988, 375, 27, 33, 266, 1945, 1076, 1350, 37867, 3288, 5, 577, 1076, 4374, 8, 5082, 5, 26453, 257, 556, 403, 2, 242, 132, 383, 316, 492, 8, 10767, 6, 316, 304, 4239, 3, 0], [148, 15722, 19, 1839, 12, 1350, 13, 22327, 5082, 5418, 47567, 35938, 59, 318, 19552, 108, 2183, 54, 14976, 4835, 32, 547, 1114, 8, 315, 2417, 5, 92, 19088, 3, 0, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100], [36, 6395, 12570, 39147, 11597, 6, 266, 4, 45405, 7296, 3, 0, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=UpperCamelCase__, model_name="Helsinki-NLP/opus-mt-en-de", revision="1a8c2263da11e68e50938f97e10cd57820bd504c", decode_kwargs={"use_source_tokenizer": True})
    def test_separate_vocabs(self):
        tokenizer = MarianTokenizer.from_pretrained("hf-internal-testing/test-marian-two-vocabs")
        source_text = "Tämä on testi"
        target_text = "This is a test"
        expected_src_ids = [76, 7, 2047, 2]
        expected_target_ids = [69, 12, 11, 940, 2]
        src_ids = tokenizer(source_text).input_ids
        self.assertListEqual(src_ids, expected_src_ids)
        target_ids = tokenizer(text_target=target_text).input_ids
        self.assertListEqual(target_ids, expected_target_ids)
        decoded = tokenizer.decode(target_ids, skip_special_tokens=True)
        self.assertEqual(decoded, target_text)
'''simple docstring'''
from __future__ import annotations
Matrix = list[list[int]]
# assigning initial values to the grid
initial_grid: Matrix = [
[3, 0, 6, 5, 0, 8, 4, 0, 0],
[5, 2, 0, 0, 0, 0, 0, 0, 0],
[0, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
# a grid with no solution
no_solution: Matrix = [
[5, 0, 6, 5, 0, 8, 4, 0, 3],
[5, 2, 0, 0, 0, 0, 0, 0, 2],
[1, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
def is_safe(grid: Matrix, row: int, column: int, n: int) -> bool:
'''simple docstring'''
for i in range(9 ):
if grid[row][i] == n or grid[i][column] == n:
return False
for i in range(3 ):
for j in range(3 ):
if grid[(row - row % 3) + i][(column - column % 3) + j] == n:
return False
return True
def find_empty_location(grid: Matrix) -> tuple[int, int] | None:
'''simple docstring'''
for i in range(9 ):
for j in range(9 ):
if grid[i][j] == 0:
return i, j
return None
def sudoku(grid: Matrix) -> Matrix | None:
    if location := find_empty_location(grid):
        row, column = location
    else:
        # If the location is ``None``, then the grid is solved.
        return grid
    for digit in range(1, 10):
        if is_safe(grid, row, column, digit):
            grid[row][column] = digit
            if sudoku(grid) is not None:
                return grid
            grid[row][column] = 0
    return None
def print_solution(grid: Matrix) -> None:
    for row in grid:
        for cell in row:
            print(cell, end=" ")
        print()
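# Worked example (illustrative addition, based on `initial_grid` above):
#     is_safe(initial_grid, 0, 1, 3)  # False: a 3 already sits in row 0 (and in column 1)
#     is_safe(initial_grid, 0, 1, 1)  # True: no 1 in row 0, column 1, or the top-left box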
if __name__ == "__main__":
# make a copy of grid so that you can compare with the unmodified grid
for example_grid in (initial_grid, no_solution):
print('''\nExample grid:\n''' + '''=''' * 20)
print_solution(example_grid)
print('''\nExample grid solution:''')
        solution = sudoku(example_grid)
if solution is not None:
print_solution(solution)
else:
print('''Cannot find a solution.''') | 219 | 1 |
from typing import Callable, Optional
from .. import Features
from ..packaged_modules.generator.generator import Generator
from .abc import AbstractDatasetInputStream
class GeneratorDatasetInputStream(AbstractDatasetInputStream):
    def __init__(
        self,
        generator: Callable,
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        gen_kwargs: Optional[dict] = None,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        super().__init__(
            features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, streaming=streaming, num_proc=num_proc, **kwargs)
        self.builder = Generator(
            cache_dir=cache_dir, features=features, generator=generator, gen_kwargs=gen_kwargs, **kwargs)
    def read(self):
        # Build iterable dataset
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split="train")
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None
            self.builder.download_and_prepare(
                download_config=download_config, download_mode=download_mode, verification_mode=verification_mode, base_path=base_path, num_proc=self.num_proc)
            dataset = self.builder.as_dataset(
                split="train", verification_mode=verification_mode, in_memory=self.keep_in_memory)
        return dataset
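# Usage sketch (illustrative addition): this reader backs the public
# `Dataset.from_generator` API, e.g.
#
#     from datasets import Dataset
#     def gen():
#         for i in range(3):
#             yield {"id": i}
#     ds = Dataset.from_generator(gen)  # map-style unless streaming=True is requested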
| 90 |
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
    "tokenizer_file": {
        "EleutherAI/gpt-neox-20b": "https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/tokenizer.json",
    },
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "gpt-neox-20b": 2048,
}
class GPTNeoXTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(self, vocab_file=None, merges_file=None, tokenizer_file=None, unk_token="<|endoftext|>", bos_token="<|endoftext|>", eos_token="<|endoftext|>", add_prefix_space=False, **kwargs):
        super().__init__(
            vocab_file, merges_file, tokenizer_file=tokenizer_file, unk_token=unk_token, bos_token=bos_token, eos_token=eos_token, add_prefix_space=add_prefix_space, **kwargs)
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)
        self.add_prefix_space = add_prefix_space
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text, add_special_tokens=False) + [self.eos_token_id])
        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
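# Usage sketch (illustrative addition; the round-trip behaviour is assumed, not
# taken from this file):
#     from transformers import GPTNeoXTokenizerFast
#     tok = GPTNeoXTokenizerFast.from_pretrained("EleutherAI/gpt-neox-20b")
#     ids = tok("Hello world").input_ids
#     assert tok.decode(ids) == "Hello world"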
| 90 | 1 |
import copy
import os
from typing import TYPE_CHECKING, List, Union
if TYPE_CHECKING:
pass
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
ALIGN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "kakaobrain/align-base": "https://huggingface.co/kakaobrain/align-base/resolve/main/config.json",
}
class AlignTextConfig(PretrainedConfig):
    model_type = "align_text_model"
    def __init__(self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=0, position_embedding_type="absolute", use_cache=True, **kwargs):
        super().__init__(**kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.pad_token_id = pad_token_id
    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        # get the text config dict if we are loading from AlignConfig
        if config_dict.get("model_type") == "align":
            config_dict = config_dict["text_config"]
        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors.")
        return cls.from_dict(config_dict, **kwargs)
class AlignVisionConfig(PretrainedConfig):
    model_type = "align_vision_model"
    def __init__(self, num_channels=3, image_size=600, width_coefficient=2.0, depth_coefficient=3.1, depth_divisor=8, kernel_sizes=[3, 3, 5, 3, 5, 5, 3], in_channels=[32, 16, 24, 40, 80, 112, 192], out_channels=[16, 24, 40, 80, 112, 192, 320], depthwise_padding=[], strides=[1, 2, 2, 2, 1, 2, 1], num_block_repeats=[1, 2, 2, 3, 3, 4, 1], expand_ratios=[1, 6, 6, 6, 6, 6, 6], squeeze_expansion_ratio=0.25, hidden_act="swish", hidden_dim=2560, pooling_type="mean", initializer_range=0.02, batch_norm_eps=0.001, batch_norm_momentum=0.99, drop_connect_rate=0.2, **kwargs):
        super().__init__(**kwargs)
        self.num_channels = num_channels
        self.image_size = image_size
        self.width_coefficient = width_coefficient
        self.depth_coefficient = depth_coefficient
        self.depth_divisor = depth_divisor
        self.kernel_sizes = kernel_sizes
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.depthwise_padding = depthwise_padding
        self.strides = strides
        self.num_block_repeats = num_block_repeats
        self.expand_ratios = expand_ratios
        self.squeeze_expansion_ratio = squeeze_expansion_ratio
        self.hidden_act = hidden_act
        self.hidden_dim = hidden_dim
        self.pooling_type = pooling_type
        self.initializer_range = initializer_range
        self.batch_norm_eps = batch_norm_eps
        self.batch_norm_momentum = batch_norm_momentum
        self.drop_connect_rate = drop_connect_rate
        self.num_hidden_layers = sum(num_block_repeats) * 4
    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        # get the vision config dict if we are loading from AlignConfig
        if config_dict.get("model_type") == "align":
            config_dict = config_dict["vision_config"]
        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors.")
        return cls.from_dict(config_dict, **kwargs)
class AlignConfig(PretrainedConfig):
    model_type = "align"
    is_composition = True
    def __init__(self, text_config=None, vision_config=None, projection_dim=640, temperature_init_value=1.0, initializer_range=0.02, **kwargs):
        super().__init__(**kwargs)
        if text_config is None:
            text_config = {}
            logger.info("text_config is None. Initializing the AlignTextConfig with default values.")
        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. Initializing the AlignVisionConfig with default values.")
        self.text_config = AlignTextConfig(**text_config)
        self.vision_config = AlignVisionConfig(**vision_config)
        self.projection_dim = projection_dim
        self.temperature_init_value = temperature_init_value
        self.initializer_range = initializer_range
    @classmethod
    def from_text_vision_configs(cls, text_config: AlignTextConfig, vision_config: AlignVisionConfig, **kwargs):
        return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)
    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
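# Usage sketch (illustrative addition): compose a full config from the two
# sub-configs via the classmethod above.
#     text_config = AlignTextConfig(vocab_size=30522)
#     vision_config = AlignVisionConfig(image_size=600)
#     config = AlignConfig.from_text_vision_configs(text_config, vision_config)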
| 371 |
import math
from typing import Optional
import numpy as np
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"facebook/encodec_24khz": "https://huggingface.co/facebook/encodec_24khz/resolve/main/config.json",
"facebook/encodec_48khz": "https://huggingface.co/facebook/encodec_48khz/resolve/main/config.json",
}
class EncodecConfig(PretrainedConfig):
    model_type = "encodec"
    def __init__(self, target_bandwidths=[1.5, 3.0, 6.0, 12.0, 24.0], sampling_rate=24000, audio_channels=1, normalize=False, chunk_length_s=None, overlap=None, hidden_size=128, num_filters=32, num_residual_layers=1, upsampling_ratios=[8, 5, 4, 2], norm_type="weight_norm", kernel_size=7, last_kernel_size=7, residual_kernel_size=3, dilation_growth_rate=2, use_causal_conv=True, pad_mode="reflect", compress=2, num_lstm_layers=2, trim_right_ratio=1.0, codebook_size=1024, codebook_dim=None, use_conv_shortcut=True, **kwargs):
        self.target_bandwidths = target_bandwidths
        self.sampling_rate = sampling_rate
        self.audio_channels = audio_channels
        self.normalize = normalize
        self.chunk_length_s = chunk_length_s
        self.overlap = overlap
        self.hidden_size = hidden_size
        self.num_filters = num_filters
        self.num_residual_layers = num_residual_layers
        self.upsampling_ratios = upsampling_ratios
        self.norm_type = norm_type
        self.kernel_size = kernel_size
        self.last_kernel_size = last_kernel_size
        self.residual_kernel_size = residual_kernel_size
        self.dilation_growth_rate = dilation_growth_rate
        self.use_causal_conv = use_causal_conv
        self.pad_mode = pad_mode
        self.compress = compress
        self.num_lstm_layers = num_lstm_layers
        self.trim_right_ratio = trim_right_ratio
        self.codebook_size = codebook_size
        self.codebook_dim = codebook_dim if codebook_dim is not None else hidden_size
        self.use_conv_shortcut = use_conv_shortcut
        if self.norm_type not in ["weight_norm", "time_group_norm"]:
            raise ValueError(
                f'self.norm_type must be one of `"weight_norm"`, `"time_group_norm"`), got {self.norm_type}')
        super().__init__(**kwargs)
    @property
    def chunk_length(self) -> Optional[int]:
        if self.chunk_length_s is None:
            return None
        else:
            return int(self.chunk_length_s * self.sampling_rate)
    @property
    def chunk_stride(self) -> Optional[int]:
        if self.chunk_length_s is None or self.overlap is None:
            return None
        else:
            return max(1, int((1.0 - self.overlap) * self.chunk_length))
    @property
    def frame_rate(self) -> int:
        hop_length = np.prod(self.upsampling_ratios)
        return math.ceil(self.sampling_rate / hop_length)
    @property
    def num_quantizers(self) -> int:
        return int(1000 * self.target_bandwidths[-1] // (self.frame_rate * 10))
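    # Worked example (illustrative addition): with the 24 kHz defaults above,
    # upsampling_ratios = [8, 5, 4, 2] gives hop_length = 8 * 5 * 4 * 2 = 320,
    # so frame_rate = ceil(24000 / 320) = 75 frames per second.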
| 134 | 0 |
'''simple docstring'''
import argparse
import json
import os
import time
import zipfile
from get_ci_error_statistics import download_artifact, get_artifacts_links
from transformers import logging
logger = logging.get_logger(__name__)
def extract_warnings_from_single_artifact(artifact_path, targets):
    """Extract warnings from a downloaded artifact (in .zip format)."""
    selected_warnings = set()
    buffer = []
    def parse_line(fp):
        for line in fp:
            if isinstance(line, bytes):
                line = line.decode("UTF-8")
            if "warnings summary (final)" in line:
                continue
            # This means we are outside the body of a warning
            elif not line.startswith(" "):
                # process a single warning and move it to `selected_warnings`.
                if len(buffer) > 0:
                    warning = "\n".join(buffer)
                    # Only keep the warnings specified in `targets`
                    if any(f": {x}: " in warning for x in targets):
                        selected_warnings.add(warning)
                    buffer.clear()
                continue
            else:
                line = line.strip()
                buffer.append(line)
    if from_gh:
        for filename in os.listdir(artifact_path):
            file_path = os.path.join(artifact_path, filename)
            if not os.path.isdir(file_path):
                # read the file
                if filename != "warnings.txt":
                    continue
                with open(file_path) as fp:
                    parse_line(fp)
    else:
        try:
            with zipfile.ZipFile(artifact_path) as z:
                for filename in z.namelist():
                    if not os.path.isdir(filename):
                        # read the file
                        if filename != "warnings.txt":
                            continue
                        with z.open(filename) as fp:
                            parse_line(fp)
        except Exception:
            logger.warning(
                f"{artifact_path} is either an invalid zip file or something else wrong. This file is skipped.")
    return selected_warnings
def extract_warnings(artifact_dir, targets):
    """Extract warnings from all artifact files."""
    selected_warnings = set()
    paths = [os.path.join(artifact_dir, p) for p in os.listdir(artifact_dir) if (p.endswith(".zip") or from_gh)]
    for p in paths:
        selected_warnings.update(extract_warnings_from_single_artifact(p, targets))
    return selected_warnings
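# Illustrative note (added): the `f": {x}: "` membership test above matches
# pytest-style warning lines. For a hypothetical buffer joined into
#     "src/transformers/foo.py:123: FutureWarning: `bar` is deprecated"
# the warning contains ": FutureWarning: " and is kept when targets include
# "FutureWarning".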
if __name__ == "__main__":
    def list_str(values):
        return values.split(",")
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''--workflow_run_id''', type=str, required=True, help='''A GitHub Actions workflow run id.''')
parser.add_argument(
'''--output_dir''',
type=str,
required=True,
help='''Where to store the downloaded artifacts and other result files.''',
)
parser.add_argument('''--token''', default=None, type=str, help='''A token that has actions:read permission.''')
# optional parameters
parser.add_argument(
'''--targets''',
default='''DeprecationWarning,UserWarning,FutureWarning''',
type=list_str,
help='''Comma-separated list of target warning(s) which we want to extract.''',
)
parser.add_argument(
'''--from_gh''',
action='''store_true''',
help='''If running from a GitHub action workflow and collecting warnings from its artifacts.''',
)
    args = parser.parse_args()
    from_gh = args.from_gh
if from_gh:
# The artifacts have to be downloaded using `actions/download-artifact@v3`
pass
else:
os.makedirs(args.output_dir, exist_ok=True)
# get download links
        artifacts = get_artifacts_links(args.workflow_run_id, token=args.token)
with open(os.path.join(args.output_dir, '''artifacts.json'''), '''w''', encoding='''UTF-8''') as fp:
json.dump(artifacts, fp, ensure_ascii=False, indent=4)
# download artifacts
for idx, (name, url) in enumerate(artifacts.items()):
print(name)
print(url)
print('''=''' * 8_0)
download_artifact(name, url, args.output_dir, args.token)
# Be gentle to GitHub
time.sleep(1)
# extract warnings from artifacts
    selected_warnings = extract_warnings(args.output_dir, args.targets)
    selected_warnings = sorted(selected_warnings)
with open(os.path.join(args.output_dir, '''selected_warnings.json'''), '''w''', encoding='''UTF-8''') as fp:
json.dump(selected_warnings, fp, ensure_ascii=False, indent=4) | 331 |
'''simple docstring'''
import argparse
import logging
import os
from pathlib import Path
from typing import Any, Dict
import pytorch_lightning as pl
from pytorch_lightning.utilities import rank_zero_info
from transformers import (
AdamW,
AutoConfig,
AutoModel,
AutoModelForPreTraining,
AutoModelForQuestionAnswering,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoModelForTokenClassification,
AutoModelWithLMHead,
AutoTokenizer,
PretrainedConfig,
PreTrainedTokenizer,
)
from transformers.optimization import (
Adafactor,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from transformers.utils.versions import require_version
logger = logging.getLogger(__name__)
require_version('''pytorch_lightning>=1.0.4''')
MODEL_MODES = {
'''base''': AutoModel,
'''sequence-classification''': AutoModelForSequenceClassification,
'''question-answering''': AutoModelForQuestionAnswering,
'''pretraining''': AutoModelForPreTraining,
'''token-classification''': AutoModelForTokenClassification,
'''language-modeling''': AutoModelWithLMHead,
    "summarization": AutoModelForSeq2SeqLM,
    "translation": AutoModelForSeq2SeqLM,
}
# update this and the import above to support new schedulers from transformers.optimization
arg_to_scheduler = {
'''linear''': get_linear_schedule_with_warmup,
'''cosine''': get_cosine_schedule_with_warmup,
'''cosine_w_restarts''': get_cosine_with_hard_restarts_schedule_with_warmup,
'''polynomial''': get_polynomial_decay_schedule_with_warmup,
# '': get_constant_schedule, # not supported for now
# '': get_constant_schedule_with_warmup, # not supported for now
}
arg_to_scheduler_choices = sorted(arg_to_scheduler.keys())
arg_to_scheduler_metavar = "{" + ", ".join(arg_to_scheduler_choices) + "}"
class BaseTransformer(pl.LightningModule):
    def __init__(self, hparams: argparse.Namespace, num_labels=None, mode="base", config=None, tokenizer=None, model=None, **config_kwargs):
        super().__init__()
        # TODO: move to self.save_hyperparameters()
        # self.save_hyperparameters()
        # can also expand arguments into trainer signature for easier reading
        self.save_hyperparameters(hparams)
        self.step_count = 0
        self.output_dir = Path(self.hparams.output_dir)
        cache_dir = self.hparams.cache_dir if self.hparams.cache_dir else None
        if config is None:
            self.config = AutoConfig.from_pretrained(
                self.hparams.config_name if self.hparams.config_name else self.hparams.model_name_or_path,
                **({"num_labels": num_labels} if num_labels is not None else {}),
                cache_dir=cache_dir,
                **config_kwargs,
            )
        else:
            self.config: PretrainedConfig = config
        extra_model_params = ("encoder_layerdrop", "decoder_layerdrop", "dropout", "attention_dropout")
        for p in extra_model_params:
            if getattr(self.hparams, p, None):
                assert hasattr(self.config, p), f"model config doesn't have a `{p}` attribute"
                setattr(self.config, p, getattr(self.hparams, p))
        if tokenizer is None:
            self.tokenizer = AutoTokenizer.from_pretrained(
                self.hparams.tokenizer_name if self.hparams.tokenizer_name else self.hparams.model_name_or_path,
                cache_dir=cache_dir,
            )
        else:
            self.tokenizer: PreTrainedTokenizer = tokenizer
        self.model_type = MODEL_MODES[mode]
        if model is None:
            self.model = self.model_type.from_pretrained(
                self.hparams.model_name_or_path,
                from_tf=bool(".ckpt" in self.hparams.model_name_or_path),
                config=self.config,
                cache_dir=cache_dir,
            )
        else:
            self.model = model
    def load_hf_checkpoint(self, *args, **kwargs):
        self.model = self.model_type.from_pretrained(*args, **kwargs)
    def get_lr_scheduler(self):
        get_schedule_func = arg_to_scheduler[self.hparams.lr_scheduler]
        scheduler = get_schedule_func(
            self.opt, num_warmup_steps=self.hparams.warmup_steps, num_training_steps=self.total_steps())
        scheduler = {"scheduler": scheduler, "interval": "step", "frequency": 1}
        return scheduler
    def configure_optimizers(self):
        """Prepare optimizer and schedule (linear warmup and decay)."""
        model = self.model
        no_decay = ["bias", "LayerNorm.weight"]
        optimizer_grouped_parameters = [
            {
                "params": [
                    p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)
                ],  # check this named parameters
                "weight_decay": self.hparams.weight_decay,
            },
            {
                "params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)],
                "weight_decay": 0.0,
            },
        ]
        if self.hparams.adafactor:
            optimizer = Adafactor(
                optimizer_grouped_parameters, lr=self.hparams.learning_rate, scale_parameter=False, relative_step=False)
        else:
            optimizer = AdamW(
                optimizer_grouped_parameters, lr=self.hparams.learning_rate, eps=self.hparams.adam_epsilon)
        self.opt = optimizer
        scheduler = self.get_lr_scheduler()
        return [optimizer], [scheduler]
    def test_step(self, batch, batch_nb):
        return self.validation_step(batch, batch_nb)
    def test_epoch_end(self, outputs):
        return self.validation_end(outputs)
    def total_steps(self) -> int:
        """The number of total training steps that will be run. Used for lr scheduler purposes."""
        num_devices = max(1, self.hparams.gpus)  # TODO: consider num_tpu_cores
        effective_batch_size = self.hparams.train_batch_size * self.hparams.accumulate_grad_batches * num_devices
        return (self.dataset_size / effective_batch_size) * self.hparams.max_epochs
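    # Worked example (illustrative addition): with train_batch_size=32,
    # accumulate_grad_batches=4 and gpus=2, the effective batch size is
    # 32 * 4 * 2 = 256, so total_steps = dataset_size / 256 * max_epochs.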
    def setup(self, stage):
        if stage == "test":
            self.dataset_size = len(self.test_dataloader().dataset)
        else:
            self.train_loader = self.get_dataloader("train", self.hparams.train_batch_size, shuffle=True)
            self.dataset_size = len(self.train_dataloader().dataset)
    def get_dataloader(self, type_path: str, batch_size: int, shuffle: bool = False):
        raise NotImplementedError("You must implement this for your task")
    def train_dataloader(self):
        return self.train_loader
    def val_dataloader(self):
        return self.get_dataloader("dev", self.hparams.eval_batch_size, shuffle=False)
    def test_dataloader(self):
        return self.get_dataloader("test", self.hparams.eval_batch_size, shuffle=False)
    def _feature_file(self, mode):
        return os.path.join(
            self.hparams.data_dir,
            "cached_{}_{}_{}".format(
                mode, list(filter(None, self.hparams.model_name_or_path.split("/"))).pop(), str(self.hparams.max_seq_length)))
    @pl.utilities.rank_zero_only
    def on_save_checkpoint(self, checkpoint: Dict[str, Any]) -> None:
        save_path = self.output_dir.joinpath("best_tfmr")
        self.model.config.save_step = self.step_count
        self.model.save_pretrained(save_path)
        self.tokenizer.save_pretrained(save_path)
    @staticmethod
    def add_model_specific_args(parser, root_dir):
        parser.add_argument(
            "--model_name_or_path", default=None, type=str, required=True, help="Path to pretrained model or model identifier from huggingface.co/models")
        parser.add_argument(
            "--config_name", default="", type=str, help="Pretrained config name or path if not the same as model_name")
        parser.add_argument(
            "--tokenizer_name", default=None, type=str, help="Pretrained tokenizer name or path if not the same as model_name")
        parser.add_argument(
            "--cache_dir", default=str(Path(root_dir).parent / "test_run" / "cache"), type=str, help="Where do you want to store the pre-trained models downloaded from huggingface.co")
        parser.add_argument(
            "--encoder_layerdrop", type=float, help="Encoder layer dropout probability (Optional). Goes into model.config")
        parser.add_argument(
            "--decoder_layerdrop", type=float, help="Decoder layer dropout probability (Optional). Goes into model.config")
        parser.add_argument(
            "--dropout", type=float, help="Dropout probability (Optional). Goes into model.config")
        parser.add_argument(
            "--attention_dropout", type=float, help="Attention dropout probability (Optional). Goes into model.config")
        parser.add_argument("--learning_rate", default=5e-5, type=float, help="The initial learning rate for Adam.")
        parser.add_argument(
            "--lr_scheduler", default="linear", choices=arg_to_scheduler_choices, metavar=arg_to_scheduler_metavar, type=str, help="Learning rate scheduler")
        parser.add_argument("--weight_decay", default=0.0, type=float, help="Weight decay if we apply some.")
        parser.add_argument("--adam_epsilon", default=1e-8, type=float, help="Epsilon for Adam optimizer.")
        parser.add_argument("--warmup_steps", default=0, type=int, help="Linear warmup over warmup_steps.")
        parser.add_argument("--num_workers", default=4, type=int, help="kwarg passed to DataLoader")
        parser.add_argument("--num_train_epochs", dest="max_epochs", default=3, type=int)
        parser.add_argument("--train_batch_size", default=32, type=int)
        parser.add_argument("--eval_batch_size", default=32, type=int)
        parser.add_argument("--adafactor", action="store_true")
class InitCallback(pl.Callback):
    def on_sanity_check_start(self, trainer, pl_module):
        if (
            trainer.is_global_zero and trainer.global_rank == 0
        ):  # we initialize the retriever only on the master worker with RAY. In new pytorch-lightning, accelerators are removed.
            pl_module.model.rag.retriever.init_retrieval()  # better to use hook functions.
class CheckParamCallback(pl.Callback):
    # check whether newly added model parameters are differentiable
    def on_after_backward(self, trainer, pl_module):
        # print(pl_module.model.rag)
        for name, param in pl_module.model.rag.named_parameters():
            if param.grad is None:
                print(name)
class LoggingCallback(pl.Callback):
    def on_batch_end(self, trainer, pl_module):
        lr_scheduler = trainer.lr_schedulers[0]["scheduler"]
        lrs = {f"lr_group_{i}": lr for i, lr in enumerate(lr_scheduler.get_lr())}
        pl_module.logger.log_metrics(lrs)
    def on_validation_end(self, trainer: pl.Trainer, pl_module: pl.LightningModule):
        rank_zero_info("***** Validation results *****")
        metrics = trainer.callback_metrics
        # Log results
        for key in sorted(metrics):
            if key not in ["log", "progress_bar"]:
                rank_zero_info("{} = {}\n".format(key, str(metrics[key])))
    def on_test_end(self, trainer: pl.Trainer, pl_module: pl.LightningModule):
        rank_zero_info("***** Test results *****")
        metrics = trainer.callback_metrics
        # Log and save results to file
        output_test_results_file = os.path.join(pl_module.hparams.output_dir, "test_results.txt")
        with open(output_test_results_file, "w") as writer:
            for key in sorted(metrics):
                if key not in ["log", "progress_bar"]:
                    rank_zero_info("{} = {}\n".format(key, str(metrics[key])))
                    writer.write("{} = {}\n".format(key, str(metrics[key])))
def add_generic_args(parser, root_dir) -> None:
    parser.add_argument(
        "--output_dir", default=str(Path(root_dir).parent / "test_run" / "model_checkpoints"), type=str, help="The output directory where the model predictions and checkpoints will be written.")
    parser.add_argument(
        "--fp16", action="store_true", help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit")
    parser.add_argument(
        "--fp16_opt_level", type=str, default="O2", help=(
            "For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
            "See details at https://nvidia.github.io/apex/amp.html"
        ))
    parser.add_argument("--n_tpu_cores", dest="tpu_cores", type=int)
    parser.add_argument("--max_grad_norm", dest="gradient_clip_val", default=1.0, type=float, help="Max gradient norm")
    parser.add_argument("--do_train", action="store_true", help="Whether to run training.")
    parser.add_argument("--do_predict", action="store_true", help="Whether to run predictions on the test set.")
    parser.add_argument(
        "--gradient_accumulation_steps", dest="accumulate_grad_batches", type=int, default=1, help="Number of updates steps to accumulate before performing a backward/update pass.")
    parser.add_argument("--seed", type=int, default=42, help="random seed for initialization")
    parser.add_argument(
        "--data_dir", default=str(Path(root_dir).parent / "test_run" / "dummy-train-data"), type=str, help="The input data dir. Should contain the training files for the CoNLL-2003 NER task.")
def generic_train(
    model: BaseTransformer,
    args: argparse.Namespace,
    early_stopping_callback=None,
    logger=True,  # can pass WandbLogger() here
    extra_callbacks=[],
    checkpoint_callback=None,
    logging_callback=None,
    **extra_train_kwargs,
):
    pl.seed_everything(args.seed)
    # init model
    odir = Path(model.hparams.output_dir)
    odir.mkdir(exist_ok=True)
    # add custom checkpoints
    if checkpoint_callback is None:
        checkpoint_callback = pl.callbacks.ModelCheckpoint(
            filepath=args.output_dir, prefix="checkpoint", monitor="val_loss", mode="min", save_top_k=1)
    if early_stopping_callback:
        extra_callbacks.append(early_stopping_callback)
    if logging_callback is None:
        logging_callback = LoggingCallback()
    train_params = {}
    if args.fp16:
        train_params["precision"] = 16
    if args.gpus > 1:
        train_params["accelerator"] = "auto"
        train_params["strategy"] = "ddp"
    train_params["accumulate_grad_batches"] = args.accumulate_grad_batches
    train_params["profiler"] = None
    train_params["devices"] = "auto"
    trainer = pl.Trainer.from_argparse_args(
        args,
        weights_summary=None,
        callbacks=[logging_callback] + extra_callbacks + [InitCallback()] + [checkpoint_callback],
        logger=logger,
        val_check_interval=1,
        num_sanity_val_steps=2,
        **train_params,
    )
    if args.do_train:
        trainer.fit(model)
    else:
        print("RAG modeling tests with new set functions successfully executed!")
    return trainer
from __future__ import annotations
def depth_first_search(
    possible_board: list[int],
    diagonal_right_collisions: list[int],
    diagonal_left_collisions: list[int],
    boards: list[list[str]],
    n: int,
) -> None:
    # Get the next row in the current board (possible_board) to fill with a queen.
    row = len(possible_board)
# If row is equal to the size of the board it means there are a queen in each row in
# the current board (possible_board)
if row == n:
# We convert the variable possible_board that looks like this: [1, 3, 0, 2] to
# this: ['. Q . . ', '. . . Q ', 'Q . . . ', '. . Q . ']
boards.append([""". """ * i + """Q """ + """. """ * (n - 1 - i) for i in possible_board] )
return
# We iterate each column in the row to find all possible results in each row
for col in range(lowercase__ ):
# We apply that we learned previously. First we check that in the current board
# (possible_board) there are not other same value because if there is it means
# that there are a collision in vertical. Then we apply the two formulas we
# learned before:
#
# 45º: y - x = b or 45: row - col = b
# 135º: y + x = b or row + col = b.
#
# And we verify if the results of this two formulas not exist in their variables
# respectively. (diagonal_right_collisions, diagonal_left_collisions)
#
# If any or these are True it means there is a collision so we continue to the
# next value in the for loop.
if (
col in possible_board
or row - col in diagonal_right_collisions
or row + col in diagonal_left_collisions
):
continue
# If it is False we call dfs function again and we update the inputs
        depth_first_search(
            [*possible_board, col], [*diagonal_right_collisions, row - col], [*diagonal_left_collisions, row + col], boards, n)
def n_queens_solution(n: int) -> None:
    boards: list[list[str]] = []
    depth_first_search([], [], [], boards, n)
    # Print all the boards
    for board in boards:
        for column in board:
            print(column)
        print("")
    print(len(boards), "solutions were found.")
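# Worked example of the diagonal bookkeeping (illustrative addition): with queens
# at (0, 1) and (1, 3), diagonal_right_collisions == [-1, -2] and
# diagonal_left_collisions == [1, 4]; a candidate at (row=2, col=2) is rejected
# because row + col == 4 already appears in diagonal_left_collisions.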
if __name__ == "__main__":
import doctest
doctest.testmod()
n_queens_solution(4)
| 28 |
from datetime import datetime as dt
import os
from github import Github
LABELS_TO_EXEMPT = [
'good first issue',
'good second issue',
'good difficult issue',
'feature request',
'new model',
'wip',
]
def main() -> None:
    g = Github(os.environ["GITHUB_TOKEN"])
    repo = g.get_repo("huggingface/transformers")
    open_issues = repo.get_issues(state="open")
    for issue in open_issues:
        comments = sorted([comment for comment in issue.get_comments()], key=lambda i: i.created_at, reverse=True)
        last_comment = comments[0] if len(comments) > 0 else None
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and (dt.utcnow() - issue.updated_at).days > 7
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# print(f"Would close issue {issue.number} since it has been 7 days of inactivity since bot mention.")
issue.edit(state="""closed""" )
elif (
(dt.utcnow() - issue.updated_at).days > 23
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# print(f"Would add stale comment to {issue.number}")
issue.create_comment(
"""This issue has been automatically marked as stale because it has not had """
"""recent activity. If you think this still needs to be addressed """
"""please comment on this thread.\n\nPlease note that issues that do not follow the """
"""[contributing guidelines](https://github.com/huggingface/transformers/blob/main/CONTRIBUTING.md) """
"""are likely to be ignored.""" )
if __name__ == "__main__":
main()
| 28 | 1 |
"""simple docstring"""
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import SegformerImageProcessor, SwinConfig, UperNetConfig, UperNetForSemanticSegmentation
def get_upernet_config(model_name):
    auxiliary_in_channels = 384
    window_size = 7
    if "tiny" in model_name:
        embed_dim = 96
        depths = (2, 2, 6, 2)
        num_heads = (3, 6, 12, 24)
    elif "small" in model_name:
        embed_dim = 96
        depths = (2, 2, 18, 2)
        num_heads = (3, 6, 12, 24)
    elif "base" in model_name:
        embed_dim = 128
        depths = (2, 2, 18, 2)
        num_heads = (4, 8, 16, 32)
        window_size = 12
        auxiliary_in_channels = 512
    elif "large" in model_name:
        embed_dim = 192
        depths = (2, 2, 18, 2)
        num_heads = (6, 12, 24, 48)
        window_size = 12
        auxiliary_in_channels = 768
    # set label information
    num_labels = 150
    repo_id = "huggingface/label-files"
    filename = "ade20k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}
    backbone_config = SwinConfig(
        embed_dim=embed_dim, depths=depths, num_heads=num_heads, window_size=window_size, out_features=["stage1", "stage2", "stage3", "stage4"])
    config = UperNetConfig(
        backbone_config=backbone_config, auxiliary_in_channels=auxiliary_in_channels, num_labels=num_labels, id2label=id2label, label2id=label2id)
    return config
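# Worked example (illustrative addition): for model_name = "upernet-swin-tiny"
# the branch above selects embed_dim=96, depths=(2, 2, 6, 2),
# num_heads=(3, 6, 12, 24), window_size=7 and auxiliary_in_channels=384.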
def create_rename_keys(config):
    rename_keys = []
# fmt: off
# stem
rename_keys.append(('backbone.patch_embed.projection.weight', 'backbone.embeddings.patch_embeddings.projection.weight') )
rename_keys.append(('backbone.patch_embed.projection.bias', 'backbone.embeddings.patch_embeddings.projection.bias') )
rename_keys.append(('backbone.patch_embed.norm.weight', 'backbone.embeddings.norm.weight') )
rename_keys.append(('backbone.patch_embed.norm.bias', 'backbone.embeddings.norm.bias') )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((f"""backbone.stages.{i}.blocks.{j}.norm1.weight""", f"""backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.weight""") )
rename_keys.append((f"""backbone.stages.{i}.blocks.{j}.norm1.bias""", f"""backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.bias""") )
rename_keys.append((f"""backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_bias_table""", f"""backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table""") )
rename_keys.append((f"""backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_index""", f"""backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index""") )
rename_keys.append((f"""backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.weight""", f"""backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight""") )
rename_keys.append((f"""backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.bias""", f"""backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias""") )
rename_keys.append((f"""backbone.stages.{i}.blocks.{j}.norm2.weight""", f"""backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.weight""") )
rename_keys.append((f"""backbone.stages.{i}.blocks.{j}.norm2.bias""", f"""backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.bias""") )
rename_keys.append((f"""backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.weight""", f"""backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight""") )
rename_keys.append((f"""backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.bias""", f"""backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias""") )
rename_keys.append((f"""backbone.stages.{i}.blocks.{j}.ffn.layers.1.weight""", f"""backbone.encoder.layers.{i}.blocks.{j}.output.dense.weight""") )
rename_keys.append((f"""backbone.stages.{i}.blocks.{j}.ffn.layers.1.bias""", f"""backbone.encoder.layers.{i}.blocks.{j}.output.dense.bias""") )
if i < 3:
rename_keys.append((f"""backbone.stages.{i}.downsample.reduction.weight""", f"""backbone.encoder.layers.{i}.downsample.reduction.weight""") )
rename_keys.append((f"""backbone.stages.{i}.downsample.norm.weight""", f"""backbone.encoder.layers.{i}.downsample.norm.weight""") )
rename_keys.append((f"""backbone.stages.{i}.downsample.norm.bias""", f"""backbone.encoder.layers.{i}.downsample.norm.bias""") )
rename_keys.append((f"""backbone.norm{i}.weight""", f"""backbone.hidden_states_norms.stage{i+1}.weight""") )
rename_keys.append((f"""backbone.norm{i}.bias""", f"""backbone.hidden_states_norms.stage{i+1}.bias""") )
# decode head
rename_keys.extend(
[
('decode_head.conv_seg.weight', 'decode_head.classifier.weight'),
('decode_head.conv_seg.bias', 'decode_head.classifier.bias'),
('auxiliary_head.conv_seg.weight', 'auxiliary_head.classifier.weight'),
('auxiliary_head.conv_seg.bias', 'auxiliary_head.classifier.bias'),
] )
# fmt: on
return rename_keys
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
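

def _demo_rename_key():
    # Editor-added illustrative sketch, not part of the original conversion
    # script: rename_key moves a value to a new key in place.
    d = {"old": 1}
    rename_key(d, "old", "new")
    assert d == {"new": 1}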
def read_in_q_k_v(state_dict, backbone_config):
    num_features = [int(backbone_config.embed_dim * 2**i) for i in range(len(backbone_config.depths))]
    for i in range(len(backbone_config.depths)):
        dim = num_features[i]
        for j in range(backbone_config.depths[i]):
            # fmt: off
            # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
            in_proj_weight = state_dict.pop(f"""backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.weight""")
            in_proj_bias = state_dict.pop(f"""backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.bias""")
            # next, add query, keys and values (in that order) to the state dict
            state_dict[f"""backbone.encoder.layers.{i}.blocks.{j}.attention.self.query.weight"""] = in_proj_weight[:dim, :]
            state_dict[f"""backbone.encoder.layers.{i}.blocks.{j}.attention.self.query.bias"""] = in_proj_bias[:dim]
            state_dict[f"""backbone.encoder.layers.{i}.blocks.{j}.attention.self.key.weight"""] = in_proj_weight[
                dim : dim * 2, :
            ]
            state_dict[f"""backbone.encoder.layers.{i}.blocks.{j}.attention.self.key.bias"""] = in_proj_bias[
                dim : dim * 2
            ]
            state_dict[f"""backbone.encoder.layers.{i}.blocks.{j}.attention.self.value.weight"""] = in_proj_weight[
                -dim:, :
            ]
            state_dict[f"""backbone.encoder.layers.{i}.blocks.{j}.attention.self.value.bias"""] = in_proj_bias[-dim:]
# fmt: on
def correct_unfold_reduction_order(x):
    out_channel, in_channel = x.shape
    x = x.reshape(out_channel, 4, in_channel // 4)
    x = x[:, [0, 2, 1, 3], :].transpose(1, 2).reshape(out_channel, in_channel)
    return x


def reverse_correct_unfold_reduction_order(x):
    out_channel, in_channel = x.shape
    x = x.reshape(out_channel, in_channel // 4, 4)
    x = x[:, :, [0, 2, 1, 3]].transpose(1, 2).reshape(out_channel, in_channel)
    return x


def correct_unfold_norm_order(x):
    in_channel = x.shape[0]
    x = x.reshape(4, in_channel // 4)
    x = x[[0, 2, 1, 3], :].transpose(0, 1).reshape(in_channel)
    return x


def reverse_correct_unfold_norm_order(x):
    in_channel = x.shape[0]
    x = x.reshape(in_channel // 4, 4)
    x = x[:, [0, 2, 1, 3]].transpose(0, 1).reshape(in_channel)
    return x
def convert_upernet_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub):
    model_name_to_url = {
        "upernet-swin-tiny": "https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210531_112542-e380ad3e.pth",
        "upernet-swin-small": "https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210526_192015-ee2fff1c.pth",
        "upernet-swin-base": "https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K_20210531_125459-429057bf.pth",
        "upernet-swin-large": "https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k_20220318_091743-9ba68901.pth",
    }
    checkpoint_url = model_name_to_url[model_name]
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu", file_name=model_name)[
        "state_dict"
    ]

    for name, param in state_dict.items():
        print(name, param.shape)

    config = get_upernet_config(model_name)
    model = UperNetForSemanticSegmentation(config)
    model.eval()

    # replace "bn" => "batch_norm"
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        if "bn" in key:
            key = key.replace("bn", "batch_norm")
        state_dict[key] = val

    # rename keys
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config.backbone_config)

    # fix downsample parameters
    for key, value in state_dict.items():
        if "downsample" in key:
            if "reduction" in key:
                state_dict[key] = reverse_correct_unfold_reduction_order(value)
            if "norm" in key:
                state_dict[key] = reverse_correct_unfold_norm_order(value)

    model.load_state_dict(state_dict)

    # verify on image
    url = "https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg"
    image = Image.open(requests.get(url, stream=True).raw).convert("RGB")

    processor = SegformerImageProcessor()
    pixel_values = processor(image, return_tensors="pt").pixel_values

    with torch.no_grad():
        outputs = model(pixel_values)
        logits = outputs.logits

    print(logits.shape)
    print("First values of logits:", logits[0, 0, :3, :3])
    # assert values
    if model_name == "upernet-swin-tiny":
        expected_slice = torch.tensor(
            [[-7.5958, -7.5958, -7.4302], [-7.5958, -7.5958, -7.4302], [-7.4797, -7.4797, -7.3068]]
        )
    elif model_name == "upernet-swin-small":
        expected_slice = torch.tensor(
            [[-7.1921, -7.1921, -6.9532], [-7.1921, -7.1921, -6.9532], [-7.0908, -7.0908, -6.8534]]
        )
    elif model_name == "upernet-swin-base":
        expected_slice = torch.tensor(
            [[-6.5851, -6.5851, -6.4330], [-6.5851, -6.5851, -6.4330], [-6.4763, -6.4763, -6.3254]]
        )
    elif model_name == "upernet-swin-large":
        expected_slice = torch.tensor(
            [[-7.5297, -7.5297, -7.3802], [-7.5297, -7.5297, -7.3802], [-7.4044, -7.4044, -7.2586]]
        )
    print("Logits:", outputs.logits[0, 0, :3, :3])
    assert torch.allclose(outputs.logits[0, 0, :3, :3], expected_slice, atol=1e-4)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        print(f"""Saving model {model_name} to {pytorch_dump_folder_path}""")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"""Saving processor to {pytorch_dump_folder_path}""")
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print(f"""Pushing model and processor for {model_name} to hub""")
        model.push_to_hub(f"""openmmlab/{model_name}""")
        processor.push_to_hub(f"""openmmlab/{model_name}""")
if __name__ == "__main__":
lowerCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""upernet-swin-tiny""",
type=str,
choices=[f"""upernet-swin-{size}""" for size in ["""tiny""", """small""", """base""", """large"""]],
help="""Name of the Swin + UperNet model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
lowerCAmelCase = parser.parse_args()
convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub) | 126 |
"""simple docstring"""
# Author: OMKAR PATHAK, Nwachukwu Chidiebere
# Use a Python dictionary to construct the graph.
from __future__ import annotations
from pprint import pformat
from typing import Generic, TypeVar
T = TypeVar("T")


class GraphAdjacencyList(Generic[T]):
    """Adjacency-list implementation of a graph, supporting directed and undirected variants."""

    def __init__(self, directed: bool = True) -> None:
        self.adj_list: dict[T, list[T]] = {}  # dictionary of lists
        self.directed = directed

    def add_edge(self, source_vertex: T, destination_vertex: T) -> GraphAdjacencyList[T]:
        if not self.directed:  # For undirected graphs
            # if both source vertex and destination vertex are both present in the
            # adjacency list, add destination vertex to source vertex list of adjacent
            # vertices and add source vertex to destination vertex list of adjacent
            # vertices.
            if source_vertex in self.adj_list and destination_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
                self.adj_list[destination_vertex].append(source_vertex)
            # if only source vertex is present in adjacency list, add destination vertex
            # to source vertex list of adjacent vertices, then create a new vertex with
            # destination vertex as key and assign a list containing the source vertex
            # as its first adjacent vertex.
            elif source_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
                self.adj_list[destination_vertex] = [source_vertex]
            # if only destination vertex is present in adjacency list, add source vertex
            # to destination vertex list of adjacent vertices, then create a new vertex
            # with source vertex as key and assign a list containing the destination
            # vertex as its first adjacent vertex.
            elif destination_vertex in self.adj_list:
                self.adj_list[destination_vertex].append(source_vertex)
                self.adj_list[source_vertex] = [destination_vertex]
            # if both source vertex and destination vertex are not present in adjacency
            # list, create a new vertex with source vertex as key and assign a list
            # containing the destination vertex as its first adjacent vertex; also
            # create a new vertex with destination vertex as key and assign a list
            # containing the source vertex as its first adjacent vertex.
            else:
                self.adj_list[source_vertex] = [destination_vertex]
                self.adj_list[destination_vertex] = [source_vertex]
        else:  # For directed graphs
            # if both source vertex and destination vertex are present in adjacency
            # list, add destination vertex to source vertex list of adjacent vertices.
            if source_vertex in self.adj_list and destination_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
            # if only source vertex is present in adjacency list, add destination
            # vertex to source vertex list of adjacent vertices and create a new vertex
            # with destination vertex as key, which has no adjacent vertex
            elif source_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
                self.adj_list[destination_vertex] = []
            # if only destination vertex is present in adjacency list, create a new
            # vertex with source vertex as key and assign a list containing destination
            # vertex as first adjacent vertex
            elif destination_vertex in self.adj_list:
                self.adj_list[source_vertex] = [destination_vertex]
            # if both source vertex and destination vertex are not present in adjacency
            # list, create a new vertex with source vertex as key and a list containing
            # destination vertex as its first adjacent vertex. Then create a new vertex
            # with destination vertex as key, which has no adjacent vertex
            else:
                self.adj_list[source_vertex] = [destination_vertex]
                self.adj_list[destination_vertex] = []

        return self

    def __repr__(self) -> str:
        return pformat(self.adj_list)
| 126 | 1 |
from __future__ import annotations
def lowerCamelCase_ ( UpperCamelCase__ : int , UpperCamelCase__ : int ) -> tuple[int, int]:
"""simple docstring"""
if b == 0:
return (1, 0)
((__lowerCamelCase) , (__lowerCamelCase)) = extended_euclid(UpperCamelCase__ , a % b )
__lowerCamelCase = a // b
return (y, x - k * y)
def lowerCamelCase_ ( UpperCamelCase__ : int , UpperCamelCase__ : int , UpperCamelCase__ : int , UpperCamelCase__ : int ) -> int:
"""simple docstring"""
((__lowerCamelCase) , (__lowerCamelCase)) = extended_euclid(UpperCamelCase__ , UpperCamelCase__ )
__lowerCamelCase = na * na
__lowerCamelCase = ra * x * na + ra * y * na
return (n % m + m) % m
def lowerCamelCase_ ( UpperCamelCase__ : int , UpperCamelCase__ : int ) -> int:
"""simple docstring"""
((__lowerCamelCase) , (__lowerCamelCase)) = extended_euclid(UpperCamelCase__ , UpperCamelCase__ )
if b < 0:
__lowerCamelCase = (b % n + n) % n
return b
def lowerCamelCase_ ( UpperCamelCase__ : int , UpperCamelCase__ : int , UpperCamelCase__ : int , UpperCamelCase__ : int ) -> int:
"""simple docstring"""
__lowerCamelCase , __lowerCamelCase = invert_modulo(UpperCamelCase__ , UpperCamelCase__ ), invert_modulo(UpperCamelCase__ , UpperCamelCase__ )
__lowerCamelCase = na * na
__lowerCamelCase = ra * x * na + ra * y * na
return (n % m + m) % m
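

def _demo_chinese_remainder_theorem():
    # Editor-added illustrative check, not part of the original module:
    # x = 2 (mod 3) and x = 3 (mod 5) combine to x = 8 (mod 15).
    assert chinese_remainder_theorem(3, 2, 5, 3) == 8
    assert chinese_remainder_theorem2(3, 2, 5, 3) == 8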
if __name__ == "__main__":
from doctest import testmod
testmod(name="chinese_remainder_theorem", verbose=True)
testmod(name="chinese_remainder_theorem2", verbose=True)
testmod(name="invert_modulo", verbose=True)
testmod(name="extended_euclid", verbose=True)
| 348 |
ENERGY_CONVERSION = {
    "joule": 1.0,
    "kilojoule": 1_000,
    "megajoule": 1_000_000,
    "gigajoule": 1_000_000_000,
    "wattsecond": 1.0,
    "watthour": 3_600,
    "kilowatthour": 3_600_000,
    "newtonmeter": 1.0,
    "calorie_nutr": 4_186.8,
    "kilocalorie_nutr": 4_186_800.00,
    "electronvolt": 1.602_176_634e-19,
    "britishthermalunit_it": 1_055.055_85,
    "footpound": 1.355_818,
}


def energy_conversion(from_type: str, to_type: str, value: float) -> float:
    """Convert an energy value between the units listed in ENERGY_CONVERSION."""
    if to_type not in ENERGY_CONVERSION or from_type not in ENERGY_CONVERSION:
        msg = (
            f"Incorrect 'from_type' or 'to_type' value: {from_type!r}, {to_type!r}\n"
            f"Valid values are: {', '.join(ENERGY_CONVERSION)}"
        )
        raise ValueError(msg)
    return value * ENERGY_CONVERSION[from_type] / ENERGY_CONVERSION[to_type]
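

def _demo_energy_conversion():
    # Editor-added sanity check, not part of the original module:
    # one kilowatt-hour is 3.6 million joules.
    assert energy_conversion("kilowatthour", "joule", 1) == 3_600_000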
if __name__ == "__main__":
import doctest
doctest.testmod()
| 348 | 1 |
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class Blip2Processor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "BlipImageProcessor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, image_processor, tokenizer):
        tokenizer.return_token_type_ids = False
        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(
        self,
        images: ImageInput = None,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_token_type_ids: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
        if images is None and text is None:
            raise ValueError("You have to specify either images or text.")

        # Get only text
        if images is None:
            self.current_processor = self.tokenizer
            text_encoding = self.tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation,
                max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of,
                return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens,
                return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping,
                return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose,
                return_tensors=return_tensors, **kwargs,
            )
            return text_encoding

        # add pixel_values
        encoding_image_processor = self.image_processor(images, return_tensors=return_tensors)

        if text is not None:
            text_encoding = self.tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation,
                max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of,
                return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens,
                return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping,
                return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose,
                return_tensors=return_tensors, **kwargs,
            )
        else:
            text_encoding = None

        if text_encoding is not None:
            encoding_image_processor.update(text_encoding)

        return encoding_image_processor

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    # Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
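

def _demo_dedup_order():
    # Editor-added sketch of the dict.fromkeys idiom used in model_input_names
    # above: it deduplicates a list while preserving first-seen order.
    assert list(dict.fromkeys(["input_ids", "pixel_values", "input_ids"])) == ["input_ids", "pixel_values"]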
| 219 |
def binary_xor(a: int, b: int) -> str:
    """Return the bitwise XOR of two non-negative integers as a binary string."""
    if a < 0 or b < 0:
        raise ValueError("the value of both inputs must be positive")

    a_binary = str(bin(a))[2:]  # remove the leading "0b"
    b_binary = str(bin(b))[2:]  # remove the leading "0b"

    max_len = max(len(a_binary), len(b_binary))

    return "0b" + "".join(
        str(int(char_a != char_b))
        for char_a, char_b in zip(a_binary.zfill(max_len), b_binary.zfill(max_len))
    )
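

def _demo_binary_xor():
    # Editor-added illustrative check, not part of the original module:
    # 25 ^ 32 == 57, whose binary representation is 0b111001.
    assert binary_xor(25, 32) == "0b111001"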
if __name__ == "__main__":
import doctest
doctest.testmod()
| 219 | 1 |
'''simple docstring'''
import argparse
import os
import torch
from transformers import FlavaImageCodebook, FlavaImageCodebookConfig
def rreplace(s, old, new, occurrence):
    """Replace the last `occurrence` occurrences of `old` in `s` with `new`."""
    li = s.rsplit(old, occurrence)
    return new.join(li)


def count_parameters(state_dict):
    # encoder.embeddings are double copied in the original FLAVA checkpoint, so skip them
    return sum(param.float().sum() if "encoder.embeddings" not in key else 0 for key, param in state_dict.items())


def upgrade_state_dict(state_dict):
    upgrade = {}

    group_keys = ["group_1", "group_2", "group_3", "group_4"]
    for key, value in state_dict.items():
        for group_key in group_keys:
            if group_key in key:
                key = key.replace(f"{group_key}.", f"{group_key}.group.")

        if "res_path" in key:
            key = key.replace("res_path.", "res_path.path.")

        if key.endswith(".w"):
            key = rreplace(key, ".w", ".weight", 1)
        if key.endswith(".b"):
            key = rreplace(key, ".b", ".bias", 1)

        upgrade[key] = value.float()

    return upgrade
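

def _demo_rreplace():
    # Editor-added illustrative check, not part of the original script: only
    # the rightmost occurrence of ".w" is replaced, earlier ones are untouched.
    assert rreplace("blocks.w.path.w", ".w", ".weight", 1) == "blocks.w.path.weight"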
@torch.no_grad()
def convert_dalle_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path=None, save_checkpoint=True):
    from dall_e import Encoder

    encoder = Encoder()
    if os.path.exists(checkpoint_path):
        ckpt = torch.load(checkpoint_path)
    else:
        ckpt = torch.hub.load_state_dict_from_url(checkpoint_path)

    if isinstance(ckpt, Encoder):
        ckpt = ckpt.state_dict()
    encoder.load_state_dict(ckpt)

    if config_path is not None:
        config = FlavaImageCodebookConfig.from_pretrained(config_path)
    else:
        config = FlavaImageCodebookConfig()

    hf_model = FlavaImageCodebook(config).eval()
    state_dict = encoder.state_dict()

    hf_state_dict = upgrade_state_dict(state_dict)
    hf_model.load_state_dict(hf_state_dict)
    hf_state_dict = hf_model.state_dict()
    hf_count = count_parameters(hf_state_dict)
    state_dict_count = count_parameters(state_dict)

    assert torch.allclose(hf_count, state_dict_count, atol=1e-3)

    if save_checkpoint:
        hf_model.save_pretrained(pytorch_dump_folder_path)
    else:
        return hf_state_dict
if __name__ == "__main__":
a__ : Optional[Any] = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to flava checkpoint')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
a__ : Dict = parser.parse_args()
convert_dalle_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
| 243 |
'''simple docstring'''
import webbrowser
from sys import argv
from urllib.parse import parse_qs, quote
import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent
if __name__ == "__main__":
    query = "%20".join(argv[1:]) if len(argv) > 1 else quote(str(input("Search: ")))
print('Googling.....')
a__ : Optional[int] = f'''https://www.google.com/search?q={query}&num=100'''
    res = requests.get(
url,
headers={'User-Agent': str(UserAgent().random)},
)
try:
        link = (
BeautifulSoup(res.text, 'html.parser')
.find('div', attrs={'class': 'yuRUbf'})
.find('a')
.get('href')
)
except AttributeError:
        link = parse_qs(
BeautifulSoup(res.text, 'html.parser')
.find('div', attrs={'class': 'kCrYT'})
.find('a')
.get('href')
)['url'][0]
webbrowser.open(link)
| 243 | 1 |
def knapsack(weights: list, values: list, number_of_items: int, max_weight: int, index: int) -> int:
    """Recursive 0/1 knapsack: best total value using items from `index` onward."""
    if index == number_of_items:
        return 0
    ans1 = 0
    ans2 = 0
    # best value when the current item is skipped
    ans1 = knapsack(weights, values, number_of_items, max_weight, index + 1)
    if weights[index] <= max_weight:
        # best value when the current item is taken (if it fits)
        ans2 = values[index] + knapsack(
            weights, values, number_of_items, max_weight - weights[index], index + 1
        )
    return max(ans1, ans2)
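

def _demo_knapsack():
    # Editor-added illustrative check, not part of the original module: with
    # capacity 10, taking the items of weight 4 and 6 (values 7 and 9) is
    # optimal, giving a total value of 16.
    weights = [4, 6, 3]
    values = [7, 9, 5]
    assert knapsack(weights, values, len(weights), 10, 0) == 16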
if __name__ == "__main__":
import doctest
doctest.testmod()
| 62 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class ASTFeatureExtractor(metaclass=DummyObject):
    _backends = ["speech"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["speech"])


class Speech2TextFeatureExtractor(metaclass=DummyObject):
    _backends = ["speech"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["speech"])
| 134 | 0 |
"""simple docstring"""
from __future__ import annotations
def extended_euclid(a: int, b: int) -> tuple[int, int]:
    """Extended Euclid's algorithm: returns (x, y) such that a*x + b*y = gcd(a, b)."""
    if b == 0:
        return (1, 0)
    (x, y) = extended_euclid(b, a % b)
    k = a // b
    return (y, x - k * y)


def chinese_remainder_theorem(n1: int, r1: int, n2: int, r2: int) -> int:
    """Returns the unique x modulo n1*n2 with x = r1 (mod n1) and x = r2 (mod n2)."""
    (x, y) = extended_euclid(n1, n2)
    m = n1 * n2
    n = r2 * x * n1 + r1 * y * n2
    return (n % m + m) % m


def invert_modulo(a: int, n: int) -> int:
    """Returns the multiplicative inverse of a modulo n (a and n must be coprime)."""
    (b, x) = extended_euclid(a, n)
    if b < 0:
        b = (b % n + n) % n
    return b


def chinese_remainder_theorem2(n1: int, r1: int, n2: int, r2: int) -> int:
    """Same as chinese_remainder_theorem, but built on invert_modulo."""
    x, y = invert_modulo(n1, n2), invert_modulo(n2, n1)
    m = n1 * n2
    n = r2 * x * n1 + r1 * y * n2
    return (n % m + m) % m
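

def _demo_invert_modulo():
    # Editor-added illustrative check, not part of the original module:
    # 3 * 5 == 15 == 1 (mod 7), so 5 is the inverse of 3 modulo 7.
    assert invert_modulo(3, 7) == 5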
if __name__ == "__main__":
from doctest import testmod
testmod(name="""chinese_remainder_theorem""", verbose=True)
testmod(name="""chinese_remainder_theorem2""", verbose=True)
testmod(name="""invert_modulo""", verbose=True)
testmod(name="""extended_euclid""", verbose=True)
| 268 |
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)
class ConvNextImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        crop_pct: float = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 384}
        size = get_size_dict(size, default_to_square=False)

        self.do_resize = do_resize
        self.size = size
        # Default value set here for backwards compatibility where the value in config is None
        self.crop_pct = crop_pct if crop_pct is not None else 224 / 256
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        crop_pct: float,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"Size dictionary must contain 'shortest_edge' key. Got {size.keys()}")
        shortest_edge = size["shortest_edge"]

        if shortest_edge < 384:
            # maintain same ratio, resizing shortest edge to shortest_edge/crop_pct
            resize_shortest_edge = int(shortest_edge / crop_pct)
            resize_size = get_resize_output_image_size(image, size=resize_shortest_edge, default_to_square=False)
            image = resize(image=image, size=resize_size, resample=resample, data_format=data_format, **kwargs)
            # then crop to (shortest_edge, shortest_edge)
            return center_crop(image=image, size=(shortest_edge, shortest_edge), data_format=data_format, **kwargs)
        else:
            # warping (no cropping) when evaluated at 384 or larger
            return resize(
                image, size=(shortest_edge, shortest_edge), resample=resample, data_format=data_format, **kwargs
            )
    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        crop_pct: float = None,
        resample: PILImageResampling = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        crop_pct = crop_pct if crop_pct is not None else self.crop_pct
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and (size is None or resample is None):
            raise ValueError("Size and resample must be specified if do_resize is True.")

        if do_resize and size["shortest_edge"] < 384 and crop_pct is None:
            raise ValueError("crop_pct must be specified if size < 384.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, crop_pct=crop_pct, resample=resample) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
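

def _demo_crop_pct_math():
    # Editor-added arithmetic sketch, not part of the class above: with the
    # default crop_pct of 224/256 and shortest_edge=224, the image is first
    # resized so its short side is 256, then center-cropped down to 224.
    shortest_edge, crop_pct = 224, 224 / 256
    assert int(shortest_edge / crop_pct) == 256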
| 268 | 1 |
'''simple docstring'''
import os
from glob import glob
import imageio
import torch
import torchvision
import wandb
from img_processing import custom_to_pil, loop_post_process, preprocess, preprocess_vqgan
from loaders import load_vqgan
from PIL import Image
from torch import nn
from transformers import CLIPModel, CLIPTokenizerFast
from utils import get_device, get_timestamp, show_pil
class ProcessorGradientFlow:
    """
    Wraps the CLIP tokenizer together with a differentiable image preprocessing
    pipeline (resize, center crop, normalize) so gradients can flow through the
    image inputs, which the stock CLIP processor does not allow.
    """

    def __init__(self, device: str = "cpu", clip_model: str = "openai/clip-vit-large-patch14") -> None:
        self.device = device
        self.tokenizer = CLIPTokenizerFast.from_pretrained(clip_model)
        self.image_mean = [0.48145466, 0.4578275, 0.40821073]
        self.image_std = [0.26862954, 0.26130258, 0.27577711]
        self.normalize = torchvision.transforms.Normalize(self.image_mean, self.image_std)
        self.resize = torchvision.transforms.Resize(224)
        self.center_crop = torchvision.transforms.CenterCrop(224)

    def preprocess_img(self, images):
        images = self.resize(images)
        images = self.center_crop(images)
        images = self.normalize(images)
        return images

    def __call__(self, text=None, images=None, **kwargs):
        encoding = self.tokenizer(text=text, **kwargs)
        encoding["pixel_values"] = self.preprocess_img(images)
        encoding = {key: value.to(self.device) for (key, value) in encoding.items()}
        return encoding
return encoding
class VQGAN_CLIP(nn.Module):
    def __init__(
        self,
        iterations=10,
        lr=0.01,
        vqgan=None,
        vqgan_config=None,
        vqgan_checkpoint=None,
        clip=None,
        clip_preprocessor=None,
        device=None,
        log=False,
        save_vector=True,
        return_val="image",
        quantize=True,
        save_intermediate=False,
        show_intermediate=False,
        make_grid=False,
    ) -> None:
        super().__init__()
        self.latent = None
        self.device = device if device else get_device()
        if vqgan:
            self.vqgan = vqgan
        else:
            self.vqgan = load_vqgan(self.device, conf_path=vqgan_config, ckpt_path=vqgan_checkpoint)
        self.vqgan.eval()
        if clip:
            self.clip = clip
        else:
            self.clip = CLIPModel.from_pretrained("openai/clip-vit-base-patch32")
        self.clip.to(self.device)
        self.clip_preprocessor = ProcessorGradientFlow(device=self.device)

        self.iterations = iterations
        self.lr = lr
        self.log = log
        self.make_grid = make_grid
        self.return_val = return_val
        self.quantize = quantize
        self.latent_dim = self.vqgan.decoder.z_shape
    def make_animation(self, input_path=None, output_path=None, total_duration=5, extend_frames=True):
        images = []
        if output_path is None:
            output_path = "./animation.gif"
        if input_path is None:
            input_path = self.save_path
        paths = sorted(glob(input_path + "/*"))
        if not len(paths):
            raise ValueError(
                "No images found in save path, aborting (did you pass save_intermediate=True to the generate"
                " function?)"
            )
        if len(paths) == 1:
            print("Only one image found in save path, (did you pass save_intermediate=True to the generate function?)")
        frame_duration = total_duration / len(paths)
        durations = [frame_duration] * len(paths)
        if extend_frames:
            # hold the first and last frames a bit longer
            durations[0] = 1.5
            durations[-1] = 3
        for file_name in paths:
            if file_name.endswith(".png"):
                images.append(imageio.imread(file_name))
        imageio.mimsave(output_path, images, duration=durations)
        print(f"""gif saved to {output_path}""")
    def _get_latent(self, path=None, img=None):
        if not (path or img):
            raise ValueError("Input either path or tensor")
        if img is not None:
            raise NotImplementedError
        x = preprocess(Image.open(path), target_image_size=256).to(self.device)
        x_processed = preprocess_vqgan(x)
        z, *_ = self.vqgan.encode(x_processed)
        return z
    def _add_vector(self, transform_vector):
        """Add a transform vector to the stored latent and decode through the VQGAN."""
        base_latent = self.latent.detach().requires_grad_()
        trans_latent = base_latent + transform_vector
        if self.quantize:
            z_q, *_ = self.vqgan.quantize(trans_latent)
        else:
            z_q = trans_latent
        return self.vqgan.decode(z_q)
    def _get_clip_similarity(self, prompts, image, weights=None):
        clip_inputs = self.clip_preprocessor(text=prompts, images=image, return_tensors="pt", padding=True)
        clip_outputs = self.clip(**clip_inputs)
        similarity_logits = clip_outputs.logits_per_image
        if weights is not None:
            similarity_logits = similarity_logits * weights
        return similarity_logits.sum()
    def _get_CLIP_loss(self, pos_prompts, neg_prompts, image):
        pos_logits = self._get_clip_similarity(pos_prompts["prompts"], image, weights=(1 / pos_prompts["weights"]))
        if neg_prompts:
            neg_logits = self._get_clip_similarity(neg_prompts["prompts"], image, weights=neg_prompts["weights"])
        else:
            neg_logits = torch.tensor([1], device=self.device)
        loss = -torch.log(pos_logits) + torch.log(neg_logits)
        return loss
    def _optimize_CLIP(self, original_img, pos_prompts, neg_prompts):
        vector = torch.randn_like(self.latent, requires_grad=True, device=self.device)
        optim = torch.optim.Adam([vector], lr=self.lr)

        for i in range(self.iterations):
            optim.zero_grad()
            transformed_img = self._add_vector(vector)
            processed_img = loop_post_process(transformed_img)
            clip_loss = self._get_CLIP_loss(pos_prompts, neg_prompts, processed_img)
            print("CLIP loss", clip_loss)
            if self.log:
                wandb.log({"CLIP Loss": clip_loss})
            clip_loss.backward(retain_graph=True)
            optim.step()
            if self.return_val == "image":
                yield custom_to_pil(transformed_img[0])
            else:
                yield vector
    def _init_logging(self, positive_prompts, negative_prompts, image_path):
        wandb.init(reinit=True, project="face-editor")
        wandb.config.update({"Positive Prompts": positive_prompts})
        wandb.config.update({"Negative Prompts": negative_prompts})
        wandb.config.update({"lr": self.lr, "iterations": self.iterations})
        if image_path:
            image = Image.open(image_path)
            image = image.resize((256, 256))
            wandb.log("Original Image", wandb.Image(image))
    def process_prompts(self, prompts):
        if not prompts:
            return []
        processed_prompts = []
        weights = []
        if isinstance(prompts, str):
            prompts = [prompt.strip() for prompt in prompts.split("|")]
        for prompt in prompts:
            if isinstance(prompt, (tuple, list)):
                processed_prompt = prompt[0]
                weight = float(prompt[1])
            elif ":" in prompt:
                processed_prompt, weight = prompt.split(":")
                weight = float(weight)
            else:
                processed_prompt = prompt
                weight = 1.0
            processed_prompts.append(processed_prompt)
            weights.append(weight)
        return {
            "prompts": processed_prompts,
            "weights": torch.tensor(weights, device=self.device),
        }
    def generate(
        self,
        pos_prompts,
        neg_prompts=None,
        image_path=None,
        show_intermediate=True,
        save_intermediate=False,
        show_final=True,
        save_final=True,
        save_path=None,
    ):
        if image_path:
            self.latent = self._get_latent(image_path)
        else:
            self.latent = torch.randn(self.latent_dim, device=self.device)
        if self.log:
            self._init_logging(pos_prompts, neg_prompts, image_path)

        assert pos_prompts, "You must provide at least one positive prompt."
        pos_prompts = self.process_prompts(pos_prompts)
        neg_prompts = self.process_prompts(neg_prompts)
        if save_final and save_path is None:
            save_path = os.path.join("./outputs/", "_".join(pos_prompts["prompts"]))
            if not os.path.exists(save_path):
                os.makedirs(save_path)
            else:
                save_path = save_path + "_" + get_timestamp()
                os.makedirs(save_path)
        self.save_path = save_path

        original_img = self.vqgan.decode(self.latent)[0]
        if show_intermediate:
            print("Original Image")
            show_pil(custom_to_pil(original_img))

        original_img = loop_post_process(original_img)
        for iter, transformed_img in enumerate(self._optimize_CLIP(original_img, pos_prompts, neg_prompts)):
            if show_intermediate:
                show_pil(transformed_img)
            if save_intermediate:
                transformed_img.save(os.path.join(self.save_path, f"""iter_{iter:03d}.png"""))
            if self.log:
                wandb.log({"Image": wandb.Image(transformed_img)})
        if show_final:
            show_pil(transformed_img)
        if save_final:
            transformed_img.save(os.path.join(self.save_path, f"""iter_{iter:03d}_final.png"""))
| 28 |
'''simple docstring'''
import json
import os
import tempfile
import datasets
from utils import generate_example_dataset, get_duration
SPEED_TEST_N_EXAMPLES = 50_000
SMALL_TEST = 5_000

RESULTS_BASEPATH, RESULTS_FILENAME = os.path.split(__file__)
RESULTS_FILE_PATH = os.path.join(RESULTS_BASEPATH, "results", RESULTS_FILENAME.replace(".py", ".json"))
@get_duration
def read(dataset: datasets.Dataset, length):
    for i in range(length):
        _ = dataset[i]


@get_duration
def read_batch(dataset: datasets.Dataset, length, batch_size):
    for i in range(0, len(dataset), batch_size):
        _ = dataset[i : i + batch_size]


@get_duration
def read_formatted(dataset: datasets.Dataset, length, type):
    with dataset.formatted_as(type=type):
        for i in range(length):
            _ = dataset[i]


@get_duration
def read_formatted_batch(dataset: datasets.Dataset, length, batch_size, type):
    with dataset.formatted_as(type=type):
        for i in range(0, length, batch_size):
            _ = dataset[i : i + batch_size]
def benchmark_iterating():
    times = {"num examples": SPEED_TEST_N_EXAMPLES}
    functions = [
        (read, {"length": SMALL_TEST}),
        (read, {"length": SPEED_TEST_N_EXAMPLES}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 10}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 100}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 1_000}),
        (read_formatted, {"type": "numpy", "length": SMALL_TEST}),
        (read_formatted, {"type": "pandas", "length": SMALL_TEST}),
        (read_formatted, {"type": "torch", "length": SMALL_TEST}),
        (read_formatted, {"type": "tensorflow", "length": SMALL_TEST}),
        (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 10}),
        (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 1_000}),
    ]

    functions_shuffled = [
        (read, {"length": SMALL_TEST}),
        (read, {"length": SPEED_TEST_N_EXAMPLES}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 10}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 100}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 1_000}),
        (read_formatted, {"type": "numpy", "length": SMALL_TEST}),
        (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 10}),
        (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 1_000}),
    ]
    with tempfile.TemporaryDirectory() as tmp_dir:
        print("generating dataset")
        features = datasets.Features(
            {"list": datasets.Sequence(datasets.Value("float32")), "numbers": datasets.Value("float32")}
        )
        dataset = generate_example_dataset(
            os.path.join(tmp_dir, "dataset.arrow"),
            features,
            num_examples=SPEED_TEST_N_EXAMPLES,
            seq_shapes={"list": (100,)},
        )
        print("first set of iterations")
        for func, kwargs in functions:
            print(func.__name__, str(kwargs))
            times[func.__name__ + " " + " ".join(str(v) for v in kwargs.values())] = func(dataset, **kwargs)

        print("shuffling dataset")
        dataset = dataset.shuffle()
        print("Second set of iterations (after shuffling)")
        for func, kwargs in functions_shuffled:
            print("shuffled ", func.__name__, str(kwargs))
            times["shuffled " + func.__name__ + " " + " ".join(str(v) for v in kwargs.values())] = func(
                dataset, **kwargs
            )

    with open(RESULTS_FILE_PATH, "wb") as f:
        f.write(json.dumps(times).encode("utf-8"))


if __name__ == "__main__":  # useful to run the profiler
    benchmark_iterating()
| 28 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {"configuration_vit": ["VIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTConfig", "ViTOnnxConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_vit"] = ["ViTFeatureExtractor"]
    _import_structure["image_processing_vit"] = ["ViTImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_vit"] = [
        "VIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ViTForImageClassification",
        "ViTForMaskedImageModeling",
        "ViTModel",
        "ViTPreTrainedModel",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_vit"] = [
        "TFViTForImageClassification",
        "TFViTModel",
        "TFViTPreTrainedModel",
    ]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_vit"] = [
        "FlaxViTForImageClassification",
        "FlaxViTModel",
        "FlaxViTPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_vit import VIT_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTConfig, ViTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_vit import ViTFeatureExtractor
from .image_processing_vit import ViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit import (
VIT_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTForImageClassification,
ViTForMaskedImageModeling,
ViTModel,
ViTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit import TFViTForImageClassification, TFViTModel, TFViTPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel, FlaxViTPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 363 |
'''simple docstring'''
import dataclasses
import json
import sys
import types
from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser, ArgumentTypeError
from copy import copy
from enum import Enum
from inspect import isclass
from pathlib import Path
from typing import Any, Callable, Dict, Iterable, List, Literal, NewType, Optional, Tuple, Union, get_type_hints
import yaml
DataClass = NewType("DataClass", Any)
DataClassType = NewType("DataClassType", Any)
def string_to_bool(v) -> bool:
    if isinstance(v, bool):
        return v
    if v.lower() in ("yes", "true", "t", "y", "1"):
        return True
    elif v.lower() in ("no", "false", "f", "n", "0"):
        return False
    else:
        raise ArgumentTypeError(
            f"Truthy value expected: got {v} but expected one of yes/no, true/false, t/f, y/n, 1/0 (case insensitive)."
        )
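

def _demo_string_to_bool():
    # Editor-added illustrative check of the truthy/falsy parsing above,
    # not part of the original module.
    assert string_to_bool("yes") is True and string_to_bool("0") is False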
def make_choice_type_function(choices: list) -> Callable[[str], Any]:
    """Maps each choice to its string representation so argparse can parse it back to the original type."""
    str_to_choice = {str(choice): choice for choice in choices}
    return lambda arg: str_to_choice.get(arg, arg)
def HfArg(
    *,
    aliases: Union[str, List[str]] = None,
    help: str = None,
    default: Any = dataclasses.MISSING,
    default_factory: Callable[[], Any] = dataclasses.MISSING,
    metadata: dict = None,
    **kwargs,
) -> dataclasses.Field:
    if metadata is None:
        # Important, don't use as default param in function signature because dict is mutable and shared across function calls
        metadata = {}
    if aliases is not None:
        metadata["aliases"] = aliases
    if help is not None:
        metadata["help"] = help

    return dataclasses.field(metadata=metadata, default=default, default_factory=default_factory, **kwargs)
class HfArgumentParser(ArgumentParser):
    """Subclass of `argparse.ArgumentParser` that uses type hints on dataclasses to generate arguments."""

    dataclass_types: Iterable[DataClassType]

    def __init__(self, dataclass_types: Union[DataClassType, Iterable[DataClassType]], **kwargs):
        if "formatter_class" not in kwargs:
            kwargs["formatter_class"] = ArgumentDefaultsHelpFormatter
        super().__init__(**kwargs)
        if dataclasses.is_dataclass(dataclass_types):
            dataclass_types = [dataclass_types]
        self.dataclass_types = list(dataclass_types)
        for dtype in self.dataclass_types:
            self._add_dataclass_arguments(dtype)
    @staticmethod
    def _parse_dataclass_field(parser: ArgumentParser, field: dataclasses.Field):
        field_name = f"--{field.name}"
        kwargs = field.metadata.copy()
        # field.metadata is not used at all by Data Classes,
        # it is provided as a third-party extension mechanism.
        if isinstance(field.type, str):
            raise RuntimeError(
                "Unresolved type detected, which should have been done with the help of "
                "`typing.get_type_hints` method by default"
            )

        aliases = kwargs.pop("aliases", [])
        if isinstance(aliases, str):
            aliases = [aliases]

        origin_type = getattr(field.type, "__origin__", field.type)
        if origin_type is Union or (hasattr(types, "UnionType") and isinstance(origin_type, types.UnionType)):
            if str not in field.type.__args__ and (
                len(field.type.__args__) != 2 or type(None) not in field.type.__args__
            ):
                raise ValueError(
                    "Only `Union[X, NoneType]` (i.e., `Optional[X]`) is allowed for `Union` because"
                    " the argument parser only supports one type per argument."
                    f" Problem encountered in field '{field.name}'."
                )
            if type(None) not in field.type.__args__:
                # filter `str` in Union
                field.type = field.type.__args__[0] if field.type.__args__[1] == str else field.type.__args__[1]
                origin_type = getattr(field.type, "__origin__", field.type)
            elif bool not in field.type.__args__:
                # filter `NoneType` in Union (except for `Union[bool, NoneType]`)
                field.type = (
                    field.type.__args__[0] if isinstance(None, field.type.__args__[1]) else field.type.__args__[1]
                )
                origin_type = getattr(field.type, "__origin__", field.type)

        # A variable to store kwargs for a boolean field, if needed
        # so that we can init a `no_*` complement argument (see below)
        bool_kwargs = {}
        if origin_type is Literal or (isinstance(field.type, type) and issubclass(field.type, Enum)):
            if origin_type is Literal:
                kwargs["choices"] = field.type.__args__
            else:
                kwargs["choices"] = [x.value for x in field.type]

            kwargs["type"] = make_choice_type_function(kwargs["choices"])

            if field.default is not dataclasses.MISSING:
                kwargs["default"] = field.default
            else:
                kwargs["required"] = True
        elif field.type is bool or field.type == Optional[bool]:
            # Copy the currect kwargs to use to instantiate a `no_*` complement argument below.
            # We do not initialize it here because the `no_*` alternative must be instantiated after the real argument
            bool_kwargs = copy(kwargs)

            # Hack because type=bool in argparse does not behave as we want.
            kwargs["type"] = string_to_bool
            if field.type is bool or (field.default is not None and field.default is not dataclasses.MISSING):
                # Default value is False if we have no default when of type bool.
                default = False if field.default is dataclasses.MISSING else field.default
                # This is the value that will get picked if we don't include --field_name in any way
                kwargs["default"] = default
                # This tells argparse we accept 0 or 1 value after --field_name
                kwargs["nargs"] = "?"
                # This is the value that will get picked if we do --field_name (without value)
                kwargs["const"] = True
        elif isclass(origin_type) and issubclass(origin_type, list):
            kwargs["type"] = field.type.__args__[0]
            kwargs["nargs"] = "+"
            if field.default_factory is not dataclasses.MISSING:
                kwargs["default"] = field.default_factory()
            elif field.default is dataclasses.MISSING:
                kwargs["required"] = True
        else:
            kwargs["type"] = field.type
            if field.default is not dataclasses.MISSING:
                kwargs["default"] = field.default
            elif field.default_factory is not dataclasses.MISSING:
                kwargs["default"] = field.default_factory()
            else:
                kwargs["required"] = True
        parser.add_argument(field_name, *aliases, **kwargs)

        # Add a complement `no_*` argument for a boolean field AFTER the initial field has already been added.
        # Order is important for arguments with the same destination!
        # We use a copy of earlier kwargs because the original kwargs have changed a lot before reaching down
        # here and we do not need those changes/additional keys.
        if field.default is True and (field.type is bool or field.type == Optional[bool]):
            bool_kwargs["default"] = False
            parser.add_argument(f"--no_{field.name}", action="store_false", dest=field.name, **bool_kwargs)
    def _add_dataclass_arguments(self, dtype: DataClassType):
        if hasattr(dtype, "_argument_group_name"):
            parser = self.add_argument_group(dtype._argument_group_name)
        else:
            parser = self

        try:
            type_hints: Dict[str, type] = get_type_hints(dtype)
        except NameError:
            raise RuntimeError(
                f"Type resolution failed for {dtype}. Try declaring the class in global scope or "
                "removing line of `from __future__ import annotations` which opts in Postponed "
                "Evaluation of Annotations (PEP 563)"
            )
        except TypeError as ex:
            # Remove this block when we drop Python 3.9 support
            if sys.version_info[:2] < (3, 10) and "unsupported operand type(s) for |" in str(ex):
                python_version = ".".join(map(str, sys.version_info[:3]))
                raise RuntimeError(
                    f"Type resolution failed for {dtype} on Python {python_version}. Try removing "
                    "line of `from __future__ import annotations` which opts in union types as "
                    "`X | Y` (PEP 604) via Postponed Evaluation of Annotations (PEP 563). To "
                    "support Python versions that lower than 3.10, you need to use "
                    "`typing.Union[X, Y]` instead of `X | Y` and `typing.Optional[X]` instead of "
                    "`X | None`."
                ) from ex
            raise

        for field in dataclasses.fields(dtype):
            if not field.init:
                continue
            field.type = type_hints[field.name]
            self._parse_dataclass_field(parser, field)
    def parse_args_into_dataclasses(
        self,
        args=None,
        return_remaining_strings=False,
        look_for_args_file=True,
        args_filename=None,
        args_file_flag=None,
    ) -> Tuple[DataClass, ...]:
        if args_file_flag or args_filename or (look_for_args_file and len(sys.argv)):
            args_files = []

            if args_filename:
                args_files.append(Path(args_filename))
            elif look_for_args_file and len(sys.argv):
                args_files.append(Path(sys.argv[0]).with_suffix(".args"))

            # args files specified via command line flag should overwrite default args files so we add them last
            if args_file_flag:
                # Create special parser just to extract the args_file_flag values
                args_file_parser = ArgumentParser()
                args_file_parser.add_argument(args_file_flag, type=str, action="append")

                # Use only remaining args for further parsing (remove the args_file_flag)
                cfg, args = args_file_parser.parse_known_args(args=args)
                cmd_args_file_paths = vars(cfg).get(args_file_flag.lstrip("-"), None)

                if cmd_args_file_paths:
                    args_files.extend([Path(p) for p in cmd_args_file_paths])

            file_args = []
            for args_file in args_files:
                if args_file.exists():
                    file_args += args_file.read_text().split()

            # in case of duplicate arguments the last one has precedence
            # args specified via the command line should overwrite args from files, so we add them last
            args = file_args + args if args is not None else file_args + sys.argv[1:]

        namespace, remaining_args = self.parse_known_args(args=args)
        outputs = []
        for dtype in self.dataclass_types:
            keys = {f.name for f in dataclasses.fields(dtype) if f.init}
            inputs = {k: v for k, v in vars(namespace).items() if k in keys}
            for k in keys:
                delattr(namespace, k)
            obj = dtype(**inputs)
            outputs.append(obj)
        if len(namespace.__dict__) > 0:
            # additional namespace.
            outputs.append(namespace)
        if return_remaining_strings:
            return (*outputs, remaining_args)
        else:
            if remaining_args:
                raise ValueError(f"Some specified arguments are not used by the HfArgumentParser: {remaining_args}")

            return (*outputs,)
def lowerCamelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ = False ):
"""simple docstring"""
_snake_case = set(args.keys() )
_snake_case = []
for dtype in self.dataclass_types:
_snake_case = {f.name for f in dataclasses.fields(lowerCAmelCase_ ) if f.init}
_snake_case = {k: v for k, v in args.items() if k in keys}
unused_keys.difference_update(inputs.keys() )
_snake_case = dtype(**lowerCAmelCase_ )
outputs.append(lowerCAmelCase_ )
if not allow_extra_keys and unused_keys:
raise ValueError(F'Some keys are not used by the HfArgumentParser: {sorted(lowerCAmelCase_ )}' )
return tuple(lowerCAmelCase_ )
def lowerCamelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ = False ):
"""simple docstring"""
with open(Path(lowerCAmelCase_ ) , encoding='utf-8' ) as open_json_file:
_snake_case = json.loads(open_json_file.read() )
_snake_case = self.parse_dict(lowerCAmelCase_ , allow_extra_keys=lowerCAmelCase_ )
return tuple(lowerCAmelCase_ )
def lowerCamelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ = False ):
"""simple docstring"""
_snake_case = self.parse_dict(yaml.safe_load(Path(lowerCAmelCase_ ).read_text() ) , allow_extra_keys=lowerCAmelCase_ )
return tuple(lowerCAmelCase_ )
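    # Illustrative usage of the entry points above (`MyArgs` is a hypothetical
    # dataclass, not part of this file):
    #   parser = HfArgumentParser(MyArgs)
    #   (my_args,) = parser.parse_dict({"learning_rate": 3e-5})
    #   (my_args,) = parser.parse_json_file("args.json")
    #   (my_args,) = parser.parse_yaml_file("args.yaml")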
| 160 | 0 |
from __future__ import annotations
def extended_euclid( a , b )-> tuple[int, int]:
    '''simple docstring'''
    if b == 0:
        return (1, 0)
    (x , y) = extended_euclid(b , a % b )
    k = a // b
    return (y, x - k * y)
def chinese_remainder_theorem( n1 , r1 , n2 , r2 )-> int:
    '''simple docstring'''
    (x , y) = extended_euclid(n1 , n2 )
    m = n1 * n2
    n = r2 * x * n1 + r1 * y * n2
    return (n % m + m) % m
def invert_modulo( a , n )-> int:
    '''simple docstring'''
    (b , x) = extended_euclid(a , n )
    if b < 0:
        b = (b % n + n) % n
    return b
def chinese_remainder_theorem2( n1 , r1 , n2 , r2 )-> int:
    '''simple docstring'''
    x , y = invert_modulo(n1 , n2 ), invert_modulo(n2 , n1 )
    m = n1 * n2
    n = r2 * x * n1 + r1 * y * n2
    return (n % m + m) % m
if __name__ == "__main__":
from doctest import testmod
testmod(name='''chinese_remainder_theorem''', verbose=True)
testmod(name='''chinese_remainder_theorem2''', verbose=True)
testmod(name='''invert_modulo''', verbose=True)
testmod(name='''extended_euclid''', verbose=True)
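    # Quick sanity check of both CRT implementations (values chosen here for
    # illustration, not from the original file): the unique n below 5 * 7 with
    # n % 5 == 1 and n % 7 == 3 is 31.
    assert chinese_remainder_theorem(5, 1, 7, 3) == 31
    assert chinese_remainder_theorem2(5, 1, 7, 3) == 31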
| 348 | from dataclasses import asdict, dataclass
from typing import Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
# TODO Update this
ESM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''facebook/esm-1b''': '''https://huggingface.co/facebook/esm-1b/resolve/main/config.json''',
# See all ESM models at https://huggingface.co/models?filter=esm
}
class EsmConfig( PretrainedConfig ):
    model_type = """esm"""
    def __init__( self , vocab_size=None , mask_token_id=None , pad_token_id=None , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=1026 , initializer_range=0.02 , layer_norm_eps=1e-12 , position_embedding_type="absolute" , use_cache=True , emb_layer_norm_before=None , token_dropout=False , is_folding_model=False , esmfold_config=None , vocab_list=None , **kwargs , ):
        '''simple docstring'''
        super().__init__(pad_token_id=pad_token_id , mask_token_id=mask_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.emb_layer_norm_before = emb_layer_norm_before
        self.token_dropout = token_dropout
        self.is_folding_model = is_folding_model
        if is_folding_model:
            if esmfold_config is None:
                logger.info('''No esmfold_config supplied for folding model, using default values.''' )
                esmfold_config = EsmFoldConfig()
            elif isinstance(esmfold_config , dict ):
                esmfold_config = EsmFoldConfig(**esmfold_config )
            self.esmfold_config = esmfold_config
            if vocab_list is None:
                logger.warning('''No vocab_list supplied for folding model, assuming the ESM-2 vocabulary!''' )
                self.vocab_list = get_default_vocab_list()
            else:
                self.vocab_list = vocab_list
        else:
            self.esmfold_config = None
            self.vocab_list = None
        if self.esmfold_config is not None and getattr(self.esmfold_config , '''use_esm_attn_map''' , False ):
            raise ValueError('''The HuggingFace port of ESMFold does not support use_esm_attn_map at this time!''' )
    def to_dict( self ):
        '''simple docstring'''
        output = super().to_dict()
        if isinstance(self.esmfold_config , EsmFoldConfig ):
            output['''esmfold_config'''] = self.esmfold_config.to_dict()
        return output
@dataclass
class EsmFoldConfig:
    esm_type: str = None
    fp16_esm: bool = True
    use_esm_attn_map: bool = False
    esm_ablate_pairwise: bool = False
    esm_ablate_sequence: bool = False
    esm_input_dropout: float = 0
    embed_aa: bool = True
    bypass_lm: bool = False
    lddt_head_hid_dim: int = 128
    trunk: "TrunkConfig" = None
    def __post_init__( self ):
        '''simple docstring'''
        if self.trunk is None:
            self.trunk = TrunkConfig()
        elif isinstance(self.trunk , dict ):
            self.trunk = TrunkConfig(**self.trunk )
    def to_dict( self ):
        '''simple docstring'''
        output = asdict(self )
        output['''trunk'''] = self.trunk.to_dict()
        return output
@dataclass
class TrunkConfig:
    num_blocks: int = 48
    sequence_state_dim: int = 1024
    pairwise_state_dim: int = 128
    sequence_head_width: int = 32
    pairwise_head_width: int = 32
    position_bins: int = 32
    dropout: float = 0
    layer_drop: float = 0
    cpu_grad_checkpoint: bool = False
    max_recycles: int = 4
    chunk_size: Optional[int] = 128
    structure_module: "StructureModuleConfig" = None
    def __post_init__( self ):
        '''simple docstring'''
        if self.structure_module is None:
            self.structure_module = StructureModuleConfig()
        elif isinstance(self.structure_module , dict ):
            self.structure_module = StructureModuleConfig(**self.structure_module )
if self.max_recycles <= 0:
raise ValueError(f'''`max_recycles` should be positive, got {self.max_recycles}.''' )
if self.sequence_state_dim % self.sequence_state_dim != 0:
raise ValueError(
'''`sequence_state_dim` should be a round multiple of `sequence_state_dim`, got'''
f''' {self.sequence_state_dim} and {self.sequence_state_dim}.''' )
if self.pairwise_state_dim % self.pairwise_state_dim != 0:
raise ValueError(
'''`pairwise_state_dim` should be a round multiple of `pairwise_state_dim`, got'''
f''' {self.pairwise_state_dim} and {self.pairwise_state_dim}.''' )
        sequence_num_heads = self.sequence_state_dim // self.sequence_head_width
        pairwise_num_heads = self.pairwise_state_dim // self.pairwise_head_width
if self.sequence_state_dim != sequence_num_heads * self.sequence_head_width:
raise ValueError(
'''`sequence_state_dim` should be equal to `sequence_num_heads * sequence_head_width, got'''
f''' {self.sequence_state_dim} != {sequence_num_heads} * {self.sequence_head_width}.''' )
if self.pairwise_state_dim != pairwise_num_heads * self.pairwise_head_width:
raise ValueError(
'''`pairwise_state_dim` should be equal to `pairwise_num_heads * pairwise_head_width, got'''
f''' {self.pairwise_state_dim} != {pairwise_num_heads} * {self.pairwise_head_width}.''' )
if self.pairwise_state_dim % 2 != 0:
raise ValueError(f'''`pairwise_state_dim` should be even, got {self.pairwise_state_dim}.''' )
if self.dropout >= 0.4:
raise ValueError(f'''`dropout` should not be greater than 0.4, got {self.dropout}.''' )
    def to_dict( self ):
        '''simple docstring'''
        output = asdict(self )
        output['''structure_module'''] = self.structure_module.to_dict()
        return output
@dataclass
class StructureModuleConfig:
    sequence_dim: int = 384
    pairwise_dim: int = 128
    ipa_dim: int = 16
    resnet_dim: int = 128
    num_heads_ipa: int = 12
    num_qk_points: int = 4
    num_v_points: int = 8
    dropout_rate: float = 0.1
    num_blocks: int = 8
    num_transition_layers: int = 1
    num_resnet_blocks: int = 2
    num_angles: int = 7
    trans_scale_factor: int = 10
    epsilon: float = 1e-8
    inf: float = 1e5
    def to_dict( self ):
        '''simple docstring'''
        return asdict(self )
def get_default_vocab_list( ):
'''simple docstring'''
return (
"<cls>",
"<pad>",
"<eos>",
"<unk>",
"L",
"A",
"G",
"V",
"S",
"E",
"R",
"T",
"I",
"D",
"P",
"K",
"Q",
"N",
"F",
"Y",
"M",
"H",
"W",
"C",
"X",
"B",
"U",
"Z",
"O",
".",
"-",
"<null_1>",
"<mask>",
)
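# Quick self-check (illustrative, not part of the original file): the default
# ESM-2 vocabulary defined above has 33 tokens.
assert len(get_default_vocab_list() ) == 33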
| 348 | 1 |
from __future__ import annotations
from collections.abc import Iterator
from typing import Generic, TypeVar
T = TypeVar("""T""")
class Node( Generic[T] ):
    """simple docstring"""
    def __init__( self , data : T ) -> None:
        """simple docstring"""
        self.data = data
        self.next : Node[T] | None = None
    def __str__( self ) -> str:
        """simple docstring"""
        return f'''{self.data}'''
class Stack( Generic[T] ):
    """simple docstring"""
    def __init__( self ) -> None:
        """simple docstring"""
        self.top : Node[T] | None = None
    def __iter__( self ) -> Iterator[T]:
        """simple docstring"""
        node = self.top
        while node:
            yield node.data
            node = node.next
    def __str__( self ) -> str:
        """simple docstring"""
        return "->".join([str(item ) for item in self] )
    def __len__( self ) -> int:
        """simple docstring"""
        return len(tuple(iter(self ) ) )
    def is_empty( self ) -> bool:
        """simple docstring"""
        return self.top is None
    def push( self , item : T ) -> None:
        """simple docstring"""
        node = Node(item )
        if not self.is_empty():
            node.next = self.top
        self.top = node
    def pop( self ) -> T:
        """simple docstring"""
        if self.is_empty():
            raise IndexError("""pop from empty stack""" )
        assert isinstance(self.top , Node )
        pop_node = self.top
        self.top = self.top.next
        return pop_node.data
    def peek( self ) -> T:
        """simple docstring"""
        if self.is_empty():
            raise IndexError("""peek from empty stack""" )
        assert self.top is not None
        return self.top.data
    def clear( self ) -> None:
        """simple docstring"""
        self.top = None
if __name__ == "__main__":
from doctest import testmod
testmod()
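    # Quick demo of the stack above (illustrative):
    stack = Stack()
    for value in (1, 2, 3):
        stack.push(value )
    assert str(stack ) == "3->2->1" and stack.pop() == 3 and stack.peek() == 2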
| 139 |
import argparse
from pathlib import Path
import requests
import torch
from PIL import Image
from transformers import (
RobertaTokenizer,
TrOCRConfig,
TrOCRForCausalLM,
TrOCRProcessor,
VisionEncoderDecoderModel,
ViTConfig,
ViTImageProcessor,
ViTModel,
)
from transformers.utils import logging
logging.set_verbosity_info()
__UpperCAmelCase = logging.get_logger(__name__)
def create_rename_keys(encoder_config , decoder_config ):
    rename_keys = []
for i in range(encoder_config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(f'''encoder.deit.blocks.{i}.norm1.weight''', f'''encoder.encoder.layer.{i}.layernorm_before.weight''') )
rename_keys.append((f'''encoder.deit.blocks.{i}.norm1.bias''', f'''encoder.encoder.layer.{i}.layernorm_before.bias''') )
rename_keys.append(
(f'''encoder.deit.blocks.{i}.attn.proj.weight''', f'''encoder.encoder.layer.{i}.attention.output.dense.weight''') )
rename_keys.append(
(f'''encoder.deit.blocks.{i}.attn.proj.bias''', f'''encoder.encoder.layer.{i}.attention.output.dense.bias''') )
rename_keys.append(
(f'''encoder.deit.blocks.{i}.norm2.weight''', f'''encoder.encoder.layer.{i}.layernorm_after.weight''') )
rename_keys.append((f'''encoder.deit.blocks.{i}.norm2.bias''', f'''encoder.encoder.layer.{i}.layernorm_after.bias''') )
rename_keys.append(
(f'''encoder.deit.blocks.{i}.mlp.fc1.weight''', f'''encoder.encoder.layer.{i}.intermediate.dense.weight''') )
rename_keys.append(
(f'''encoder.deit.blocks.{i}.mlp.fc1.bias''', f'''encoder.encoder.layer.{i}.intermediate.dense.bias''') )
rename_keys.append(
(f'''encoder.deit.blocks.{i}.mlp.fc2.weight''', f'''encoder.encoder.layer.{i}.output.dense.weight''') )
rename_keys.append((f'''encoder.deit.blocks.{i}.mlp.fc2.bias''', f'''encoder.encoder.layer.{i}.output.dense.bias''') )
# cls token, position embeddings and patch embeddings of encoder
rename_keys.extend(
[
("""encoder.deit.cls_token""", """encoder.embeddings.cls_token"""),
("""encoder.deit.pos_embed""", """encoder.embeddings.position_embeddings"""),
("""encoder.deit.patch_embed.proj.weight""", """encoder.embeddings.patch_embeddings.projection.weight"""),
("""encoder.deit.patch_embed.proj.bias""", """encoder.embeddings.patch_embeddings.projection.bias"""),
("""encoder.deit.norm.weight""", """encoder.layernorm.weight"""),
("""encoder.deit.norm.bias""", """encoder.layernorm.bias"""),
] )
return rename_keys
def read_in_q_k_v(state_dict , encoder_config ):
    for i in range(encoder_config.num_hidden_layers ):
        # queries, keys and values (only weights, no biases)
        in_proj_weight = state_dict.pop(f'''encoder.deit.blocks.{i}.attn.qkv.weight''' )
        state_dict[f'''encoder.encoder.layer.{i}.attention.attention.query.weight'''] = in_proj_weight[
            : encoder_config.hidden_size, :
        ]
        state_dict[f'''encoder.encoder.layer.{i}.attention.attention.key.weight'''] = in_proj_weight[
            encoder_config.hidden_size : encoder_config.hidden_size * 2, :
        ]
        state_dict[f'''encoder.encoder.layer.{i}.attention.attention.value.weight'''] = in_proj_weight[
            -encoder_config.hidden_size :, :
        ]
def rename_key(dct , old , new ):
    val = dct.pop(old )
    dct[new] = val
def prepare_img(checkpoint_url ):
    if "handwritten" in checkpoint_url:
        url = """https://fki.tic.heia-fr.ch/static/img/a01-122-02-00.jpg""" # industry
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-12.jpg" # have
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-10.jpg" # let
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02.jpg" #
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122.jpg"
    elif "printed" in checkpoint_url or "stage1" in checkpoint_url:
        url = """https://www.researchgate.net/profile/Dinh-Sang/publication/338099565/figure/fig8/AS:840413229350922@1577381536857/An-receipt-example-in-the-SROIE-2019-dataset_Q640.jpg"""
    im = Image.open(requests.get(url , stream=True ).raw ).convert("""RGB""" )
    return im
@torch.no_grad()
def convert_tr_ocr_checkpoint(checkpoint_url , pytorch_dump_folder_path ):
    encoder_config = ViTConfig(image_size=3_8_4 , qkv_bias=False )
    decoder_config = TrOCRConfig()
    # size of the architecture
    if "base" in checkpoint_url:
        decoder_config.encoder_hidden_size = 7_6_8
    elif "large" in checkpoint_url:
        # use ViT-large encoder
        encoder_config.hidden_size = 1_0_2_4
        encoder_config.intermediate_size = 4_0_9_6
        encoder_config.num_hidden_layers = 2_4
        encoder_config.num_attention_heads = 1_6
        decoder_config.encoder_hidden_size = 1_0_2_4
    else:
        raise ValueError("""Should either find 'base' or 'large' in checkpoint URL""" )
    # the large-printed + stage1 checkpoints uses sinusoidal position embeddings, no layernorm afterwards
    if "large-printed" in checkpoint_url or "stage1" in checkpoint_url:
        decoder_config.tie_word_embeddings = False
        decoder_config.activation_function = """relu"""
        decoder_config.max_position_embeddings = 1_0_2_4
        decoder_config.scale_embedding = True
        decoder_config.use_learned_position_embeddings = False
        decoder_config.layernorm_embedding = False
    # load HuggingFace model
    encoder = ViTModel(encoder_config , add_pooling_layer=False )
    decoder = TrOCRForCausalLM(decoder_config )
    model = VisionEncoderDecoderModel(encoder=encoder , decoder=decoder )
    model.eval()
    # load state_dict of original model, rename some keys
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url , map_location="""cpu""" , check_hash=True )["""model"""]
    rename_keys = create_rename_keys(encoder_config , decoder_config )
    for src, dest in rename_keys:
        rename_key(state_dict , src , dest )
    read_in_q_k_v(state_dict , encoder_config )
    # remove parameters we don't need
    del state_dict["encoder.deit.head.weight"]
    del state_dict["encoder.deit.head.bias"]
    del state_dict["decoder.version"]
    # add prefix to decoder keys
    for key, val in state_dict.copy().items():
        val = state_dict.pop(key )
        if key.startswith("""decoder""" ) and "output_projection" not in key:
            state_dict["""decoder.model.""" + key] = val
        else:
            state_dict["""decoder.""" + key] = val
    # load state dict
    model.load_state_dict(state_dict )
    # Check outputs on an image
    image_processor = ViTImageProcessor(size=encoder_config.image_size )
    tokenizer = RobertaTokenizer.from_pretrained("""roberta-large""" )
    processor = TrOCRProcessor(image_processor , tokenizer )
    pixel_values = processor(images=prepare_img(checkpoint_url ) , return_tensors="""pt""" ).pixel_values
    # verify logits
    decoder_input_ids = torch.tensor([[model.config.decoder.decoder_start_token_id]] )
    outputs = model(pixel_values=pixel_values , decoder_input_ids=decoder_input_ids )
    logits = outputs.logits
    expected_shape = torch.Size([1, 1, 5_0_2_6_5] )
    if "trocr-base-handwritten" in checkpoint_url:
        expected_slice = torch.tensor(
            [-1.4502, -4.6683, -0.5347, -2.9291, 9.1435, -3.0571, 8.9764, 1.7560, 8.7358, -1.5311] )
    elif "trocr-large-handwritten" in checkpoint_url:
        expected_slice = torch.tensor(
            [-2.6437, -1.3129, -2.2596, -5.3455, 6.3539, 1.7604, 5.4991, 1.4702, 5.6113, 2.0170] )
    elif "trocr-base-printed" in checkpoint_url:
        expected_slice = torch.tensor(
            [-5.6816, -5.8388, 1.1398, -6.9034, 6.8505, -2.4393, 1.2284, -1.0232, -1.9661, -3.9210] )
    elif "trocr-large-printed" in checkpoint_url:
        expected_slice = torch.tensor(
            [-6.0162, -7.0959, 4.4155, -5.1063, 7.0468, -3.1631, 2.6466, -0.3081, -0.8106, -1.7535] )
    if "stage1" not in checkpoint_url:
        assert logits.shape == expected_shape, "Shape of logits not as expected"
        assert torch.allclose(logits[0, 0, :1_0] , expected_slice , atol=1e-3 ), "First elements of logits not as expected"
    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    print(f'''Saving model to {pytorch_dump_folder_path}''' )
    model.save_pretrained(pytorch_dump_folder_path )
    print(f'''Saving processor to {pytorch_dump_folder_path}''' )
    processor.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
__UpperCAmelCase = argparse.ArgumentParser()
parser.add_argument(
"""--checkpoint_url""",
default="""https://layoutlm.blob.core.windows.net/trocr/model_zoo/fairseq/trocr-base-handwritten.pt""",
type=str,
help="""URL to the original PyTorch checkpoint (.pth file).""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model."""
)
__UpperCAmelCase = parser.parse_args()
convert_tr_ocr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
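    # Example invocation (illustrative; the script file name is assumed):
    #   python convert_trocr_unilm_to_pytorch.py \
    #       --checkpoint_url https://layoutlm.blob.core.windows.net/trocr/model_zoo/fairseq/trocr-base-handwritten.pt \
    #       --pytorch_dump_folder_path ./trocr-base-handwritten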
| 139 | 1 |
"""simple docstring"""
def generate_large_matrix( ) ->list[list[int]]:
    """simple docstring"""
    return [list(range(1_000 - i , -1_000 - i , -1 ) ) for i in range(1_000 )]
grid = generate_large_matrix()
test_grids = (
[[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]],
[[3, 2], [1, 0]],
[[7, 7, 6]],
[[7, 7, 6], [-1, -2, -3]],
grid,
)
def validate_grid( grid ) ->None:
    """simple docstring"""
    assert all(row == sorted(row , reverse=True ) for row in grid )
    assert all(list(col ) == sorted(col , reverse=True ) for col in zip(*grid ) )
def find_negative_index( array ) ->int:
    """simple docstring"""
    left = 0
    right = len(array ) - 1
    # Edge cases such as no values or all numbers are negative.
    if not array or array[0] < 0:
        return 0
    while right + 1 > left:
        mid = (left + right) // 2
        num = array[mid]
        # Num must be negative and the index must be greater than or equal to 0.
        if num < 0 and array[mid - 1] >= 0:
            return mid
        if num >= 0:
            left = mid + 1
        else:
            right = mid - 1
    # No negative numbers so return the last index of the array + 1 which is the length.
    return len(array )
def count_negatives_binary_search( grid ) ->int:
    """simple docstring"""
    total = 0
    bound = len(grid[0] )
    for i in range(len(grid ) ):
        bound = find_negative_index(grid[i][:bound] )
        total += bound
    return (len(grid ) * len(grid[0] )) - total
def count_negatives_brute_force( grid ) ->int:
    """simple docstring"""
    return len([number for row in grid for number in row if number < 0] )
def count_negatives_brute_force_with_break( grid ) ->int:
    """simple docstring"""
    total = 0
    for row in grid:
        for i, number in enumerate(row ):
            if number < 0:
                total += len(row ) - i
                break
    return total
def benchmark( ) ->None:
    """simple docstring"""
    from timeit import timeit
    print("Running benchmarks" )
    setup = (
        "from __main__ import count_negatives_binary_search, "
        "count_negatives_brute_force, count_negatives_brute_force_with_break, grid"
    )
    for func in (
        "count_negatives_binary_search", # took 0.7727 seconds
        "count_negatives_brute_force_with_break", # took 4.6505 seconds
        "count_negatives_brute_force", # took 12.8160 seconds
    ):
        time = timeit(F'''{func}(grid=grid)''' , setup=setup , number=500 )
        print(F'''{func}() took {time:0.4f} seconds''' )
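# Small self-check on a hand-made grid (illustrative values): rows and columns
# are sorted in decreasing order and the grid holds exactly two negative numbers.
assert count_negatives_binary_search([[3, 2], [1, -1], [0, -2]] ) == 2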
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark() | 243 |
"""simple docstring"""
from collections.abc import Callable
from math import pi, sqrt
from random import uniform
from statistics import mean
def pi_estimator( iterations ) ->None:
    """simple docstring"""
    def is_in_circle(x , y ) -> bool:
        distance_from_centre = sqrt((x**2) + (y**2) )
        # Our circle has a radius of 1, so a distance
        # greater than 1 would land outside the circle.
        return distance_from_centre <= 1
    # The proportion of guesses that landed in the circle
    proportion = mean(
        int(is_in_circle(uniform(-1.0 , 1.0 ) , uniform(-1.0 , 1.0 ) ) )
        for _ in range(iterations ) )
    # The ratio of the area for circle to square is pi/4.
    pi_estimate = proportion * 4
    print(F'''The estimated value of pi is {pi_estimate}''' )
    print(F'''The numpy value of pi is {pi}''' )
    print(F'''The total error is {abs(pi - pi_estimate )}''' )
def area_under_curve_estimator( iterations , function_to_integrate , min_value = 0.0 , max_value = 1.0 , ) ->float:
    """simple docstring"""
    return mean(
        function_to_integrate(uniform(min_value , max_value ) ) for _ in range(iterations ) ) * (max_value - min_value)
def area_under_line_estimator_check( iterations , min_value = 0.0 , max_value = 1.0 ) ->None:
    """simple docstring"""
    def identity_function(x ) -> float:
        return x
    estimated_value = area_under_curve_estimator(
        iterations , identity_function , min_value , max_value )
    expected_value = (max_value * max_value - min_value * min_value) / 2
    print("******************" )
    print(F'''Estimating area under y=x where x varies from {min_value} to {max_value}''' )
    print(F'''Estimated value is {estimated_value}''' )
    print(F'''Expected value is {expected_value}''' )
    print(F'''Total error is {abs(estimated_value - expected_value )}''' )
    print("******************" )
def pi_estimator_using_area_under_curve( iterations ) ->None:
    """simple docstring"""
    def function_to_integrate(x ) -> float:
        return sqrt(4.0 - x * x )
    estimated_value = area_under_curve_estimator(
        iterations , function_to_integrate , 0.0 , 2.0 )
    print("******************" )
    print("Estimating pi using area_under_curve_estimator" )
    print(F'''Estimated value is {estimated_value}''' )
    print(F'''Expected value is {pi}''' )
    print(F'''Total error is {abs(estimated_value - pi )}''' )
    print("******************" )
if __name__ == "__main__":
import doctest
doctest.testmod() | 243 | 1 |
"""simple docstring"""
def longest_distance(graph ):
    """simple docstring"""
    indegree = [0] * len(graph )
    queue = []
    long_dist = [1] * len(graph )
    for values in graph.values():
        for i in values:
            indegree[i] += 1
    for i in range(len(graph ) ):
        if indegree[i] == 0:
            queue.append(i )
    while queue:
        vertex = queue.pop(0 )
        for x in graph[vertex]:
            indegree[x] -= 1
            if long_dist[vertex] + 1 > long_dist[x]:
                long_dist[x] = long_dist[vertex] + 1
            if indegree[x] == 0:
                queue.append(x )
    print(max(long_dist ) )
# Adjacency list of Graph
graph = {0: [2, 3, 4], 1: [2, 7], 2: [5], 3: [5, 7], 4: [7], 5: [6], 6: [7], 7: []}
longest_distance(graph)
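# For the DAG above the printed value is 5: for example the chain
# 0 -> 2 -> 5 -> 6 -> 7 visits five vertices (distances count vertices, starting at 1).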
| 132 |
"""simple docstring"""
from .data_collator import (
DataCollatorForLanguageModeling,
DataCollatorForPermutationLanguageModeling,
    DataCollatorForSeq2Seq,
DataCollatorForSOP,
DataCollatorForTokenClassification,
DataCollatorForWholeWordMask,
DataCollatorWithPadding,
DefaultDataCollator,
default_data_collator,
)
from .metrics import glue_compute_metrics, xnli_compute_metrics
from .processors import (
DataProcessor,
InputExample,
InputFeatures,
SingleSentenceClassificationProcessor,
SquadExample,
SquadFeatures,
    SquadV1Processor,
    SquadV2Processor,
glue_convert_examples_to_features,
glue_output_modes,
glue_processors,
glue_tasks_num_labels,
squad_convert_examples_to_features,
xnli_output_modes,
xnli_processors,
xnli_tasks_num_labels,
)
| 132 | 1 |
"""simple docstring"""
import math
def perfect_square( num ):
    return math.sqrt(num ) * math.sqrt(num ) == num
def perfect_square_binary_search( n ):
    left = 0
    right = n
    while left <= right:
        mid = (left + right) // 2
        if mid**2 == n:
            return True
        elif mid**2 > n:
            right = mid - 1
        else:
            left = mid + 1
    return False
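# The float-based and binary-search checks agree on sample values (illustrative):
assert perfect_square(16 ) and perfect_square_binary_search(16 )
assert not perfect_square(2 ) and not perfect_square_binary_search(2 )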
if __name__ == "__main__":
import doctest
doctest.testmod()
| 268 |
"""simple docstring"""
import warnings
from diffusers import StableDiffusionInpaintPipeline as StableDiffusionInpaintPipeline # noqa F401
warnings.warn(
'''The `inpainting.py` script is outdated. Please use directly `from diffusers import'''
''' StableDiffusionInpaintPipeline` instead.'''
)
| 268 | 1 |
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def parse_args( ):
    '''simple docstring'''
    parser = ArgumentParser(
        description=(
            'PyTorch TPU distributed training launch '
            'helper utility that will spawn up '
            'multiple distributed processes'
        ) )
    # Optional arguments for the launch helper
    parser.add_argument('--num_cores' , type=int , default=1 , help='Number of TPU cores to use (1 or 8).' )
    # positional
    parser.add_argument(
        'training_script' , type=str , help=(
            'The full path to the single TPU training '
            'program/script to be launched in parallel, '
            'followed by all the arguments for the '
            'training script'
        ) , )
    # rest from the training program
    parser.add_argument('training_script_args' , nargs=REMAINDER )
    return parser.parse_args()
def main( ):
    '''simple docstring'''
    args = parse_args()
    # Import training_script as a module.
    script_fpath = Path(args.training_script )
    sys.path.append(str(script_fpath.parent.resolve() ) )
    mod_name = script_fpath.stem
    mod = importlib.import_module(mod_name )
    # Patch sys.argv
    sys.argv = [args.training_script] + args.training_script_args + ['--tpu_num_cores', str(args.num_cores )]
    xmp.spawn(mod._mp_fn , args=() , nprocs=args.num_cores )
if __name__ == "__main__":
main()
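# Typical invocation (illustrative; the target script must define `_mp_fn`):
#   python xla_spawn.py --num_cores 8 my_training_script.py --learning_rate 3e-5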
| 145 |
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, 'utils'))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If DDPMSchedulerOutput is changed in scheduling_ddpm.py, this code needs to be manually updated.
REFERENCE_CODE = ' \"""\n Output class for the scheduler\'s step function output.\n\n Args:\n prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):\n Computed sample (x_{t-1}) of previous timestep. `prev_sample` should be used as next model input in the\n denoising loop.\n pred_original_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):\n The predicted denoised sample (x_{0}) based on the model output from the current timestep.\n `pred_original_sample` can be used to preview progress or for guidance.\n \"""\n\n prev_sample: torch.FloatTensor\n pred_original_sample: Optional[torch.FloatTensor] = None\n'
class lowerCamelCase (unittest.TestCase ):
'''simple docstring'''
    def setUp( self ):
        self.diffusers_dir = tempfile.mkdtemp()
        os.makedirs(os.path.join(self.diffusers_dir , 'schedulers/' ) )
        check_copies.DIFFUSERS_PATH = self.diffusers_dir
        shutil.copy(
            os.path.join(git_repo_path , 'src/diffusers/schedulers/scheduling_ddpm.py' ) , os.path.join(self.diffusers_dir , 'schedulers/scheduling_ddpm.py' ) , )
    def tearDown( self ):
        check_copies.DIFFUSERS_PATH = 'src/diffusers'
        shutil.rmtree(self.diffusers_dir )
    def check_copy_consistency( self , comment , class_name , class_code , overwrite_result=None ):
        code = comment + f"\nclass {class_name}(nn.Module):\n" + class_code
        if overwrite_result is not None:
            expected = comment + f"\nclass {class_name}(nn.Module):\n" + overwrite_result
        mode = black.Mode(target_versions={black.TargetVersion.PY37} , line_length=1_1_9 )
        code = black.format_str(code , mode=mode )
        fname = os.path.join(self.diffusers_dir , 'new_code.py' )
        with open(fname , 'w' , newline='\n' ) as f:
            f.write(code )
        if overwrite_result is None:
            self.assertTrue(len(check_copies.is_copy_consistent(fname ) ) == 0 )
        else:
            check_copies.is_copy_consistent(f.name , overwrite=True )
            with open(fname , 'r' ) as f:
                self.assertTrue(f.read() , expected )
    def test_find_code_in_diffusers( self ):
        code = check_copies.find_code_in_diffusers('schedulers.scheduling_ddpm.DDPMSchedulerOutput' )
        self.assertEqual(code , REFERENCE_CODE )
    def test_copy_consistency( self ):
        # Base copy consistency
        self.check_copy_consistency(
            '# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput' , 'DDPMSchedulerOutput' , REFERENCE_CODE + '\n' , )
        # With no empty line at the end
        self.check_copy_consistency(
            '# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput' , 'DDPMSchedulerOutput' , REFERENCE_CODE , )
        # Copy consistency with rename
        self.check_copy_consistency(
            '# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test' , 'TestSchedulerOutput' , re.sub('DDPM' , 'Test' , REFERENCE_CODE ) , )
        # Copy consistency with a really long name
        long_class_name = 'TestClassWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason'
        self.check_copy_consistency(
            f"# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->{long_class_name}" , f"{long_class_name}SchedulerOutput" , re.sub('Bert' , long_class_name , REFERENCE_CODE ) , )
        # Copy consistency with overwrite
        self.check_copy_consistency(
            '# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test' , 'TestSchedulerOutput' , REFERENCE_CODE , overwrite_result=re.sub('DDPM' , 'Test' , REFERENCE_CODE ) , )
| 145 | 1 |
import absl # noqa: F401 # Here to have a nice missing dependency error message early on
import nltk # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import six # noqa: F401 # Here to have a nice missing dependency error message early on
from rouge_score import rouge_scorer, scoring
import datasets
_CITATION = '\\n@inproceedings{lin-2004-rouge,\n title = "{ROUGE}: A Package for Automatic Evaluation of Summaries",\n author = "Lin, Chin-Yew",\n booktitle = "Text Summarization Branches Out",\n month = jul,\n year = "2004",\n address = "Barcelona, Spain",\n publisher = "Association for Computational Linguistics",\n url = "https://www.aclweb.org/anthology/W04-1013",\n pages = "74--81",\n}\n'
_DESCRIPTION = '\\nROUGE, or Recall-Oriented Understudy for Gisting Evaluation, is a set of metrics and a software package used for\nevaluating automatic summarization and machine translation software in natural language processing.\nThe metrics compare an automatically produced summary or translation against a reference or a set of references (human-produced) summary or translation.\n\nNote that ROUGE is case insensitive, meaning that upper case letters are treated the same way as lower case letters.\n\nThis metrics is a wrapper around Google Research reimplementation of ROUGE:\nhttps://github.com/google-research/google-research/tree/master/rouge\n'
_KWARGS_DESCRIPTION = '\nCalculates average rouge scores for a list of hypotheses and references\nArgs:\n predictions: list of predictions to score. Each prediction\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\n rouge_types: A list of rouge types to calculate.\n Valid names:\n `"rouge{n}"` (e.g. `"rouge1"`, `"rouge2"`) where: {n} is the n-gram based scoring,\n `"rougeL"`: Longest common subsequence based scoring.\n `"rougeLSum"`: rougeLsum splits text using `"\n"`.\n See details in https://github.com/huggingface/datasets/issues/617\n use_stemmer: Bool indicating whether Porter stemmer should be used to strip word suffixes.\n use_aggregator: Return aggregates if this is set to True\nReturns:\n rouge1: rouge_1 (precision, recall, f1),\n rouge2: rouge_2 (precision, recall, f1),\n rougeL: rouge_l (precision, recall, f1),\n rougeLsum: rouge_lsum (precision, recall, f1)\nExamples:\n\n >>> rouge = datasets.load_metric(\'rouge\')\n >>> predictions = ["hello there", "general kenobi"]\n >>> references = ["hello there", "general kenobi"]\n >>> results = rouge.compute(predictions=predictions, references=references)\n >>> print(list(results.keys()))\n [\'rouge1\', \'rouge2\', \'rougeL\', \'rougeLsum\']\n >>> print(results["rouge1"])\n AggregateScore(low=Score(precision=1.0, recall=1.0, fmeasure=1.0), mid=Score(precision=1.0, recall=1.0, fmeasure=1.0), high=Score(precision=1.0, recall=1.0, fmeasure=1.0))\n >>> print(results["rouge1"].mid.fmeasure)\n 1.0\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class SCREAMING_SNAKE_CASE__ ( datasets.Metric ):
    def _info( self ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('string' , id='sequence' ),
'references': datasets.Value('string' , id='sequence' ),
} ) , codebase_urls=['https://github.com/google-research/google-research/tree/master/rouge'] , reference_urls=[
'https://en.wikipedia.org/wiki/ROUGE_(metric)',
'https://github.com/google-research/google-research/tree/master/rouge',
] , )
    def _compute( self , predictions , references , rouge_types=None , use_aggregator=True , use_stemmer=False ):
        if rouge_types is None:
            rouge_types = ['rouge1', 'rouge2', 'rougeL', 'rougeLsum']
        scorer = rouge_scorer.RougeScorer(rouge_types=rouge_types , use_stemmer=use_stemmer )
        if use_aggregator:
            aggregator = scoring.BootstrapAggregator()
        else:
            scores = []
        for ref, pred in zip(references , predictions ):
            score = scorer.score(ref , pred )
            if use_aggregator:
                aggregator.add_scores(score )
            else:
                scores.append(score )
        if use_aggregator:
            result = aggregator.aggregate()
        else:
            result = {}
            for key in scores[0]:
                result[key] = [score[key] for score in scores]
        return result
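# Minimal sketch of what `_compute` does, bypassing `datasets.load_metric`
# (illustrative):
#   scorer = rouge_scorer.RougeScorer(rouge_types=["rouge1"])
#   print(scorer.score("hello there", "hello there")["rouge1"].fmeasure)  # 1.0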
| 32 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_luke''': ['''LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''LukeConfig'''],
'''tokenization_luke''': ['''LukeTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_luke'''] = [
'''LUKE_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''LukeForEntityClassification''',
'''LukeForEntityPairClassification''',
'''LukeForEntitySpanClassification''',
'''LukeForMultipleChoice''',
'''LukeForQuestionAnswering''',
'''LukeForSequenceClassification''',
'''LukeForTokenClassification''',
'''LukeForMaskedLM''',
'''LukeModel''',
'''LukePreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_luke import LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP, LukeConfig
from .tokenization_luke import LukeTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_luke import (
LUKE_PRETRAINED_MODEL_ARCHIVE_LIST,
LukeForEntityClassification,
LukeForEntityPairClassification,
LukeForEntitySpanClassification,
LukeForMaskedLM,
LukeForMultipleChoice,
LukeForQuestionAnswering,
LukeForSequenceClassification,
LukeForTokenClassification,
LukeModel,
LukePreTrainedModel,
)
else:
import sys
A = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__) | 160 | 0 |
from typing import List, Optional
from tokenizers import ByteLevelBPETokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot_small import BlenderbotSmallTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {
'''vocab_file''': '''vocab.json''',
'''merges_file''': '''merges.txt''',
'''tokenizer_config_file''': '''tokenizer_config.json''',
}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''facebook/blenderbot_small-90M''': '''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json'''
},
'''merges_file''': {
'''facebook/blenderbot_small-90M''': '''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt'''
},
'''tokenizer_config_file''': {
'''facebook/blenderbot_small-90M''': (
'''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json'''
)
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''facebook/blenderbot_small-90M''': 512,
}
class BlenderbotSmallTokenizerFast( PreTrainedTokenizerFast ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = BlenderbotSmallTokenizer
    def __init__( self, vocab_file=None, merges_file=None, unk_token="<|endoftext|>", bos_token="<|endoftext|>", eos_token="<|endoftext|>", add_prefix_space=False, trim_offsets=True, **kwargs, ):
        """simple docstring"""
        super().__init__(
            ByteLevelBPETokenizer(
                vocab=vocab_file, merges=merges_file, add_prefix_space=add_prefix_space, trim_offsets=trim_offsets, ), bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, **kwargs, )
        self.add_prefix_space = add_prefix_space
    def build_inputs_with_special_tokens( self, token_ids_0, token_ids_1=None ):
        """simple docstring"""
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output
        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]
    def create_token_type_ids_from_sequences( self, token_ids_0, token_ids_1 = None ):
        """simple docstring"""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep ) * [0]
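    # Illustrative special-token layout produced by the two helpers above
    # (hypothetical ids): a single sequence becomes [bos, *ids_0, eos]; a pair
    # becomes [bos, *ids_0, eos, eos, *ids_1, eos], with all-zero token type ids.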
| 167 |
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoImageProcessor, ViTImageProcessor
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / '''utils'''))
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
SAMPLE_IMAGE_PROCESSING_CONFIG_DIR = get_tests_dir('''fixtures''')
class ImageProcessorUtilTester( unittest.TestCase ):
    def test_cached_files_are_used_when_internet_is_down( self ):
        """simple docstring"""
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}
        # Download this model to make sure it's in the cache.
        image_processor = ViTImageProcessor.from_pretrained('''hf-internal-testing/tiny-random-vit''' )
        # Under the mock environment we get a 500 error when trying to reach the model.
        with mock.patch('''requests.Session.request''', return_value=response_mock ) as mock_head:
            image_processor = ViTImageProcessor.from_pretrained('''hf-internal-testing/tiny-random-vit''' )
            # This check we did call the fake head request
            mock_head.assert_called()
    def test_legacy_load_from_url( self ):
        """simple docstring"""
        image_processor = ViTImageProcessor.from_pretrained(
            '''https://huggingface.co/hf-internal-testing/tiny-random-vit/resolve/main/preprocessor_config.json''' )
    def test_image_processor_from_pretrained_subfolder( self ):
        """simple docstring"""
        with self.assertRaises(OSError ):
            # config is in subfolder, the following should not work without specifying the subfolder
            config = AutoImageProcessor.from_pretrained('''hf-internal-testing/stable-diffusion-all-variants''' )
        config = AutoImageProcessor.from_pretrained(
            '''hf-internal-testing/stable-diffusion-all-variants''', subfolder='''feature_extractor''' )
        self.assertIsNotNone(config )
@is_staging_test
class ImageProcessorPushToHubTester( unittest.TestCase ):
    @classmethod
    def setUpClass( cls ):
        """simple docstring"""
        cls._token = TOKEN
        HfFolder.save_token(TOKEN )
    @classmethod
    def tearDownClass( cls ):
        """simple docstring"""
        try:
            delete_repo(token=cls._token, repo_id='''test-image-processor''' )
        except HTTPError:
            pass
        try:
            delete_repo(token=cls._token, repo_id='''valid_org/test-image-processor-org''' )
        except HTTPError:
            pass
        try:
            delete_repo(token=cls._token, repo_id='''test-dynamic-image-processor''' )
        except HTTPError:
            pass
    def test_push_to_hub( self ):
        """simple docstring"""
        image_processor = ViTImageProcessor.from_pretrained(SAMPLE_IMAGE_PROCESSING_CONFIG_DIR )
        image_processor.push_to_hub('''test-image-processor''', use_auth_token=self._token )
        new_image_processor = ViTImageProcessor.from_pretrained(f"{USER}/test-image-processor" )
        for k, v in image_processor.__dict__.items():
            self.assertEqual(v, getattr(new_image_processor, k ) )
        # Reset repo
        delete_repo(token=self._token, repo_id='''test-image-processor''' )
        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            image_processor.save_pretrained(
                tmp_dir, repo_id='''test-image-processor''', push_to_hub=True, use_auth_token=self._token )
            new_image_processor = ViTImageProcessor.from_pretrained(f"{USER}/test-image-processor" )
            for k, v in image_processor.__dict__.items():
                self.assertEqual(v, getattr(new_image_processor, k ) )
    def test_push_to_hub_in_organization( self ):
        """simple docstring"""
        image_processor = ViTImageProcessor.from_pretrained(SAMPLE_IMAGE_PROCESSING_CONFIG_DIR )
        image_processor.push_to_hub('''valid_org/test-image-processor''', use_auth_token=self._token )
        new_image_processor = ViTImageProcessor.from_pretrained('''valid_org/test-image-processor''' )
        for k, v in image_processor.__dict__.items():
            self.assertEqual(v, getattr(new_image_processor, k ) )
        # Reset repo
        delete_repo(token=self._token, repo_id='''valid_org/test-image-processor''' )
        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            image_processor.save_pretrained(
                tmp_dir, repo_id='''valid_org/test-image-processor-org''', push_to_hub=True, use_auth_token=self._token )
            new_image_processor = ViTImageProcessor.from_pretrained('''valid_org/test-image-processor-org''' )
            for k, v in image_processor.__dict__.items():
                self.assertEqual(v, getattr(new_image_processor, k ) )
    def test_push_to_hub_dynamic_image_processor( self ):
        """simple docstring"""
        CustomImageProcessor.register_for_auto_class()
        image_processor = CustomImageProcessor.from_pretrained(SAMPLE_IMAGE_PROCESSING_CONFIG_DIR )
        image_processor.push_to_hub('''test-dynamic-image-processor''', use_auth_token=self._token )
        # This has added the proper auto_map field to the config
        self.assertDictEqual(
            image_processor.auto_map, {'''AutoImageProcessor''': '''custom_image_processing.CustomImageProcessor'''}, )
        new_image_processor = AutoImageProcessor.from_pretrained(
            f"{USER}/test-dynamic-image-processor", trust_remote_code=True )
        # Can't make an isinstance check because the new_image_processor is from the CustomImageProcessor class of a dynamic module
        self.assertEqual(new_image_processor.__class__.__name__, '''CustomImageProcessor''' )
| 167 | 1 |
'''simple docstring'''
# Lint as: python3
# pylint: enable=line-too-long
# pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position
A_ = "2.13.1"
import platform
import pyarrow
from packaging import version
if version.parse(platform.python_version()) < version.parse("3.7"):
raise ImportWarning(
"To use `datasets`, Python>=3.7 is required, and the current version of Python doesn't match this condition."
)
if version.parse(pyarrow.__version__).major < 8:
raise ImportWarning(
"To use `datasets`, the module `pyarrow>=8.0.0` is required, and the current version of `pyarrow` doesn't match this condition.\n"
"If you are running this in a Google Colab, you should probably just restart the runtime to use the right version of `pyarrow`."
)
del platform
del pyarrow
del version
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
list_datasets,
list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .tasks import *
from .utils import *
from .utils import logging
# deprecated modules
from datasets import arrow_dataset as _arrow_dataset # isort:skip
from datasets import utils as _utils # isort:skip
from datasets.utils import download_manager as _deprecated_download_manager # isort:skip
_arrow_dataset.concatenate_datasets = concatenate_datasets
_utils.DownloadConfig = DownloadConfig
_utils.DownloadManager = DownloadManager
_utils.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadConfig = DownloadConfig
_deprecated_download_manager.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadManager = DownloadManager
del _arrow_dataset, _utils, _deprecated_download_manager
| 139 |
'''simple docstring'''
from __future__ import annotations
import numpy as np
def relu(vector ):
    return np.maximum(0 , vector )
if __name__ == "__main__":
print(np.array(relu([-1, 0, 5]))) # --> [0, 0, 5]
| 139 | 1 |
"""simple docstring"""
from collections.abc import Generator
from math import sin
def to_little_endian( string_aa: bytes ) -> bytes:
    '''simple docstring'''
    if len(string_aa ) != 32:
        raise ValueError('Input must be of length 32' )
    little_endian = B''
    for i in [3, 2, 1, 0]:
        little_endian += string_aa[8 * i : 8 * i + 8]
    return little_endian
def reformat_hex( i: int ) -> bytes:
    '''simple docstring'''
    if i < 0:
        raise ValueError('Input must be non-negative' )
    hex_rep = format(i , '08x' )[-8:]
    little_endian_hex = B''
    for i in [3, 2, 1, 0]:
        little_endian_hex += hex_rep[2 * i : 2 * i + 2].encode('utf-8' )
    return little_endian_hex
def preprocess( message: bytes ) -> bytes:
    '''simple docstring'''
    bit_string = B''
    for char in message:
        bit_string += format(char , '08b' ).encode('utf-8' )
    start_len = format(len(bit_string ) , '064b' ).encode('utf-8' )
    # Pad bit_string to a multiple of 512 chars
    bit_string += b"1"
    while len(bit_string ) % 5_12 != 4_48:
        bit_string += b"0"
    bit_string += to_little_endian(start_len[32:] ) + to_little_endian(start_len[:32] )
    return bit_string
def get_block_words( bit_string: bytes ) -> Generator[list[int], None, None]:
    '''simple docstring'''
    if len(bit_string ) % 5_12 != 0:
        raise ValueError('Input must have length that\'s a multiple of 512' )
    for pos in range(0 , len(bit_string ) , 5_12 ):
        block = bit_string[pos : pos + 5_12]
        block_words = []
        for i in range(0 , 5_12 , 32 ):
            block_words.append(int(to_little_endian(block[i : i + 32] ) , 2 ) )
        yield block_words
def not_aa( i: int ) -> int:
    '''simple docstring'''
    if i < 0:
        raise ValueError('Input must be non-negative' )
    i_str = format(i , '032b' )
    new_str = ''
    for c in i_str:
        new_str += "1" if c == "0" else "0"
    return int(new_str , 2 )
def sum_aa( a: int , b: int ) -> int:
    '''simple docstring'''
    return (a + b) % 2**32
def left_rotate_aa( i: int , shift: int ) -> int:
    '''simple docstring'''
    if i < 0:
        raise ValueError('Input must be non-negative' )
    if shift < 0:
        raise ValueError('Shift must be non-negative' )
    return ((i << shift) ^ (i >> (32 - shift))) % 2**32
def md5_me( message: bytes ) -> bytes:
    '''simple docstring'''
    bit_string = preprocess(message )
    added_consts = [int(2**32 * abs(sin(i + 1 ) ) ) for i in range(64 )]
    # Starting states
    aa = 0X6745_2301
    ba = 0Xefcd_ab89
    ca = 0X98ba_dcfe
    da = 0X1032_5476
    shift_amounts = [
7,
12,
17,
22,
7,
12,
17,
22,
7,
12,
17,
22,
7,
12,
17,
22,
5,
9,
14,
20,
5,
9,
14,
20,
5,
9,
14,
20,
5,
9,
14,
20,
4,
11,
16,
23,
4,
11,
16,
23,
4,
11,
16,
23,
4,
11,
16,
23,
6,
10,
15,
21,
6,
10,
15,
21,
6,
10,
15,
21,
6,
10,
15,
21,
]
    # Process bit string in chunks, each with 16 32-char words
    for block_words in get_block_words(bit_string ):
        a = aa
        b = ba
        c = ca
        d = da
        # Hash current chunk
        for i in range(64 ):
            if i <= 15:
                # f = (b & c) | (not_32(b) & d) # Alternate definition for f
                f = d ^ (b & (c ^ d))
                g = i
            elif i <= 31:
                # f = (d & b) | (not_32(d) & c) # Alternate definition for f
                f = c ^ (d & (b ^ c))
                g = (5 * i + 1) % 16
            elif i <= 47:
                f = b ^ c ^ d
                g = (3 * i + 5) % 16
            else:
                f = c ^ (b | not_aa(d ))
                g = (7 * i) % 16
            f = (f + a + added_consts[i] + block_words[g]) % 2**32
            a = d
            d = c
            c = b
            b = sum_aa(b , left_rotate_aa(f , shift_amounts[i] ) )
        # Add hashed chunk to running total
        aa = sum_aa(aa , a )
        ba = sum_aa(ba , b )
        ca = sum_aa(ca , c )
        da = sum_aa(da , d )
    digest = reformat_hex(aa ) + reformat_hex(ba ) + reformat_hex(ca ) + reformat_hex(da )
    return digest
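# Known test vector (RFC 1321): the empty message hashes to
# d41d8cd98f00b204e9800998ecf8427e.
assert md5_me(b"" ) == b"d41d8cd98f00b204e9800998ecf8427e"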
if __name__ == "__main__":
import doctest
doctest.testmod()
| 271 |
"""simple docstring"""
import enum
import os
from hashlib import sha256
from typing import Optional
from .. import config
from .logging import get_logger
logger = get_logger(__name__)
class VerificationMode( enum.Enum ):
    '''simple docstring'''
    ALL_CHECKS = '''all_checks'''
    BASIC_CHECKS = '''basic_checks'''
    NO_CHECKS = '''no_checks'''
class ChecksumVerificationException( Exception ):
    '''simple docstring'''
class UnexpectedDownloadedFile( ChecksumVerificationException ):
    '''simple docstring'''
class ExpectedMoreDownloadedFiles( ChecksumVerificationException ):
    '''simple docstring'''
class NonMatchingChecksumError( ChecksumVerificationException ):
    '''simple docstring'''
def verify_checksums( expected_checksums: Optional[dict] , recorded_checksums: dict , verification_name=None ) -> None:
    '''simple docstring'''
    if expected_checksums is None:
        logger.info('Unable to verify checksums.' )
        return
    if len(set(expected_checksums ) - set(recorded_checksums ) ) > 0:
        raise ExpectedMoreDownloadedFiles(str(set(expected_checksums ) - set(recorded_checksums ) ) )
    if len(set(recorded_checksums ) - set(expected_checksums ) ) > 0:
        raise UnexpectedDownloadedFile(str(set(recorded_checksums ) - set(expected_checksums ) ) )
    bad_urls = [url for url in expected_checksums if expected_checksums[url] != recorded_checksums[url]]
    for_verification_name = ' for ' + verification_name if verification_name is not None else ''
    if len(bad_urls ) > 0:
        raise NonMatchingChecksumError(
            F'''Checksums didn\'t match{for_verification_name}:\n'''
            F'''{bad_urls}\n'''
            'Set `verification_mode=\'no_checks\'` to skip checksums verification and ignore this error' )
    logger.info('All the checksums matched successfully' + for_verification_name )
class SplitsVerificationException( Exception ):
    '''simple docstring'''
class UnexpectedSplits( SplitsVerificationException ):
    '''simple docstring'''
class ExpectedMoreSplits( SplitsVerificationException ):
    '''simple docstring'''
class NonMatchingSplitsSizesError( SplitsVerificationException ):
    '''simple docstring'''
def verify_splits( expected_splits: Optional[dict] , recorded_splits: dict ) -> None:
    '''simple docstring'''
    if expected_splits is None:
        logger.info('Unable to verify splits sizes.' )
        return
    if len(set(expected_splits ) - set(recorded_splits ) ) > 0:
        raise ExpectedMoreSplits(str(set(expected_splits ) - set(recorded_splits ) ) )
    if len(set(recorded_splits ) - set(expected_splits ) ) > 0:
        raise UnexpectedSplits(str(set(recorded_splits ) - set(expected_splits ) ) )
    bad_splits = [
        {'expected': expected_splits[name], 'recorded': recorded_splits[name]}
        for name in expected_splits
        if expected_splits[name].num_examples != recorded_splits[name].num_examples
    ]
    if len(bad_splits ) > 0:
        raise NonMatchingSplitsSizesError(str(bad_splits ) )
    logger.info('All the splits matched successfully.' )
def get_size_checksum_dict( path: str , record_checksum: bool = True ) -> dict:
    '''simple docstring'''
    if record_checksum:
        m = sha256()
        with open(path , 'rb' ) as f:
            for chunk in iter(lambda: f.read(1 << 20 ) , B'' ):
                m.update(chunk )
        checksum = m.hexdigest()
    else:
        checksum = None
    return {"num_bytes": os.path.getsize(path ), "checksum": checksum}
def is_small_dataset( dataset_size: int ) -> bool:
    '''simple docstring'''
    if dataset_size and config.IN_MEMORY_MAX_SIZE:
        return dataset_size < config.IN_MEMORY_MAX_SIZE
    else:
        return False
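# Illustrative use of the helpers above (hypothetical file path):
#   info = get_size_checksum_dict("/tmp/data.bin")
#   print(info["num_bytes"], info["checksum"])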
| 271 | 1 |
"""simple docstring"""
from dataclasses import dataclass
from typing import Dict, Optional, Union
import torch
import torch.nn.functional as F
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .attention_processor import AttentionProcessor, AttnProcessor
from .embeddings import TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
@dataclass
class PriorTransformerOutput(BaseOutput ):
    '''simple docstring'''
    predicted_image_embedding: torch.FloatTensor
class PriorTransformer(ModelMixin , ConfigMixin ):
    '''simple docstring'''
@register_to_config
    def __init__( self , num_attention_heads = 32 , attention_head_dim = 64 , num_layers = 20 , embedding_dim = 768 , num_embeddings=77 , additional_embeddings=4 , dropout = 0.0 , time_embed_act_fn = "silu" , norm_in_type = None , embedding_proj_norm_type = None , encoder_hid_proj_type = "linear" , added_emb_type = "prd" , time_embed_dim = None , embedding_proj_dim = None , clip_embed_dim = None , ):
        """simple docstring"""
        super().__init__()
        self.num_attention_heads = num_attention_heads
        self.attention_head_dim = attention_head_dim
        inner_dim = num_attention_heads * attention_head_dim
        self.additional_embeddings = additional_embeddings
        time_embed_dim = time_embed_dim or inner_dim
        embedding_proj_dim = embedding_proj_dim or embedding_dim
        clip_embed_dim = clip_embed_dim or embedding_dim
        self.time_proj = Timesteps(inner_dim , True , 0 )
        self.time_embedding = TimestepEmbedding(inner_dim , time_embed_dim , out_dim=inner_dim , act_fn=time_embed_act_fn )
        self.proj_in = nn.Linear(embedding_dim , inner_dim )
        if embedding_proj_norm_type is None:
            self.embedding_proj_norm = None
        elif embedding_proj_norm_type == "layer":
            self.embedding_proj_norm = nn.LayerNorm(embedding_proj_dim )
        else:
            raise ValueError(f'''unsupported embedding_proj_norm_type: {embedding_proj_norm_type}''' )
        self.embedding_proj = nn.Linear(embedding_proj_dim , inner_dim )
        if encoder_hid_proj_type is None:
            self.encoder_hidden_states_proj = None
        elif encoder_hid_proj_type == "linear":
            self.encoder_hidden_states_proj = nn.Linear(embedding_dim , inner_dim )
        else:
            raise ValueError(f'''unsupported encoder_hid_proj_type: {encoder_hid_proj_type}''' )
        self.positional_embedding = nn.Parameter(torch.zeros(1 , num_embeddings + additional_embeddings , inner_dim ) )
        if added_emb_type == "prd":
            self.prd_embedding = nn.Parameter(torch.zeros(1 , 1 , inner_dim ) )
        elif added_emb_type is None:
            self.prd_embedding = None
        else:
            raise ValueError(
                f'''`added_emb_type`: {added_emb_type} is not supported. Make sure to choose one of `\'prd\'` or `None`.''' )
        self.transformer_blocks = nn.ModuleList(
            [
                BasicTransformerBlock(
                    inner_dim , num_attention_heads , attention_head_dim , dropout=dropout , activation_fn="""gelu""" , attention_bias=True , )
                for d in range(num_layers )
            ] )
        if norm_in_type == "layer":
            self.norm_in = nn.LayerNorm(inner_dim )
        elif norm_in_type is None:
            self.norm_in = None
        else:
            raise ValueError(f'''Unsupported norm_in_type: {norm_in_type}.''' )
        self.norm_out = nn.LayerNorm(inner_dim )
        self.proj_to_clip_embeddings = nn.Linear(inner_dim , clip_embed_dim )
        causal_attention_mask = torch.full(
            [num_embeddings + additional_embeddings, num_embeddings + additional_embeddings] , -10_000.0 )
        causal_attention_mask.triu_(1 )
        causal_attention_mask = causal_attention_mask[None, ...]
        self.register_buffer("""causal_attention_mask""" , causal_attention_mask , persistent=False )
        self.clip_mean = nn.Parameter(torch.zeros(1 , clip_embed_dim ) )
        self.clip_std = nn.Parameter(torch.zeros(1 , clip_embed_dim ) )
@property
# Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
def _a ( self ) -> Dict[str, AttentionProcessor]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[int] = {}
def fn_recursive_add_processors(_a , _a , _a ):
if hasattr(_a , """set_processor""" ):
SCREAMING_SNAKE_CASE__ : Any = module.processor
for sub_name, child in module.named_children():
fn_recursive_add_processors(f'''{name}.{sub_name}''' , _a , _a )
return processors
for name, module in self.named_children():
fn_recursive_add_processors(_a , _a , _a )
return processors
def _a ( self , _a ) -> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[Any] = len(self.attn_processors.keys() )
if isinstance(_a , _a ) and len(_a ) != count:
raise ValueError(
f'''A dict of processors was passed, but the number of processors {len(_a )} does not match the'''
f''' number of attention layers: {count}. Please make sure to pass {count} processor classes.''' )
def fn_recursive_attn_processor(_a , _a , _a ):
if hasattr(_a , """set_processor""" ):
if not isinstance(_a , _a ):
module.set_processor(_a )
else:
module.set_processor(processor.pop(f'''{name}.processor''' ) )
for sub_name, child in module.named_children():
fn_recursive_attn_processor(f'''{name}.{sub_name}''' , _a , _a )
for name, module in self.named_children():
fn_recursive_attn_processor(_a , _a , _a )
def _a ( self ) -> Dict:
"""simple docstring"""
self.set_attn_processor(AttnProcessor() )
def _a ( self , _a , _a , _a , _a = None , _a = None , _a = True , ) -> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[Any] = hidden_states.shape[0]
SCREAMING_SNAKE_CASE__ : List[Any] = timestep
if not torch.is_tensor(_a ):
SCREAMING_SNAKE_CASE__ : Dict = torch.tensor([timesteps] , dtype=torch.long , device=hidden_states.device )
elif torch.is_tensor(_a ) and len(timesteps.shape ) == 0:
SCREAMING_SNAKE_CASE__ : int = timesteps[None].to(hidden_states.device )
# broadcast to batch dimension in a way that's compatible with ONNX/Core ML
SCREAMING_SNAKE_CASE__ : Dict = timesteps * torch.ones(_a , dtype=timesteps.dtype , device=timesteps.device )
SCREAMING_SNAKE_CASE__ : List[Any] = self.time_proj(_a )
# timesteps does not contain any weights and will always return f32 tensors
# but time_embedding might be fp16, so we need to cast here.
SCREAMING_SNAKE_CASE__ : Dict = timesteps_projected.to(dtype=self.dtype )
SCREAMING_SNAKE_CASE__ : List[Any] = self.time_embedding(_a )
if self.embedding_proj_norm is not None:
SCREAMING_SNAKE_CASE__ : Tuple = self.embedding_proj_norm(_a )
SCREAMING_SNAKE_CASE__ : List[str] = self.embedding_proj(_a )
if self.encoder_hidden_states_proj is not None and encoder_hidden_states is not None:
SCREAMING_SNAKE_CASE__ : int = self.encoder_hidden_states_proj(_a )
elif self.encoder_hidden_states_proj is not None and encoder_hidden_states is None:
raise ValueError("""`encoder_hidden_states_proj` requires `encoder_hidden_states` to be set""" )
SCREAMING_SNAKE_CASE__ : Optional[int] = self.proj_in(_a )
SCREAMING_SNAKE_CASE__ : Tuple = self.positional_embedding.to(hidden_states.dtype )
SCREAMING_SNAKE_CASE__ : Any = []
SCREAMING_SNAKE_CASE__ : Optional[Any] = 0
if encoder_hidden_states is not None:
additional_embeds.append(_a )
additional_embeddings_len += encoder_hidden_states.shape[1]
if len(proj_embeddings.shape ) == 2:
SCREAMING_SNAKE_CASE__ : Optional[Any] = proj_embeddings[:, None, :]
if len(hidden_states.shape ) == 2:
SCREAMING_SNAKE_CASE__ : str = hidden_states[:, None, :]
SCREAMING_SNAKE_CASE__ : Optional[int] = additional_embeds + [
proj_embeddings,
time_embeddings[:, None, :],
hidden_states,
]
if self.prd_embedding is not None:
SCREAMING_SNAKE_CASE__ : Tuple = self.prd_embedding.to(hidden_states.dtype ).expand(_a , -1 , -1 )
additional_embeds.append(_a )
SCREAMING_SNAKE_CASE__ : Dict = torch.cat(
_a , dim=1 , )
        # Allow positional_embedding to not include the `additional_embeddings` and instead pad it with zeros for these additional tokens
SCREAMING_SNAKE_CASE__ : Optional[int] = additional_embeddings_len + proj_embeddings.shape[1] + 1
if positional_embeddings.shape[1] < hidden_states.shape[1]:
SCREAMING_SNAKE_CASE__ : List[str] = F.pad(
_a , (
0,
0,
additional_embeddings_len,
self.prd_embedding.shape[1] if self.prd_embedding is not None else 0,
) , value=0.0 , )
SCREAMING_SNAKE_CASE__ : str = hidden_states + positional_embeddings
if attention_mask is not None:
SCREAMING_SNAKE_CASE__ : List[str] = (1 - attention_mask.to(hidden_states.dtype )) * -10_000.0
SCREAMING_SNAKE_CASE__ : Dict = F.pad(_a , (0, self.additional_embeddings) , value=0.0 )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = (attention_mask[:, None, :] + self.causal_attention_mask).to(hidden_states.dtype )
SCREAMING_SNAKE_CASE__ : Dict = attention_mask.repeat_interleave(self.config.num_attention_heads , dim=0 )
if self.norm_in is not None:
SCREAMING_SNAKE_CASE__ : int = self.norm_in(_a )
for block in self.transformer_blocks:
SCREAMING_SNAKE_CASE__ : Optional[int] = block(_a , attention_mask=_a )
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.norm_out(_a )
if self.prd_embedding is not None:
SCREAMING_SNAKE_CASE__ : Optional[int] = hidden_states[:, -1]
else:
SCREAMING_SNAKE_CASE__ : Optional[int] = hidden_states[:, additional_embeddings_len:]
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.proj_to_clip_embeddings(_a )
if not return_dict:
return (predicted_image_embedding,)
return PriorTransformerOutput(predicted_image_embedding=_a )
def _a ( self , _a ) -> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[str] = (prior_latents * self.clip_std) + self.clip_mean
return prior_latents
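# A minimal sketch (not part of the original file) of the causal attention bias
# built in __init__ above: after `triu_(1)`, only the strict upper triangle
# keeps -10000.0, so each position may attend to itself and earlier tokens.
if __name__ == "__main__":
    _mask = torch.full([4, 4], -10_000.0)
    _mask.triu_(1)
    print(_mask)
    # tensor([[     0., -10000., -10000., -10000.],
    #         [     0.,      0., -10000., -10000.],
    #         [     0.,      0.,      0., -10000.],
    #         [     0.,      0.,      0.,      0.]])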
| 132 |
"""simple docstring"""
import importlib
import inspect
import json
import os
import re
import shutil
import sys
from pathlib import Path
from typing import Dict, Optional, Union
from urllib import request
from huggingface_hub import HfFolder, cached_download, hf_hub_download, model_info
from packaging import version
from .. import __version__
from . import DIFFUSERS_DYNAMIC_MODULE_NAME, HF_MODULES_CACHE, logging
a :Tuple = (
"https://raw.githubusercontent.com/huggingface/diffusers/{revision}/examples/community/{pipeline}.py"
)
a :int = logging.get_logger(__name__) # pylint: disable=invalid-name
def _lowercase ( ) -> Dict:
SCREAMING_SNAKE_CASE__ : Any = """https://pypi.org/pypi/diffusers/json"""
SCREAMING_SNAKE_CASE__ : str = json.loads(request.urlopen(__lowerCAmelCase ).read() )["""releases"""].keys()
return sorted(__lowerCAmelCase , key=lambda __lowerCAmelCase : version.Version(__lowerCAmelCase ) )
def _lowercase ( ) -> Optional[Any]:
# This function has already been executed if HF_MODULES_CACHE already is in the Python path.
if HF_MODULES_CACHE in sys.path:
return
sys.path.append(__lowerCAmelCase )
os.makedirs(__lowerCAmelCase , exist_ok=__lowerCAmelCase )
SCREAMING_SNAKE_CASE__ : str = Path(__lowerCAmelCase ) / """__init__.py"""
if not init_path.exists():
init_path.touch()
def _lowercase ( __lowerCAmelCase ) -> Optional[int]:
init_hf_modules()
SCREAMING_SNAKE_CASE__ : List[Any] = Path(__lowerCAmelCase ) / name
# If the parent module does not exist yet, recursively create it.
if not dynamic_module_path.parent.exists():
create_dynamic_module(dynamic_module_path.parent )
os.makedirs(__lowerCAmelCase , exist_ok=__lowerCAmelCase )
SCREAMING_SNAKE_CASE__ : Dict = dynamic_module_path / """__init__.py"""
if not init_path.exists():
init_path.touch()
def _lowercase ( __lowerCAmelCase ) -> Tuple:
with open(__lowerCAmelCase , """r""" , encoding="""utf-8""" ) as f:
SCREAMING_SNAKE_CASE__ : int = f.read()
# Imports of the form `import .xxx`
    SCREAMING_SNAKE_CASE__ : Optional[Any] = re.findall(r"""^\s*import\s+\.(\S+)\s*$""" , __lowerCAmelCase , flags=re.MULTILINE )
    # Imports of the form `from .xxx import yyy`
    relative_imports += re.findall(r"""^\s*from\s+\.(\S+)\s+import""" , __lowerCAmelCase , flags=re.MULTILINE )
# Unique-ify
return list(set(__lowerCAmelCase ) )
def _lowercase ( __lowerCAmelCase ) -> Optional[int]:
SCREAMING_SNAKE_CASE__ : Optional[int] = False
SCREAMING_SNAKE_CASE__ : List[str] = [module_file]
SCREAMING_SNAKE_CASE__ : str = []
# Let's recurse through all relative imports
while not no_change:
SCREAMING_SNAKE_CASE__ : Dict = []
for f in files_to_check:
new_imports.extend(get_relative_imports(__lowerCAmelCase ) )
SCREAMING_SNAKE_CASE__ : int = Path(__lowerCAmelCase ).parent
SCREAMING_SNAKE_CASE__ : Dict = [str(module_path / m ) for m in new_imports]
SCREAMING_SNAKE_CASE__ : Optional[Any] = [f for f in new_import_files if f not in all_relative_imports]
SCREAMING_SNAKE_CASE__ : Any = [F'''{f}.py''' for f in new_import_files]
SCREAMING_SNAKE_CASE__ : Union[str, Any] = len(__lowerCAmelCase ) == 0
all_relative_imports.extend(__lowerCAmelCase )
return all_relative_imports
def _lowercase ( __lowerCAmelCase ) -> Any:
with open(__lowerCAmelCase , """r""" , encoding="""utf-8""" ) as f:
SCREAMING_SNAKE_CASE__ : Dict = f.read()
# Imports of the form `import xxx`
    SCREAMING_SNAKE_CASE__ : Optional[Any] = re.findall(r"""^\s*import\s+(\S+)\s*$""" , __lowerCAmelCase , flags=re.MULTILINE )
    # Imports of the form `from xxx import yyy`
    imports += re.findall(r"""^\s*from\s+(\S+)\s+import""" , __lowerCAmelCase , flags=re.MULTILINE )
# Only keep the top-level module
SCREAMING_SNAKE_CASE__ : str = [imp.split(""".""" )[0] for imp in imports if not imp.startswith(""".""" )]
# Unique-ify and test we got them all
SCREAMING_SNAKE_CASE__ : Tuple = list(set(__lowerCAmelCase ) )
SCREAMING_SNAKE_CASE__ : List[str] = []
for imp in imports:
try:
importlib.import_module(__lowerCAmelCase )
except ImportError:
missing_packages.append(__lowerCAmelCase )
if len(__lowerCAmelCase ) > 0:
raise ImportError(
"""This modeling file requires the following packages that were not found in your environment: """
F'''{', '.join(__lowerCAmelCase )}. Run `pip install {' '.join(__lowerCAmelCase )}`''' )
return get_relative_imports(__lowerCAmelCase )
def _lowercase ( __lowerCAmelCase , __lowerCAmelCase ) -> Any:
SCREAMING_SNAKE_CASE__ : str = module_path.replace(os.path.sep , """.""" )
SCREAMING_SNAKE_CASE__ : Any = importlib.import_module(__lowerCAmelCase )
if class_name is None:
return find_pipeline_class(__lowerCAmelCase )
return getattr(__lowerCAmelCase , __lowerCAmelCase )
def _lowercase ( __lowerCAmelCase ) -> Optional[int]:
from ..pipelines import DiffusionPipeline
SCREAMING_SNAKE_CASE__ : Tuple = dict(inspect.getmembers(__lowerCAmelCase , inspect.isclass ) )
SCREAMING_SNAKE_CASE__ : Optional[Any] = None
for cls_name, cls in cls_members.items():
if (
cls_name != DiffusionPipeline.__name__
and issubclass(cls , __lowerCAmelCase )
and cls.__module__.split(""".""" )[0] != "diffusers"
):
if pipeline_class is not None:
raise ValueError(
F'''Multiple classes that inherit from {DiffusionPipeline.__name__} have been found:'''
F''' {pipeline_class.__name__}, and {cls_name}. Please make sure to define only one in'''
F''' {loaded_module}.''' )
SCREAMING_SNAKE_CASE__ : Optional[Any] = cls
return pipeline_class
def _lowercase ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = None , __lowerCAmelCase = False , __lowerCAmelCase = False , __lowerCAmelCase = None , __lowerCAmelCase = None , __lowerCAmelCase = None , __lowerCAmelCase = False , ) -> Dict:
SCREAMING_SNAKE_CASE__ : str = str(__lowerCAmelCase )
SCREAMING_SNAKE_CASE__ : Optional[int] = os.path.join(__lowerCAmelCase , __lowerCAmelCase )
if os.path.isfile(__lowerCAmelCase ):
SCREAMING_SNAKE_CASE__ : Union[str, Any] = module_file_or_url
SCREAMING_SNAKE_CASE__ : List[str] = """local"""
elif pretrained_model_name_or_path.count("""/""" ) == 0:
SCREAMING_SNAKE_CASE__ : Optional[int] = get_diffusers_versions()
# cut ".dev0"
SCREAMING_SNAKE_CASE__ : List[Any] = """v""" + """.""".join(__version__.split(""".""" )[:3] )
# retrieve github version that matches
if revision is None:
SCREAMING_SNAKE_CASE__ : Any = latest_version if latest_version[1:] in available_versions else """main"""
logger.info(F'''Defaulting to latest_version: {revision}.''' )
elif revision in available_versions:
SCREAMING_SNAKE_CASE__ : List[str] = F'''v{revision}'''
elif revision == "main":
SCREAMING_SNAKE_CASE__ : int = revision
else:
raise ValueError(
F'''`custom_revision`: {revision} does not exist. Please make sure to choose one of'''
F''' {', '.join(available_versions + ['main'] )}.''' )
# community pipeline on GitHub
SCREAMING_SNAKE_CASE__ : int = COMMUNITY_PIPELINES_URL.format(revision=__lowerCAmelCase , pipeline=__lowerCAmelCase )
try:
SCREAMING_SNAKE_CASE__ : Dict = cached_download(
__lowerCAmelCase , cache_dir=__lowerCAmelCase , force_download=__lowerCAmelCase , proxies=__lowerCAmelCase , resume_download=__lowerCAmelCase , local_files_only=__lowerCAmelCase , use_auth_token=__lowerCAmelCase , )
SCREAMING_SNAKE_CASE__ : Optional[int] = """git"""
SCREAMING_SNAKE_CASE__ : Optional[Any] = pretrained_model_name_or_path + """.py"""
except EnvironmentError:
logger.error(F'''Could not locate the {module_file} inside {pretrained_model_name_or_path}.''' )
raise
else:
try:
# Load from URL or cache if already cached
SCREAMING_SNAKE_CASE__ : Any = hf_hub_download(
__lowerCAmelCase , __lowerCAmelCase , cache_dir=__lowerCAmelCase , force_download=__lowerCAmelCase , proxies=__lowerCAmelCase , resume_download=__lowerCAmelCase , local_files_only=__lowerCAmelCase , use_auth_token=__lowerCAmelCase , )
SCREAMING_SNAKE_CASE__ : Dict = os.path.join("""local""" , """--""".join(pretrained_model_name_or_path.split("""/""" ) ) )
except EnvironmentError:
logger.error(F'''Could not locate the {module_file} inside {pretrained_model_name_or_path}.''' )
raise
# Check we have all the requirements in our environment
SCREAMING_SNAKE_CASE__ : Optional[int] = check_imports(__lowerCAmelCase )
# Now we move the module inside our cached dynamic modules.
SCREAMING_SNAKE_CASE__ : Any = DIFFUSERS_DYNAMIC_MODULE_NAME + os.path.sep + submodule
create_dynamic_module(__lowerCAmelCase )
SCREAMING_SNAKE_CASE__ : Tuple = Path(__lowerCAmelCase ) / full_submodule
if submodule == "local" or submodule == "git":
# We always copy local files (we could hash the file to see if there was a change, and give them the name of
# that hash, to only copy when there is a modification but it seems overkill for now).
# The only reason we do the copy is to avoid putting too many folders in sys.path.
shutil.copy(__lowerCAmelCase , submodule_path / module_file )
for module_needed in modules_needed:
SCREAMING_SNAKE_CASE__ : Tuple = F'''{module_needed}.py'''
shutil.copy(os.path.join(__lowerCAmelCase , __lowerCAmelCase ) , submodule_path / module_needed )
else:
# Get the commit hash
# TODO: we will get this info in the etag soon, so retrieve it from there and not here.
if isinstance(__lowerCAmelCase , __lowerCAmelCase ):
SCREAMING_SNAKE_CASE__ : Dict = use_auth_token
elif use_auth_token is True:
SCREAMING_SNAKE_CASE__ : Optional[int] = HfFolder.get_token()
else:
SCREAMING_SNAKE_CASE__ : Optional[int] = None
SCREAMING_SNAKE_CASE__ : int = model_info(__lowerCAmelCase , revision=__lowerCAmelCase , token=__lowerCAmelCase ).sha
# The module file will end up being placed in a subfolder with the git hash of the repo. This way we get the
# benefit of versioning.
SCREAMING_SNAKE_CASE__ : Optional[Any] = submodule_path / commit_hash
SCREAMING_SNAKE_CASE__ : Optional[Any] = full_submodule + os.path.sep + commit_hash
create_dynamic_module(__lowerCAmelCase )
if not (submodule_path / module_file).exists():
shutil.copy(__lowerCAmelCase , submodule_path / module_file )
    # Make sure we also have every file with relative imports
for module_needed in modules_needed:
if not (submodule_path / module_needed).exists():
get_cached_module_file(
__lowerCAmelCase , F'''{module_needed}.py''' , cache_dir=__lowerCAmelCase , force_download=__lowerCAmelCase , resume_download=__lowerCAmelCase , proxies=__lowerCAmelCase , use_auth_token=__lowerCAmelCase , revision=__lowerCAmelCase , local_files_only=__lowerCAmelCase , )
return os.path.join(__lowerCAmelCase , __lowerCAmelCase )
def _lowercase ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = None , __lowerCAmelCase = None , __lowerCAmelCase = False , __lowerCAmelCase = False , __lowerCAmelCase = None , __lowerCAmelCase = None , __lowerCAmelCase = None , __lowerCAmelCase = False , **__lowerCAmelCase , ) -> List[Any]:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = get_cached_module_file(
__lowerCAmelCase , __lowerCAmelCase , cache_dir=__lowerCAmelCase , force_download=__lowerCAmelCase , resume_download=__lowerCAmelCase , proxies=__lowerCAmelCase , use_auth_token=__lowerCAmelCase , revision=__lowerCAmelCase , local_files_only=__lowerCAmelCase , )
return get_class_in_module(__lowerCAmelCase , final_module.replace(""".py""" , """""" ) )
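# A minimal sketch (not part of the original file) of how the relative-import
# regexes above behave on a toy module source (`_demo_src` is illustrative):
if __name__ == "__main__":
    _demo_src = "import .utils\nfrom .pipeline import MyPipeline\nimport torch\n"
    _demo = re.findall(r"^\s*import\s+\.(\S+)\s*$", _demo_src, flags=re.MULTILINE)
    _demo += re.findall(r"^\s*from\s+\.(\S+)\s+import", _demo_src, flags=re.MULTILINE)
    print(sorted(set(_demo)))  # ['pipeline', 'utils'] -- `torch` is not relative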
| 132 | 1 |
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFCamembertModel
@require_tf
@require_sentencepiece
@require_tokenizers
class __lowerCAmelCase ( unittest.TestCase):
@slow
def SCREAMING_SNAKE_CASE ( self: Optional[Any] ):
        model = TFCamembertModel.from_pretrained("jplu/tf-camembert-base" )
        input_ids = tf.convert_to_tensor(
            [[5, 1_21, 11, 6_60, 16, 7_30, 2_55_43, 1_10, 83, 6]] , dtype=tf.int32 , )  # J'aime le camembert !"
        output = model(input_ids )["last_hidden_state"]
        expected_shape = tf.TensorShape((1, 10, 7_68) )
        self.assertEqual(output.shape , expected_shape )
        # compare the actual values for a slice.
        expected_slice = tf.convert_to_tensor(
            [[[-0.02_54, 0.02_35, 0.10_27], [0.06_06, -0.18_11, -0.04_18], [-0.15_61, -0.11_27, 0.26_87]]] , dtype=tf.float32 , )
# camembert = torch.hub.load('pytorch/fairseq', 'camembert.v0')
# camembert.eval()
# expected_slice = roberta.model.forward(input_ids)[0][:, :3, :3].detach()
self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1e-4 ) )
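# A minimal sketch (not part of the original test) of the tolerance check used
# above: np.allclose passes when |output - expected| <= atol + rtol * |expected|.
if is_tf_available():
    _out = np.array([[-0.0254, 0.0235, 0.1027]])
    _ref = np.array([[-0.0254, 0.0235, 0.1028]])
    assert np.allclose(_out, _ref, atol=1e-4)  # off by ~1e-4 -> still "close"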
| 368 |
import numpy as np
import torch
from torch.utils.data import Dataset, IterableDataset
from ..utils.generic import ModelOutput
class __lowerCAmelCase ( lowerCAmelCase):
def __init__( self: Any , _lowerCAmelCase: int , _lowerCAmelCase: str , _lowerCAmelCase: Union[str, Any] ):
lowercase :List[str] = dataset
lowercase :Optional[int] = process
lowercase :Union[str, Any] = params
def __len__( self: str ):
return len(self.dataset )
def __getitem__( self: int , _lowerCAmelCase: Dict ):
lowercase :Union[str, Any] = self.dataset[i]
lowercase :Optional[int] = self.process(_lowerCAmelCase , **self.params )
return processed
class __lowerCAmelCase ( lowerCAmelCase):
def __init__( self: int , _lowerCAmelCase: Tuple , _lowerCAmelCase: Union[str, Any] , _lowerCAmelCase: int , _lowerCAmelCase: Optional[int]=None ):
lowercase :Optional[Any] = loader
lowercase :int = infer
lowercase :Dict = params
if loader_batch_size == 1:
# Let's spare some time by deactivating altogether
lowercase :Union[str, Any] = None
lowercase :Any = loader_batch_size
# Internal bookkeeping
lowercase :Optional[Any] = None
lowercase :Dict = None
def __len__( self: Tuple ):
return len(self.loader )
def __iter__( self: List[str] ):
lowercase :Dict = iter(self.loader )
return self
def SCREAMING_SNAKE_CASE ( self: Union[str, Any] ):
if isinstance(self._loader_batch_data , torch.Tensor ):
# Batch data is simple tensor, just fetch the slice
lowercase :Optional[int] = self._loader_batch_data[self._loader_batch_index]
else:
# Batch data is assumed to be BaseModelOutput (or dict)
lowercase :str = {}
for k, element in self._loader_batch_data.items():
if isinstance(_lowerCAmelCase , _lowerCAmelCase ):
# Convert ModelOutput to tuple first
lowercase :Dict = element.to_tuple()
if isinstance(element[0] , torch.Tensor ):
lowercase :int = tuple(el[self._loader_batch_index].unsqueeze(0 ) for el in element )
elif isinstance(element[0] , np.ndarray ):
lowercase :List[str] = tuple(np.expand_dims(el[self._loader_batch_index] , 0 ) for el in element )
continue
if k in {"hidden_states", "past_key_values", "attentions"} and isinstance(_lowerCAmelCase , _lowerCAmelCase ):
# Those are stored as lists of tensors so need specific unbatching.
if isinstance(element[0] , torch.Tensor ):
lowercase :Union[str, Any] = tuple(el[self._loader_batch_index].unsqueeze(0 ) for el in element )
elif isinstance(element[0] , np.ndarray ):
lowercase :List[Any] = tuple(np.expand_dims(el[self._loader_batch_index] , 0 ) for el in element )
continue
if element is None:
# This can happen for optional data that get passed around
lowercase :Optional[int] = None
elif isinstance(element[self._loader_batch_index] , torch.Tensor ):
                # Take the correct batch data, but make it look like batch_size=1
# For compatibility with other methods within transformers
lowercase :Optional[Any] = element[self._loader_batch_index].unsqueeze(0 )
elif isinstance(element[self._loader_batch_index] , np.ndarray ):
                # Take the correct batch data, but make it look like batch_size=1
# For compatibility with other methods within transformers
lowercase :Any = np.expand_dims(element[self._loader_batch_index] , 0 )
else:
# This is typically a list, so no need to `unsqueeze`.
lowercase :List[Any] = element[self._loader_batch_index]
# Recreate the element by reusing the original class to make it look
# batch_size=1
lowercase :List[Any] = self._loader_batch_data.__class__(_lowerCAmelCase )
self._loader_batch_index += 1
return result
def SCREAMING_SNAKE_CASE ( self: Optional[Any] ):
if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
# We are currently unrolling a batch so we just need to return
# the current item within a batch
return self.loader_batch_item()
# We're out of items within a batch
lowercase :Tuple = next(self.iterator )
lowercase :Dict = self.infer(_lowerCAmelCase , **self.params )
# We now have a batch of "inferred things".
if self.loader_batch_size is not None:
# Try to infer the size of the batch
if isinstance(_lowerCAmelCase , torch.Tensor ):
lowercase :List[str] = processed
else:
lowercase :Tuple = list(processed.keys() )[0]
lowercase :Optional[Any] = processed[key]
if isinstance(_lowerCAmelCase , _lowerCAmelCase ):
lowercase :Optional[int] = len(_lowerCAmelCase )
else:
lowercase :Dict = first_tensor.shape[0]
if 0 < observed_batch_size < self.loader_batch_size:
# could be last batch so we can't unroll as many
# elements.
lowercase :Tuple = observed_batch_size
# Setting internal index to unwrap the batch
lowercase :int = processed
lowercase :Optional[Any] = 0
return self.loader_batch_item()
else:
# We're not unrolling batches
return processed
class __lowerCAmelCase ( lowerCAmelCase):
def __init__( self: Union[str, Any] , _lowerCAmelCase: Tuple , _lowerCAmelCase: str , _lowerCAmelCase: str , _lowerCAmelCase: Optional[Any]=None ):
super().__init__(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
def __iter__( self: Tuple ):
lowercase :List[str] = iter(self.loader )
lowercase :str = None
return self
def SCREAMING_SNAKE_CASE ( self: Optional[Any] ):
if self.subiterator is None:
lowercase :List[Any] = self.infer(next(self.iterator ) , **self.params )
try:
# Try to return next item
lowercase :str = next(self.subiterator )
except StopIteration:
            # When a preprocess iterator ends, we can start looking at the next item
# ChunkIterator will keep feeding until ALL elements of iterator
# all have created their subiterator and have been iterating against.
#
# Another way to look at it, is we're basically flattening lists of lists
# into a single list, but with generators
lowercase :Tuple = self.infer(next(self.iterator ) , **self.params )
lowercase :Dict = next(self.subiterator )
return processed
class __lowerCAmelCase ( lowerCAmelCase):
def __iter__( self: str ):
lowercase :List[Any] = iter(self.loader )
return self
def SCREAMING_SNAKE_CASE ( self: str ):
# Extremely similar to PipelineIterator in its unpacking mechanism
# BUT, we have an extra required item which is the presence of `is_last`
# That is because everything is flattened by `PipelineChunkIterator` we
# need to keep track of how to regroup here in the original `process`
# boundaries so that `process` and `postprocess` see the same data.
# This iterator accumulates items (possibly while unbatching) until it
        # hits an `is_last` and then just passes it on to the caller.
lowercase :str = False
lowercase :int = []
if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
while self._loader_batch_index < self.loader_batch_size:
lowercase :str = self.loader_batch_item()
lowercase :int = item.pop("is_last" )
accumulator.append(_lowerCAmelCase )
if is_last:
return accumulator
while not is_last:
lowercase :str = self.infer(next(self.iterator ) , **self.params )
if self.loader_batch_size is not None:
if isinstance(_lowerCAmelCase , torch.Tensor ):
lowercase :Tuple = processed
else:
lowercase :Union[str, Any] = list(processed.keys() )[0]
lowercase :Any = processed[key]
if isinstance(_lowerCAmelCase , _lowerCAmelCase ):
lowercase :Dict = len(_lowerCAmelCase )
else:
lowercase :List[str] = first_tensor.shape[0]
if 0 < observed_batch_size < self.loader_batch_size:
# could be last batch so we can't unroll as many
# elements.
lowercase :Union[str, Any] = observed_batch_size
lowercase :str = processed
lowercase :Optional[int] = 0
while self._loader_batch_index < self.loader_batch_size:
lowercase :Any = self.loader_batch_item()
lowercase :int = item.pop("is_last" )
accumulator.append(_lowerCAmelCase )
if is_last:
return accumulator
else:
lowercase :Optional[Any] = processed
lowercase :str = item.pop("is_last" )
accumulator.append(_lowerCAmelCase )
return accumulator
class __lowerCAmelCase ( lowerCAmelCase):
def __init__( self: Union[str, Any] , _lowerCAmelCase: Dataset , _lowerCAmelCase: str ):
lowercase :Tuple = dataset
lowercase :Dict = key
def __len__( self: Any ):
return len(self.dataset )
def __getitem__( self: int , _lowerCAmelCase: int ):
return self.dataset[i][self.key]
class __lowerCAmelCase ( lowerCAmelCase):
def __init__( self: List[Any] , _lowerCAmelCase: Dataset , _lowerCAmelCase: str , _lowerCAmelCase: str ):
lowercase :Union[str, Any] = dataset
lowercase :Optional[int] = keya
lowercase :str = keya
def __len__( self: Optional[Any] ):
return len(self.dataset )
def __getitem__( self: Optional[Any] , _lowerCAmelCase: int ):
return {"text": self.dataset[i][self.keya], "text_pair": self.dataset[i][self.keya]}
| 158 | 0 |
'''simple docstring'''
import warnings
from typing import List
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import is_flax_available, is_tf_available, is_torch_available
class A__ ( UpperCamelCase ):
"""simple docstring"""
UpperCamelCase_ : int = ['''image_processor''', '''tokenizer''']
UpperCamelCase_ : Tuple = '''OwlViTImageProcessor'''
UpperCamelCase_ : Dict = ('''CLIPTokenizer''', '''CLIPTokenizerFast''')
def __init__( self : int , lowerCAmelCase__ : Tuple=None , lowerCAmelCase__ : Any=None , **lowerCAmelCase__ : List[Any] ) -> List[Any]:
"""simple docstring"""
_UpperCAmelCase : List[str] = None
if "feature_extractor" in kwargs:
warnings.warn(
"The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
" instead." , lowerCAmelCase__ , )
_UpperCAmelCase : str = kwargs.pop("feature_extractor" )
_UpperCAmelCase : List[str] = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("You need to specify an `image_processor`." )
if tokenizer is None:
raise ValueError("You need to specify a `tokenizer`." )
super().__init__(lowerCAmelCase__ , lowerCAmelCase__ )
def __call__( self : Dict , lowerCAmelCase__ : Tuple=None , lowerCAmelCase__ : List[Any]=None , lowerCAmelCase__ : Optional[Any]=None , lowerCAmelCase__ : List[str]="max_length" , lowerCAmelCase__ : Union[str, Any]="np" , **lowerCAmelCase__ : Optional[Any] ) -> List[Any]:
"""simple docstring"""
if text is None and query_images is None and images is None:
raise ValueError(
"You have to specify at least one text or query image or image. All three cannot be none." )
if text is not None:
if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) or (isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) and not isinstance(text[0] , lowerCAmelCase__ )):
_UpperCAmelCase : Tuple = [self.tokenizer(lowerCAmelCase__ , padding=lowerCAmelCase__ , return_tensors=lowerCAmelCase__ , **lowerCAmelCase__ )]
elif isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) and isinstance(text[0] , lowerCAmelCase__ ):
_UpperCAmelCase : List[str] = []
# Maximum number of queries across batch
_UpperCAmelCase : Optional[Any] = max([len(lowerCAmelCase__ ) for t in text] )
# Pad all batch samples to max number of text queries
for t in text:
if len(lowerCAmelCase__ ) != max_num_queries:
_UpperCAmelCase : List[str] = t + [" "] * (max_num_queries - len(lowerCAmelCase__ ))
_UpperCAmelCase : List[Any] = self.tokenizer(lowerCAmelCase__ , padding=lowerCAmelCase__ , return_tensors=lowerCAmelCase__ , **lowerCAmelCase__ )
encodings.append(lowerCAmelCase__ )
else:
raise TypeError("Input text should be a string, a list of strings or a nested list of strings" )
if return_tensors == "np":
_UpperCAmelCase : List[str] = np.concatenate([encoding["input_ids"] for encoding in encodings] , axis=0 )
_UpperCAmelCase : Any = np.concatenate([encoding["attention_mask"] for encoding in encodings] , axis=0 )
elif return_tensors == "jax" and is_flax_available():
import jax.numpy as jnp
_UpperCAmelCase : Any = jnp.concatenate([encoding["input_ids"] for encoding in encodings] , axis=0 )
_UpperCAmelCase : Dict = jnp.concatenate([encoding["attention_mask"] for encoding in encodings] , axis=0 )
elif return_tensors == "pt" and is_torch_available():
import torch
_UpperCAmelCase : str = torch.cat([encoding["input_ids"] for encoding in encodings] , dim=0 )
_UpperCAmelCase : str = torch.cat([encoding["attention_mask"] for encoding in encodings] , dim=0 )
elif return_tensors == "tf" and is_tf_available():
import tensorflow as tf
_UpperCAmelCase : Dict = tf.stack([encoding["input_ids"] for encoding in encodings] , axis=0 )
_UpperCAmelCase : int = tf.stack([encoding["attention_mask"] for encoding in encodings] , axis=0 )
else:
raise ValueError("Target return tensor type could not be returned" )
_UpperCAmelCase : Union[str, Any] = BatchEncoding()
_UpperCAmelCase : Optional[Any] = input_ids
_UpperCAmelCase : int = attention_mask
if query_images is not None:
_UpperCAmelCase : List[str] = BatchEncoding()
_UpperCAmelCase : Optional[Any] = self.image_processor(
lowerCAmelCase__ , return_tensors=lowerCAmelCase__ , **lowerCAmelCase__ ).pixel_values
_UpperCAmelCase : Dict = query_pixel_values
if images is not None:
_UpperCAmelCase : List[str] = self.image_processor(lowerCAmelCase__ , return_tensors=lowerCAmelCase__ , **lowerCAmelCase__ )
if text is not None and images is not None:
_UpperCAmelCase : Any = image_features.pixel_values
return encoding
elif query_images is not None and images is not None:
_UpperCAmelCase : List[Any] = image_features.pixel_values
return encoding
elif text is not None or query_images is not None:
return encoding
else:
return BatchEncoding(data=dict(**lowerCAmelCase__ ) , tensor_type=lowerCAmelCase__ )
def _lowerCAmelCase ( self : str , *lowerCAmelCase__ : Union[str, Any] , **lowerCAmelCase__ : Optional[int] ) -> Tuple:
"""simple docstring"""
return self.image_processor.post_process(*lowerCAmelCase__ , **lowerCAmelCase__ )
def _lowerCAmelCase ( self : Any , *lowerCAmelCase__ : str , **lowerCAmelCase__ : List[Any] ) -> List[Any]:
"""simple docstring"""
return self.image_processor.post_process_object_detection(*lowerCAmelCase__ , **lowerCAmelCase__ )
def _lowerCAmelCase ( self : Union[str, Any] , *lowerCAmelCase__ : Any , **lowerCAmelCase__ : List[Any] ) -> List[Any]:
"""simple docstring"""
return self.image_processor.post_process_image_guided_detection(*lowerCAmelCase__ , **lowerCAmelCase__ )
def _lowerCAmelCase ( self : Optional[Any] , *lowerCAmelCase__ : Dict , **lowerCAmelCase__ : int ) -> Optional[int]:
"""simple docstring"""
return self.tokenizer.batch_decode(*lowerCAmelCase__ , **lowerCAmelCase__ )
def _lowerCAmelCase ( self : Dict , *lowerCAmelCase__ : Any , **lowerCAmelCase__ : Optional[int] ) -> Tuple:
"""simple docstring"""
return self.tokenizer.decode(*lowerCAmelCase__ , **lowerCAmelCase__ )
@property
def _lowerCAmelCase ( self : int ) -> Union[str, Any]:
"""simple docstring"""
warnings.warn(
"`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead." , lowerCAmelCase__ , )
return self.image_processor_class
@property
def _lowerCAmelCase ( self : Optional[Any] ) -> Any:
"""simple docstring"""
warnings.warn(
"`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead." , lowerCAmelCase__ , )
        return self.image_processor
 | 145 |
'''simple docstring'''
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
if is_torch_available():
    from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
@require_torch
@require_sentencepiece
@require_tokenizers
class A__ ( unittest.TestCase ):
"""simple docstring"""
@slow
def _lowerCAmelCase ( self : str ) -> int:
"""simple docstring"""
        model = AutoModelForSeq2SeqLM.from_pretrained("google/mt5-small" , return_dict=True ).to(torch_device )
        tokenizer = AutoTokenizer.from_pretrained("google/mt5-small" )
        input_ids = tokenizer("Hello there" , return_tensors="pt" ).input_ids
        labels = tokenizer("Hi I am" , return_tensors="pt" ).input_ids
        loss = model(input_ids.to(torch_device ) , labels=labels.to(torch_device ) ).loss
        mtf_score = -(labels.shape[-1] * loss.item())
        EXPECTED_SCORE = -84.9127
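        # `loss` is the mean per-token cross-entropy, so scaling by the label
        # length and negating approximates the total log-likelihood of the
        # reference text, which is what the hard-coded score was recorded as.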
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 1e-4 )
 | 145 | 1 |
"""simple docstring"""
import random
from typing import Any
def SCREAMING_SNAKE_CASE_ ( snake_case : list )-> list:
    for _ in range(len(snake_case ) ):
        a = random.randint(0 , len(snake_case ) - 1 )
        b = random.randint(0 , len(snake_case ) - 1 )
        snake_case[a], snake_case[b] = snake_case[b], snake_case[a]
    return snake_case
if __name__ == "__main__":
    integers = [0, 1, 2, 3, 4, 5, 6, 7]
    strings = ['python', 'says', 'hello', '!']
print("""Fisher-Yates Shuffle:""")
print("""List""", integers, strings)
print("""FY Shuffle""", fisher_yates_shuffle(integers), fisher_yates_shuffle(strings))
| 355 |
"""simple docstring"""
from __future__ import annotations
Path = list[tuple[int, int]]

grid = [
    [0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0, 0],
    [1, 0, 1, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 1, 0, 0],
]

delta = ([-1, 0], [0, -1], [1, 0], [0, 1])  # up, left, down, right
class Node:
    def __init__(self, pos_x, pos_y, goal_x, goal_y, g_cost, parent):
        self.pos_x = pos_x
        self.pos_y = pos_y
        self.pos = (pos_y, pos_x)
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.g_cost = g_cost
        self.parent = parent
        self.f_cost = self.calculate_heuristic()

    def calculate_heuristic(self):
        # Manhattan distance from this node to the goal
        dx = abs(self.pos_x - self.goal_x)
        dy = abs(self.pos_y - self.goal_y)
        return dx + dy

    def __lt__(self, other):
        return self.f_cost < other.f_cost


class GreedyBestFirst:
    def __init__(self, start, goal):
        self.start = Node(start[1], start[0], goal[1], goal[0], 0, None)
        self.target = Node(goal[1], goal[0], goal[1], goal[0], 9_99_99, None)
        self.open_nodes = [self.start]
        self.closed_nodes = []
        self.reached = False

    def search(self):
        while self.open_nodes:
            # Open Nodes are sorted using __lt__
            self.open_nodes.sort()
            current_node = self.open_nodes.pop(0)
            if current_node.pos == self.target.pos:
                self.reached = True
                return self.retrace_path(current_node)
            self.closed_nodes.append(current_node)
            successors = self.get_successors(current_node)
            for child_node in successors:
                if child_node in self.closed_nodes:
                    continue
                if child_node not in self.open_nodes:
                    self.open_nodes.append(child_node)
                else:
                    # retrieve the best current path
                    better_node = self.open_nodes.pop(self.open_nodes.index(child_node))
                    if child_node.g_cost < better_node.g_cost:
                        self.open_nodes.append(child_node)
                    else:
                        self.open_nodes.append(better_node)
        if not self.reached:
            return [self.start.pos]
        return None

    def get_successors(self, parent):
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]
            if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(grid) - 1):
                continue
            if grid[pos_y][pos_x] != 0:
                continue
            successors.append(
                Node(pos_x, pos_y, self.target.pos_y, self.target.pos_x, parent.g_cost + 1, parent)
            )
        return successors

    def retrace_path(self, node):
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x))
            current_node = current_node.parent
        path.reverse()
        return path
if __name__ == "__main__":
    init = (0, 0)
    goal = (len(grid) - 1, len(grid[0]) - 1)
    for elem in grid:
        print(elem)
    print("""------""")
    greedy_bf = GreedyBestFirst(init, goal)
    path = greedy_bf.search()
    if path:
        for pos_x, pos_y in path:
            grid[pos_x][pos_y] = 2
for elem in grid:
print(elem)
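# Note (not part of the original file): GreedyBestFirst orders its frontier
# purely by the heuristic (f_cost = h, the Manhattan distance above); A* on
# the same grid would instead sort the open list by g_cost + h.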
| 80 | 0 |
"""simple docstring"""
from dataclasses import dataclass
from typing import Optional
import torch
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .modeling_utils import ModelMixin
@dataclass
class lowercase ( __UpperCAmelCase):
__lowerCAmelCase : torch.FloatTensor
class lowercase ( __UpperCAmelCase , __UpperCAmelCase):
@register_to_config
def __init__( self : Any , _lowerCamelCase : int = 16 , _lowerCamelCase : int = 88 , _lowerCamelCase : Optional[int] = None , _lowerCamelCase : Optional[int] = None , _lowerCamelCase : int = 1 , _lowerCamelCase : float = 0.0 , _lowerCamelCase : int = 32 , _lowerCamelCase : Optional[int] = None , _lowerCamelCase : bool = False , _lowerCamelCase : Optional[int] = None , _lowerCamelCase : str = "geglu" , _lowerCamelCase : bool = True , _lowerCamelCase : bool = True , ):
"""simple docstring"""
super().__init__()
A_ : int = num_attention_heads
A_ : Union[str, Any] = attention_head_dim
A_ : Optional[int] = num_attention_heads * attention_head_dim
A_ : Any = in_channels
A_ : Tuple = torch.nn.GroupNorm(num_groups=_lowerCamelCase , num_channels=_lowerCamelCase , eps=1E-6 , affine=_lowerCamelCase )
A_ : Optional[int] = nn.Linear(_lowerCamelCase , _lowerCamelCase )
# 3. Define transformers blocks
A_ : Any = nn.ModuleList(
[
BasicTransformerBlock(
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , dropout=_lowerCamelCase , cross_attention_dim=_lowerCamelCase , activation_fn=_lowerCamelCase , attention_bias=_lowerCamelCase , double_self_attention=_lowerCamelCase , norm_elementwise_affine=_lowerCamelCase , )
for d in range(_lowerCamelCase )
] )
A_ : Optional[int] = nn.Linear(_lowerCamelCase , _lowerCamelCase )
def a_ ( self : int , _lowerCamelCase : List[Any] , _lowerCamelCase : Any=None , _lowerCamelCase : List[Any]=None , _lowerCamelCase : int=None , _lowerCamelCase : Tuple=1 , _lowerCamelCase : Optional[int]=None , _lowerCamelCase : bool = True , ):
"""simple docstring"""
A_ , A_ , A_ , A_ : List[Any] = hidden_states.shape
A_ : Union[str, Any] = batch_frames // num_frames
A_ : List[str] = hidden_states
A_ : Tuple = hidden_states[None, :].reshape(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
A_ : str = hidden_states.permute(0 , 2 , 1 , 3 , 4 )
A_ : List[str] = self.norm(_lowerCamelCase )
A_ : List[Any] = hidden_states.permute(0 , 3 , 4 , 2 , 1 ).reshape(batch_size * height * width , _lowerCamelCase , _lowerCamelCase )
A_ : Dict = self.proj_in(_lowerCamelCase )
# 2. Blocks
for block in self.transformer_blocks:
A_ : Optional[int] = block(
_lowerCamelCase , encoder_hidden_states=_lowerCamelCase , timestep=_lowerCamelCase , cross_attention_kwargs=_lowerCamelCase , class_labels=_lowerCamelCase , )
# 3. Output
A_ : List[str] = self.proj_out(_lowerCamelCase )
A_ : Any = (
hidden_states[None, None, :]
.reshape(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
.permute(0 , 3 , 4 , 1 , 2 )
.contiguous()
)
A_ : List[str] = hidden_states.reshape(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
A_ : List[str] = hidden_states + residual
if not return_dict:
return (output,)
return TransformerTemporalModelOutput(sample=_lowerCamelCase )
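# A minimal sketch (not part of the original file) of the frame/batch reshaping
# in forward() above: (batch * frames, C, H, W) -> (batch, C, frames, H, W).
if __name__ == "__main__":
    batch_frames, channels, height, width, num_frames = 8, 4, 16, 16, 2
    x = torch.randn(batch_frames, channels, height, width)
    batch_size = batch_frames // num_frames
    x = x[None, :].reshape(batch_size, num_frames, channels, height, width)
    x = x.permute(0, 2, 1, 3, 4)
    print(x.shape)  # torch.Size([4, 4, 2, 16, 16])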
| 167 |
"""simple docstring"""
from __future__ import annotations
import inspect
import unittest
from math import floor
import numpy as np
from transformers import CvtConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFCvtForImageClassification, TFCvtModel
from transformers.models.cvt.modeling_tf_cvt import TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class lowercase ( __UpperCAmelCase):
def a_ ( self : List[str] ):
"""simple docstring"""
A_ : Optional[int] = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(_lowerCamelCase , '''embed_dim''' ) )
self.parent.assertTrue(hasattr(_lowerCamelCase , '''num_heads''' ) )
class lowercase :
def __init__( self : Tuple , _lowerCamelCase : List[Any] , _lowerCamelCase : Union[str, Any]=13 , _lowerCamelCase : List[str]=64 , _lowerCamelCase : int=3 , _lowerCamelCase : int=[16, 48, 96] , _lowerCamelCase : Dict=[1, 3, 6] , _lowerCamelCase : List[Any]=[1, 2, 10] , _lowerCamelCase : Optional[int]=[7, 3, 3] , _lowerCamelCase : Optional[int]=[4, 2, 2] , _lowerCamelCase : Union[str, Any]=[2, 1, 1] , _lowerCamelCase : str=[2, 2, 2] , _lowerCamelCase : Tuple=[False, False, True] , _lowerCamelCase : Union[str, Any]=[0.0, 0.0, 0.0] , _lowerCamelCase : Optional[Any]=0.02 , _lowerCamelCase : Dict=1E-12 , _lowerCamelCase : Union[str, Any]=True , _lowerCamelCase : Optional[int]=True , _lowerCamelCase : List[Any]=2 , ):
"""simple docstring"""
A_ : Tuple = parent
A_ : Dict = batch_size
A_ : str = image_size
A_ : Dict = patch_sizes
A_ : Optional[int] = patch_stride
A_ : Optional[int] = patch_padding
A_ : Optional[Any] = is_training
A_ : Union[str, Any] = use_labels
A_ : str = num_labels
A_ : Optional[int] = num_channels
A_ : str = embed_dim
A_ : Tuple = num_heads
A_ : List[Any] = stride_kv
A_ : str = depth
A_ : Dict = cls_token
A_ : Optional[Any] = attention_drop_rate
A_ : str = initializer_range
A_ : Tuple = layer_norm_eps
def a_ ( self : Tuple ):
"""simple docstring"""
A_ : List[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
A_ : List[Any] = None
if self.use_labels:
# create a random int32 tensor of given shape
A_ : Tuple = ids_tensor([self.batch_size] , self.num_labels )
A_ : Tuple = self.get_config()
return config, pixel_values, labels
def a_ ( self : Any ):
"""simple docstring"""
return CvtConfig(
image_size=self.image_size , num_labels=self.num_labels , num_channels=self.num_channels , embed_dim=self.embed_dim , num_heads=self.num_heads , patch_sizes=self.patch_sizes , patch_padding=self.patch_padding , patch_stride=self.patch_stride , stride_kv=self.stride_kv , depth=self.depth , cls_token=self.cls_token , attention_drop_rate=self.attention_drop_rate , initializer_range=self.initializer_range , )
def a_ ( self : List[str] , _lowerCamelCase : List[Any] , _lowerCamelCase : Union[str, Any] , _lowerCamelCase : Union[str, Any] ):
"""simple docstring"""
A_ : str = TFCvtModel(config=_lowerCamelCase )
A_ : Any = model(_lowerCamelCase , training=_lowerCamelCase )
A_ : int = (self.image_size, self.image_size)
A_ , A_ : Optional[int] = image_size[0], image_size[1]
for i in range(len(self.depth ) ):
A_ : List[str] = floor(((height + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 )
A_ : int = floor(((width + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.embed_dim[-1], height, width) )
def a_ ( self : Tuple , _lowerCamelCase : Optional[int] , _lowerCamelCase : Optional[Any] , _lowerCamelCase : Tuple ):
"""simple docstring"""
A_ : Any = self.num_labels
A_ : str = TFCvtForImageClassification(_lowerCamelCase )
A_ : List[Any] = model(_lowerCamelCase , labels=_lowerCamelCase , training=_lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def a_ ( self : Tuple ):
"""simple docstring"""
A_ : Union[str, Any] = self.prepare_config_and_inputs()
A_ , A_ , A_ : Tuple = config_and_inputs
A_ : Any = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_tf
class lowercase ( __UpperCAmelCase , __UpperCAmelCase , unittest.TestCase):
__lowerCAmelCase : str = (TFCvtModel, TFCvtForImageClassification) if is_tf_available() else ()
__lowerCAmelCase : Tuple = (
{"""feature-extraction""": TFCvtModel, """image-classification""": TFCvtForImageClassification}
if is_tf_available()
else {}
)
__lowerCAmelCase : int = False
__lowerCAmelCase : List[Any] = False
__lowerCAmelCase : List[Any] = False
__lowerCAmelCase : List[str] = False
__lowerCAmelCase : List[str] = False
def a_ ( self : int ):
"""simple docstring"""
A_ : Dict = TFCvtModelTester(self )
A_ : int = TFCvtConfigTester(self , config_class=_lowerCamelCase , has_text_modality=_lowerCamelCase , hidden_size=37 )
def a_ ( self : Any ):
"""simple docstring"""
self.config_tester.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
@unittest.skip(reason='''Cvt does not output attentions''' )
def a_ ( self : str ):
"""simple docstring"""
pass
@unittest.skip(reason='''Cvt does not use inputs_embeds''' )
def a_ ( self : Tuple ):
"""simple docstring"""
pass
@unittest.skip(reason='''Cvt does not support input and output embeddings''' )
def a_ ( self : Tuple ):
"""simple docstring"""
pass
@unittest.skipIf(
not is_tf_available() or len(tf.config.list_physical_devices('''GPU''' ) ) == 0 , reason='''TF does not support backprop for grouped convolutions on CPU.''' , )
def a_ ( self : Tuple ):
"""simple docstring"""
super().test_dataset_conversion()
@unittest.skipIf(
not is_tf_available() or len(tf.config.list_physical_devices('''GPU''' ) ) == 0 , reason='''TF does not support backprop for grouped convolutions on CPU.''' , )
@slow
def a_ ( self : Dict ):
"""simple docstring"""
super().test_keras_fit()
@unittest.skip(reason='''Get `Failed to determine best cudnn convolution algo.` error after using TF 2.12+cuda 11.8''' )
def a_ ( self : int ):
"""simple docstring"""
A_ : List[Any] = tf.keras.mixed_precision.Policy('''mixed_float16''' )
tf.keras.mixed_precision.set_global_policy(_lowerCamelCase )
super().test_keras_fit()
tf.keras.mixed_precision.set_global_policy('''float32''' )
def a_ ( self : str ):
"""simple docstring"""
A_ , A_ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A_ : Optional[Any] = model_class(_lowerCamelCase )
A_ : Optional[Any] = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
A_ : Dict = [*signature.parameters.keys()]
A_ : Optional[int] = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , _lowerCamelCase )
def a_ ( self : int ):
"""simple docstring"""
def check_hidden_states_output(_lowerCamelCase : int , _lowerCamelCase : Any , _lowerCamelCase : Union[str, Any] ):
A_ : Union[str, Any] = model_class(_lowerCamelCase )
A_ : Tuple = model(**self._prepare_for_class(_lowerCamelCase , _lowerCamelCase ) )
A_ : Optional[int] = outputs.hidden_states
A_ : Union[str, Any] = len(self.model_tester.depth )
self.assertEqual(len(_lowerCamelCase ) , _lowerCamelCase )
# verify the first hidden states (first block)
self.assertListEqual(
list(hidden_states[0].shape[-3:] ) , [
self.model_tester.embed_dim[0],
self.model_tester.image_size // 4,
self.model_tester.image_size // 4,
] , )
A_ , A_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A_ : str = True
check_hidden_states_output(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
A_ : Dict = True
check_hidden_states_output(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
def a_ ( self : Tuple ):
"""simple docstring"""
A_ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_lowerCamelCase )
def a_ ( self : int ):
"""simple docstring"""
A_ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_lowerCamelCase )
@slow
def a_ ( self : List[Any] ):
"""simple docstring"""
for model_name in TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A_ : List[Any] = TFCvtModel.from_pretrained(_lowerCamelCase )
self.assertIsNotNone(_lowerCamelCase )
def lowercase_ ( ):
"""simple docstring"""
A_ : Union[str, Any] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_tf
@require_vision
class lowercase ( unittest.TestCase):
@cached_property
def a_ ( self : List[Any] ):
"""simple docstring"""
return AutoImageProcessor.from_pretrained(TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
@slow
def a_ ( self : Any ):
"""simple docstring"""
A_ : Optional[int] = TFCvtForImageClassification.from_pretrained(TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
A_ : int = self.default_image_processor
A_ : str = prepare_img()
A_ : Optional[int] = image_processor(images=_lowerCamelCase , return_tensors='''tf''' )
# forward pass
A_ : Dict = model(**_lowerCamelCase )
# verify the logits
A_ : Union[str, Any] = tf.TensorShape((1, 10_00) )
self.assertEqual(outputs.logits.shape , _lowerCamelCase )
A_ : Tuple = tf.constant([0.9285, 0.9015, -0.3150] )
self.assertTrue(np.allclose(outputs.logits[0, :3].numpy() , _lowerCamelCase , atol=1E-4 ) )
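# A minimal sketch (not part of the original tests) of the convolution output
# size computed in create_and_check_model above: floor((size + 2*pad - kernel)
# / stride) + 1. For the first Cvt stage (64px input, kernel 7, stride 4,
# padding 2) this gives 16:
assert floor((64 + 2 * 2 - 7) / 4) + 1 == 16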
| 167 | 1 |
import base64


def baseaa_encode( a__ : str ) -> bytes:
    return base64.b85encode(a__.encode("""utf-8""" ) )


def baseaa_decode( a__ : bytes ) -> str:
    return base64.b85decode(a__ ).decode("""utf-8""" )


if __name__ == "__main__":
    test = '''Hello World!'''
    encoded = baseaa_encode(test)
    print(encoded)
    decoded = baseaa_decode(encoded)
print(decoded)
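    # A minimal sketch (not part of the original file): the two helpers invert
    # each other, so any text survives an encode/decode round trip.
    assert baseaa_decode(baseaa_encode(test)) == test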
| 261 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_batched,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
_A = logging.get_logger(__name__)
class lowercase_(BaseImageProcessor):
    """Image processor that resizes, center-crops, rescales and normalizes images."""

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize=True,
        size=None,
        resample=PILImageResampling.BICUBIC,
        do_center_crop=True,
        do_rescale=True,
        rescale_factor=1 / 255,
        crop_size=None,
        do_normalize=True,
        image_mean=None,
        image_std=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 224, "width": 224}
        size = get_size_dict(size)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, default_to_square=True, param_name="crop_size")

        self.do_resize = do_resize
        self.do_rescale = do_rescale
        self.do_normalize = do_normalize
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.size = size
        self.resample = resample
        self.rescale_factor = rescale_factor
        self.image_mean = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_DEFAULT_STD

    def resize(self, image, size, resample=PILImageResampling.BILINEAR, data_format=None, **kwargs):
        """Resize to (size["height"], size["width"]), or so the shortest edge matches size["shortest_edge"]."""
        size = get_size_dict(size)
        if "shortest_edge" in size:
            output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
            # size = get_resize_output_image_size(image, size["shortest_edge"], size["longest_edge"])
        elif "height" in size and "width" in size:
            output_size = (size["height"], size["width"])
        else:
            raise ValueError(f"Size must contain 'height' and 'width' keys or 'shortest_edge' key. Got {size.keys()}")
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(self, image, size, data_format=None, **kwargs):
        """Center-crop an image to (size["height"], size["width"])."""
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` parameter must contain the keys (height, width). Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(self, image, scale, data_format=None, **kwargs):
        """Rescale pixel values by `scale`, typically 1/255."""
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image, mean, std, data_format=None, **kwargs):
        """Normalize an image with the given channel-wise mean and standard deviation."""
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images,
        do_resize=None,
        size=None,
        resample=None,
        do_center_crop=None,
        crop_size=None,
        do_rescale=None,
        rescale_factor=None,
        do_normalize=None,
        image_mean=None,
        image_std=None,
        return_tensors=None,
        data_format=ChannelDimension.FIRST,
        **kwargs,
    ):
        """Apply the configured transformations to one image or a list of images and return a `BatchFeature`."""
        do_resize = do_resize if do_resize is not None else self.do_resize
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size", default_to_square=True)
        resample = resample if resample is not None else self.resample
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size = size if size is not None else self.size
        size = get_size_dict(size)

        if not is_batched(images):
            images = [images]

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
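A minimal usage sketch for the processor above, assuming Pillow and numpy are installed and the class `lowercase_` (name kept from the source) is in scope; the 224x224 output shape follows from the default `size` and `crop_size`:

import numpy as np
from PIL import Image

# Random RGB image; any PIL.Image, numpy array, or framework tensor is accepted.
image = Image.fromarray(np.random.randint(0, 256, (300, 400, 3), dtype=np.uint8))

processor = lowercase_()  # defaults: resize + center-crop to 224x224, rescale by 1/255, normalize
batch = processor.preprocess(image, return_tensors="np")
print(batch["pixel_values"].shape)  # (1, 3, 224, 224), channels-first layout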
| 261 | 1 |
import copy
import tempfile
import unittest
from huggingface_hub import HfFolder, delete_repo
from parameterized import parameterized
from requests.exceptions import HTTPError
from transformers import AutoConfig, GenerationConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
class GenerationConfigTest(unittest.TestCase):
    @parameterized.expand([(None,), ("foo.json",)])
    def test_save_load_config(self, config_name):
        config = GenerationConfig(
            do_sample=True,
            temperature=0.7,
            length_penalty=1.0,
            bad_words_ids=[[1, 2, 3], [4, 5]],
        )
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(tmp_dir, config_name=config_name)
            loaded_config = GenerationConfig.from_pretrained(tmp_dir, config_name=config_name)

        # Checks parameters that were specified
        self.assertEqual(loaded_config.do_sample, True)
        self.assertEqual(loaded_config.temperature, 0.7)
        self.assertEqual(loaded_config.length_penalty, 1.0)
        self.assertEqual(loaded_config.bad_words_ids, [[1, 2, 3], [4, 5]])

        # Checks parameters that were not specified (defaults)
        self.assertEqual(loaded_config.top_k, 50)
        self.assertEqual(loaded_config.max_length, 20)
        self.assertEqual(loaded_config.max_time, None)

    def test_from_model_config(self):
        model_config = AutoConfig.from_pretrained("gpt2")
        generation_config_from_model = GenerationConfig.from_model_config(model_config)
        default_generation_config = GenerationConfig()

        # The generation config has loaded a few non-default parameters from the model config
        self.assertNotEqual(generation_config_from_model, default_generation_config)

        # One of those parameters is eos_token_id -- check if it matches
        self.assertNotEqual(generation_config_from_model.eos_token_id, default_generation_config.eos_token_id)
        self.assertEqual(generation_config_from_model.eos_token_id, model_config.eos_token_id)

    def test_update(self):
        generation_config = GenerationConfig()
        update_kwargs = {
            "max_new_tokens": 1024,
            "foo": "bar",
        }
        update_kwargs_copy = copy.deepcopy(update_kwargs)
        unused_kwargs = generation_config.update(**update_kwargs)

        # update_kwargs was not modified (no side effects)
        self.assertEqual(update_kwargs, update_kwargs_copy)

        # update_kwargs was used to update the config on valid attributes
        self.assertEqual(generation_config.max_new_tokens, 1024)

        # `.update()` returns a dictionary of unused kwargs
        self.assertEqual(unused_kwargs, {"foo": "bar"})

    def test_initialize_new_kwargs(self):
        generation_config = GenerationConfig()
        generation_config.foo = "bar"

        with tempfile.TemporaryDirectory("test-generation-config") as tmp_dir:
            generation_config.save_pretrained(tmp_dir)

            new_config = GenerationConfig.from_pretrained(tmp_dir)
            # update_kwargs was used to update the config on valid attributes
            self.assertEqual(new_config.foo, "bar")

        generation_config = GenerationConfig.from_model_config(new_config)
        assert not hasattr(generation_config, "foo")  # no new kwargs should be initialized if from config

    def test_kwarg_init(self):
        default_config = GenerationConfig()
        self.assertEqual(default_config.temperature, 1.0)
        self.assertEqual(default_config.do_sample, False)
        self.assertEqual(default_config.num_beams, 1)

        config = GenerationConfig(
            do_sample=True,
            temperature=0.7,
            length_penalty=1.0,
            bad_words_ids=[[1, 2, 3], [4, 5]],
        )
        self.assertEqual(config.temperature, 0.7)
        self.assertEqual(config.do_sample, True)
        self.assertEqual(config.num_beams, 1)

        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(tmp_dir)
            loaded_config = GenerationConfig.from_pretrained(tmp_dir, temperature=1.0)

        self.assertEqual(loaded_config.temperature, 1.0)
        self.assertEqual(loaded_config.do_sample, True)
        self.assertEqual(loaded_config.num_beams, 1)  # default value


@is_staging_test
class ConfigPushToHubTester(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls):
        try:
            delete_repo(token=cls._token, repo_id="test-generation-config")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="valid_org/test-generation-config-org")
        except HTTPError:
            pass

    def test_push_to_hub(self):
        config = GenerationConfig(
            do_sample=True,
            temperature=0.7,
            length_penalty=1.0,
        )
        config.push_to_hub("test-generation-config", use_auth_token=self._token)

        new_config = GenerationConfig.from_pretrained(f"{USER}/test-generation-config")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="test-generation-config")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(
                tmp_dir, repo_id="test-generation-config", push_to_hub=True, use_auth_token=self._token
            )

        new_config = GenerationConfig.from_pretrained(f"{USER}/test-generation-config")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

    def test_push_to_hub_in_organization(self):
        config = GenerationConfig(
            do_sample=True,
            temperature=0.7,
            length_penalty=1.0,
        )
        config.push_to_hub("valid_org/test-generation-config-org", use_auth_token=self._token)

        new_config = GenerationConfig.from_pretrained("valid_org/test-generation-config-org")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="valid_org/test-generation-config-org")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(
                tmp_dir, repo_id="valid_org/test-generation-config-org", push_to_hub=True, use_auth_token=self._token
            )

        new_config = GenerationConfig.from_pretrained("valid_org/test-generation-config-org")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))
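The save/load round-trip these tests exercise can be reproduced standalone; a minimal sketch, assuming only that `transformers` is installed (no Hub access needed):

import tempfile

from transformers import GenerationConfig

config = GenerationConfig(do_sample=True, temperature=0.7, length_penalty=1.0)
with tempfile.TemporaryDirectory() as tmp_dir:
    config.save_pretrained(tmp_dir)
    loaded = GenerationConfig.from_pretrained(tmp_dir)

assert loaded.temperature == 0.7  # explicitly set values survive the round-trip
assert loaded.top_k == 50         # unset values fall back to library defaults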
| 271 |
from typing import Any, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from torch.utils.data import DistributedSampler, RandomSampler
from transformers import PreTrainedModel, Trainer, logging
from transformers.integrations import is_fairscale_available
from transformers.models.fsmt.configuration_fsmt import FSMTConfig
from transformers.optimization import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from transformers.trainer_pt_utils import get_tpu_sampler
from transformers.training_args import ParallelMode
from transformers.utils import is_torch_tpu_available
if is_fairscale_available():
from fairscale.optim import OSS
logger = logging.get_logger(__name__)

arg_to_scheduler = {
    "linear": get_linear_schedule_with_warmup,
    "cosine": get_cosine_schedule_with_warmup,
    "cosine_w_restarts": get_cosine_with_hard_restarts_schedule_with_warmup,
    "polynomial": get_polynomial_decay_schedule_with_warmup,
    "constant": get_constant_schedule,
    "constant_w_warmup": get_constant_schedule_with_warmup,
}


class Seq2SeqTrainer(Trainer):
    def __init__(self, config=None, data_args=None, *args, **kwargs):
        super().__init__(*args, **kwargs)

        if config is None:
            assert isinstance(self.model, PreTrainedModel), (
                "If no `config` is passed the model to be trained has to be of type `PreTrainedModel`, but is"
                f" {self.model.__class__}"
            )
            self.config = self.model.config
        else:
            self.config = config

        self.data_args = data_args
        self.vocab_size = self.config.tgt_vocab_size if isinstance(self.config, FSMTConfig) else self.config.vocab_size

        if self.args.label_smoothing != 0 or (self.data_args is not None and self.data_args.ignore_pad_token_for_loss):
            assert self.config.pad_token_id is not None, (
                "Make sure that `config.pad_token_id` is correctly defined when ignoring `pad_token` for loss"
                " calculation or doing label smoothing."
            )

        if self.config.pad_token_id is None and self.config.eos_token_id is not None:
            logger.warning(
                f"The `config.pad_token_id` is `None`. Using `config.eos_token_id` = {self.config.eos_token_id} for"
                " padding."
            )

        if self.args.label_smoothing == 0:
            self.loss_fn = torch.nn.CrossEntropyLoss(ignore_index=self.config.pad_token_id)
        else:
            # dynamically import label_smoothed_nll_loss
            from utils import label_smoothed_nll_loss

            self.loss_fn = label_smoothed_nll_loss

    def create_optimizer_and_scheduler(self, num_training_steps: int):
        """Set up the optimizer and learning-rate scheduler, unless they were already passed in."""
        if self.optimizer is None:
            no_decay = ["bias", "LayerNorm.weight"]
            optimizer_grouped_parameters = [
                {
                    "params": [p for n, p in self.model.named_parameters() if not any(nd in n for nd in no_decay)],
                    "weight_decay": self.args.weight_decay,
                },
                {
                    "params": [p for n, p in self.model.named_parameters() if any(nd in n for nd in no_decay)],
                    "weight_decay": 0.0,
                },
            ]
            if self.args.adafactor:
                optimizer_cls = Adafactor
                optimizer_kwargs = {"scale_parameter": False, "relative_step": False}
            else:
                optimizer_cls = AdamW
                optimizer_kwargs = {
                    "betas": (self.args.adam_beta1, self.args.adam_beta2),
                    "eps": self.args.adam_epsilon,
                }
            optimizer_kwargs["lr"] = self.args.learning_rate
            if self.sharded_ddp:
                self.optimizer = OSS(
                    params=optimizer_grouped_parameters,
                    optim=optimizer_cls,
                    **optimizer_kwargs,
                )
            else:
                self.optimizer = optimizer_cls(optimizer_grouped_parameters, **optimizer_kwargs)

        if self.lr_scheduler is None:
            self.lr_scheduler = self._get_lr_scheduler(num_training_steps)
        else:  # ignoring --lr_scheduler
            logger.warning("scheduler is passed to `Seq2SeqTrainer`, `--lr_scheduler` arg is ignored.")

    def _get_lr_scheduler(self, num_training_steps):
        schedule_func = arg_to_scheduler[self.args.lr_scheduler]
        if self.args.lr_scheduler == "constant":
            scheduler = schedule_func(self.optimizer)
        elif self.args.lr_scheduler == "constant_w_warmup":
            scheduler = schedule_func(self.optimizer, num_warmup_steps=self.args.warmup_steps)
        else:
            scheduler = schedule_func(
                self.optimizer, num_warmup_steps=self.args.warmup_steps, num_training_steps=num_training_steps
            )
        return scheduler

    def _get_train_sampler(self):
        if isinstance(self.train_dataset, torch.utils.data.IterableDataset):
            return None
        elif is_torch_tpu_available():
            return get_tpu_sampler(self.train_dataset)
        else:
            if self.args.sortish_sampler:
                self.train_dataset.make_sortish_sampler(
                    self.args.per_device_train_batch_size,
                    distributed=(self.args.parallel_mode == ParallelMode.DISTRIBUTED),
                )

            return (
                RandomSampler(self.train_dataset)
                if self.args.local_rank == -1
                else DistributedSampler(self.train_dataset)
            )

    def _compute_loss(self, model, inputs, labels):
        if self.args.label_smoothing == 0:
            if self.data_args is not None and self.data_args.ignore_pad_token_for_loss:
                # force training to ignore pad token
                logits = model(**inputs, use_cache=False)[0]
                loss = self.loss_fn(logits.view(-1, logits.shape[-1]), labels.view(-1))
            else:
                # compute usual loss via models
                loss, logits = model(**inputs, labels=labels, use_cache=False)[:2]
        else:
            # compute label smoothed loss
            logits = model(**inputs, use_cache=False)[0]
            lprobs = torch.nn.functional.log_softmax(logits, dim=-1)
            loss, _ = self.loss_fn(lprobs, labels, self.args.label_smoothing, ignore_index=self.config.pad_token_id)
        return loss, logits

    def compute_loss(self, model, inputs):
        labels = inputs.pop("labels")
        loss, _ = self._compute_loss(model, inputs, labels)
        return loss

    def prediction_step(
        self,
        model: nn.Module,
        inputs: Dict[str, Union[torch.Tensor, Any]],
        prediction_loss_only: bool,
        ignore_keys: Optional[List[str]] = None,
    ) -> Tuple[Optional[float], Optional[torch.Tensor], Optional[torch.Tensor]]:
        """Perform an evaluation step, optionally generating tokens when `predict_with_generate` is set."""
        inputs = self._prepare_inputs(inputs)

        gen_kwargs = {
            "max_length": self.data_args.val_max_target_length
            if self.data_args is not None
            else self.config.max_length,
            "num_beams": self.data_args.eval_beams if self.data_args is not None else self.config.num_beams,
        }

        if self.args.predict_with_generate and not self.args.prediction_loss_only:
            generated_tokens = self.model.generate(
                inputs["input_ids"],
                attention_mask=inputs["attention_mask"],
                **gen_kwargs,
            )
            # in case the batch is shorter than max length, the output should be padded
            if generated_tokens.shape[-1] < gen_kwargs["max_length"]:
                generated_tokens = self._pad_tensors_to_max_len(generated_tokens, gen_kwargs["max_length"])

        labels = inputs.pop("labels")
        with torch.no_grad():
            # compute loss on predict data
            loss, logits = self._compute_loss(model, inputs, labels)

        loss = loss.mean().detach()
        if self.args.prediction_loss_only:
            return (loss, None, None)

        logits = generated_tokens if self.args.predict_with_generate else logits

        if labels.shape[-1] < gen_kwargs["max_length"]:
            labels = self._pad_tensors_to_max_len(labels, gen_kwargs["max_length"])

        return (loss, logits, labels)

    def _pad_tensors_to_max_len(self, tensor, max_length):
        # If the PAD token is not defined, at least the EOS token has to be defined.
        pad_token_id = self.config.pad_token_id if self.config.pad_token_id is not None else self.config.eos_token_id

        if pad_token_id is None:
            raise ValueError(
                "Make sure that either `config.pad_token_id` or `config.eos_token_id` is defined if tensor has to be"
                f" padded to `max_length`={max_length}"
            )

        padded_tensor = pad_token_id * torch.ones(
            (tensor.shape[0], max_length), dtype=tensor.dtype, device=tensor.device
        )
        padded_tensor[:, : tensor.shape[-1]] = tensor
        return padded_tensor
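The `_pad_tensors_to_max_len` helper above is the one non-obvious tensor manipulation here; a standalone sketch of its behavior, assuming a pad token id of 0:

import torch

pad_token_id = 0
tensor = torch.tensor([[5, 6, 7], [8, 9, 10]])  # e.g. generated token ids, batch of 2
max_length = 5

# Fill a (batch, max_length) tensor with the pad id, then copy the real tokens in.
padded = pad_token_id * torch.ones((tensor.shape[0], max_length), dtype=tensor.dtype)
padded[:, : tensor.shape[-1]] = tensor
print(padded)  # tensor([[ 5,  6,  7,  0,  0], [ 8,  9, 10,  0,  0]])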
| 271 | 1 |
import os
import sys
import unittest
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))

import check_dummies  # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init  # noqa: E402

# Align TRANSFORMERS_PATH in check_dummies with the current path
check_dummies.PATH_TO_TRANSFORMERS = os.path.join(git_repo_path, "src", "transformers")

DUMMY_CONSTANT = """
{0} = None
"""

DUMMY_CLASS = """
class {0}(metaclass=DummyObject):
    _backends = {1}

    def __init__(self, *args, **kwargs):
        requires_backends(self, {1})
"""

DUMMY_FUNCTION = """
def {0}(*args, **kwargs):
    requires_backends({0}, {1})
"""


class CheckDummiesTester(unittest.TestCase):
    def test_find_backend(self):
        no_backend = find_backend('    _import_structure["models.albert"].append("AlbertTokenizerFast")')
        self.assertIsNone(no_backend)

        simple_backend = find_backend("    if not is_tokenizers_available():")
        self.assertEqual(simple_backend, "tokenizers")

        backend_with_underscore = find_backend("    if not is_tensorflow_text_available():")
        self.assertEqual(backend_with_underscore, "tensorflow_text")

        double_backend = find_backend("    if not (is_sentencepiece_available() and is_tokenizers_available()):")
        self.assertEqual(double_backend, "sentencepiece_and_tokenizers")

        double_backend_with_underscore = find_backend(
            "    if not (is_sentencepiece_available() and is_tensorflow_text_available()):"
        )
        self.assertEqual(double_backend_with_underscore, "sentencepiece_and_tensorflow_text")

        triple_backend = find_backend(
            "    if not (is_sentencepiece_available() and is_tokenizers_available() and is_vision_available()):"
        )
        self.assertEqual(triple_backend, "sentencepiece_and_tokenizers_and_vision")

    def test_read_init(self):
        objects = read_init()
        # We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects
        self.assertIn("torch", objects)
        self.assertIn("tensorflow_text", objects)
        self.assertIn("sentencepiece_and_tokenizers", objects)

        # Likewise, we can't assert on the exact content of a key
        self.assertIn("BertModel", objects["torch"])
        self.assertIn("TFBertModel", objects["tf"])
        self.assertIn("FlaxBertModel", objects["flax"])
        self.assertIn("BertModel", objects["torch"])
        self.assertIn("TFBertTokenizer", objects["tensorflow_text"])
        self.assertIn("convert_slow_tokenizer", objects["sentencepiece_and_tokenizers"])

    def test_create_dummy_object(self):
        dummy_constant = create_dummy_object("CONSTANT", "'torch'")
        self.assertEqual(dummy_constant, "\nCONSTANT = None\n")

        dummy_function = create_dummy_object("function", "'torch'")
        self.assertEqual(
            dummy_function, "\ndef function(*args, **kwargs):\n    requires_backends(function, 'torch')\n"
        )

        expected_dummy_class = """
class FakeClass(metaclass=DummyObject):
    _backends = 'torch'

    def __init__(self, *args, **kwargs):
        requires_backends(self, 'torch')
"""
        dummy_class = create_dummy_object("FakeClass", "'torch'")
        self.assertEqual(dummy_class, expected_dummy_class)

    def test_create_dummy_files(self):
        expected_dummy_pytorch_file = '''# This file is autogenerated by the command `make fix-copies`, do not edit.
from ..utils import DummyObject, requires_backends


CONSTANT = None


def function(*args, **kwargs):
    requires_backends(function, ["torch"])


class FakeClass(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
'''
        dummy_files = create_dummy_files({"torch": ["CONSTANT", "function", "FakeClass"]})
        self.assertEqual(dummy_files["torch"], expected_dummy_pytorch_file)
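A quick interactive sketch of `find_backend`, the helper the first test exercises, assuming the `utils/` directory of a transformers checkout is on `sys.path` as set up above; the expected outputs mirror the joining behavior shown in the tests:

from check_dummies import find_backend

print(find_backend("    if not is_torch_available():"))  # 'torch'
print(find_backend("    if not (is_torch_available() and is_vision_available()):"))  # 'torch_and_vision'
print(find_backend("x = 1"))  # None -- lines without a backend check yield no backend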
| 369 |
import argparse
import torch
from transformers import RemBertConfig, RemBertModel, load_tf_weights_in_rembert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_rembert_tf_checkpoint_to_pytorch(tf_checkpoint_path, rembert_config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = RemBertConfig.from_json_file(rembert_config_file)
    print("Building PyTorch model from configuration: {}".format(str(config)))
    model = RemBertModel(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_rembert(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print("Save PyTorch model to {}".format(pytorch_dump_path))
    torch.save(model.state_dict(), pytorch_dump_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--rembert_config_file",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained RemBERT model. \n"
            "This specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_rembert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.rembert_config_file, args.pytorch_dump_path)
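The script is CLI-driven, but the conversion function can also be called directly; a sketch with placeholder paths (all three must point at an actual RemBERT TF checkpoint, config, and writable output location):

convert_rembert_tf_checkpoint_to_pytorch(
    tf_checkpoint_path="/path/to/rembert/model.ckpt",
    rembert_config_file="/path/to/rembert/config.json",
    pytorch_dump_path="/path/to/pytorch_model.bin",
)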
| 260 | 0 |