from typing import List, Optional, Union

from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType


class BridgeTowerProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "BridgeTowerImageProcessor"
    tokenizer_class = ("RobertaTokenizer", "RobertaTokenizerFast")

    def __init__(self, image_processor, tokenizer):
        super().__init__(image_processor, tokenizer)

    def __call__(
        self,
        images,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
        encoding = self.tokenizer(
            text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation,
            max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of,
            return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask,
            return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask,
            return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose,
            return_tensors=return_tensors, **kwargs,
        )
        # add pixel_values + pixel_mask
        encoding_image_processor = self.image_processor(
            images, return_tensors=return_tensors, do_normalize=True, do_center_crop=True, **kwargs
        )
        encoding.update(encoding_image_processor)
        return encoding

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
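# A minimal usage sketch for the processor above, assuming the public `transformers`
# API and the "BridgeTower/bridgetower-base" checkpoint (both are assumptions, not
# taken from this file):
#
#   import requests
#   from PIL import Image
#   from transformers import BridgeTowerProcessor
#
#   processor = BridgeTowerProcessor.from_pretrained("BridgeTower/bridgetower-base")
#   url = "http://images.cocodataset.org/val2017/000000039769.jpg"
#   image = Image.open(requests.get(url, stream=True).raw)
#   inputs = processor(image, text="two cats sleeping", return_tensors="pt")
#   # -> input_ids / attention_mask from the tokenizer plus pixel_values / pixel_mask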
def check_bouncy(n: int) -> bool:
    """Return True if ``n`` is bouncy, i.e. its digits are neither
    monotonically increasing nor monotonically decreasing."""
    if not isinstance(n, int):
        raise ValueError("check_bouncy() accepts only integer arguments")
    str_n = str(n)
    sorted_str_n = "".join(sorted(str_n))
    return sorted_str_n != str_n and sorted_str_n[::-1] != str_n


def solution(percent: float = 99) -> int:
    """Return the least number at which the proportion of bouncy numbers
    reaches ``percent`` percent (Project Euler 112)."""
    if not 0 < percent < 100:
        raise ValueError("solution() only accepts values from 0 to 100")
    bouncy_num = 0
    num = 1
    while True:
        if check_bouncy(num):
            bouncy_num += 1
        if (bouncy_num / num) * 100 >= percent:
            return num
        num += 1


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    print(f"{solution(99)}")
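    # Quick checks against values quoted in the Project Euler 112 statement
    # (external facts, not taken from this file): the 50% threshold is first
    # reached at 538 and the 90% threshold at 21780.
    assert check_bouncy(101) and not check_bouncy(100)
    assert solution(50) == 538
    assert solution(90) == 21780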
def solution(limit: int = 28123) -> int:
    """Project Euler 23: return the sum of all positive integers that cannot be
    written as the sum of two abundant numbers (28123 is the known upper bound)."""
    # sum_divs[n] accumulates the sum of the proper divisors of n (1 counts, n does not).
    sum_divs = [1] * (limit + 1)

    for i in range(2, int(limit**0.5) + 1):
        sum_divs[i * i] += i
        for k in range(i + 1, limit // i + 1):
            sum_divs[k * i] += k + i

    abundants = set()
    res = 0

    for n in range(1, limit + 1):
        if sum_divs[n] > n:
            abundants.add(n)

        if not any((n - a) in abundants for a in abundants):
            res += n

    return res


if __name__ == "__main__":
    print(solution())
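    # Sanity check: 4179871 is the widely published answer to Project Euler 23
    # (an external fact, not derived from this file).
    assert solution() == 4179871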
import argparse

import torch

from transformers import GPT2Config, GPT2Model, load_tf_weights_in_gpt2
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging

logging.set_verbosity_info()


def convert_gpt2_checkpoint_to_pytorch(gpt2_checkpoint_path, gpt2_config_file, pytorch_dump_folder_path):
    # Construct model
    if gpt2_config_file == "":
        config = GPT2Config()
    else:
        config = GPT2Config.from_json_file(gpt2_config_file)
    model = GPT2Model(config)

    # Load weights from the TensorFlow checkpoint
    load_tf_weights_in_gpt2(model, config, gpt2_checkpoint_path)

    # Save pytorch-model
    pytorch_weights_dump_path = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
    pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
    print(f"Save PyTorch model to {pytorch_weights_dump_path}")
    torch.save(model.state_dict(), pytorch_weights_dump_path)
    print(f"Save configuration file to {pytorch_config_dump_path}")
    with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
        f.write(config.to_json_string())


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--gpt2_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    parser.add_argument(
        "--gpt2_config_file",
        default="",
        type=str,
        help=(
            "An optional config json file corresponding to the pre-trained OpenAI model. \n"
            "This specifies the model architecture."
        ),
    )
    args = parser.parse_args()
    convert_gpt2_checkpoint_to_pytorch(args.gpt2_checkpoint_path, args.gpt2_config_file, args.pytorch_dump_folder_path)
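# Example invocation (paths are illustrative placeholders; in the transformers
# repository this script is known as convert_gpt2_original_tf_checkpoint_to_pytorch.py):
#
#   python convert_gpt2_original_tf_checkpoint_to_pytorch.py \
#       --gpt2_checkpoint_path /path/to/tf_checkpoint \
#       --gpt2_config_file /path/to/gpt2_config.json \
#       --pytorch_dump_folder_path /path/to/output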
import argparse
import glob
import logging
import os
from argparse import Namespace
from importlib import import_module

import numpy as np
import torch
from lightning_base import BaseTransformer, add_generic_args, generic_train
from seqeval.metrics import accuracy_score, f1_score, precision_score, recall_score
from torch.nn import CrossEntropyLoss
from torch.utils.data import DataLoader, TensorDataset
from utils_ner import TokenClassificationTask


logger = logging.getLogger(__name__)


class NERTransformer(BaseTransformer):
    """A training module for NER. See BaseTransformer for the core options."""

    mode = "token-classification"

    def __init__(self, hparams):
        if type(hparams) == dict:
            hparams = Namespace(**hparams)
        module = import_module("tasks")
        try:
            token_classification_task_clazz = getattr(module, hparams.task_type)
            self.token_classification_task: TokenClassificationTask = token_classification_task_clazz()
        except AttributeError:
            raise ValueError(
                f"Task {hparams.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. "
                f"Available tasks classes are: {TokenClassificationTask.__subclasses__()}"
            )
        self.labels = self.token_classification_task.get_labels(hparams.labels)
        self.pad_token_label_id = CrossEntropyLoss().ignore_index
        super().__init__(hparams, len(self.labels), self.mode)

    def forward(self, **inputs):
        return self.model(**inputs)

    def training_step(self, batch, batch_num):
        inputs = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}
        if self.config.model_type != "distilbert":
            inputs["token_type_ids"] = (
                batch[2] if self.config.model_type in ["bert", "xlnet"] else None
            )  # XLM and RoBERTa don't use token_type_ids

        outputs = self(**inputs)
        loss = outputs[0]
        # tensorboard_logs = {"loss": loss, "rate": self.lr_scheduler.get_last_lr()[-1]}
        return {"loss": loss}

    def prepare_data(self):
        "Called to initialize data. Use the call to construct features"
        args = self.hparams
        for mode in ["train", "dev", "test"]:
            cached_features_file = self._feature_file(mode)
            if os.path.exists(cached_features_file) and not args.overwrite_cache:
                logger.info("Loading features from cached file %s", cached_features_file)
                features = torch.load(cached_features_file)
            else:
                logger.info("Creating features from dataset file at %s", args.data_dir)
                examples = self.token_classification_task.read_examples_from_file(args.data_dir, mode)
                features = self.token_classification_task.convert_examples_to_features(
                    examples,
                    self.labels,
                    args.max_seq_length,
                    self.tokenizer,
                    cls_token_at_end=bool(self.config.model_type in ["xlnet"]),
                    cls_token=self.tokenizer.cls_token,
                    cls_token_segment_id=2 if self.config.model_type in ["xlnet"] else 0,
                    sep_token=self.tokenizer.sep_token,
                    sep_token_extra=False,
                    pad_on_left=bool(self.config.model_type in ["xlnet"]),
                    pad_token=self.tokenizer.pad_token_id,
                    pad_token_segment_id=self.tokenizer.pad_token_type_id,
                    pad_token_label_id=self.pad_token_label_id,
                )
                logger.info("Saving features into cached file %s", cached_features_file)
                torch.save(features, cached_features_file)

    def get_dataloader(self, mode, batch_size, shuffle=False):
        "Load datasets. Called after prepare data."
        cached_features_file = self._feature_file(mode)
        logger.info("Loading features from cached file %s", cached_features_file)
        features = torch.load(cached_features_file)
        all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
        all_attention_mask = torch.tensor([f.attention_mask for f in features], dtype=torch.long)
        if features[0].token_type_ids is not None:
            all_token_type_ids = torch.tensor([f.token_type_ids for f in features], dtype=torch.long)
        else:
            all_token_type_ids = torch.tensor([0 for f in features], dtype=torch.long)
            # HACK(we will not use this anymore soon)
        all_label_ids = torch.tensor([f.label_ids for f in features], dtype=torch.long)
        return DataLoader(
            TensorDataset(all_input_ids, all_attention_mask, all_token_type_ids, all_label_ids), batch_size=batch_size
        )

    def validation_step(self, batch, batch_nb):
        """Compute validation."""
        inputs = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}
        if self.config.model_type != "distilbert":
            inputs["token_type_ids"] = (
                batch[2] if self.config.model_type in ["bert", "xlnet"] else None
            )  # XLM and RoBERTa don't use token_type_ids
        outputs = self(**inputs)
        tmp_eval_loss, logits = outputs[:2]
        preds = logits.detach().cpu().numpy()
        out_label_ids = inputs["labels"].detach().cpu().numpy()
        return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids}

    def _eval_end(self, outputs):
        "Evaluation called for both Val and Test"
        val_loss_mean = torch.stack([x["val_loss"] for x in outputs]).mean()
        preds = np.concatenate([x["pred"] for x in outputs], axis=0)
        preds = np.argmax(preds, axis=2)
        out_label_ids = np.concatenate([x["target"] for x in outputs], axis=0)
        label_map = dict(enumerate(self.labels))
        out_label_list = [[] for _ in range(out_label_ids.shape[0])]
        preds_list = [[] for _ in range(out_label_ids.shape[0])]
        for i in range(out_label_ids.shape[0]):
            for j in range(out_label_ids.shape[1]):
                if out_label_ids[i, j] != self.pad_token_label_id:
                    out_label_list[i].append(label_map[out_label_ids[i][j]])
                    preds_list[i].append(label_map[preds[i][j]])

        results = {
            "val_loss": val_loss_mean,
            "accuracy_score": accuracy_score(out_label_list, preds_list),
            "precision": precision_score(out_label_list, preds_list),
            "recall": recall_score(out_label_list, preds_list),
            "f1": f1_score(out_label_list, preds_list),
        }

        ret = dict(results.items())
        ret["log"] = results
        return ret, preds_list, out_label_list

    def validation_epoch_end(self, outputs):
        ret, preds, targets = self._eval_end(outputs)
        logs = ret["log"]
        return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs}

    def test_epoch_end(self, outputs):
        ret, predictions, targets = self._eval_end(outputs)
        # Converting to the dict required by pl
        # https://github.com/PyTorchLightning/pytorch-lightning/blob/master/\
        # pytorch_lightning/trainer/logging.py#L139
        logs = ret["log"]
        # `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss`
        return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs}

    @staticmethod
    def add_model_specific_args(parser, root_dir):
        BaseTransformer.add_model_specific_args(parser, root_dir)
        parser.add_argument(
            "--task_type", default="NER", type=str, help="Task type to fine tune in training (e.g. NER, POS, etc)"
        )
        parser.add_argument(
            "--max_seq_length",
            default=128,
            type=int,
            help=(
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            ),
        )
        parser.add_argument(
            "--labels",
            default="",
            type=str,
            help="Path to a file containing all labels. If not specified, CoNLL-2003 labels are used.",
        )
        parser.add_argument(
            "--gpus",
            default=0,
            type=int,
            help="The number of GPUs allocated for this, it is by default 0 meaning none",
        )
        parser.add_argument(
            "--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets"
        )
        return parser


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    add_generic_args(parser, os.getcwd())
    parser = NERTransformer.add_model_specific_args(parser, os.getcwd())
    args = parser.parse_args()
    model = NERTransformer(args)
    trainer = generic_train(model, args)

    if args.do_predict:
        # See https://github.com/huggingface/transformers/issues/3159
        # pl use this default format to create a checkpoint:
        # https://github.com/PyTorchLightning/pytorch-lightning/blob/master\
        # /pytorch_lightning/callbacks/model_checkpoint.py#L322
        checkpoints = sorted(glob.glob(os.path.join(args.output_dir, "checkpoint-epoch=*.ckpt"), recursive=True))
        model = model.load_from_checkpoint(checkpoints[-1])
        trainer.test(model)
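# Example invocation of the script above (paths and the model name are illustrative
# placeholders, not values from the original file; the generic flags come from
# lightning_base.add_generic_args):
#
#   python run_ner.py \
#       --data_dir ./data \
#       --labels ./data/labels.txt \
#       --model_name_or_path bert-base-multilingual-cased \
#       --output_dir ./ner-out \
#       --max_seq_length 128 \
#       --do_train --do_predict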
import unittest
from transformers import (
MODEL_FOR_OBJECT_DETECTION_MAPPING,
AutoFeatureExtractor,
AutoModelForObjectDetection,
ObjectDetectionPipeline,
is_vision_available,
pipeline,
)
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_pytesseract,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
    from PIL import Image

else:

    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass


@is_pipeline_test
@require_vision
@require_timm
@require_torch
class ObjectDetectionPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_OBJECT_DETECTION_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        object_detector = ObjectDetectionPipeline(model=model, image_processor=processor)
        return object_detector, ["./tests/fixtures/tests_samples/COCO/000000039769.png"]

    def run_pipeline_test(self, object_detector, examples):
        outputs = object_detector("./tests/fixtures/tests_samples/COCO/000000039769.png", threshold=0.0)

        self.assertGreater(len(outputs), 0)
        for detected_object in outputs:
            self.assertEqual(
                detected_object,
                {
                    "score": ANY(float),
                    "label": ANY(str),
                    "box": {"xmin": ANY(int), "ymin": ANY(int), "xmax": ANY(int), "ymax": ANY(int)},
                },
            )

        import datasets

        dataset = datasets.load_dataset("hf-internal-testing/fixtures_image_utils", "image", split="test")

        batch = [
            Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png"),
            "http://images.cocodataset.org/val2017/000000039769.jpg",
            # RGBA
            dataset[0]["file"],
            # LA
            dataset[1]["file"],
            # L
            dataset[2]["file"],
        ]
        batch_outputs = object_detector(batch, threshold=0.0)

        self.assertEqual(len(batch), len(batch_outputs))
        for outputs in batch_outputs:
            self.assertGreater(len(outputs), 0)
            for detected_object in outputs:
                self.assertEqual(
                    detected_object,
                    {
                        "score": ANY(float),
                        "label": ANY(str),
                        "box": {"xmin": ANY(int), "ymin": ANY(int), "xmax": ANY(int), "ymax": ANY(int)},
                    },
                )

    @require_tf
    @unittest.skip("Object detection not implemented in TF")
    def test_small_model_tf(self):
        pass

    @require_torch
    def test_small_model_pt(self):
        model_id = "hf-internal-testing/tiny-detr-mobilenetsv3"

        model = AutoModelForObjectDetection.from_pretrained(model_id)
        feature_extractor = AutoFeatureExtractor.from_pretrained(model_id)
        object_detector = ObjectDetectionPipeline(model=model, feature_extractor=feature_extractor)

        outputs = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg", threshold=0.0)

        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
                {"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
            ],
        )

        outputs = object_detector(
            [
                "http://images.cocodataset.org/val2017/000000039769.jpg",
                "http://images.cocodataset.org/val2017/000000039769.jpg",
            ],
            threshold=0.0,
        )

        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
                    {"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
                    {"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
                ],
                [
                    {"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
                    {"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
                ],
            ],
        )

    @require_torch
    @slow
    def test_large_model_pt(self):
        model_id = "facebook/detr-resnet-50"

        model = AutoModelForObjectDetection.from_pretrained(model_id)
        feature_extractor = AutoFeatureExtractor.from_pretrained(model_id)
        object_detector = ObjectDetectionPipeline(model=model, feature_extractor=feature_extractor)

        outputs = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg")
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
                {"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
                {"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
                {"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
                {"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
            ],
        )

        outputs = object_detector(
            [
                "http://images.cocodataset.org/val2017/000000039769.jpg",
                "http://images.cocodataset.org/val2017/000000039769.jpg",
            ]
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
                    {"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
                    {"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
                    {"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
                    {"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
                    {"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
                ],
                [
                    {"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
                    {"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
                    {"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
                    {"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
                    {"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
                ],
            ],
        )

    @require_torch
    @slow
    def test_integration_torch_object_detection(self):
        model_id = "facebook/detr-resnet-50"

        object_detector = pipeline("object-detection", model=model_id)

        outputs = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg")
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
                {"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
                {"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
                {"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
                {"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
            ],
        )

        outputs = object_detector(
            [
                "http://images.cocodataset.org/val2017/000000039769.jpg",
                "http://images.cocodataset.org/val2017/000000039769.jpg",
            ]
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
                    {"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
                    {"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
                    {"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
                    {"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
                    {"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
                ],
                [
                    {"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
                    {"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
                    {"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
                    {"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
                    {"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
                ],
            ],
        )

    @require_torch
    @slow
    def test_threshold(self):
        threshold = 0.9985
        model_id = "facebook/detr-resnet-50"

        object_detector = pipeline("object-detection", model=model_id)

        outputs = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg", threshold=threshold)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
                {"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
            ],
        )

    @require_torch
    @require_pytesseract
    @slow
    def test_layoutlm(self):
        model_id = "Narsil/layoutlmv3-finetuned-funsd"
        threshold = 0.9993

        object_detector = pipeline("object-detection", model=model_id, threshold=threshold)
        outputs = object_detector(
            "https://huggingface.co/spaces/impira/docquery/resolve/2359223c1837a7587402bda0f2643382a6eefeab/invoice.png"
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9993, "label": "I-ANSWER", "box": {"xmin": 294, "ymin": 254, "xmax": 343, "ymax": 264}},
                {"score": 0.9993, "label": "I-ANSWER", "box": {"xmin": 294, "ymin": 254, "xmax": 343, "ymax": 264}},
            ],
        )
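# The same pipeline outside of a test harness, as a sketch (requires torch and
# network access to download "facebook/detr-resnet-50"):
#
#   from transformers import pipeline
#
#   object_detector = pipeline("object-detection", model="facebook/detr-resnet-50")
#   detections = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg")
#   # each detection is {"score": float, "label": str, "box": {"xmin", "ymin", "xmax", "ymax"}}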
import os
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from datasets import Dataset
from transformers.models.realm.configuration_realm import RealmConfig
from transformers.models.realm.retrieval_realm import _REALM_BLOCK_RECORDS_FILENAME, RealmRetriever
from transformers.models.realm.tokenization_realm import VOCAB_FILES_NAMES, RealmTokenizer
class RealmRetrieverTest(TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()
        self.num_block_records = 5

        # Realm tok
        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "[PAD]",
            "[MASK]",
            "test",
            "question",
            "this",
            "is",
            "the",
            "first",
            "second",
            "third",
            "fourth",
            "fifth",
            "record",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        realm_tokenizer_path = os.path.join(self.tmpdirname, "realm_tokenizer")
        os.makedirs(realm_tokenizer_path, exist_ok=True)
        self.vocab_file = os.path.join(realm_tokenizer_path, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

        realm_block_records_path = os.path.join(self.tmpdirname, "realm_block_records")
        os.makedirs(realm_block_records_path, exist_ok=True)

    def get_tokenizer(self) -> RealmTokenizer:
        return RealmTokenizer.from_pretrained(os.path.join(self.tmpdirname, "realm_tokenizer"))

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def get_config(self):
        config = RealmConfig(num_block_records=self.num_block_records)
        return config

    def get_dummy_dataset(self):
        dataset = Dataset.from_dict(
            {
                "id": ["0", "1"],
                "question": ["foo", "bar"],
                "answers": [["Foo", "Bar"], ["Bar"]],
            }
        )
        return dataset

    def get_dummy_block_records(self):
        block_records = np.array(
            [
                b"This is the first record",
                b"This is the second record",
                b"This is the third record",
                b"This is the fourth record",
                b"This is the fifth record",
                b"This is a longer longer longer record",
            ],
            dtype=object,
        )
        return block_records

    def get_dummy_retriever(self):
        retriever = RealmRetriever(
            block_records=self.get_dummy_block_records(),
            tokenizer=self.get_tokenizer(),
        )
        return retriever

    def test_retrieve(self):
        config = self.get_config()
        retriever = self.get_dummy_retriever()
        tokenizer = retriever.tokenizer

        retrieved_block_ids = np.array([0, 3], dtype="long")
        question_input_ids = tokenizer(["Test question"]).input_ids
        answer_ids = tokenizer(
            ["the fourth"],
            add_special_tokens=False,
            return_token_type_ids=False,
            return_attention_mask=False,
        ).input_ids
        max_length = config.reader_seq_len

        has_answers, start_pos, end_pos, concat_inputs = retriever(
            retrieved_block_ids, question_input_ids, answer_ids=answer_ids, max_length=max_length, return_tensors="np"
        )
        self.assertEqual(len(has_answers), 2)
        self.assertEqual(len(start_pos), 2)
        self.assertEqual(len(end_pos), 2)
        self.assertEqual(concat_inputs.input_ids.shape, (2, 10))
        self.assertEqual(concat_inputs.attention_mask.shape, (2, 10))
        self.assertEqual(concat_inputs.token_type_ids.shape, (2, 10))
        self.assertEqual(concat_inputs.special_tokens_mask.shape, (2, 10))
        self.assertEqual(
            tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[0]),
            ["[CLS]", "test", "question", "[SEP]", "this", "is", "the", "first", "record", "[SEP]"],
        )
        self.assertEqual(
            tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[1]),
            ["[CLS]", "test", "question", "[SEP]", "this", "is", "the", "fourth", "record", "[SEP]"],
        )

    def test_block_has_answer(self):
        config = self.get_config()
        retriever = self.get_dummy_retriever()
        tokenizer = retriever.tokenizer

        retrieved_block_ids = np.array([0, 3, 5], dtype="long")
        question_input_ids = tokenizer(["Test question"]).input_ids
        answer_ids = tokenizer(
            ["the fourth", "longer longer"],
            add_special_tokens=False,
            return_token_type_ids=False,
            return_attention_mask=False,
        ).input_ids
        max_length = config.reader_seq_len

        has_answers, start_pos, end_pos, _ = retriever(
            retrieved_block_ids, question_input_ids, answer_ids=answer_ids, max_length=max_length, return_tensors="np"
        )
        self.assertEqual([False, True, True], has_answers)
        self.assertEqual([[-1, -1, -1], [6, -1, -1], [6, 7, 8]], start_pos)
        self.assertEqual([[-1, -1, -1], [7, -1, -1], [7, 8, 9]], end_pos)

    def test_save_load_pretrained(self):
        retriever = self.get_dummy_retriever()
        retriever.save_pretrained(os.path.join(self.tmpdirname, "realm_block_records"))

        # Test local path
        retriever = retriever.from_pretrained(os.path.join(self.tmpdirname, "realm_block_records"))
        self.assertEqual(retriever.block_records[0], b"This is the first record")

        # Test mocked remote path
        with patch("transformers.models.realm.retrieval_realm.hf_hub_download") as mock_hf_hub_download:
            mock_hf_hub_download.return_value = os.path.join(
                os.path.join(self.tmpdirname, "realm_block_records"), _REALM_BLOCK_RECORDS_FILENAME
            )
            retriever = RealmRetriever.from_pretrained("google/realm-cc-news-pretrained-openqa")

        self.assertEqual(retriever.block_records[0], b"This is the first record")
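# Shape of a retriever call, for reference (mirrors the tests above; the comments
# are an interpretation, not documented API text):
#
#   has_answers, start_pos, end_pos, concat_inputs = retriever(
#       retrieved_block_ids, question_input_ids, answer_ids=answer_ids,
#       max_length=config.reader_seq_len, return_tensors="np",
#   )
#   # has_answers[i]    - whether retrieved block i contains an answer span
#   # start_pos/end_pos - token positions of answer spans (-1 where absent)
#   # concat_inputs     - question and retrieved block tokenized together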
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


_import_structure = {
    "configuration_blenderbot_small": [
        "BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "BlenderbotSmallConfig",
        "BlenderbotSmallOnnxConfig",
    ],
    "tokenization_blenderbot_small": ["BlenderbotSmallTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_blenderbot_small_fast"] = ["BlenderbotSmallTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_blenderbot_small"] = [
        "BLENDERBOT_SMALL_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BlenderbotSmallForCausalLM",
        "BlenderbotSmallForConditionalGeneration",
        "BlenderbotSmallModel",
        "BlenderbotSmallPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_blenderbot_small"] = [
        "TFBlenderbotSmallForConditionalGeneration",
        "TFBlenderbotSmallModel",
        "TFBlenderbotSmallPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_blenderbot_small"] = [
        "FlaxBlenderbotSmallForConditionalGeneration",
        "FlaxBlenderbotSmallModel",
        "FlaxBlenderbotSmallPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_blenderbot_small import (
        BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP,
        BlenderbotSmallConfig,
        BlenderbotSmallOnnxConfig,
    )
    from .tokenization_blenderbot_small import BlenderbotSmallTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_blenderbot_small_fast import BlenderbotSmallTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_blenderbot_small import (
            BLENDERBOT_SMALL_PRETRAINED_MODEL_ARCHIVE_LIST,
            BlenderbotSmallForCausalLM,
            BlenderbotSmallForConditionalGeneration,
            BlenderbotSmallModel,
            BlenderbotSmallPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_blenderbot_small import (
            TFBlenderbotSmallForConditionalGeneration,
            TFBlenderbotSmallModel,
            TFBlenderbotSmallPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_blenderbot_small import (
            FlaxBlenderbotSmallForConditionalGeneration,
            FlaxBlenderbotSmallModel,
            FlaxBlenderbotSmallPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
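# Behavior sketch of the _LazyModule pattern above (assuming the public
# `transformers` package): submodules are imported only when a symbol is first
# accessed, so a missing optional backend fails at attribute access rather than
# at package import time.
#
#   from transformers import BlenderbotSmallTokenizer   # loads only the tokenizer module
#   from transformers import BlenderbotSmallModel       # triggers the torch-backed module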
import itertools
import json
import os
import unittest

from transformers import AddedToken, RobertaTokenizer, RobertaTokenizerFast
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow

from ...test_tokenization_common import TokenizerTesterMixin


@require_tokenizers
class RobertaTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = RobertaTokenizer
    rust_tokenizer_class = RobertaTokenizerFast
    test_rust_tokenizer = True
    from_pretrained_kwargs = {"cls_token": "<s>"}

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return RobertaTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "lower newer"
        bpe_tokens = ["l", "o", "w", "er", "\u0120", "n", "e", "w", "er"]
        tokens = tokenizer.tokenize(text)  # , add_prefix_space=True)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    def roberta_dict_integration_testing(self):
        tokenizer = self.get_tokenizer()

        self.assertListEqual(tokenizer.encode("Hello world!", add_special_tokens=False), [0, 31414, 232, 328, 2])
        self.assertListEqual(
            tokenizer.encode("Hello world! cécé herlolip 418", add_special_tokens=False),
            [0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2],
        )

    @slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained("roberta-base")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_text_from_decode = tokenizer.encode(
            "sequence builders", add_special_tokens=True, add_prefix_space=False
        )
        encoded_pair_from_decode = tokenizer.encode(
            "sequence builders", "multi-sequence build", add_special_tokens=True, add_prefix_space=False
        )

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == encoded_text_from_decode
        assert encoded_pair == encoded_pair_from_decode

    def test_space_encoding(self):
        tokenizer = self.get_tokenizer()

        sequence = "Encode this sequence."
        space_encoding = tokenizer.byte_encoder[" ".encode("utf-8")[0]]

        # Testing encoder arguments
        encoded = tokenizer.encode(sequence, add_special_tokens=False, add_prefix_space=False)
        first_char = tokenizer.convert_ids_to_tokens(encoded[0])[0]
        self.assertNotEqual(first_char, space_encoding)

        encoded = tokenizer.encode(sequence, add_special_tokens=False, add_prefix_space=True)
        first_char = tokenizer.convert_ids_to_tokens(encoded[0])[0]
        self.assertEqual(first_char, space_encoding)

        tokenizer.add_special_tokens({"bos_token": "<s>"})
        encoded = tokenizer.encode(sequence, add_special_tokens=True)
        first_char = tokenizer.convert_ids_to_tokens(encoded[1])[0]
        self.assertNotEqual(first_char, space_encoding)

        # Testing spaces after special tokens
        mask = "<mask>"
        tokenizer.add_special_tokens(
            {"mask_token": AddedToken(mask, lstrip=True, rstrip=False)}
        )  # mask token has a left space
        mask_ind = tokenizer.convert_tokens_to_ids(mask)

        sequence = "Encode <mask> sequence"
        sequence_nospace = "Encode <mask>sequence"

        encoded = tokenizer.encode(sequence)
        mask_loc = encoded.index(mask_ind)
        first_char = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1])[0]
        self.assertEqual(first_char, space_encoding)

        encoded = tokenizer.encode(sequence_nospace)
        mask_loc = encoded.index(mask_ind)
        first_char = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1])[0]
        self.assertNotEqual(first_char, space_encoding)

    def test_pretokenized_inputs(self):
        pass

    def test_embeded_special_tokens(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                sentence = "A, <mask> AllenNLP sentence."
                tokens_r = tokenizer_r.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)
                tokens_p = tokenizer_p.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)

                # token_type_ids should put 0 everywhere
                self.assertEqual(sum(tokens_r["token_type_ids"]), sum(tokens_p["token_type_ids"]))

                # attention_mask should put 1 everywhere, so sum over length should be 1
                self.assertEqual(
                    sum(tokens_r["attention_mask"]) / len(tokens_r["attention_mask"]),
                    sum(tokens_p["attention_mask"]) / len(tokens_p["attention_mask"]),
                )

                tokens_r_str = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"])
                tokens_p_str = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"])

                # Rust correctly handles the space before the mask while python doesnt
                self.assertSequenceEqual(tokens_p["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])
                self.assertSequenceEqual(tokens_r["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])

                self.assertSequenceEqual(
                    tokens_p_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )
                self.assertSequenceEqual(
                    tokens_r_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )

    def test_change_add_prefix_space_and_trim_offsets_args(self):
        for trim_offsets, add_prefix_space in itertools.product([True, False], repeat=2):
            tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                self.tmpdirname, use_fast=True, add_prefix_space=add_prefix_space, trim_offsets=trim_offsets
            )

            pre_tokenizer_state = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__())
            post_processor_state = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__())

            self.assertEqual(pre_tokenizer_state["add_prefix_space"], add_prefix_space)

            self.assertEqual(post_processor_state["add_prefix_space"], add_prefix_space)
            self.assertEqual(post_processor_state["trim_offsets"], trim_offsets)

    def test_offsets_mapping_with_different_add_prefix_space_and_trim_space_arguments(self):
        # Verify that the offsets are well adapted to the `add_prefix_space` and `trim_offsets` arguments.
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                text_of_1_token = "hello"  # `hello` is a token in the vocabulary of `pretrained_name`
                text = f"{text_of_1_token} {text_of_1_token}"

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token) + 1, len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=True
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token) + 1, len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=False
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token), len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=False
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token), len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                text = f" {text}"

                # tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                #     pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
                # )
                # encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                # self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
                # self.assertEqual(
                #     encoding.offset_mapping[1],
                #     (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                # )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=True
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=False
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, 1 + len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (1 + len(text_of_1_token), 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=False
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, 1 + len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (1 + len(text_of_1_token), 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                )
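# A quick illustration of the "\u0120" (Ġ) space marker that the assertions above
# rely on, using the public roberta-base checkpoint (an assumption, not part of
# the test file):
#
#   from transformers import RobertaTokenizer
#
#   tok = RobertaTokenizer.from_pretrained("roberta-base")
#   print(tok.tokenize("lower newer"))
#   # words after the first come back with a leading Ġ, e.g. ['lower', 'Ġnew', 'er']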
import numpy as np
import torch
from torch.utils.data import Dataset

from utils import logger


class LmSeqsDataset(Dataset):
    """Wrapper around token sequences for language-model distillation."""

    def __init__(self, params, data):
        self.params = params
        self.token_ids = np.array(data)
        self.lengths = np.array([len(t) for t in data])

        self.check()
        self.remove_long_sequences()
        self.remove_empty_sequences()
        self.remove_unknown_sequences()
        self.check()
        self.print_statistics()

    def __getitem__(self, index):
        return (self.token_ids[index], self.lengths[index])

    def __len__(self):
        return len(self.lengths)

    def check(self):
        """Some sanity checks."""
        assert len(self.token_ids) == len(self.lengths)
        assert all(self.lengths[i] == len(self.token_ids[i]) for i in range(len(self.lengths)))

    def remove_long_sequences(self):
        """Sequences that are too long are split into multiple sequences."""
        max_len = self.params.max_model_input_size
        indices = self.lengths > max_len
        logger.info(f"Splitting {sum(indices)} too long sequences.")

        def divide_chunks(l, n):
            return [l[i : i + n] for i in range(0, len(l), n)]

        new_tok_ids = []
        new_lengths = []
        if self.params.mlm:
            cls_id, sep_id = self.params.special_tok_ids["cls_token"], self.params.special_tok_ids["sep_token"]
        else:
            cls_id, sep_id = self.params.special_tok_ids["bos_token"], self.params.special_tok_ids["eos_token"]

        for seq_, len_ in zip(self.token_ids, self.lengths):
            assert (seq_[0] == cls_id) and (seq_[-1] == sep_id), seq_
            if len_ <= max_len:
                new_tok_ids.append(seq_)
                new_lengths.append(len_)
            else:
                sub_seqs = []
                for sub_s in divide_chunks(seq_, max_len - 2):
                    if sub_s[0] != cls_id:
                        sub_s = np.insert(sub_s, 0, cls_id)
                    if sub_s[-1] != sep_id:
                        sub_s = np.insert(sub_s, len(sub_s), sep_id)
                    assert len(sub_s) <= max_len
                    assert (sub_s[0] == cls_id) and (sub_s[-1] == sep_id), sub_s
                    sub_seqs.append(sub_s)
                new_tok_ids.extend(sub_seqs)
                new_lengths.extend([len(l) for l in sub_seqs])

        self.token_ids = np.array(new_tok_ids)
        self.lengths = np.array(new_lengths)

    def remove_empty_sequences(self):
        """Too short sequences are simply removed."""
        init_size = len(self)
        indices = self.lengths > 11
        self.token_ids = self.token_ids[indices]
        self.lengths = self.lengths[indices]
        new_size = len(self)
        logger.info(f"Remove {init_size - new_size} too short (<=11 tokens) sequences.")

    def remove_unknown_sequences(self):
        """Remove sequences with a (too) high level of unknown tokens."""
        if "unk_token" not in self.params.special_tok_ids:
            return
        unk_token_id = self.params.special_tok_ids["unk_token"]
        init_size = len(self)
        unk_occs = np.array([np.count_nonzero(a == unk_token_id) for a in self.token_ids])
        indices = (unk_occs / self.lengths) < 0.5
        self.token_ids = self.token_ids[indices]
        self.lengths = self.lengths[indices]
        new_size = len(self)
        logger.info(f"Remove {init_size - new_size} sequences with a high level of unknown tokens (50%).")

    def print_statistics(self):
        """Print some statistics on the corpus. Only the master process."""
        if not self.params.is_master:
            return
        logger.info(f"{len(self)} sequences")
        # data_len = sum(self.lengths)
        # nb_unique_tokens = len(Counter(list(chain(*self.token_ids))))
        # logger.info(f'{data_len} tokens ({nb_unique_tokens} unique)')
        # unk_idx = self.params.special_tok_ids['unk_token']
        # nb_unknown = sum([(t==unk_idx).sum() for t in self.token_ids])
        # logger.info(f'{nb_unknown} unknown tokens (covering {100*nb_unknown/data_len:.2f}% of the data)')

    def batch_sequences(self, batch):
        """Do the padding and transform into torch.tensor."""
        token_ids = [t[0] for t in batch]
        lengths = [t[1] for t in batch]
        assert len(token_ids) == len(lengths)

        # Max for paddings
        max_seq_len_ = max(lengths)

        # Pad token ids
        if self.params.mlm:
            pad_idx = self.params.special_tok_ids["pad_token"]
        else:
            pad_idx = self.params.special_tok_ids["unk_token"]
        tk_ = [list(t.astype(int)) + [pad_idx] * (max_seq_len_ - len(t)) for t in token_ids]
        assert len(tk_) == len(token_ids)
        assert all(len(t) == max_seq_len_ for t in tk_)

        tk_t = torch.tensor(tk_)  # (bs, max_seq_len_)
        lg_t = torch.tensor(lengths)  # (bs)
        return tk_t, lg_t
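# A minimal usage sketch (the `params` fields are inferred from the attribute
# accesses above, not a documented API):
#
#   from torch.utils.data import DataLoader
#
#   dataset = LmSeqsDataset(params, data)   # data: list of 1-D np.int arrays
#   loader = DataLoader(dataset, batch_size=32, collate_fn=dataset.batch_sequences)
#   for token_ids, lengths in loader:       # (bs, max_seq_len), (bs,)
#       ...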
import pytest
DATASET_LOADING_SCRIPT_NAME = "__dummy_dataset1__"

DATASET_LOADING_SCRIPT_CODE = '\nimport json\nimport os\n\nimport datasets\n\n\nREPO_URL = "https://huggingface.co/datasets/albertvillanova/tests-raw-jsonl/resolve/main/"\nURLS = {"train": REPO_URL + "wikiann-bn-train.jsonl", "validation": REPO_URL + "wikiann-bn-validation.jsonl"}\n\n\nclass __DummyDataset1__(datasets.GeneratorBasedBuilder):\n\n    def _info(self):\n        features = datasets.Features(\n            {\n                "tokens": datasets.Sequence(datasets.Value("string")),\n                "ner_tags": datasets.Sequence(\n                    datasets.features.ClassLabel(\n                        names=[\n                            "O",\n                            "B-PER",\n                            "I-PER",\n                            "B-ORG",\n                            "I-ORG",\n                            "B-LOC",\n                            "I-LOC",\n                        ]\n                    )\n                ),\n                "langs": datasets.Sequence(datasets.Value("string")),\n                "spans": datasets.Sequence(datasets.Value("string")),\n            }\n        )\n        return datasets.DatasetInfo(features=features)\n\n    def _split_generators(self, dl_manager):\n        dl_path = dl_manager.download(URLS)\n        return [\n            datasets.SplitGenerator(datasets.Split.TRAIN, gen_kwargs={"filepath": dl_path["train"]}),\n            datasets.SplitGenerator(datasets.Split.VALIDATION, gen_kwargs={"filepath": dl_path["validation"]}),\n        ]\n\n    def _generate_examples(self, filepath):\n        with open(filepath, "r", encoding="utf-8") as f:\n            for i, line in enumerate(f):\n                yield i, json.loads(line)\n'
@pytest.fixture
def dataset_loading_script_name() -> str:
    return DATASET_LOADING_SCRIPT_NAME


@pytest.fixture
def dataset_loading_script_code() -> str:
    return DATASET_LOADING_SCRIPT_CODE


@pytest.fixture
def dataset_loading_script_dir(dataset_loading_script_name, dataset_loading_script_code, tmp_path) -> str:
    script_name = dataset_loading_script_name
    script_dir = tmp_path / "datasets" / script_name
    script_dir.mkdir(parents=True)
    script_path = script_dir / f"{script_name}.py"
    with open(script_path, "w") as f:
        f.write(dataset_loading_script_code)
    return str(script_dir)
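# Sketch of a test that consumes the fixture chain above (the test body is
# illustrative, not from the original file):
#
#   import datasets
#
#   def test_dummy_dataset_script(dataset_loading_script_dir):
#       builder = datasets.load_dataset_builder(dataset_loading_script_dir)
#       assert builder.name == DATASET_LOADING_SCRIPT_NAME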
from pathlib import PurePosixPath
from typing import Optional

import fsspec
from fsspec import AbstractFileSystem
from huggingface_hub.hf_api import DatasetInfo

from ..utils.file_utils import get_authentication_headers_for_url
from ..utils.hub import hf_hub_url


class HfFileSystem(AbstractFileSystem):
    """Interface to files in a Hugging Face dataset repository."""

    root_marker = ""
    protocol = "hf-legacy"  # "hf://" is reserved for hffs

    def __init__(
        self,
        repo_info: Optional[DatasetInfo] = None,
        token: Optional[str] = None,
        **kwargs,
    ):
        super().__init__(self, **kwargs)
        self.repo_info = repo_info
        self.token = token
        self.dir_cache = None

    def _get_dirs(self):
        if self.dir_cache is None:
            self.dir_cache = {}
            for hf_file in self.repo_info.siblings:
                # TODO(QL): add sizes
                self.dir_cache[hf_file.rfilename] = {
                    "name": hf_file.rfilename,
                    "size": None,
                    "type": "file",
                }
                self.dir_cache.update(
                    {
                        str(d): {"name": str(d), "size": None, "type": "directory"}
                        for d in list(PurePosixPath(hf_file.rfilename).parents)[:-1]
                    }
                )

    def _open(self, path: str, mode: str = "rb", **kwargs):
        if not isinstance(self.repo_info, DatasetInfo):
            raise NotImplementedError(f"Open is only implemented for dataset repositories, but got {self.repo_info}")
        url = hf_hub_url(self.repo_info.id, path, revision=self.repo_info.sha)
        return fsspec.open(
            url,
            mode=mode,
            headers=get_authentication_headers_for_url(url, use_auth_token=self.token),
            client_kwargs={"trust_env": True},
        ).open()

    def info(self, path: str, **kwargs):
        self._get_dirs()
        path = self._strip_protocol(path)
        if path in self.dir_cache:
            return self.dir_cache[path]
        else:
            raise FileNotFoundError(path)

    def ls(self, path: str, detail: bool = False, **kwargs):
        self._get_dirs()
        path = PurePosixPath(path.strip("/"))
        paths = {}
        for p, f in self.dir_cache.items():
            p = PurePosixPath(p.strip("/"))
            root = p.parent
            if root == path:
                paths[str(p)] = f
        out = list(paths.values())
        if detail:
            return out
        else:
            return sorted(f["name"] for f in out)
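# Usage sketch (assumes a DatasetInfo fetched via huggingface_hub; illustrative only):
#
#   from huggingface_hub import HfApi
#
#   repo_info = HfApi().dataset_info("squad")
#   fs = HfFileSystem(repo_info=repo_info)
#   print(fs.ls(""))                 # top-level entries of the dataset repo
#   with fs.open("README.md") as f:  # streamed from the Hub
#       head = f.read(200)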
'''simple docstring'''
def _snake_case ( lowercase = 1_0 , lowercase = 1_0_0_0 , lowercase = True ) -> int:
assert (
isinstance(lowercase , lowercase )
and isinstance(lowercase , lowercase )
and isinstance(lowercase , lowercase )
), "Invalid type of value(s) specified to function!"
if min_val > max_val:
raise ValueError("""Invalid value for min_val or max_val (min_value < max_value)""" )
return min_val if option else max_val
def _snake_case ( lowercase , lowercase ) -> int:
return int((number_a + number_a) / 2 )
def _snake_case ( lowercase , lowercase , lowercase ) -> None:
assert (
isinstance(lowercase , lowercase ) and isinstance(lowercase , lowercase ) and isinstance(lowercase , lowercase )
), 'argument values must be type of "int"'
if lower > higher:
raise ValueError("""argument value for lower and higher must be(lower > higher)""" )
if not lower < to_guess < higher:
raise ValueError(
"""guess value must be within the range of lower and higher value""" )
def answer(lowercase ) -> str:
if number > to_guess:
return "high"
elif number < to_guess:
return "low"
else:
return "same"
print("""started...""" )
__a : Optional[Any] = lower
__a : Optional[Any] = higher
__a : Dict = []
while True:
__a : Union[str, Any] = get_avg(lowercase , lowercase )
last_numbers.append(lowercase )
if answer(lowercase ) == "low":
__a : Dict = number
elif answer(lowercase ) == "high":
__a : Dict = number
else:
break
print(F"""guess the number : {last_numbers[-1]}""" )
print(F"""details : {last_numbers!s}""" )
def _snake_case ( ) -> None:
__a : List[str] = int(input("""Enter lower value : """ ).strip() )
__a : Optional[Any] = int(input("""Enter high value : """ ).strip() )
__a : Optional[int] = int(input("""Enter value to guess : """ ).strip() )
guess_the_number(lowercase , lowercase , lowercase )
if __name__ == "__main__":
    main()
'''simple docstring'''
import inspect
import unittest
from transformers import DPTConfig
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MODEL_MAPPING, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTModel
from transformers.models.dpt.modeling_dpt import DPT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
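# Helper that builds a tiny hybrid DPT config plus synthetic pixel values and
# labels for the model tests below.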
class SCREAMING_SNAKE_CASE__ :
def __init__( self , __UpperCamelCase , __UpperCamelCase=2 , __UpperCamelCase=32 , __UpperCamelCase=16 , __UpperCamelCase=3 , __UpperCamelCase=True , __UpperCamelCase=True , __UpperCamelCase=32 , __UpperCamelCase=4 , __UpperCamelCase=[0, 1, 2, 3] , __UpperCamelCase=4 , __UpperCamelCase=37 , __UpperCamelCase="gelu" , __UpperCamelCase=0.1 , __UpperCamelCase=0.1 , __UpperCamelCase=0.0_2 , __UpperCamelCase=3 , __UpperCamelCase=[1, 384, 24, 24] , __UpperCamelCase=True , __UpperCamelCase=None , ):
'''simple docstring'''
__a : List[str] = parent
__a : Tuple = batch_size
__a : str = image_size
__a : int = patch_size
__a : Dict = num_channels
__a : int = is_training
__a : Dict = use_labels
__a : Union[str, Any] = hidden_size
__a : Dict = num_hidden_layers
__a : Dict = backbone_out_indices
__a : Optional[int] = num_attention_heads
__a : List[str] = intermediate_size
__a : Optional[Any] = hidden_act
__a : Dict = hidden_dropout_prob
__a : Tuple = attention_probs_dropout_prob
__a : Any = initializer_range
__a : Any = num_labels
__a : Optional[Any] = backbone_featmap_shape
__a : List[Any] = scope
__a : List[str] = is_hybrid
# sequence length of DPT = num_patches + 1 (we add 1 for the [CLS] token)
__a : Union[str, Any] = (image_size // patch_size) ** 2
__a : List[str] = num_patches + 1
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Dict = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__a : Union[str, Any] = None
if self.use_labels:
__a : str = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
__a : Tuple = self.get_config()
return config, pixel_values, labels
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : List[str] = {
"""global_padding""": """same""",
"""layer_type""": """bottleneck""",
"""depths""": [3, 4, 9],
"""out_features""": ["""stage1""", """stage2""", """stage3"""],
"""embedding_dynamic_padding""": True,
"""hidden_sizes""": [96, 192, 384, 768],
"""num_groups""": 2,
}
return DPTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , backbone_out_indices=self.backbone_out_indices , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__UpperCamelCase , initializer_range=self.initializer_range , is_hybrid=self.is_hybrid , backbone_config=__UpperCamelCase , backbone_featmap_shape=self.backbone_featmap_shape , )
def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ):
'''simple docstring'''
__a : Optional[Any] = DPTModel(config=__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
__a : List[str] = model(__UpperCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ):
'''simple docstring'''
__a : List[str] = self.num_labels
__a : Union[str, Any] = DPTForDepthEstimation(__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
__a : Tuple = model(__UpperCamelCase )
self.parent.assertEqual(result.predicted_depth.shape , (self.batch_size, self.image_size, self.image_size) )
def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ):
'''simple docstring'''
__a : Dict = self.num_labels
__a : Tuple = DPTForSemanticSegmentation(__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
__a : str = model(__UpperCamelCase , labels=__UpperCamelCase )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size, self.image_size) )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Optional[int] = self.prepare_config_and_inputs()
__a , __a , __a : Tuple = config_and_inputs
__a : List[str] = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class SCREAMING_SNAKE_CASE__ ( __UpperCamelCase , __UpperCamelCase , unittest.TestCase ):
lowercase__ = (DPTModel, DPTForDepthEstimation, DPTForSemanticSegmentation) if is_torch_available() else ()
lowercase__ = (
{
"depth-estimation": DPTForDepthEstimation,
"feature-extraction": DPTModel,
"image-segmentation": DPTForSemanticSegmentation,
}
if is_torch_available()
else {}
)
lowercase__ = False
lowercase__ = False
lowercase__ = False
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Optional[int] = DPTModelTester(self )
__a : List[Any] = ConfigTester(self , config_class=__UpperCamelCase , has_text_modality=__UpperCamelCase , hidden_size=37 )
def __lowerCamelCase ( self ):
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason="""DPT does not use inputs_embeds""" )
def __lowerCamelCase ( self ):
'''simple docstring'''
pass
def __lowerCamelCase ( self ):
'''simple docstring'''
__a , __a : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__a : str = model_class(__UpperCamelCase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
__a : Any = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__UpperCamelCase , nn.Linear ) )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a , __a : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__a : Any = model_class(__UpperCamelCase )
__a : List[str] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__a : int = [*signature.parameters.keys()]
__a : List[str] = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , __UpperCamelCase )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__UpperCamelCase )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_depth_estimation(*__UpperCamelCase )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*__UpperCamelCase )
def __lowerCamelCase ( self ):
'''simple docstring'''
for model_class in self.all_model_classes:
if model_class.__name__ == "DPTForDepthEstimation":
continue
__a , __a : Dict = self.model_tester.prepare_config_and_inputs_for_common()
__a : List[Any] = True
if model_class in get_values(__UpperCamelCase ):
continue
__a : str = model_class(__UpperCamelCase )
model.to(__UpperCamelCase )
model.train()
__a : Union[str, Any] = self._prepare_for_class(__UpperCamelCase , __UpperCamelCase , return_labels=__UpperCamelCase )
__a : List[Any] = model(**__UpperCamelCase ).loss
loss.backward()
def __lowerCamelCase ( self ):
'''simple docstring'''
for model_class in self.all_model_classes:
if model_class.__name__ == "DPTForDepthEstimation":
continue
__a , __a : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
__a : Any = False
__a : Dict = True
if model_class in get_values(__UpperCamelCase ) or not model_class.supports_gradient_checkpointing:
continue
__a : Any = model_class(__UpperCamelCase )
model.to(__UpperCamelCase )
model.gradient_checkpointing_enable()
model.train()
__a : List[str] = self._prepare_for_class(__UpperCamelCase , __UpperCamelCase , return_labels=__UpperCamelCase )
__a : Dict = model(**__UpperCamelCase ).loss
loss.backward()
def __lowerCamelCase ( self ):
'''simple docstring'''
__a , __a : Any = self.model_tester.prepare_config_and_inputs_for_common()
__a : Any = _config_zero_init(__UpperCamelCase )
for model_class in self.all_model_classes:
__a : Any = model_class(config=__UpperCamelCase )
# Skip the check for the backbone
__a : Optional[Any] = []
for name, module in model.named_modules():
if module.__class__.__name__ == "DPTViTHybridEmbeddings":
__a : Optional[int] = [f"""{name}.{key}""" for key in module.state_dict().keys()]
break
for name, param in model.named_parameters():
if param.requires_grad:
if name in backbone_params:
continue
self.assertIn(
((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=f"""Parameter {name} of model {model_class} seems not properly initialized""" , )
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def __lowerCamelCase ( self ):
'''simple docstring'''
pass
@slow
def __lowerCamelCase ( self ):
'''simple docstring'''
for model_name in DPT_PRETRAINED_MODEL_ARCHIVE_LIST[1:]:
__a : int = DPTModel.from_pretrained(__UpperCamelCase )
self.assertIsNotNone(__UpperCamelCase )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a , __a : int = self.model_tester.prepare_config_and_inputs_for_common()
__a : Optional[int] = """add"""
with self.assertRaises(__UpperCamelCase ):
__a : int = DPTForDepthEstimation(__UpperCamelCase )
def _snake_case ( ) -> Any:
__a : Dict = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
@slow
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : int = DPTImageProcessor.from_pretrained("""Intel/dpt-hybrid-midas""" )
__a : int = DPTForDepthEstimation.from_pretrained("""Intel/dpt-hybrid-midas""" ).to(__UpperCamelCase )
__a : Union[str, Any] = prepare_img()
__a : Any = image_processor(images=__UpperCamelCase , return_tensors="""pt""" ).to(__UpperCamelCase )
# forward pass
with torch.no_grad():
__a : Optional[Any] = model(**__UpperCamelCase )
__a : int = outputs.predicted_depth
# verify the predicted depth
__a : Any = torch.Size((1, 384, 384) )
self.assertEqual(predicted_depth.shape , __UpperCamelCase )
__a : int = torch.tensor(
[[[5.6_4_3_7, 5.6_1_4_6, 5.6_5_1_1], [5.4_3_7_1, 5.5_6_4_9, 5.5_9_5_8], [5.5_2_1_5, 5.5_1_8_4, 5.5_2_9_3]]] ).to(__UpperCamelCase )
        self.assertTrue(torch.allclose(outputs.predicted_depth[:3, :3, :3] / 100 , __UpperCamelCase , atol=1E-4 ) )
'''simple docstring'''
from collections.abc import Callable
from math import pi, sqrt
from random import uniform
from statistics import mean
def _snake_case ( lowercase ) -> List[Any]:
# A local function to see if a dot lands in the circle.
def is_in_circle(lowercase , lowercase ) -> bool:
__a : int = sqrt((x**2) + (y**2) )
# Our circle has a radius of 1, so a distance
# greater than 1 would land outside the circle.
return distance_from_centre <= 1
# The proportion of guesses that landed in the circle
__a : Tuple = mean(
int(is_in_circle(uniform(-1.0 , 1.0 ) , uniform(-1.0 , 1.0 ) ) )
for _ in range(lowercase ) )
# The ratio of the area for circle to square is pi/4.
__a : Optional[int] = proportion * 4
print(F"""The estimated value of pi is {pi_estimate}""" )
print(F"""The numpy value of pi is {pi}""" )
print(F"""The total error is {abs(pi - pi_estimate )}""" )
def _snake_case ( lowercase , lowercase , lowercase = 0.0 , lowercase = 1.0 , ) -> float:
return mean(
function_to_integrate(uniform(lowercase , lowercase ) ) for _ in range(lowercase ) ) * (max_value - min_value)
def _snake_case ( lowercase , lowercase = 0.0 , lowercase = 1.0 ) -> None:
def identity_function(lowercase ) -> float:
return x
__a : Tuple = area_under_curve_estimator(
lowercase , lowercase , lowercase , lowercase )
__a : Tuple = (max_value * max_value - min_value * min_value) / 2
print("""******************""" )
print(F"""Estimating area under y=x where x varies from {min_value} to {max_value}""" )
print(F"""Estimated value is {estimated_value}""" )
print(F"""Expected value is {expected_value}""" )
print(F"""Total error is {abs(estimated_value - expected_value )}""" )
print("""******************""" )
def _snake_case ( lowercase ) -> None:
def function_to_integrate(lowercase ) -> float:
return sqrt(4.0 - x * x )
__a : str = area_under_curve_estimator(
lowercase , lowercase , 0.0 , 2.0 )
print("""******************""" )
print("""Estimating pi using area_under_curve_estimator""" )
print(F"""Estimated value is {estimated_value}""" )
print(F"""Expected value is {pi}""" )
print(F"""Total error is {abs(estimated_value - pi )}""" )
print("""******************""" )
if __name__ == "__main__":
import doctest
    doctest.testmod()
'''simple docstring'''
import unittest
from .lib import (
Matrix,
Vector,
axpy,
square_zero_matrix,
unit_basis_vector,
zero_vector,
)
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Dict = Vector([1, 2, 3] )
self.assertEqual(x.component(0 ) , 1 )
self.assertEqual(x.component(2 ) , 3 )
__a : Optional[int] = Vector()
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Any = Vector([0, 0, 0, 0, 0, 1] )
self.assertEqual(str(__UpperCamelCase ) , """(0,0,0,0,0,1)""" )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Tuple = Vector([1, 2, 3, 4] )
self.assertEqual(len(__UpperCamelCase ) , 4 )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Optional[Any] = Vector([1, 2] )
__a : List[str] = Vector([1, 2, 3, 4, 5] )
__a : Optional[int] = Vector([0, 0, 0, 0, 0, 0, 0, 0, 0, 0] )
__a : Dict = Vector([1, -1, 1, -1, 2, -3, 4, -5] )
self.assertAlmostEqual(x.euclidean_length() , 2.2_3_6 , 3 )
self.assertAlmostEqual(y.euclidean_length() , 7.4_1_6 , 3 )
self.assertEqual(z.euclidean_length() , 0 )
self.assertAlmostEqual(w.euclidean_length() , 7.6_1_6 , 3 )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Dict = Vector([1, 2, 3] )
__a : Union[str, Any] = Vector([1, 1, 1] )
self.assertEqual((x + y).component(0 ) , 2 )
self.assertEqual((x + y).component(1 ) , 3 )
self.assertEqual((x + y).component(2 ) , 4 )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : List[str] = Vector([1, 2, 3] )
__a : Any = Vector([1, 1, 1] )
self.assertEqual((x - y).component(0 ) , 0 )
self.assertEqual((x - y).component(1 ) , 1 )
self.assertEqual((x - y).component(2 ) , 2 )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Tuple = Vector([1, 2, 3] )
__a : Optional[Any] = Vector([2, -1, 4] ) # for test of dot product
__a : Union[str, Any] = Vector([1, -2, -1] )
self.assertEqual(str(x * 3.0 ) , """(3.0,6.0,9.0)""" )
self.assertEqual((a * b) , 0 )
def __lowerCamelCase ( self ):
'''simple docstring'''
self.assertEqual(str(zero_vector(10 ) ).count("""0""" ) , 10 )
def __lowerCamelCase ( self ):
'''simple docstring'''
self.assertEqual(str(unit_basis_vector(3 , 1 ) ) , """(0,1,0)""" )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Dict = Vector([1, 2, 3] )
__a : Optional[int] = Vector([1, 0, 1] )
self.assertEqual(str(axpy(2 , __UpperCamelCase , __UpperCamelCase ) ) , """(3,4,7)""" )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : int = Vector([1, 0, 0, 0, 0, 0] )
__a : Any = x.copy()
self.assertEqual(str(__UpperCamelCase ) , str(__UpperCamelCase ) )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Union[str, Any] = Vector([1, 0, 0] )
x.change_component(0 , 0 )
x.change_component(1 , 1 )
self.assertEqual(str(__UpperCamelCase ) , """(0,1,0)""" )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Dict = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
self.assertEqual("""|1,2,3|\n|2,4,5|\n|6,7,8|\n""" , str(__UpperCamelCase ) )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : List[Any] = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
__a : List[Any] = [[-3, -14, -10], [-5, -10, -5], [-2, -1, 0]]
for x in range(a.height() ):
for y in range(a.width() ):
self.assertEqual(minors[x][y] , a.minor(__UpperCamelCase , __UpperCamelCase ) )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : List[Any] = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
__a : Any = [[-3, 14, -10], [5, -10, 5], [-2, 1, 0]]
for x in range(a.height() ):
for y in range(a.width() ):
self.assertEqual(cofactors[x][y] , a.cofactor(__UpperCamelCase , __UpperCamelCase ) )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : List[Any] = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
self.assertEqual(-5 , a.determinant() )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Any = Matrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]] , 3 , 3 )
__a : List[Any] = Vector([1, 2, 3] )
self.assertEqual("""(14,32,50)""" , str(a * x ) )
self.assertEqual("""|2,4,6|\n|8,10,12|\n|14,16,18|\n""" , str(a * 2 ) )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : List[str] = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
a.change_component(0 , 2 , 5 )
self.assertEqual("""|1,2,5|\n|2,4,5|\n|6,7,8|\n""" , str(__UpperCamelCase ) )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Tuple = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
        self.assertAlmostEqual(7 , a.component(2 , 1 ) , delta=0.0_1 )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Dict = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
__a : Union[str, Any] = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]] , 3 , 3 )
self.assertEqual("""|2,4,10|\n|4,8,10|\n|12,14,18|\n""" , str(a + b ) )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : List[Any] = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
__a : List[str] = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]] , 3 , 3 )
self.assertEqual("""|0,0,-4|\n|0,0,0|\n|0,0,-2|\n""" , str(a - b ) )
def __lowerCamelCase ( self ):
'''simple docstring'''
self.assertEqual(
"""|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n""" , str(square_zero_matrix(5 ) ) , )
if __name__ == "__main__":
    unittest.main()
'''simple docstring'''
from itertools import product
from cva import COLOR_BGR2GRAY, cvtColor, imread, imshow, waitKey
from numpy import dot, exp, mgrid, pi, ravel, square, uinta, zeros
def _snake_case ( lowercase , lowercase ) -> Dict:
__a : Dict = k_size // 2
__a , __a : List[Any] = mgrid[0 - center : k_size - center, 0 - center : k_size - center]
__a : Union[str, Any] = 1 / (2 * pi * sigma) * exp(-(square(lowercase ) + square(lowercase )) / (2 * square(lowercase )) )
return g
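# Convolve via the im2col trick: each k_size x k_size window is flattened into
# one row, so the whole filtering pass reduces to a single matrix product.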
def _snake_case ( lowercase , lowercase , lowercase ) -> str:
__a , __a : Optional[Any] = image.shape[0], image.shape[1]
# dst image height and width
__a : Tuple = height - k_size + 1
__a : Any = width - k_size + 1
# im2col, turn the k_size*k_size pixels into a row and np.vstack all rows
__a : Tuple = zeros((dst_height * dst_width, k_size * k_size) )
__a : Optional[Any] = 0
for i, j in product(range(lowercase ) , range(lowercase ) ):
__a : str = ravel(image[i : i + k_size, j : j + k_size] )
__a : int = window
row += 1
# turn the kernel into shape(k*k, 1)
__a : Union[str, Any] = gen_gaussian_kernel(lowercase , lowercase )
__a : List[str] = ravel(lowercase )
# reshape and get the dst image
__a : Any = dot(lowercase , lowercase ).reshape(lowercase , lowercase ).astype(lowercase )
return dst
if __name__ == "__main__":
# read original image
__SCREAMING_SNAKE_CASE : List[Any] = imread(r'../image_data/lena.jpg')
# turn image in gray scale value
__SCREAMING_SNAKE_CASE : Any = cvtColor(img, COLOR_BGR2GRAY)
# get values with two different mask size
__SCREAMING_SNAKE_CASE : Any = gaussian_filter(gray, 3, sigma=1)
__SCREAMING_SNAKE_CASE : Union[str, Any] = gaussian_filter(gray, 5, sigma=0.8)
# show result images
imshow('gaussian filter with 3x3 mask', gaussianaxa)
imshow('gaussian filter with 5x5 mask', gaussianaxa)
    waitKey()
'''simple docstring'''
import os
from itertools import chain
from random import randrange, shuffle
import pytest
from .sola import PokerHand
__SCREAMING_SNAKE_CASE : List[str] = (
'4S 3H 2C 7S 5H',
'9D 8H 2C 6S 7H',
'2D 6D 9D TH 7D',
'TC 8C 2S JH 6C',
'JH 8S TH AH QH',
'TS KS 5S 9S AC',
'KD 6S 9D TH AD',
'KS 8D 4D 9S 4S', # pair
'8C 4S KH JS 4D', # pair
'QH 8H KD JH 8S', # pair
'KC 4H KS 2H 8D', # pair
'KD 4S KC 3H 8S', # pair
'AH 8S AS KC JH', # pair
'3H 4C 4H 3S 2H', # 2 pairs
'5S 5D 2C KH KH', # 2 pairs
'3C KH 5D 5S KH', # 2 pairs
'AS 3C KH AD KH', # 2 pairs
'7C 7S 3S 7H 5S', # 3 of a kind
'7C 7S KH 2H 7H', # 3 of a kind
'AC KH QH AH AS', # 3 of a kind
'2H 4D 3C AS 5S', # straight (low ace)
'3C 5C 4C 2C 6H', # straight
'6S 8S 7S 5H 9H', # straight
'JS QS 9H TS KH', # straight
'QC KH TS JS AH', # straight (high ace)
'8C 9C 5C 3C TC', # flush
'3S 8S 9S 5S KS', # flush
'4C 5C 9C 8C KC', # flush
'JH 8H AH KH QH', # flush
'3D 2H 3H 2C 2D', # full house
'2H 2C 3S 3H 3D', # full house
'KH KC 3S 3H 3D', # full house
'JC 6H JS JD JH', # 4 of a kind
'JC 7H JS JD JH', # 4 of a kind
'JC KH JS JD JH', # 4 of a kind
'2S AS 4S 5S 3S', # straight flush (low ace)
'2D 6D 3D 4D 5D', # straight flush
'5C 6C 3C 7C 4C', # straight flush
'JH 9H TH KH QH', # straight flush
'JH AH TH KH QH', # royal flush (high ace straight flush)
)
__SCREAMING_SNAKE_CASE : Optional[Any] = (
('2H 3H 4H 5H 6H', 'KS AS TS QS JS', 'Loss'),
('2H 3H 4H 5H 6H', 'AS AD AC AH JD', 'Win'),
('AS AH 2H AD AC', 'JS JD JC JH 3D', 'Win'),
('2S AH 2H AS AC', 'JS JD JC JH AD', 'Loss'),
('2S AH 2H AS AC', '2H 3H 5H 6H 7H', 'Win'),
('AS 3S 4S 8S 2S', '2H 3H 5H 6H 7H', 'Win'),
('2H 3H 5H 6H 7H', '2S 3H 4H 5S 6C', 'Win'),
('2S 3H 4H 5S 6C', '3D 4C 5H 6H 2S', 'Tie'),
('2S 3H 4H 5S 6C', 'AH AC 5H 6H AS', 'Win'),
('2S 2H 4H 5S 4C', 'AH AC 5H 6H AS', 'Loss'),
('2S 2H 4H 5S 4C', 'AH AC 5H 6H 7S', 'Win'),
('6S AD 7H 4S AS', 'AH AC 5H 6H 7S', 'Loss'),
('2S AH 4H 5S KC', 'AH AC 5H 6H 7S', 'Loss'),
('2S 3H 6H 7S 9C', '7H 3C TH 6H 9S', 'Loss'),
('4S 5H 6H TS AC', '3S 5H 6H TS AC', 'Win'),
('2S AH 4H 5S 6C', 'AD 4C 5H 6H 2C', 'Tie'),
('AS AH 3H AD AC', 'AS AH 2H AD AC', 'Win'),
('AH AC 5H 5C QS', 'AH AC 5H 5C KS', 'Loss'),
('AH AC 5H 5C QS', 'KH KC 5H 5C QS', 'Win'),
('7C 7S KH 2H 7H', '3C 3S AH 2H 3H', 'Win'),
('3C 3S AH 2H 3H', '7C 7S KH 2H 7H', 'Loss'),
('6H 5H 4H 3H 2H', '5H 4H 3H 2H AH', 'Win'),
('5H 4H 3H 2H AH', '5H 4H 3H 2H AH', 'Tie'),
('5H 4H 3H 2H AH', '6H 5H 4H 3H 2H', 'Loss'),
('AH AD KS KC AC', 'AH KD KH AC KC', 'Win'),
('2H 4D 3C AS 5S', '2H 4D 3C 6S 5S', 'Loss'),
('2H 3S 3C 3H 2S', '3S 3C 2S 2H 2D', 'Win'),
('4D 6D 5D 2D JH', '3S 8S 3H TC KH', 'Loss'),
('4S 6C 8S 3S 7S', 'AD KS 2D 7D 7C', 'Loss'),
('6S 4C 7H 8C 3H', '5H JC AH 9D 9C', 'Loss'),
('9D 9H JH TC QH', '3C 2S JS 5C 7H', 'Win'),
('2H TC 8S AD 9S', '4H TS 7H 2C 5C', 'Win'),
('9D 3S 2C 7S 7C', 'JC TD 3C TC 9H', 'Loss'),
)
__SCREAMING_SNAKE_CASE : Tuple = (
('2H 3H 4H 5H 6H', True),
('AS AH 2H AD AC', False),
('2H 3H 5H 6H 7H', True),
('KS AS TS QS JS', True),
('8H 9H QS JS TH', False),
('AS 3S 4S 8S 2S', True),
)
__SCREAMING_SNAKE_CASE : Dict = (
('2H 3H 4H 5H 6H', True),
('AS AH 2H AD AC', False),
('2H 3H 5H 6H 7H', False),
('KS AS TS QS JS', True),
('8H 9H QS JS TH', True),
)
__SCREAMING_SNAKE_CASE : Optional[int] = (
('2H 4D 3C AS 5S', True, [5, 4, 3, 2, 14]),
('2H 5D 3C AS 5S', False, [14, 5, 5, 3, 2]),
('JH QD KC AS TS', False, [14, 13, 12, 11, 10]),
('9D 3S 2C 7S 7C', False, [9, 7, 7, 3, 2]),
)
__SCREAMING_SNAKE_CASE : int = (
('JH AH TH KH QH', 0),
('JH 9H TH KH QH', 0),
('JC KH JS JD JH', 7),
('KH KC 3S 3H 3D', 6),
('8C 9C 5C 3C TC', 0),
('JS QS 9H TS KH', 0),
('7C 7S KH 2H 7H', 3),
('3C KH 5D 5S KH', 2),
('QH 8H KD JH 8S', 1),
('2D 6D 9D TH 7D', 0),
)
__SCREAMING_SNAKE_CASE : int = (
('JH AH TH KH QH', 23),
('JH 9H TH KH QH', 22),
('JC KH JS JD JH', 21),
('KH KC 3S 3H 3D', 20),
('8C 9C 5C 3C TC', 19),
('JS QS 9H TS KH', 18),
('7C 7S KH 2H 7H', 17),
('3C KH 5D 5S KH', 16),
('QH 8H KD JH 8S', 15),
('2D 6D 9D TH 7D', 14),
)
def _snake_case ( ) -> List[str]:
    __a , __a : List[Any] = randrange(len(SORTED_HANDS ) ), randrange(len(SORTED_HANDS ) )
__a : int = ["""Loss""", """Tie""", """Win"""][(play >= oppo) + (play > oppo)]
__a , __a : int = SORTED_HANDS[play], SORTED_HANDS[oppo]
return hand, other, expected
def _snake_case ( lowercase = 1_0_0 ) -> Any:
return (generate_random_hand() for _ in range(lowercase ))
@pytest.mark.parametrize("""hand, expected""" , lowercase )
def _snake_case ( lowercase , lowercase ) -> int:
assert PokerHand(lowercase )._is_flush() == expected
@pytest.mark.parametrize("""hand, expected""" , lowercase )
def _snake_case ( lowercase , lowercase ) -> Any:
assert PokerHand(lowercase )._is_straight() == expected
@pytest.mark.parametrize("""hand, expected, card_values""" , lowercase )
def _snake_case ( lowercase , lowercase , lowercase ) -> List[str]:
__a : Union[str, Any] = PokerHand(lowercase )
assert player._is_five_high_straight() == expected
assert player._card_values == card_values
@pytest.mark.parametrize("""hand, expected""" , lowercase )
def _snake_case ( lowercase , lowercase ) -> Optional[int]:
assert PokerHand(lowercase )._is_same_kind() == expected
@pytest.mark.parametrize("""hand, expected""" , lowercase )
def _snake_case ( lowercase , lowercase ) -> Union[str, Any]:
assert PokerHand(lowercase )._hand_type == expected
@pytest.mark.parametrize("""hand, other, expected""" , lowercase )
def _snake_case ( lowercase , lowercase , lowercase ) -> Optional[int]:
assert PokerHand(lowercase ).compare_with(PokerHand(lowercase ) ) == expected
@pytest.mark.parametrize("""hand, other, expected""" , generate_random_hands() )
def _snake_case ( lowercase , lowercase , lowercase ) -> int:
assert PokerHand(lowercase ).compare_with(PokerHand(lowercase ) ) == expected
def _snake_case ( ) -> Union[str, Any]:
__a : Tuple = [PokerHand(lowercase ) for hand in SORTED_HANDS]
__a : Optional[int] = poker_hands.copy()
shuffle(lowercase )
__a : List[str] = chain(sorted(lowercase ) )
for index, hand in enumerate(lowercase ):
assert hand == poker_hands[index]
def _snake_case ( ) -> List[str]:
# Test that five high straights are compared correctly.
__a : Optional[int] = [PokerHand("""2D AC 3H 4H 5S""" ), PokerHand("""2S 3H 4H 5S 6C""" )]
pokerhands.sort(reverse=lowercase )
assert pokerhands[0].__str__() == "2S 3H 4H 5S 6C"
def _snake_case ( ) -> List[str]:
# Multiple calls to five_high_straight function should still return True
# and shouldn't mutate the list in every call other than the first.
__a : Dict = PokerHand("""2C 4S AS 3D 5C""" )
__a : Dict = True
__a : Optional[int] = [5, 4, 3, 2, 1_4]
for _ in range(1_0 ):
assert pokerhand._is_five_high_straight() == expected
assert pokerhand._card_values == expected_card_values
def _snake_case ( ) -> Dict:
# Problem number 54 from Project Euler
# Testing from poker_hands.txt file
__a : Tuple = 0
__a : int = os.path.abspath(os.path.dirname(lowercase ) )
__a : Union[str, Any] = os.path.join(lowercase , """poker_hands.txt""" )
with open(lowercase ) as file_hand:
for line in file_hand:
__a : Union[str, Any] = line[:1_4].strip()
__a : Optional[Any] = line[1_5:].strip()
__a , __a : List[str] = PokerHand(lowercase ), PokerHand(lowercase )
__a : str = player.compare_with(lowercase )
if output == "Win":
answer += 1
    assert answer == 3_7_6
'''simple docstring'''
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
HubertConfig,
HubertForCTC,
HubertModel,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
__SCREAMING_SNAKE_CASE : Optional[int] = logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE : Any = {
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
'fc2': 'encoder.layers.*.feed_forward.output_dense',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'w2v_encoder.proj': 'lm_head',
'mask_emb': 'masked_spec_embed',
}
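# Copy a single fairseq tensor into the matching HF attribute, asserting that
# the shapes agree before assignment.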
def _snake_case ( lowercase , lowercase , lowercase , lowercase , lowercase ) -> Optional[int]:
for attribute in key.split(""".""" ):
__a : Tuple = getattr(lowercase , lowercase )
if weight_type is not None:
__a : Tuple = getattr(lowercase , lowercase ).shape
else:
__a : Optional[int] = hf_pointer.shape
assert hf_shape == value.shape, (
F"""Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be"""
F""" {value.shape} for {full_name}"""
)
if weight_type == "weight":
__a : Optional[Any] = value
elif weight_type == "weight_g":
__a : List[Any] = value
elif weight_type == "weight_v":
__a : Optional[Any] = value
elif weight_type == "bias":
__a : Any = value
else:
__a : Tuple = value
logger.info(F"""{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.""" )
def _snake_case ( lowercase , lowercase , lowercase ) -> int:
__a : Union[str, Any] = []
__a : List[str] = fairseq_model.state_dict()
__a : Any = hf_model.hubert.feature_extractor if is_finetuned else hf_model.feature_extractor
for name, value in fairseq_dict.items():
__a : Dict = False
if "conv_layers" in name:
load_conv_layer(
lowercase , lowercase , lowercase , lowercase , hf_model.config.feat_extract_norm == """group""" , )
__a : Optional[int] = True
else:
for key, mapped_key in MAPPING.items():
__a : int = """hubert.""" + mapped_key if (is_finetuned and mapped_key != """lm_head""") else mapped_key
if key in name or (key.split("""w2v_model.""" )[-1] == name.split(""".""" )[0] and not is_finetuned):
__a : str = True
if "*" in mapped_key:
                        __a : int = name.split(key )[0].split(""".""" )[-2]
__a : Union[str, Any] = mapped_key.replace("""*""" , lowercase )
if "weight_g" in name:
__a : Any = """weight_g"""
elif "weight_v" in name:
__a : Tuple = """weight_v"""
elif "weight" in name:
__a : Optional[Any] = """weight"""
elif "bias" in name:
__a : Optional[int] = """bias"""
else:
__a : List[str] = None
set_recursively(lowercase , lowercase , lowercase , lowercase , lowercase )
continue
if not is_used:
unused_weights.append(lowercase )
logger.warning(F"""Unused weights: {unused_weights}""" )
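# Feature-extractor conv weights are keyed positionally ("conv_layers.<id>..."),
# so they are routed by parsing the layer and type ids out of the checkpoint key.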
def _snake_case ( lowercase , lowercase , lowercase , lowercase , lowercase ) -> Tuple:
__a : Any = full_name.split("""conv_layers.""" )[-1]
__a : Union[str, Any] = name.split(""".""" )
__a : Any = int(items[0] )
__a : List[Any] = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."""
)
__a : Optional[int] = value
logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."""
)
__a : int = value
logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
F"""{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"""
" found."
)
__a : Any = value
logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."""
)
__a : int = value
logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
else:
unused_weights.append(lowercase )
@torch.no_grad()
def _snake_case ( lowercase , lowercase , lowercase=None , lowercase=None , lowercase=True ) -> Union[str, Any]:
if config_path is not None:
__a : int = HubertConfig.from_pretrained(lowercase )
else:
__a : Any = HubertConfig()
if is_finetuned:
if dict_path:
__a : Optional[Any] = Dictionary.load(lowercase )
# important change bos & pad token id since CTC symbol is <pad> and
# not <s> as in fairseq
__a : Tuple = target_dict.pad_index
__a : Dict = target_dict.bos_index
__a : Dict = target_dict.eos_index
__a : Union[str, Any] = len(target_dict.symbols )
__a : List[str] = os.path.join(lowercase , """vocab.json""" )
if not os.path.isdir(lowercase ):
logger.error("""--pytorch_dump_folder_path ({}) should be a directory""".format(lowercase ) )
return
os.makedirs(lowercase , exist_ok=lowercase )
with open(lowercase , """w""" , encoding="""utf-8""" ) as vocab_handle:
json.dump(target_dict.indices , lowercase )
__a : str = WavaVecaCTCTokenizer(
lowercase , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token="""|""" , do_lower_case=lowercase , )
__a : Union[str, Any] = True if config.feat_extract_norm == """layer""" else False
__a : int = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=1_6_0_0_0 , padding_value=0 , do_normalize=lowercase , return_attention_mask=lowercase , )
__a : List[Any] = WavaVecaProcessor(feature_extractor=lowercase , tokenizer=lowercase )
processor.save_pretrained(lowercase )
__a : Union[str, Any] = HubertForCTC(lowercase )
else:
__a : Optional[Any] = HubertModel(lowercase )
if is_finetuned:
__a , __a , __a : Any = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={"""data""": """/""".join(dict_path.split("""/""" )[:-1] )} )
else:
__a , __a , __a : Optional[Any] = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] )
__a : Dict = model[0].eval()
recursively_load_weights(lowercase , lowercase , lowercase )
hf_wavavec.save_pretrained(lowercase )
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE : Any = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--not_finetuned', action='store_true', help='Whether the model to convert is a fine-tuned model or not'
)
__SCREAMING_SNAKE_CASE : Any = parser.parse_args()
convert_hubert_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
    )
'''simple docstring'''
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__SCREAMING_SNAKE_CASE : Optional[Any] = {'configuration_focalnet': ['FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP', 'FocalNetConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE : List[Any] = [
'FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST',
'FocalNetForImageClassification',
'FocalNetForMaskedImageModeling',
'FocalNetBackbone',
'FocalNetModel',
'FocalNetPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_focalnet import (
FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
FocalNetPreTrainedModel,
)
else:
import sys
    __SCREAMING_SNAKE_CASE : Optional[int] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
'''simple docstring'''
from __future__ import annotations
import bisect
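# Hand-rolled bisect_left: returns the leftmost index at which `item` can be
# inserted while keeping the collection sorted.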
def _snake_case ( lowercase , lowercase , lowercase = 0 , lowercase = -1 ) -> int:
if hi < 0:
__a : Union[str, Any] = len(lowercase )
while lo < hi:
__a : List[str] = lo + (hi - lo) // 2
if sorted_collection[mid] < item:
__a : int = mid + 1
else:
__a : int = mid
return lo
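# bisect_right counterpart: the comparison uses <= so equal items are placed
# after existing occurrences.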
def _snake_case ( lowercase , lowercase , lowercase = 0 , lowercase = -1 ) -> int:
if hi < 0:
__a : Any = len(lowercase )
while lo < hi:
__a : Any = lo + (hi - lo) // 2
if sorted_collection[mid] <= item:
__a : List[str] = mid + 1
else:
__a : Any = mid
return lo
def _snake_case ( lowercase , lowercase , lowercase = 0 , lowercase = -1 ) -> None:
sorted_collection.insert(bisect_left(lowercase , lowercase , lowercase , lowercase ) , lowercase )
def _snake_case ( lowercase , lowercase , lowercase = 0 , lowercase = -1 ) -> None:
sorted_collection.insert(bisect_right(lowercase , lowercase , lowercase , lowercase ) , lowercase )
def _snake_case ( lowercase , lowercase ) -> int | None:
__a : Dict = 0
__a : Any = len(lowercase ) - 1
while left <= right:
__a : str = left + (right - left) // 2
__a : List[Any] = sorted_collection[midpoint]
if current_item == item:
return midpoint
elif item < current_item:
__a : Optional[Any] = midpoint - 1
else:
__a : Optional[int] = midpoint + 1
return None
def _snake_case ( lowercase , lowercase ) -> int | None:
__a : Optional[int] = bisect.bisect_left(lowercase , lowercase )
if index != len(lowercase ) and sorted_collection[index] == item:
return index
return None
def _snake_case ( lowercase , lowercase , lowercase , lowercase ) -> int | None:
if right < left:
return None
__a : Any = left + (right - left) // 2
if sorted_collection[midpoint] == item:
return midpoint
elif sorted_collection[midpoint] > item:
return binary_search_by_recursion(lowercase , lowercase , lowercase , midpoint - 1 )
else:
return binary_search_by_recursion(lowercase , lowercase , midpoint + 1 , lowercase )
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE : List[Any] = input('Enter numbers separated by comma:\n').strip()
__SCREAMING_SNAKE_CASE : Optional[Any] = sorted(int(item) for item in user_input.split(','))
__SCREAMING_SNAKE_CASE : List[str] = int(input('Enter a single number to be found in the list:\n'))
__SCREAMING_SNAKE_CASE : Optional[int] = binary_search(collection, target)
if result is None:
print(f'''{target} was not found in {collection}.''')
else:
    print(f'''{target} was found at position {result} in {collection}.''')
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roberta import RobertaTokenizer
__SCREAMING_SNAKE_CASE : int = logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE : Union[str, Any] = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'}
__SCREAMING_SNAKE_CASE : Optional[int] = {
'vocab_file': {
'roberta-base': 'https://huggingface.co/roberta-base/resolve/main/vocab.json',
'roberta-large': 'https://huggingface.co/roberta-large/resolve/main/vocab.json',
'roberta-large-mnli': 'https://huggingface.co/roberta-large-mnli/resolve/main/vocab.json',
'distilroberta-base': 'https://huggingface.co/distilroberta-base/resolve/main/vocab.json',
'roberta-base-openai-detector': 'https://huggingface.co/roberta-base-openai-detector/resolve/main/vocab.json',
'roberta-large-openai-detector': (
'https://huggingface.co/roberta-large-openai-detector/resolve/main/vocab.json'
),
},
'merges_file': {
'roberta-base': 'https://huggingface.co/roberta-base/resolve/main/merges.txt',
'roberta-large': 'https://huggingface.co/roberta-large/resolve/main/merges.txt',
'roberta-large-mnli': 'https://huggingface.co/roberta-large-mnli/resolve/main/merges.txt',
'distilroberta-base': 'https://huggingface.co/distilroberta-base/resolve/main/merges.txt',
'roberta-base-openai-detector': 'https://huggingface.co/roberta-base-openai-detector/resolve/main/merges.txt',
'roberta-large-openai-detector': (
'https://huggingface.co/roberta-large-openai-detector/resolve/main/merges.txt'
),
},
'tokenizer_file': {
'roberta-base': 'https://huggingface.co/roberta-base/resolve/main/tokenizer.json',
'roberta-large': 'https://huggingface.co/roberta-large/resolve/main/tokenizer.json',
'roberta-large-mnli': 'https://huggingface.co/roberta-large-mnli/resolve/main/tokenizer.json',
'distilroberta-base': 'https://huggingface.co/distilroberta-base/resolve/main/tokenizer.json',
'roberta-base-openai-detector': (
'https://huggingface.co/roberta-base-openai-detector/resolve/main/tokenizer.json'
),
'roberta-large-openai-detector': (
'https://huggingface.co/roberta-large-openai-detector/resolve/main/tokenizer.json'
),
},
}
__SCREAMING_SNAKE_CASE : Optional[int] = {
'roberta-base': 512,
'roberta-large': 512,
'roberta-large-mnli': 512,
'distilroberta-base': 512,
'roberta-base-openai-detector': 512,
'roberta-large-openai-detector': 512,
}
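# Fast (Rust tokenizers-backed) RoBERTa tokenizer; keeps `add_prefix_space` and
# `trim_offsets` in sync with the backend pre-tokenizer and post-processor.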
class SCREAMING_SNAKE_CASE__ ( __UpperCamelCase ):
lowercase__ = VOCAB_FILES_NAMES
lowercase__ = PRETRAINED_VOCAB_FILES_MAP
lowercase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase__ = ["input_ids", "attention_mask"]
lowercase__ = RobertaTokenizer
def __init__( self , __UpperCamelCase=None , __UpperCamelCase=None , __UpperCamelCase=None , __UpperCamelCase="replace" , __UpperCamelCase="<s>" , __UpperCamelCase="</s>" , __UpperCamelCase="</s>" , __UpperCamelCase="<s>" , __UpperCamelCase="<unk>" , __UpperCamelCase="<pad>" , __UpperCamelCase="<mask>" , __UpperCamelCase=False , __UpperCamelCase=True , **__UpperCamelCase , ):
'''simple docstring'''
super().__init__(
__UpperCamelCase , __UpperCamelCase , tokenizer_file=__UpperCamelCase , errors=__UpperCamelCase , bos_token=__UpperCamelCase , eos_token=__UpperCamelCase , sep_token=__UpperCamelCase , cls_token=__UpperCamelCase , unk_token=__UpperCamelCase , pad_token=__UpperCamelCase , mask_token=__UpperCamelCase , add_prefix_space=__UpperCamelCase , trim_offsets=__UpperCamelCase , **__UpperCamelCase , )
__a : List[str] = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("""add_prefix_space""" , __UpperCamelCase ) != add_prefix_space:
__a : List[Any] = getattr(__UpperCamelCase , pre_tok_state.pop("""type""" ) )
__a : Union[str, Any] = add_prefix_space
__a : Optional[Any] = pre_tok_class(**__UpperCamelCase )
__a : Optional[int] = add_prefix_space
__a : List[Any] = """post_processor"""
__a : str = getattr(self.backend_tokenizer , __UpperCamelCase , __UpperCamelCase )
if tokenizer_component_instance:
__a : List[Any] = json.loads(tokenizer_component_instance.__getstate__() )
# The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
if "sep" in state:
__a : Any = tuple(state["""sep"""] )
if "cls" in state:
__a : List[Any] = tuple(state["""cls"""] )
__a : List[Any] = False
if state.get("""add_prefix_space""" , __UpperCamelCase ) != add_prefix_space:
__a : Union[str, Any] = add_prefix_space
__a : Optional[Any] = True
if state.get("""trim_offsets""" , __UpperCamelCase ) != trim_offsets:
__a : str = trim_offsets
__a : Union[str, Any] = True
if changes_to_apply:
__a : Optional[int] = getattr(__UpperCamelCase , state.pop("""type""" ) )
__a : Dict = component_class(**__UpperCamelCase )
setattr(self.backend_tokenizer , __UpperCamelCase , __UpperCamelCase )
@property
def __lowerCamelCase ( self ):
'''simple docstring'''
if self._mask_token is None:
if self.verbose:
logger.error("""Using mask_token, but it is not set yet.""" )
return None
return str(self._mask_token )
@mask_token.setter
def __lowerCamelCase ( self , __UpperCamelCase ):
'''simple docstring'''
__a : Any = AddedToken(__UpperCamelCase , lstrip=__UpperCamelCase , rstrip=__UpperCamelCase ) if isinstance(__UpperCamelCase , __UpperCamelCase ) else value
__a : str = value
def __lowerCamelCase ( self , *__UpperCamelCase , **__UpperCamelCase ):
'''simple docstring'''
__a : List[str] = kwargs.get("""is_split_into_words""" , __UpperCamelCase )
assert self.add_prefix_space or not is_split_into_words, (
f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
"to use it with pretokenized inputs."
)
return super()._batch_encode_plus(*__UpperCamelCase , **__UpperCamelCase )
def __lowerCamelCase ( self , *__UpperCamelCase , **__UpperCamelCase ):
'''simple docstring'''
__a : Tuple = kwargs.get("""is_split_into_words""" , __UpperCamelCase )
assert self.add_prefix_space or not is_split_into_words, (
f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
"to use it with pretokenized inputs."
)
return super()._encode_plus(*__UpperCamelCase , **__UpperCamelCase )
def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase = None ):
'''simple docstring'''
__a : Union[str, Any] = self._tokenizer.model.save(__UpperCamelCase , name=__UpperCamelCase )
return tuple(__UpperCamelCase )
def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase=None ):
'''simple docstring'''
__a : List[Any] = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase = None ):
'''simple docstring'''
__a : Union[str, Any] = [self.sep_token_id]
__a : Optional[Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
'''simple docstring'''
from itertools import product
def _snake_case ( lowercase , lowercase ) -> list[int]:
__a : Optional[int] = sides_number
__a : Union[str, Any] = max_face_number * dice_number
__a : Optional[Any] = [0] * (max_total + 1)
__a : Dict = 1
__a : str = range(lowercase , max_face_number + 1 )
for dice_numbers in product(lowercase , repeat=lowercase ):
__a : int = sum(lowercase )
totals_frequencies[total] += 1
return totals_frequencies
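# Project Euler 205: Peter rolls nine 4-sided dice, Colin rolls six 6-sided
# dice; return the probability (to 7 decimal places) that Peter's total wins.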
def _snake_case ( ) -> float:
__a : Tuple = total_frequency_distribution(
sides_number=4 , dice_number=9 )
__a : Union[str, Any] = total_frequency_distribution(
sides_number=6 , dice_number=6 )
__a : str = 0
__a : Dict = 9
__a : str = 4 * 9
__a : Any = 6
for peter_total in range(lowercase , max_peter_total + 1 ):
peter_wins_count += peter_totals_frequencies[peter_total] * sum(
colin_totals_frequencies[min_colin_total:peter_total] )
__a : str = (4**9) * (6**6)
__a : List[Any] = peter_wins_count / total_games_number
__a : List[Any] = round(lowercase , ndigits=7 )
return rounded_peter_win_probability
if __name__ == "__main__":
    print(f'''{solution() = }''')
'''simple docstring'''
from functools import reduce
__SCREAMING_SNAKE_CASE : Tuple = (
'73167176531330624919225119674426574742355349194934'
'96983520312774506326239578318016984801869478851843'
'85861560789112949495459501737958331952853208805511'
'12540698747158523863050715693290963295227443043557'
'66896648950445244523161731856403098711121722383113'
'62229893423380308135336276614282806444486645238749'
'30358907296290491560440772390713810515859307960866'
'70172427121883998797908792274921901699720888093776'
'65727333001053367881220235421809751254540594752243'
'52584907711670556013604839586446706324415722155397'
'53697817977846174064955149290862569321978468622482'
'83972241375657056057490261407972968652414535100474'
'82166370484403199890008895243450658541227588666881'
'16427171479924442928230863465674813919123162824586'
'17866458359124566529476545682848912883142607690042'
'24219022671055626321111109370544217506941658960408'
'07198403850962455444362981230987879927244284909188'
'84580156166097919133875499200524063689912560717606'
'05886116467109405077541002256983155200055935729725'
'71636269561882670428252483600823257530420752963450'
)
def _snake_case ( lowercase = N ) -> int:
return max(
# mypy cannot properly interpret reduce
int(reduce(lambda lowercase , lowercase : str(int(lowercase ) * int(lowercase ) ) , n[i : i + 1_3] ) )
for i in range(len(lowercase ) - 1_2 ) )
if __name__ == "__main__":
    print(f'''{solution() = }''')
'''simple docstring'''
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import DiffusionPipeline
from diffusers.models import AutoencoderKL, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import logging
__SCREAMING_SNAKE_CASE : Union[str, Any] = logging.get_logger(__name__) # pylint: disable=invalid-name
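# Stable Diffusion pipeline variant that can reuse a cropped reference latent,
# so that the same seed produces visually similar images at different sizes.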
class SCREAMING_SNAKE_CASE__ ( __UpperCamelCase ):
def __init__( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , ):
'''simple docstring'''
super().__init__()
self.register_modules(
vae=__UpperCamelCase , text_encoder=__UpperCamelCase , tokenizer=__UpperCamelCase , unet=__UpperCamelCase , scheduler=__UpperCamelCase , safety_checker=__UpperCamelCase , feature_extractor=__UpperCamelCase , )
def __lowerCamelCase ( self , __UpperCamelCase = "auto" ):
'''simple docstring'''
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
__a : Union[str, Any] = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(__UpperCamelCase )
def __lowerCamelCase ( self ):
'''simple docstring'''
self.enable_attention_slicing(__UpperCamelCase )
@torch.no_grad()
def __call__( self , __UpperCamelCase , __UpperCamelCase = 512 , __UpperCamelCase = 512 , __UpperCamelCase = 50 , __UpperCamelCase = 7.5 , __UpperCamelCase = None , __UpperCamelCase = 1 , __UpperCamelCase = 0.0 , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = "pil" , __UpperCamelCase = True , __UpperCamelCase = None , __UpperCamelCase = 1 , __UpperCamelCase = None , **__UpperCamelCase , ):
'''simple docstring'''
if isinstance(__UpperCamelCase , __UpperCamelCase ):
__a : Union[str, Any] = 1
elif isinstance(__UpperCamelCase , __UpperCamelCase ):
__a : Tuple = len(__UpperCamelCase )
else:
raise ValueError(f"""`prompt` has to be of type `str` or `list` but is {type(__UpperCamelCase )}""" )
if height % 8 != 0 or width % 8 != 0:
raise ValueError(f"""`height` and `width` have to be divisible by 8 but are {height} and {width}.""" )
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(__UpperCamelCase , __UpperCamelCase ) or callback_steps <= 0)
):
raise ValueError(
f"""`callback_steps` has to be a positive integer but is {callback_steps} of type"""
f""" {type(__UpperCamelCase )}.""" )
# get prompt text embeddings
__a : Tuple = self.tokenizer(
__UpperCamelCase , padding="""max_length""" , max_length=self.tokenizer.model_max_length , return_tensors="""pt""" , )
__a : Union[str, Any] = text_inputs.input_ids
if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
__a : str = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
logger.warning(
"""The following part of your input was truncated because CLIP can only handle sequences up to"""
f""" {self.tokenizer.model_max_length} tokens: {removed_text}""" )
__a : Optional[int] = text_input_ids[:, : self.tokenizer.model_max_length]
if text_embeddings is None:
__a : int = self.text_encoder(text_input_ids.to(self.device ) )[0]
# duplicate text embeddings for each generation per prompt, using mps friendly method
__a , __a , __a : Union[str, Any] = text_embeddings.shape
__a : Optional[Any] = text_embeddings.repeat(1 , __UpperCamelCase , 1 )
__a : Union[str, Any] = text_embeddings.view(bs_embed * num_images_per_prompt , __UpperCamelCase , -1 )
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
__a : Any = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
__a : List[str]
if negative_prompt is None:
__a : Optional[Any] = [""""""]
elif type(__UpperCamelCase ) is not type(__UpperCamelCase ):
raise TypeError(
f"""`negative_prompt` should be the same type to `prompt`, but got {type(__UpperCamelCase )} !="""
f""" {type(__UpperCamelCase )}.""" )
elif isinstance(__UpperCamelCase , __UpperCamelCase ):
__a : Any = [negative_prompt]
elif batch_size != len(__UpperCamelCase ):
raise ValueError(
f"""`negative_prompt`: {negative_prompt} has batch size {len(__UpperCamelCase )}, but `prompt`:"""
f""" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"""
""" the batch size of `prompt`.""" )
else:
__a : Tuple = negative_prompt
__a : Any = text_input_ids.shape[-1]
__a : List[str] = self.tokenizer(
__UpperCamelCase , padding="""max_length""" , max_length=__UpperCamelCase , truncation=__UpperCamelCase , return_tensors="""pt""" , )
__a : str = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
__a : List[str] = uncond_embeddings.shape[1]
__a : List[Any] = uncond_embeddings.repeat(__UpperCamelCase , __UpperCamelCase , 1 )
__a : Tuple = uncond_embeddings.view(batch_size * num_images_per_prompt , __UpperCamelCase , -1 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
__a : List[Any] = torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
__a : Tuple = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
__a : List[Any] = (batch_size * num_images_per_prompt, self.unet.config.in_channels, 64, 64)
__a : int = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not exist on mps
__a : Any = torch.randn(
__UpperCamelCase , generator=__UpperCamelCase , device="""cpu""" , dtype=__UpperCamelCase ).to(self.device )
__a : Optional[Any] = torch.randn(__UpperCamelCase , generator=__UpperCamelCase , device="""cpu""" , dtype=__UpperCamelCase ).to(
self.device )
else:
__a : Optional[int] = torch.randn(
__UpperCamelCase , generator=__UpperCamelCase , device=self.device , dtype=__UpperCamelCase )
__a : str = torch.randn(__UpperCamelCase , generator=__UpperCamelCase , device=self.device , dtype=__UpperCamelCase )
else:
if latents_reference.shape != latents_shape:
raise ValueError(f"""Unexpected latents shape, got {latents.shape}, expected {latents_shape}""" )
__a : Optional[Any] = latents_reference.to(self.device )
__a : str = latents.to(self.device )
# This is the key part of the pipeline where we
# try to ensure that the generated images w/ the same seed
# but different sizes actually result in similar images
__a : List[str] = (latents_shape[3] - latents_shape_reference[3]) // 2
__a : int = (latents_shape[2] - latents_shape_reference[2]) // 2
__a : int = latents_shape_reference[3] if dx >= 0 else latents_shape_reference[3] + 2 * dx
__a : Tuple = latents_shape_reference[2] if dy >= 0 else latents_shape_reference[2] + 2 * dy
__a : Optional[Any] = 0 if dx < 0 else dx
__a : Optional[Any] = 0 if dy < 0 else dy
__a : Optional[int] = max(-dx , 0 )
__a : Optional[Any] = max(-dy , 0 )
__a : Optional[int] = latents_reference[:, :, dy : dy + h, dx : dx + w]
# set timesteps
self.scheduler.set_timesteps(__UpperCamelCase )
# Some schedulers like PNDM have timesteps as arrays
# It's more optimized to move all timesteps to correct device beforehand
__a : Dict = self.scheduler.timesteps.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
__a : Any = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
__a : List[Any] = """eta""" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
__a : Optional[Any] = {}
if accepts_eta:
__a : Union[str, Any] = eta
for i, t in enumerate(self.progress_bar(__UpperCamelCase ) ):
# expand the latents if we are doing classifier free guidance
__a : List[str] = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
__a : Tuple = self.scheduler.scale_model_input(__UpperCamelCase , __UpperCamelCase )
# predict the noise residual
__a : Union[str, Any] = self.unet(__UpperCamelCase , __UpperCamelCase , encoder_hidden_states=__UpperCamelCase ).sample
# perform guidance
if do_classifier_free_guidance:
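                # split the batched prediction back into its unconditional and
                # text-conditioned halves, then combine them as
                # uncond + guidance_scale * (text - uncond)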
__a , __a : List[str] = noise_pred.chunk(2 )
__a : Optional[int] = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# compute the previous noisy sample x_t -> x_t-1
__a : List[Any] = self.scheduler.step(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , **__UpperCamelCase ).prev_sample
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
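        # scale and decode the image latents with the VAE (0.18215 is the Stable
        # Diffusion VAE scaling factor)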
__a : Optional[Any] = 1 / 0.1_8_2_1_5 * latents
__a : Optional[int] = self.vae.decode(__UpperCamelCase ).sample
__a : List[str] = (image / 2 + 0.5).clamp(0 , 1 )
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
__a : int = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if self.safety_checker is not None:
__a : List[str] = self.feature_extractor(self.numpy_to_pil(__UpperCamelCase ) , return_tensors="""pt""" ).to(
self.device )
__a , __a : int = self.safety_checker(
images=__UpperCamelCase , clip_input=safety_checker_input.pixel_values.to(text_embeddings.dtype ) )
else:
__a : Optional[int] = None
if output_type == "pil":
__a : str = self.numpy_to_pil(__UpperCamelCase )
if not return_dict:
return (image, has_nsfw_concept)
return StableDiffusionPipelineOutput(images=__UpperCamelCase , nsfw_content_detected=__UpperCamelCase ) | 697 | 1 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import BeitConfig, BeitForImageClassification, BeitForMaskedImageModeling, BeitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
__SCREAMING_SNAKE_CASE : Union[str, Any] = logging.get_logger(__name__)
def _snake_case ( lowercase , lowercase=False , lowercase=False ) -> int:
__a : Union[str, Any] = """backbone.""" if is_semantic else """"""
__a : List[str] = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F"""{prefix}blocks.{i}.norm1.weight""", F"""beit.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((F"""{prefix}blocks.{i}.norm1.bias""", F"""beit.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append(
(F"""{prefix}blocks.{i}.attn.proj.weight""", F"""beit.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append(
(F"""{prefix}blocks.{i}.attn.proj.bias""", F"""beit.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append((F"""{prefix}blocks.{i}.norm2.weight""", F"""beit.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((F"""{prefix}blocks.{i}.norm2.bias""", F"""beit.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append((F"""{prefix}blocks.{i}.mlp.fc1.weight""", F"""beit.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append((F"""{prefix}blocks.{i}.mlp.fc1.bias""", F"""beit.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append((F"""{prefix}blocks.{i}.mlp.fc2.weight""", F"""beit.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((F"""{prefix}blocks.{i}.mlp.fc2.bias""", F"""beit.encoder.layer.{i}.output.dense.bias""") )
# projection layer + position embeddings
rename_keys.extend(
[
(F"""{prefix}cls_token""", """beit.embeddings.cls_token"""),
(F"""{prefix}patch_embed.proj.weight""", """beit.embeddings.patch_embeddings.projection.weight"""),
(F"""{prefix}patch_embed.proj.bias""", """beit.embeddings.patch_embeddings.projection.bias"""),
(F"""{prefix}pos_embed""", """beit.embeddings.position_embeddings"""),
] )
if has_lm_head:
# mask token + layernorm
rename_keys.extend(
[
("""mask_token""", """beit.embeddings.mask_token"""),
("""norm.weight""", """layernorm.weight"""),
("""norm.bias""", """layernorm.bias"""),
] )
else:
# layernorm + classification head
rename_keys.extend(
[
("""fc_norm.weight""", """beit.pooler.layernorm.weight"""),
("""fc_norm.bias""", """beit.pooler.layernorm.bias"""),
("""head.weight""", """classifier.weight"""),
("""head.bias""", """classifier.bias"""),
] )
return rename_keys
def _snake_case ( lowercase , lowercase , lowercase=False , lowercase=False ) -> Optional[Any]:
for i in range(config.num_hidden_layers ):
__a : str = """backbone.""" if is_semantic else """"""
# queries, keys and values
__a : List[Any] = state_dict.pop(F"""{prefix}blocks.{i}.attn.qkv.weight""" )
__a : Union[str, Any] = state_dict.pop(F"""{prefix}blocks.{i}.attn.q_bias""" )
__a : Tuple = state_dict.pop(F"""{prefix}blocks.{i}.attn.v_bias""" )
__a : Optional[Any] = in_proj_weight[
: config.hidden_size, :
]
__a : Tuple = q_bias
__a : Optional[int] = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
__a : Tuple = in_proj_weight[
-config.hidden_size :, :
]
__a : Optional[Any] = v_bias
# gamma_1 and gamma_2
# we call them lambda because otherwise they are renamed when using .from_pretrained
__a : List[str] = state_dict.pop(F"""{prefix}blocks.{i}.gamma_1""" )
__a : Tuple = state_dict.pop(F"""{prefix}blocks.{i}.gamma_2""" )
__a : Any = gamma_a
__a : List[str] = gamma_a
def _snake_case ( lowercase , lowercase , lowercase ) -> Optional[Any]:
__a : Optional[Any] = dct.pop(lowercase )
__a : Optional[Any] = val
def _snake_case ( ) -> Optional[int]:
__a : Union[str, Any] = """http://images.cocodataset.org/val2017/000000039769.jpg"""
__a : Union[str, Any] = Image.open(requests.get(lowercase , stream=lowercase ).raw )
return im
@torch.no_grad()
def _snake_case ( lowercase , lowercase , lowercase=False ) -> Optional[int]:
    __a : Tuple = """rvlcdip""" not in checkpoint_url
__a : int = BeitConfig(use_absolute_position_embeddings=lowercase , use_mask_token=lowercase )
# size of the architecture
if "large" in checkpoint_url or "dit-l" in checkpoint_url:
__a : Dict = 1_0_2_4
__a : List[str] = 4_0_9_6
__a : Any = 2_4
__a : Union[str, Any] = 1_6
# labels
if "rvlcdip" in checkpoint_url:
__a : Any = 1_6
__a : str = """huggingface/label-files"""
__a : Dict = """rvlcdip-id2label.json"""
__a : Optional[int] = json.load(open(hf_hub_download(lowercase , lowercase , repo_type="""dataset""" ) , """r""" ) )
__a : List[str] = {int(lowercase ): v for k, v in idalabel.items()}
__a : int = idalabel
__a : Union[str, Any] = {v: k for k, v in idalabel.items()}
# load state_dict of original model, remove and rename some keys
__a : Optional[int] = torch.hub.load_state_dict_from_url(lowercase , map_location="""cpu""" )["""model"""]
__a : Optional[Any] = create_rename_keys(lowercase , has_lm_head=lowercase )
for src, dest in rename_keys:
rename_key(lowercase , lowercase , lowercase )
read_in_q_k_v(lowercase , lowercase , has_lm_head=lowercase )
# load HuggingFace model
__a : str = BeitForMaskedImageModeling(lowercase ) if has_lm_head else BeitForImageClassification(lowercase )
model.eval()
model.load_state_dict(lowercase )
# Check outputs on an image
__a : Optional[int] = BeitImageProcessor(
size=config.image_size , resample=PILImageResampling.BILINEAR , do_center_crop=lowercase )
__a : List[str] = prepare_img()
__a : Optional[Any] = image_processor(images=lowercase , return_tensors="""pt""" )
__a : int = encoding["""pixel_values"""]
__a : Tuple = model(lowercase )
__a : int = outputs.logits
# verify logits
__a : List[str] = [1, 1_6] if """rvlcdip""" in checkpoint_url else [1, 1_9_6, 8_1_9_2]
assert logits.shape == torch.Size(lowercase ), "Shape of logits not as expected"
Path(lowercase ).mkdir(exist_ok=lowercase )
print(F"""Saving model to {pytorch_dump_folder_path}""" )
model.save_pretrained(lowercase )
print(F"""Saving image processor to {pytorch_dump_folder_path}""" )
image_processor.save_pretrained(lowercase )
if push_to_hub:
if has_lm_head:
__a : Optional[int] = """dit-base""" if """base""" in checkpoint_url else """dit-large"""
else:
__a : int = """dit-base-finetuned-rvlcdip""" if """dit-b""" in checkpoint_url else """dit-large-finetuned-rvlcdip"""
image_processor.push_to_hub(
repo_path_or_name=Path(lowercase , lowercase ) , organization="""nielsr""" , commit_message="""Add image processor""" , use_temp_dir=lowercase , )
model.push_to_hub(
repo_path_or_name=Path(lowercase , lowercase ) , organization="""nielsr""" , commit_message="""Add model""" , use_temp_dir=lowercase , )
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE : Union[str, Any] = argparse.ArgumentParser()
parser.add_argument(
'--checkpoint_url',
default='https://layoutlm.blob.core.windows.net/dit/dit-pts/dit-base-224-p16-500k-62d53a.pth',
type=str,
help='URL to the original PyTorch checkpoint (.pth file).',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.'
)
parser.add_argument(
'--push_to_hub',
action='store_true',
)
__SCREAMING_SNAKE_CASE : int = parser.parse_args()
convert_dit_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub) | 697 |
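# Illustrative invocation of the conversion script above (the script file name is an
# assumption; the checkpoint URL is the parser's default and the output path is made up):
#   python convert_dit_unilm_to_pytorch.py \
#       --checkpoint_url https://layoutlm.blob.core.windows.net/dit/dit-pts/dit-base-224-p16-500k-62d53a.pth \
#       --pytorch_dump_folder_path ./dit-base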
'''simple docstring'''
import numpy as np
from PIL import Image
def _snake_case ( lowercase , lowercase , lowercase ) -> np.ndarray:
__a : Any = np.array(lowercase )
if arr.shape[0] != arr.shape[1]:
raise ValueError("""The input array is not a square matrix""" )
__a : Union[str, Any] = 0
__a : Dict = 0
__a : Optional[Any] = 0
__a : Tuple = 0
# compute the shape of the output matrix
__a : Optional[int] = (arr.shape[0] - size) // stride + 1
# initialize the output matrix with zeros of shape maxpool_shape
__a : int = np.zeros((maxpool_shape, maxpool_shape) )
while i < arr.shape[0]:
if i + size > arr.shape[0]:
# if the end of the matrix is reached, break
break
while j < arr.shape[1]:
# if the end of the matrix is reached, break
if j + size > arr.shape[1]:
break
# compute the maximum of the pooling matrix
__a : Optional[Any] = np.max(arr[i : i + size, j : j + size] )
# shift the pooling matrix by stride of column pixels
j += stride
mat_j += 1
# shift the pooling matrix by stride of row pixels
i += stride
mat_i += 1
# reset the column index to 0
__a : Optional[Any] = 0
__a : str = 0
return updated_arr
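# Illustrative example (not part of the original file): a 4x4 input with size=2 and
# stride=2 yields a 2x2 output of block maxima:
#   maxpooling([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12], [13, 14, 15, 16]], size=2, stride=2)
#   -> [[ 6.,  8.], [14., 16.]]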
def _snake_case ( lowercase , lowercase , lowercase ) -> np.ndarray:
__a : int = np.array(lowercase )
if arr.shape[0] != arr.shape[1]:
raise ValueError("""The input array is not a square matrix""" )
__a : int = 0
__a : Optional[Any] = 0
__a : str = 0
__a : List[Any] = 0
# compute the shape of the output matrix
__a : int = (arr.shape[0] - size) // stride + 1
# initialize the output matrix with zeros of shape avgpool_shape
__a : Optional[int] = np.zeros((avgpool_shape, avgpool_shape) )
while i < arr.shape[0]:
# if the end of the matrix is reached, break
if i + size > arr.shape[0]:
break
while j < arr.shape[1]:
# if the end of the matrix is reached, break
if j + size > arr.shape[1]:
break
# compute the average of the pooling matrix
__a : Any = int(np.average(arr[i : i + size, j : j + size] ) )
# shift the pooling matrix by stride of column pixels
j += stride
mat_j += 1
# shift the pooling matrix by stride of row pixels
i += stride
mat_i += 1
# reset the column index to 0
__a : str = 0
__a : List[Any] = 0
return updated_arr
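# Illustrative example (not part of the original file): the same 4x4 input with size=2
# and stride=2 yields the truncated block means:
#   avgpooling([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12], [13, 14, 15, 16]], size=2, stride=2)
#   -> [[ 3.,  5.], [11., 13.]]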
# Main Function
if __name__ == "__main__":
from doctest import testmod
testmod(name='avgpooling', verbose=True)
# Loading the image
__SCREAMING_SNAKE_CASE : str = Image.open('path_to_image')
# Converting the image to numpy array and maxpooling, displaying the result
# Ensure that the image is a square matrix
Image.fromarray(maxpooling(np.array(image), size=3, stride=2)).show()
# Converting the image to numpy array and averagepooling, displaying the result
# Ensure that the image is a square matrix
Image.fromarray(avgpooling(np.array(image), size=3, stride=2)).show() | 697 | 1 |
'''simple docstring'''
from argparse import ArgumentParser
from . import BaseTransformersCLICommand
def _snake_case ( lowercase ) -> List[Any]:
return DownloadCommand(args.model , args.cache_dir , args.force , args.trust_remote_code )
class SCREAMING_SNAKE_CASE__ ( __UpperCamelCase ):
@staticmethod
def __lowerCamelCase ( __UpperCamelCase ):
'''simple docstring'''
__a : List[str] = parser.add_parser("""download""" )
download_parser.add_argument(
"""--cache-dir""" , type=__UpperCamelCase , default=__UpperCamelCase , help="""Path to location to store the models""" )
download_parser.add_argument(
"""--force""" , action="""store_true""" , help="""Force the model to be download even if already in cache-dir""" )
download_parser.add_argument(
"""--trust-remote-code""" , action="""store_true""" , help="""Whether or not to allow for custom models defined on the Hub in their own modeling files. Use only if you've reviewed the code as it will execute on your local machine""" , )
download_parser.add_argument("""model""" , type=__UpperCamelCase , help="""Name of the model to download""" )
download_parser.set_defaults(func=__UpperCamelCase )
def __init__( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ):
'''simple docstring'''
__a : List[Any] = model
__a : Optional[Any] = cache
__a : int = force
__a : List[str] = trust_remote_code
def __lowerCamelCase ( self ):
'''simple docstring'''
from ..models.auto import AutoModel, AutoTokenizer
AutoModel.from_pretrained(
self._model , cache_dir=self._cache , force_download=self._force , trust_remote_code=self._trust_remote_code )
AutoTokenizer.from_pretrained(
self._model , cache_dir=self._cache , force_download=self._force , trust_remote_code=self._trust_remote_code ) | 697 |
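# Illustrative CLI usage of the command above (the model name is just an example):
#   transformers-cli download bert-base-uncased --cache-dir ./models --force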
'''simple docstring'''
import qiskit
def _snake_case ( lowercase , lowercase ) -> qiskit.result.counts.Counts:
__a : Any = qiskit.Aer.get_backend("""aer_simulator""" )
# Create a Quantum Circuit acting on the q register
__a : str = qiskit.QuantumCircuit(lowercase , lowercase )
# Map the quantum measurement to the classical bits
circuit.measure([0] , [0] )
# Execute the circuit on the simulator
__a : Any = qiskit.execute(lowercase , lowercase , shots=1_0_0_0 )
# Return the histogram data of the results of the experiment.
return job.result().get_counts(lowercase )
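# Since the circuit applies no gates before measuring, a qubit initialised to |0> always
# collapses to '0'; with 1000 shots the returned counts are expected to be {'0': 1000}.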
if __name__ == "__main__":
print(f'''Total count for various states are: {single_qubit_measure(1, 1)}''') | 697 | 1 |
'''simple docstring'''
import json
import os
from typing import Optional
import numpy as np
from ...feature_extraction_utils import BatchFeature
from ...processing_utils import ProcessorMixin
from ...utils import logging
from ...utils.hub import get_file_from_repo
from ..auto import AutoTokenizer
__SCREAMING_SNAKE_CASE : str = logging.get_logger(__name__)
class SCREAMING_SNAKE_CASE__ ( __UpperCamelCase ):
lowercase__ = "AutoTokenizer"
lowercase__ = ["tokenizer"]
lowercase__ = {
"semantic_prompt": 1,
"coarse_prompt": 2,
"fine_prompt": 2,
}
def __init__( self , __UpperCamelCase , __UpperCamelCase=None ):
'''simple docstring'''
super().__init__(__UpperCamelCase )
__a : Any = speaker_embeddings
@classmethod
def __lowerCamelCase ( cls , __UpperCamelCase , __UpperCamelCase="speaker_embeddings_path.json" , **__UpperCamelCase ):
'''simple docstring'''
if speaker_embeddings_dict_path is not None:
__a : Tuple = get_file_from_repo(
__UpperCamelCase , __UpperCamelCase , subfolder=kwargs.pop("""subfolder""" , __UpperCamelCase ) , cache_dir=kwargs.pop("""cache_dir""" , __UpperCamelCase ) , force_download=kwargs.pop("""force_download""" , __UpperCamelCase ) , proxies=kwargs.pop("""proxies""" , __UpperCamelCase ) , resume_download=kwargs.pop("""resume_download""" , __UpperCamelCase ) , local_files_only=kwargs.pop("""local_files_only""" , __UpperCamelCase ) , use_auth_token=kwargs.pop("""use_auth_token""" , __UpperCamelCase ) , revision=kwargs.pop("""revision""" , __UpperCamelCase ) , )
if speaker_embeddings_path is None:
logger.warning(
f"""`{os.path.join(__UpperCamelCase , __UpperCamelCase )}` does not exists
, no preloaded speaker embeddings will be used - Make sure to provide a correct path to the json
dictionnary if wanted, otherwise set `speaker_embeddings_dict_path=None`.""" )
__a : Dict = None
else:
with open(__UpperCamelCase ) as speaker_embeddings_json:
__a : Optional[int] = json.load(__UpperCamelCase )
else:
__a : Optional[Any] = None
__a : List[Any] = AutoTokenizer.from_pretrained(__UpperCamelCase , **__UpperCamelCase )
return cls(tokenizer=__UpperCamelCase , speaker_embeddings=__UpperCamelCase )
def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase="speaker_embeddings_path.json" , __UpperCamelCase="speaker_embeddings" , __UpperCamelCase = False , **__UpperCamelCase , ):
'''simple docstring'''
if self.speaker_embeddings is not None:
os.makedirs(os.path.join(__UpperCamelCase , __UpperCamelCase , """v2""" ) , exist_ok=__UpperCamelCase )
__a : List[Any] = {}
__a : Tuple = save_directory
for prompt_key in self.speaker_embeddings:
if prompt_key != "repo_or_path":
__a : Tuple = self._load_voice_preset(__UpperCamelCase )
__a : List[Any] = {}
for key in self.speaker_embeddings[prompt_key]:
np.save(
os.path.join(
embeddings_dict["""repo_or_path"""] , __UpperCamelCase , f"""{prompt_key}_{key}""" ) , voice_preset[key] , allow_pickle=__UpperCamelCase , )
__a : Union[str, Any] = os.path.join(__UpperCamelCase , f"""{prompt_key}_{key}.npy""" )
__a : List[str] = tmp_dict
with open(os.path.join(__UpperCamelCase , __UpperCamelCase ) , """w""" ) as fp:
json.dump(__UpperCamelCase , __UpperCamelCase )
super().save_pretrained(__UpperCamelCase , __UpperCamelCase , **__UpperCamelCase )
def __lowerCamelCase ( self , __UpperCamelCase = None , **__UpperCamelCase ):
'''simple docstring'''
__a : str = self.speaker_embeddings[voice_preset]
__a : List[Any] = {}
for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
if key not in voice_preset_paths:
raise ValueError(
f"""Voice preset unrecognized, missing {key} as a key in self.speaker_embeddings[{voice_preset}].""" )
__a : Any = get_file_from_repo(
self.speaker_embeddings.get("""repo_or_path""" , """/""" ) , voice_preset_paths[key] , subfolder=kwargs.pop("""subfolder""" , __UpperCamelCase ) , cache_dir=kwargs.pop("""cache_dir""" , __UpperCamelCase ) , force_download=kwargs.pop("""force_download""" , __UpperCamelCase ) , proxies=kwargs.pop("""proxies""" , __UpperCamelCase ) , resume_download=kwargs.pop("""resume_download""" , __UpperCamelCase ) , local_files_only=kwargs.pop("""local_files_only""" , __UpperCamelCase ) , use_auth_token=kwargs.pop("""use_auth_token""" , __UpperCamelCase ) , revision=kwargs.pop("""revision""" , __UpperCamelCase ) , )
if path is None:
raise ValueError(
f"""`{os.path.join(self.speaker_embeddings.get("repo_or_path" , "/" ) , voice_preset_paths[key] )}` does not exists
, no preloaded voice preset will be used - Make sure to provide correct paths to the {voice_preset}
embeddings.""" )
__a : Optional[Any] = np.load(__UpperCamelCase )
return voice_preset_dict
def __lowerCamelCase ( self , __UpperCamelCase = None ):
'''simple docstring'''
for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
if key not in voice_preset:
raise ValueError(f"""Voice preset unrecognized, missing {key} as a key.""" )
if not isinstance(voice_preset[key] , np.ndarray ):
raise ValueError(f"""{key} voice preset must be a {str(self.preset_shape[key] )}D ndarray.""" )
if len(voice_preset[key].shape ) != self.preset_shape[key]:
raise ValueError(f"""{key} voice preset must be a {str(self.preset_shape[key] )}D ndarray.""" )
def __call__( self , __UpperCamelCase=None , __UpperCamelCase=None , __UpperCamelCase="pt" , __UpperCamelCase=256 , __UpperCamelCase=False , __UpperCamelCase=True , __UpperCamelCase=False , **__UpperCamelCase , ):
'''simple docstring'''
if voice_preset is not None and not isinstance(__UpperCamelCase , __UpperCamelCase ):
if (
isinstance(__UpperCamelCase , __UpperCamelCase )
and self.speaker_embeddings is not None
and voice_preset in self.speaker_embeddings
):
__a : List[str] = self._load_voice_preset(__UpperCamelCase )
else:
if isinstance(__UpperCamelCase , __UpperCamelCase ) and not voice_preset.endswith(""".npz""" ):
__a : int = voice_preset + """.npz"""
__a : Tuple = np.load(__UpperCamelCase )
if voice_preset is not None:
self._validate_voice_preset_dict(__UpperCamelCase , **__UpperCamelCase )
__a : Tuple = BatchFeature(data=__UpperCamelCase , tensor_type=__UpperCamelCase )
__a : Tuple = self.tokenizer(
__UpperCamelCase , return_tensors=__UpperCamelCase , padding="""max_length""" , max_length=__UpperCamelCase , return_attention_mask=__UpperCamelCase , return_token_type_ids=__UpperCamelCase , add_special_tokens=__UpperCamelCase , **__UpperCamelCase , )
if voice_preset is not None:
__a : List[Any] = voice_preset
return encoded_text | 697 |
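# Illustrative usage of the processor above (the checkpoint and voice preset names follow
# the public Bark release and are assumptions, not part of the original file):
#   processor = BarkProcessor.from_pretrained("suno/bark-small")
#   inputs = processor("Hello, my dog is cute", voice_preset="v2/en_speaker_6")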
'''simple docstring'''
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
WavaVecaConformerConfig,
WavaVecaConformerForCTC,
WavaVecaConformerForPreTraining,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
__SCREAMING_SNAKE_CASE : str = logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE : Any = {
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.linear_k': 'encoder.layers.*.self_attn.linear_k',
'self_attn.linear_v': 'encoder.layers.*.self_attn.linear_v',
'self_attn.linear_q': 'encoder.layers.*.self_attn.linear_q',
'self_attn.pos_bias_u': 'encoder.layers.*.self_attn.pos_bias_u',
'self_attn.pos_bias_v': 'encoder.layers.*.self_attn.pos_bias_v',
'self_attn.linear_out': 'encoder.layers.*.self_attn.linear_out',
'self_attn.linear_pos': 'encoder.layers.*.self_attn.linear_pos',
'self_attn.rotary_emb': 'encoder.embed_positions',
'self_attn_layer_norm': 'encoder.layers.*.self_attn_layer_norm',
'conv_module.pointwise_conv1': 'encoder.layers.*.conv_module.pointwise_conv1',
'conv_module.pointwise_conv2': 'encoder.layers.*.conv_module.pointwise_conv2',
'conv_module.depthwise_conv': 'encoder.layers.*.conv_module.depthwise_conv',
'conv_module.batch_norm': 'encoder.layers.*.conv_module.batch_norm',
'conv_module.layer_norm': 'encoder.layers.*.conv_module.layer_norm',
'ffn1.w_1': 'encoder.layers.*.ffn1.intermediate_dense',
'ffn1.w_2': 'encoder.layers.*.ffn1.output_dense',
'ffn1.layer_norm': 'encoder.layers.*.ffn1_layer_norm',
'ffn2.w_1': 'encoder.layers.*.ffn2.intermediate_dense',
'ffn2.w_2': 'encoder.layers.*.ffn2.output_dense',
'ffn2.layer_norm': 'encoder.layers.*.ffn2_layer_norm',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'quantizer.weight_proj': 'quantizer.weight_proj',
'quantizer.vars': 'quantizer.codevectors',
'project_q': 'project_q',
'final_proj': 'project_hid',
'w2v_encoder.proj': 'lm_head',
'mask_emb': 'masked_spec_embed',
}
__SCREAMING_SNAKE_CASE : Optional[Any] = [
'lm_head',
'quantizer.weight_proj',
'quantizer.codevectors',
'project_q',
'project_hid',
]
def _snake_case ( lowercase , lowercase , lowercase , lowercase , lowercase ) -> List[Any]:
for attribute in key.split(""".""" ):
__a : str = getattr(lowercase , lowercase )
if weight_type is not None:
__a : Dict = getattr(lowercase , lowercase ).shape
else:
__a : Dict = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
F"""Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be"""
F""" {value.shape} for {full_name}""" )
if weight_type == "weight":
__a : Any = value
elif weight_type == "weight_g":
__a : int = value
elif weight_type == "weight_v":
__a : int = value
elif weight_type == "bias":
__a : List[Any] = value
elif weight_type == "running_mean":
__a : Union[str, Any] = value
elif weight_type == "running_var":
__a : Tuple = value
elif weight_type == "num_batches_tracked":
__a : Optional[int] = value
elif weight_type == "inv_freq":
__a : List[str] = value
else:
__a : List[str] = value
logger.info(F"""{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.""" )
def _snake_case ( lowercase , lowercase , lowercase ) -> Dict:
__a : Dict = []
__a : Dict = fairseq_model.state_dict()
__a : Tuple = hf_model.wavaveca_conformer.feature_extractor
for name, value in fairseq_dict.items():
__a : int = False
if "conv_layers" in name:
load_conv_layer(
lowercase , lowercase , lowercase , lowercase , hf_model.config.feat_extract_norm == """group""" , )
__a : List[Any] = True
else:
for key, mapped_key in MAPPING.items():
__a : Optional[int] = """wav2vec2_conformer.""" + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split("""w2v_model.""" )[-1] == name.split(""".""" )[0]:
__a : str = True
if "*" in mapped_key:
__a : Optional[int] = name.split(lowercase )[0].split(""".""" )[-2]
__a : List[Any] = mapped_key.replace("""*""" , lowercase )
if "pos_bias_u" in name:
__a : Union[str, Any] = None
elif "pos_bias_v" in name:
__a : List[Any] = None
elif "weight_g" in name:
__a : List[Any] = """weight_g"""
elif "weight_v" in name:
__a : List[Any] = """weight_v"""
elif "bias" in name:
__a : Optional[int] = """bias"""
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
__a : str = """weight"""
elif "running_mean" in name:
__a : List[str] = """running_mean"""
elif "inv_freq" in name:
__a : Dict = """inv_freq"""
elif "running_var" in name:
__a : Union[str, Any] = """running_var"""
elif "num_batches_tracked" in name:
__a : int = """num_batches_tracked"""
else:
__a : Optional[int] = None
set_recursively(lowercase , lowercase , lowercase , lowercase , lowercase )
continue
if not is_used:
unused_weights.append(lowercase )
logger.warning(F"""Unused weights: {unused_weights}""" )
def _snake_case ( lowercase , lowercase , lowercase , lowercase , lowercase ) -> List[str]:
__a : Optional[Any] = full_name.split("""conv_layers.""" )[-1]
__a : Union[str, Any] = name.split(""".""" )
__a : Optional[Any] = int(items[0] )
__a : int = int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" )
__a : Dict = value
logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" )
__a : str = value
logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.""" )
__a : Dict = value
logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.""" )
__a : Union[str, Any] = value
logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
else:
unused_weights.append(lowercase )
@torch.no_grad()
def _snake_case ( lowercase , lowercase , lowercase=None , lowercase=None , lowercase=True ) -> Optional[Any]:
if config_path is not None:
__a : Any = WavaVecaConformerConfig.from_pretrained(lowercase , hidden_act="""swish""" )
else:
__a : Optional[int] = WavaVecaConformerConfig()
if "rope" in checkpoint_path:
__a : Optional[Any] = """rotary"""
if is_finetuned:
if dict_path:
__a : List[Any] = Dictionary.load(lowercase )
# important change bos & pad token id since CTC symbol is <pad> and
# not <s> as in fairseq
__a : int = target_dict.pad_index
__a : List[str] = target_dict.bos_index
__a : str = target_dict.eos_index
__a : Dict = len(target_dict.symbols )
__a : Any = os.path.join(lowercase , """vocab.json""" )
if not os.path.isdir(lowercase ):
logger.error("""--pytorch_dump_folder_path ({}) should be a directory""".format(lowercase ) )
return
os.makedirs(lowercase , exist_ok=lowercase )
__a : Dict = target_dict.indices
# fairseq has the <pad> and <s> switched
__a : Optional[Any] = 0
__a : List[Any] = 1
with open(lowercase , """w""" , encoding="""utf-8""" ) as vocab_handle:
json.dump(lowercase , lowercase )
__a : int = WavaVecaCTCTokenizer(
lowercase , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token="""|""" , do_lower_case=lowercase , )
        __a : Optional[int] = config.feat_extract_norm == """layer"""
__a : Dict = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=1_6_0_0_0 , padding_value=0 , do_normalize=lowercase , return_attention_mask=lowercase , )
__a : str = WavaVecaProcessor(feature_extractor=lowercase , tokenizer=lowercase )
processor.save_pretrained(lowercase )
__a : List[str] = WavaVecaConformerForCTC(lowercase )
else:
__a : Optional[int] = WavaVecaConformerForPreTraining(lowercase )
if is_finetuned:
__a , __a , __a : Dict = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={"""data""": """/""".join(dict_path.split("""/""" )[:-1] )} )
else:
__a : Optional[int] = argparse.Namespace(task="""audio_pretraining""" )
__a : Tuple = fairseq.tasks.setup_task(lowercase )
__a , __a , __a : int = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] , task=lowercase )
__a : Any = model[0].eval()
recursively_load_weights(lowercase , lowercase , not is_finetuned )
hf_wavavec.save_pretrained(lowercase )
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE : Dict = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--not_finetuned', action='store_true', help='Whether the model to convert is a fine-tuned model or not'
)
__SCREAMING_SNAKE_CASE : int = parser.parse_args()
convert_wavaveca_conformer_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
) | 697 | 1 |
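# Illustrative invocation of the conversion script above (the script file name and local
# paths are assumptions):
#   python convert_wav2vec2_conformer_original_pytorch_checkpoint_to_pytorch.py \
#       --checkpoint_path /path/to/fairseq_checkpoint.pt \
#       --pytorch_dump_folder_path ./wav2vec2-conformer \
#       --not_finetuned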
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
__SCREAMING_SNAKE_CASE : Union[str, Any] = {
'configuration_data2vec_audio': ['DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP', 'Data2VecAudioConfig'],
'configuration_data2vec_text': [
'DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'Data2VecTextConfig',
'Data2VecTextOnnxConfig',
],
'configuration_data2vec_vision': [
'DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP',
'Data2VecVisionConfig',
'Data2VecVisionOnnxConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE : Dict = [
'DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST',
'Data2VecAudioForAudioFrameClassification',
'Data2VecAudioForCTC',
'Data2VecAudioForSequenceClassification',
'Data2VecAudioForXVector',
'Data2VecAudioModel',
'Data2VecAudioPreTrainedModel',
]
__SCREAMING_SNAKE_CASE : Any = [
'DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST',
'Data2VecTextForCausalLM',
'Data2VecTextForMaskedLM',
'Data2VecTextForMultipleChoice',
'Data2VecTextForQuestionAnswering',
'Data2VecTextForSequenceClassification',
'Data2VecTextForTokenClassification',
'Data2VecTextModel',
'Data2VecTextPreTrainedModel',
]
__SCREAMING_SNAKE_CASE : Dict = [
'DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST',
'Data2VecVisionForImageClassification',
'Data2VecVisionForMaskedImageModeling',
'Data2VecVisionForSemanticSegmentation',
'Data2VecVisionModel',
'Data2VecVisionPreTrainedModel',
]
if is_tf_available():
__SCREAMING_SNAKE_CASE : Union[str, Any] = [
'TFData2VecVisionForImageClassification',
'TFData2VecVisionForSemanticSegmentation',
'TFData2VecVisionModel',
'TFData2VecVisionPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_dataavec_audio import DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP, DataaVecAudioConfig
from .configuration_dataavec_text import (
DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DataaVecTextConfig,
DataaVecTextOnnxConfig,
)
from .configuration_dataavec_vision import (
DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP,
DataaVecVisionConfig,
DataaVecVisionOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_dataavec_audio import (
DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST,
DataaVecAudioForAudioFrameClassification,
DataaVecAudioForCTC,
DataaVecAudioForSequenceClassification,
DataaVecAudioForXVector,
DataaVecAudioModel,
DataaVecAudioPreTrainedModel,
)
from .modeling_dataavec_text import (
DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
DataaVecTextForCausalLM,
DataaVecTextForMaskedLM,
DataaVecTextForMultipleChoice,
DataaVecTextForQuestionAnswering,
DataaVecTextForSequenceClassification,
DataaVecTextForTokenClassification,
DataaVecTextModel,
DataaVecTextPreTrainedModel,
)
from .modeling_dataavec_vision import (
DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST,
DataaVecVisionForImageClassification,
DataaVecVisionForMaskedImageModeling,
DataaVecVisionForSemanticSegmentation,
DataaVecVisionModel,
DataaVecVisionPreTrainedModel,
)
if is_tf_available():
from .modeling_tf_dataavec_vision import (
TFDataaVecVisionForImageClassification,
TFDataaVecVisionForSemanticSegmentation,
TFDataaVecVisionModel,
TFDataaVecVisionPreTrainedModel,
)
else:
import sys
__SCREAMING_SNAKE_CASE : Optional[int] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) | 697 |
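# Effect of the _LazyModule pattern above: the heavy torch/TF modeling modules are only
# imported when one of their attributes is first accessed, so an import such as
#   from transformers.models.data2vec import Data2VecTextConfig
# (illustrative, assuming the standard transformers layout) loads only the configuration
# module.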
'''simple docstring'''
import warnings
from functools import wraps
from typing import Callable
def _snake_case ( lowercase ) -> Callable:
@wraps(lowercase )
def _inner_fn(*lowercase , **lowercase ):
warnings.warn(
(F"""'{fn.__name__}' is experimental and might be subject to breaking changes in the future.""") , lowercase , )
return fn(*lowercase , **lowercase )
return _inner_fn | 697 | 1 |
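# Illustrative usage of the decorator above (it appears to correspond to
# huggingface_hub's `experimental`; the decorated function is hypothetical):
#   @experimental
#   def new_feature():
#       ...
#   new_feature()  # emits a UserWarning flagging the function as experimental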
'''simple docstring'''
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import BeitImageProcessor
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
def __init__( self , __UpperCamelCase , __UpperCamelCase=7 , __UpperCamelCase=3 , __UpperCamelCase=18 , __UpperCamelCase=30 , __UpperCamelCase=400 , __UpperCamelCase=True , __UpperCamelCase=None , __UpperCamelCase=True , __UpperCamelCase=None , __UpperCamelCase=True , __UpperCamelCase=[0.5, 0.5, 0.5] , __UpperCamelCase=[0.5, 0.5, 0.5] , __UpperCamelCase=False , ):
'''simple docstring'''
__a : Optional[int] = size if size is not None else {"""height""": 20, """width""": 20}
__a : str = crop_size if crop_size is not None else {"""height""": 18, """width""": 18}
__a : Dict = parent
__a : int = batch_size
__a : List[Any] = num_channels
__a : Any = image_size
__a : Optional[Any] = min_resolution
__a : int = max_resolution
__a : List[str] = do_resize
__a : Tuple = size
__a : List[Any] = do_center_crop
__a : List[Any] = crop_size
__a : List[str] = do_normalize
__a : Any = image_mean
__a : Tuple = image_std
__a : Dict = do_reduce_labels
def __lowerCamelCase ( self ):
'''simple docstring'''
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_reduce_labels": self.do_reduce_labels,
}
def _snake_case ( ) -> Any:
__a : Dict = load_dataset("""hf-internal-testing/fixtures_ade20k""" , split="""test""" )
__a : str = Image.open(dataset[0]["""file"""] )
__a : Optional[Any] = Image.open(dataset[1]["""file"""] )
return image, map
def _snake_case ( ) -> List[Any]:
__a : int = load_dataset("""hf-internal-testing/fixtures_ade20k""" , split="""test""" )
__a : Optional[Any] = Image.open(ds[0]["""file"""] )
__a : Optional[int] = Image.open(ds[1]["""file"""] )
__a : Optional[int] = Image.open(ds[2]["""file"""] )
__a : str = Image.open(ds[3]["""file"""] )
return [imagea, imagea], [mapa, mapa]
@require_torch
@require_vision
class SCREAMING_SNAKE_CASE__ ( __UpperCamelCase , unittest.TestCase ):
lowercase__ = BeitImageProcessor if is_vision_available() else None
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Any = BeitImageProcessingTester(self )
@property
def __lowerCamelCase ( self ):
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : List[str] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__UpperCamelCase , """do_resize""" ) )
self.assertTrue(hasattr(__UpperCamelCase , """size""" ) )
self.assertTrue(hasattr(__UpperCamelCase , """do_center_crop""" ) )
self.assertTrue(hasattr(__UpperCamelCase , """center_crop""" ) )
self.assertTrue(hasattr(__UpperCamelCase , """do_normalize""" ) )
self.assertTrue(hasattr(__UpperCamelCase , """image_mean""" ) )
self.assertTrue(hasattr(__UpperCamelCase , """image_std""" ) )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : str = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"""height""": 20, """width""": 20} )
self.assertEqual(image_processor.crop_size , {"""height""": 18, """width""": 18} )
self.assertEqual(image_processor.do_reduce_labels , __UpperCamelCase )
__a : Tuple = self.image_processing_class.from_dict(
self.image_processor_dict , size=42 , crop_size=84 , reduce_labels=__UpperCamelCase )
self.assertEqual(image_processor.size , {"""height""": 42, """width""": 42} )
self.assertEqual(image_processor.crop_size , {"""height""": 84, """width""": 84} )
self.assertEqual(image_processor.do_reduce_labels , __UpperCamelCase )
def __lowerCamelCase ( self ):
'''simple docstring'''
pass
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Optional[int] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__a : int = prepare_image_inputs(self.image_processor_tester , equal_resolution=__UpperCamelCase )
for image in image_inputs:
self.assertIsInstance(__UpperCamelCase , Image.Image )
# Test not batched input
__a : List[str] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
__a : List[str] = image_processing(__UpperCamelCase , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__a : List[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__UpperCamelCase , numpify=__UpperCamelCase )
for image in image_inputs:
self.assertIsInstance(__UpperCamelCase , np.ndarray )
# Test not batched input
__a : int = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
__a : List[str] = image_processing(__UpperCamelCase , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : List[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__a : Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__UpperCamelCase , torchify=__UpperCamelCase )
for image in image_inputs:
self.assertIsInstance(__UpperCamelCase , torch.Tensor )
# Test not batched input
__a : Dict = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
__a : Optional[Any] = image_processing(__UpperCamelCase , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : List[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__a : Union[str, Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__UpperCamelCase , torchify=__UpperCamelCase )
__a : Tuple = []
for image in image_inputs:
self.assertIsInstance(__UpperCamelCase , torch.Tensor )
maps.append(torch.zeros(image.shape[-2:] ).long() )
# Test not batched input
__a : List[str] = image_processing(image_inputs[0] , maps[0] , return_tensors="""pt""" )
self.assertEqual(
encoding["""pixel_values"""].shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
self.assertEqual(
encoding["""labels"""].shape , (
1,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
self.assertEqual(encoding["""labels"""].dtype , torch.long )
self.assertTrue(encoding["""labels"""].min().item() >= 0 )
self.assertTrue(encoding["""labels"""].max().item() <= 255 )
# Test batched
__a : Optional[Any] = image_processing(__UpperCamelCase , __UpperCamelCase , return_tensors="""pt""" )
self.assertEqual(
encoding["""pixel_values"""].shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
self.assertEqual(
encoding["""labels"""].shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
self.assertEqual(encoding["""labels"""].dtype , torch.long )
self.assertTrue(encoding["""labels"""].min().item() >= 0 )
self.assertTrue(encoding["""labels"""].max().item() <= 255 )
# Test not batched input (PIL images)
__a , __a : str = prepare_semantic_single_inputs()
__a : str = image_processing(__UpperCamelCase , __UpperCamelCase , return_tensors="""pt""" )
self.assertEqual(
encoding["""pixel_values"""].shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
self.assertEqual(
encoding["""labels"""].shape , (
1,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
self.assertEqual(encoding["""labels"""].dtype , torch.long )
self.assertTrue(encoding["""labels"""].min().item() >= 0 )
self.assertTrue(encoding["""labels"""].max().item() <= 255 )
# Test batched input (PIL images)
__a , __a : Union[str, Any] = prepare_semantic_batch_inputs()
__a : Dict = image_processing(__UpperCamelCase , __UpperCamelCase , return_tensors="""pt""" )
self.assertEqual(
encoding["""pixel_values"""].shape , (
2,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
self.assertEqual(
encoding["""labels"""].shape , (
2,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
self.assertEqual(encoding["""labels"""].dtype , torch.long )
self.assertTrue(encoding["""labels"""].min().item() >= 0 )
self.assertTrue(encoding["""labels"""].max().item() <= 255 )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : int = self.image_processing_class(**self.image_processor_dict )
# ADE20k has 150 classes, and the background is included, so labels should be between 0 and 150
__a , __a : Union[str, Any] = prepare_semantic_single_inputs()
__a : Any = image_processing(__UpperCamelCase , __UpperCamelCase , return_tensors="""pt""" )
self.assertTrue(encoding["""labels"""].min().item() >= 0 )
self.assertTrue(encoding["""labels"""].max().item() <= 150 )
__a : Optional[Any] = True
__a : Optional[Any] = image_processing(__UpperCamelCase , __UpperCamelCase , return_tensors="""pt""" )
self.assertTrue(encoding["""labels"""].min().item() >= 0 )
self.assertTrue(encoding["""labels"""].max().item() <= 255 ) | 697 |
'''simple docstring'''
from typing import List, Optional, Union
import numpy as np
from ....audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function
from ....feature_extraction_sequence_utils import SequenceFeatureExtractor
from ....feature_extraction_utils import BatchFeature
from ....file_utils import PaddingStrategy, TensorType
from ....utils import logging
__SCREAMING_SNAKE_CASE : Tuple = logging.get_logger(__name__)
class SCREAMING_SNAKE_CASE__ ( __UpperCamelCase ):
lowercase__ = ["input_features", "attention_mask"]
def __init__( self , __UpperCamelCase=80 , __UpperCamelCase=1_6000 , __UpperCamelCase=0.0 , __UpperCamelCase=10 , __UpperCamelCase=25 , __UpperCamelCase="hamming_window" , __UpperCamelCase=3_2_7_6_8.0 , __UpperCamelCase=0.9_7 , __UpperCamelCase=1.0 , __UpperCamelCase=True , __UpperCamelCase=True , __UpperCamelCase=False , **__UpperCamelCase , ):
'''simple docstring'''
super().__init__(feature_size=__UpperCamelCase , sampling_rate=__UpperCamelCase , padding_value=__UpperCamelCase , **__UpperCamelCase )
__a : List[str] = feature_size
__a : List[str] = sampling_rate
__a : int = padding_value
__a : Any = hop_length
__a : int = win_length
__a : Tuple = frame_signal_scale
__a : Union[str, Any] = preemphasis_coeff
__a : List[str] = mel_floor
__a : Union[str, Any] = normalize_means
__a : Optional[Any] = normalize_vars
__a : Optional[Any] = win_function
__a : Union[str, Any] = return_attention_mask
__a : List[Any] = win_length * sampling_rate // 1000
__a : List[Any] = hop_length * sampling_rate // 1000
__a : Optional[Any] = optimal_fft_length(self.sample_size )
__a : Any = (self.n_fft // 2) + 1
def __lowerCamelCase ( self , __UpperCamelCase ):
'''simple docstring'''
if self.win_function == "hamming_window":
__a : str = window_function(window_length=self.sample_size , name=self.win_function , periodic=__UpperCamelCase )
else:
__a : Dict = window_function(window_length=self.sample_size , name=self.win_function )
__a : Optional[Any] = mel_filter_bank(
num_frequency_bins=self.n_freqs , num_mel_filters=self.feature_size , min_frequency=0.0 , max_frequency=self.sampling_rate / 2.0 , sampling_rate=self.sampling_rate , )
__a : Any = spectrogram(
one_waveform * self.frame_signal_scale , window=__UpperCamelCase , frame_length=self.sample_size , hop_length=self.sample_stride , fft_length=self.n_fft , center=__UpperCamelCase , preemphasis=self.preemphasis_coeff , mel_filters=__UpperCamelCase , mel_floor=self.mel_floor , log_mel="""log""" , )
return msfc_features.T
def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ):
'''simple docstring'''
if self.normalize_means:
__a : int = x[:input_length].mean(axis=0 )
__a : str = np.subtract(__UpperCamelCase , __UpperCamelCase )
if self.normalize_vars:
__a : Dict = x[:input_length].std(axis=0 )
__a : Dict = np.divide(__UpperCamelCase , __UpperCamelCase )
if input_length < x.shape[0]:
__a : Union[str, Any] = padding_value
# make sure array is in float32
__a : Any = x.astype(np.floataa )
return x
def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase = None ):
'''simple docstring'''
__a : Tuple = attention_mask.sum(-1 ) if attention_mask is not None else [x.shape[0] for x in input_features]
return [self._normalize_one(__UpperCamelCase , __UpperCamelCase , self.padding_value ) for x, n in zip(__UpperCamelCase , __UpperCamelCase )]
def __call__( self , __UpperCamelCase , __UpperCamelCase = False , __UpperCamelCase = None , __UpperCamelCase = False , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , **__UpperCamelCase , ):
'''simple docstring'''
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
f"""The model corresponding to this feature extractor: {self} was trained using a sampling rate of"""
f""" {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with"""
f""" {self.sampling_rate} and not {sampling_rate}.""" )
else:
logger.warning(
"""It is strongly recommended to pass the ``sampling_rate`` argument to this function. """
"""Failing to do so can result in silent errors that might be hard to debug.""" )
__a : Tuple = isinstance(__UpperCamelCase , np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(f"""Only mono-channel audio is supported for input to {self}""" )
__a : Tuple = is_batched_numpy or (
isinstance(__UpperCamelCase , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
__a : Tuple = [np.asarray(__UpperCamelCase , dtype=np.floataa ) for speech in raw_speech]
elif not is_batched and not isinstance(__UpperCamelCase , np.ndarray ):
__a : List[str] = np.asarray(__UpperCamelCase , dtype=np.floataa )
elif isinstance(__UpperCamelCase , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
__a : str = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
__a : Any = [raw_speech]
# extract fbank features
__a : str = [self._extract_mfsc_features(__UpperCamelCase ) for one_waveform in raw_speech]
# convert into correct format for padding
__a : Optional[Any] = BatchFeature({"""input_features""": features} )
__a : Any = self.pad(
__UpperCamelCase , padding=__UpperCamelCase , max_length=__UpperCamelCase , truncation=__UpperCamelCase , pad_to_multiple_of=__UpperCamelCase , return_attention_mask=__UpperCamelCase , **__UpperCamelCase , )
# make sure list is in array format
__a : int = padded_inputs.get("""input_features""" )
if isinstance(input_features[0] , __UpperCamelCase ):
__a : Union[str, Any] = [np.asarray(__UpperCamelCase , dtype=np.floataa ) for feature in input_features]
__a : List[str] = padded_inputs.get("""attention_mask""" )
if attention_mask is not None:
__a : Optional[int] = [np.asarray(__UpperCamelCase , dtype=np.intaa ) for array in attention_mask]
if self.normalize_means or self.normalize_vars:
__a : Optional[Any] = (
np.array(__UpperCamelCase , dtype=np.intaa )
if self._get_padding_strategies(__UpperCamelCase , max_length=__UpperCamelCase ) is not PaddingStrategy.DO_NOT_PAD
and padding
else None
)
__a : int = self.normalize(
padded_inputs["""input_features"""] , attention_mask=__UpperCamelCase )
if return_tensors is not None:
__a : List[Any] = padded_inputs.convert_to_tensors(__UpperCamelCase )
return padded_inputs | 697 | 1 |
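# Illustrative call of the feature extractor above (it appears to correspond to the
# M-CTC-T feature extractor; the silent one-second waveform is made up):
#   import numpy as np
#   feats = feature_extractor(np.zeros(16000, dtype=np.float32), sampling_rate=16000,
#                             return_tensors="np")
#   feats["input_features"].shape  # -> (1, num_frames, 80) with the default feature_size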
'''simple docstring'''
def _snake_case ( lowercase ) -> int:
assert column_title.isupper()
__a : List[str] = 0
__a : List[str] = len(lowercase ) - 1
__a : List[Any] = 0
while index >= 0:
        __a : Union[str, Any] = (ord(column_title[index] ) - 6_4) * pow(2_6 , power )
answer += value
power += 1
index -= 1
return answer
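# Illustrative results (standard spreadsheet column numbering):
#   "A" -> 1, "Z" -> 26, "AA" -> 27, "AZ" -> 52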
if __name__ == "__main__":
from doctest import testmod
testmod() | 697 |
'''simple docstring'''
__SCREAMING_SNAKE_CASE : int = 9.80_665
def _snake_case ( lowercase , lowercase , lowercase = g ) -> float:
if fluid_density <= 0:
raise ValueError("""Impossible fluid density""" )
if volume < 0:
raise ValueError("""Impossible Object volume""" )
if gravity <= 0:
raise ValueError("""Impossible Gravity""" )
return fluid_density * gravity * volume
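# Worked example: sea water (~1025 kg/m^3) displacing 0.5 m^3 gives
# 1025 * 9.80665 * 0.5 ≈ 5025.9 N of buoyant force.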
if __name__ == "__main__":
import doctest
# run doctest
doctest.testmod() | 697 | 1 |
'''simple docstring'''
import argparse
import logging
import pickle
import random
import time
import numpy as np
from transformers import BertTokenizer, GPTaTokenizer, RobertaTokenizer
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO
)
__SCREAMING_SNAKE_CASE : int = logging.getLogger(__name__)
def main() -> None:
__a : List[Any] = argparse.ArgumentParser(
description="""Preprocess the data to avoid re-doing it several times by (tokenization + token_to_ids).""" )
parser.add_argument("""--file_path""" , type=lowercase , default="""data/dump.txt""" , help="""The path to the data.""" )
parser.add_argument("""--tokenizer_type""" , type=lowercase , default="""bert""" , choices=["""bert""", """roberta""", """gpt2"""] )
parser.add_argument("""--tokenizer_name""" , type=lowercase , default="""bert-base-uncased""" , help="""The tokenizer to use.""" )
parser.add_argument("""--dump_file""" , type=lowercase , default="""data/dump""" , help="""The dump file prefix.""" )
__a : int = parser.parse_args()
logger.info(F"""Loading Tokenizer ({args.tokenizer_name})""" )
if args.tokenizer_type == "bert":
__a : List[Any] = BertTokenizer.from_pretrained(args.tokenizer_name )
__a : Tuple = tokenizer.special_tokens_map["""cls_token"""] # `[CLS]`
__a : Optional[Any] = tokenizer.special_tokens_map["""sep_token"""] # `[SEP]`
elif args.tokenizer_type == "roberta":
__a : Any = RobertaTokenizer.from_pretrained(args.tokenizer_name )
__a : Any = tokenizer.special_tokens_map["""cls_token"""] # `<s>`
__a : int = tokenizer.special_tokens_map["""sep_token"""] # `</s>`
elif args.tokenizer_type == "gpt2":
__a : str = GPTaTokenizer.from_pretrained(args.tokenizer_name )
__a : str = tokenizer.special_tokens_map["""bos_token"""] # `<|endoftext|>`
__a : List[Any] = tokenizer.special_tokens_map["""eos_token"""] # `<|endoftext|>`
logger.info(F"""Loading text from {args.file_path}""" )
with open(args.file_path , """r""" , encoding="""utf8""" ) as fp:
__a : Dict = fp.readlines()
logger.info("""Start encoding""" )
logger.info(F"""{len(lowercase )} examples to process.""" )
    __a : Optional[Any] = []  # referenced below as `rslt` (the encoded sequences)
    __a : Tuple = 0  # referenced below as `iter` (examples processed so far)
    __a : int = 1_0_0_0_0  # referenced below as `interval` (logging period)
    __a : str = time.time()  # referenced below as `start` (for throughput logging)
for text in data:
__a : Tuple = F"""{bos} {text.strip()} {sep}"""
__a : List[str] = tokenizer.encode(lowercase , add_special_tokens=lowercase )
rslt.append(lowercase )
iter += 1
if iter % interval == 0:
__a : List[str] = time.time()
logger.info(F"""{iter} examples processed. - {(end-start):.2f}s/{interval}expl""" )
__a : Optional[int] = time.time()
logger.info("""Finished binarization""" )
logger.info(F"""{len(lowercase )} examples processed.""" )
__a : Optional[int] = F"""{args.dump_file}.{args.tokenizer_name}.pickle"""
__a : Any = tokenizer.vocab_size
if vocab_size < (1 << 1_6):
__a : Optional[Any] = [np.uintaa(lowercase ) for d in rslt]
else:
__a : Optional[Any] = [np.intaa(lowercase ) for d in rslt]
random.shuffle(rslt_ )
logger.info(F"""Dump to {dp_file}""" )
with open(lowercase , """wb""" ) as handle:
pickle.dump(rslt_ , lowercase , protocol=pickle.HIGHEST_PROTOCOL )
if __name__ == "__main__":
main() | 697 |
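# Example invocation (paths and names are illustrative):
#   python binarized_data.py --file_path data/dump.txt --tokenizer_type bert \
#       --tokenizer_name bert-base-uncased --dump_file data/binarized_text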
'''simple docstring'''
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DetrImageProcessor
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
def __init__( self , __UpperCamelCase , __UpperCamelCase=7 , __UpperCamelCase=3 , __UpperCamelCase=30 , __UpperCamelCase=400 , __UpperCamelCase=True , __UpperCamelCase=None , __UpperCamelCase=True , __UpperCamelCase=1 / 255 , __UpperCamelCase=True , __UpperCamelCase=[0.5, 0.5, 0.5] , __UpperCamelCase=[0.5, 0.5, 0.5] , __UpperCamelCase=True , ):
'''simple docstring'''
__a : List[Any] = size if size is not None else {"""shortest_edge""": 18, """longest_edge""": 1333}
__a : Dict = parent
__a : Union[str, Any] = batch_size
__a : Optional[int] = num_channels
__a : Dict = min_resolution
__a : List[Any] = max_resolution
__a : int = do_resize
__a : str = size
__a : Optional[Any] = do_rescale
__a : Optional[Any] = rescale_factor
__a : str = do_normalize
__a : Any = image_mean
__a : Optional[Any] = image_std
__a : Dict = do_pad
def __lowerCamelCase ( self ):
'''simple docstring'''
return {
"do_resize": self.do_resize,
"size": self.size,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_pad": self.do_pad,
}
def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase=False ):
'''simple docstring'''
if not batched:
__a : Union[str, Any] = image_inputs[0]
if isinstance(__UpperCamelCase , Image.Image ):
__a , __a : Tuple = image.size
else:
__a , __a : Tuple = image.shape[1], image.shape[2]
if w < h:
__a : Optional[int] = int(self.size["""shortest_edge"""] * h / w )
__a : Tuple = self.size["""shortest_edge"""]
elif w > h:
__a : Optional[Any] = self.size["""shortest_edge"""]
__a : Any = int(self.size["""shortest_edge"""] * w / h )
else:
__a : Any = self.size["""shortest_edge"""]
__a : Optional[int] = self.size["""shortest_edge"""]
else:
__a : Any = []
for image in image_inputs:
__a , __a : Any = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
__a : List[Any] = max(__UpperCamelCase , key=lambda __UpperCamelCase : item[0] )[0]
__a : Optional[Any] = max(__UpperCamelCase , key=lambda __UpperCamelCase : item[1] )[1]
return expected_height, expected_width
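    # Worked example of the shortest-edge resize above: a 30x60 (w x h) image with
    # shortest_edge=18 maps to (expected_height, expected_width) = (36, 18): the
    # short side is scaled to 18 and the long side keeps the aspect ratio.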
@require_torch
@require_vision
class SCREAMING_SNAKE_CASE__ ( __UpperCamelCase , unittest.TestCase ):
lowercase__ = DetrImageProcessor if is_vision_available() else None
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : str = DetrImageProcessingTester(self )
@property
def __lowerCamelCase ( self ):
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Optional[int] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__UpperCamelCase , """image_mean""" ) )
self.assertTrue(hasattr(__UpperCamelCase , """image_std""" ) )
self.assertTrue(hasattr(__UpperCamelCase , """do_normalize""" ) )
self.assertTrue(hasattr(__UpperCamelCase , """do_rescale""" ) )
self.assertTrue(hasattr(__UpperCamelCase , """rescale_factor""" ) )
self.assertTrue(hasattr(__UpperCamelCase , """do_resize""" ) )
self.assertTrue(hasattr(__UpperCamelCase , """size""" ) )
self.assertTrue(hasattr(__UpperCamelCase , """do_pad""" ) )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Optional[Any] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"""shortest_edge""": 18, """longest_edge""": 1333} )
self.assertEqual(image_processor.do_pad , __UpperCamelCase )
__a : List[Any] = self.image_processing_class.from_dict(
self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=__UpperCamelCase )
self.assertEqual(image_processor.size , {"""shortest_edge""": 42, """longest_edge""": 84} )
self.assertEqual(image_processor.do_pad , __UpperCamelCase )
def __lowerCamelCase ( self ):
'''simple docstring'''
pass
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__a : Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__UpperCamelCase )
for image in image_inputs:
self.assertIsInstance(__UpperCamelCase , Image.Image )
# Test not batched input
__a : Optional[Any] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
__a , __a : Any = self.image_processor_tester.get_expected_values(__UpperCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
__a , __a : Optional[int] = self.image_processor_tester.get_expected_values(__UpperCamelCase , batched=__UpperCamelCase )
__a : Any = image_processing(__UpperCamelCase , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Dict = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__a : List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__UpperCamelCase , numpify=__UpperCamelCase )
for image in image_inputs:
self.assertIsInstance(__UpperCamelCase , np.ndarray )
# Test not batched input
__a : Dict = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
__a , __a : Any = self.image_processor_tester.get_expected_values(__UpperCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
__a : List[str] = image_processing(__UpperCamelCase , return_tensors="""pt""" ).pixel_values
__a , __a : str = self.image_processor_tester.get_expected_values(__UpperCamelCase , batched=__UpperCamelCase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Any = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__a : Union[str, Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__UpperCamelCase , torchify=__UpperCamelCase )
for image in image_inputs:
self.assertIsInstance(__UpperCamelCase , torch.Tensor )
# Test not batched input
__a : Any = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
__a , __a : Any = self.image_processor_tester.get_expected_values(__UpperCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
__a : List[str] = image_processing(__UpperCamelCase , return_tensors="""pt""" ).pixel_values
__a , __a : Any = self.image_processor_tester.get_expected_values(__UpperCamelCase , batched=__UpperCamelCase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : List[Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
with open("""./tests/fixtures/tests_samples/COCO/coco_annotations.txt""" , """r""" ) as f:
__a : Dict = json.loads(f.read() )
__a : Optional[int] = {"""image_id""": 3_9769, """annotations""": target}
# encode them
__a : List[str] = DetrImageProcessor.from_pretrained("""facebook/detr-resnet-50""" )
__a : Tuple = image_processing(images=__UpperCamelCase , annotations=__UpperCamelCase , return_tensors="""pt""" )
# verify pixel values
__a : Union[str, Any] = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding["""pixel_values"""].shape , __UpperCamelCase )
__a : List[str] = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] )
self.assertTrue(torch.allclose(encoding["""pixel_values"""][0, 0, 0, :3] , __UpperCamelCase , atol=1E-4 ) )
# verify area
__a : List[Any] = torch.tensor([5_8_8_7.9_6_0_0, 1_1_2_5_0.2_0_6_1, 4_8_9_3_5_3.8_4_3_8, 8_3_7_1_2_2.7_5_0_0, 1_4_7_9_6_7.5_1_5_6, 1_6_5_7_3_2.3_4_3_8] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""area"""] , __UpperCamelCase ) )
# verify boxes
__a : Optional[int] = torch.Size([6, 4] )
self.assertEqual(encoding["""labels"""][0]["""boxes"""].shape , __UpperCamelCase )
__a : Any = torch.tensor([0.5_5_0_3, 0.2_7_6_5, 0.0_6_0_4, 0.2_2_1_5] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""boxes"""][0] , __UpperCamelCase , atol=1E-3 ) )
# verify image_id
__a : Union[str, Any] = torch.tensor([3_9769] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""image_id"""] , __UpperCamelCase ) )
# verify is_crowd
__a : List[Any] = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""iscrowd"""] , __UpperCamelCase ) )
# verify class_labels
__a : Any = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""class_labels"""] , __UpperCamelCase ) )
# verify orig_size
__a : Any = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""orig_size"""] , __UpperCamelCase ) )
# verify size
__a : str = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""size"""] , __UpperCamelCase ) )
@slow
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : List[Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
with open("""./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt""" , """r""" ) as f:
__a : Tuple = json.loads(f.read() )
__a : str = {"""file_name""": """000000039769.png""", """image_id""": 3_9769, """segments_info""": target}
__a : int = pathlib.Path("""./tests/fixtures/tests_samples/COCO/coco_panoptic""" )
# encode them
__a : List[str] = DetrImageProcessor.from_pretrained("""facebook/detr-resnet-50-panoptic""" )
__a : Tuple = image_processing(images=__UpperCamelCase , annotations=__UpperCamelCase , masks_path=__UpperCamelCase , return_tensors="""pt""" )
# verify pixel values
__a : List[str] = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding["""pixel_values"""].shape , __UpperCamelCase )
__a : Any = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] )
self.assertTrue(torch.allclose(encoding["""pixel_values"""][0, 0, 0, :3] , __UpperCamelCase , atol=1E-4 ) )
# verify area
__a : Optional[Any] = torch.tensor([1_4_7_9_7_9.6_8_7_5, 1_6_5_5_2_7.0_4_6_9, 4_8_4_6_3_8.5_9_3_8, 1_1_2_9_2.9_3_7_5, 5_8_7_9.6_5_6_2, 7_6_3_4.1_1_4_7] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""area"""] , __UpperCamelCase ) )
# verify boxes
__a : Optional[Any] = torch.Size([6, 4] )
self.assertEqual(encoding["""labels"""][0]["""boxes"""].shape , __UpperCamelCase )
__a : List[str] = torch.tensor([0.2_6_2_5, 0.5_4_3_7, 0.4_6_8_8, 0.8_6_2_5] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""boxes"""][0] , __UpperCamelCase , atol=1E-3 ) )
# verify image_id
__a : List[str] = torch.tensor([3_9769] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""image_id"""] , __UpperCamelCase ) )
# verify is_crowd
__a : Optional[int] = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""iscrowd"""] , __UpperCamelCase ) )
# verify class_labels
__a : Optional[int] = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""class_labels"""] , __UpperCamelCase ) )
# verify masks
__a : Union[str, Any] = 82_2873
self.assertEqual(encoding["""labels"""][0]["""masks"""].sum().item() , __UpperCamelCase )
# verify orig_size
__a : str = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""orig_size"""] , __UpperCamelCase ) )
# verify size
__a : List[Any] = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""size"""] , __UpperCamelCase ) ) | 697 | 1 |
'''simple docstring'''
import numpy as np
import torch
from torch.utils.data import Dataset
from utils import logger
class SCREAMING_SNAKE_CASE__ ( __UpperCamelCase ):
def __init__( self , __UpperCamelCase , __UpperCamelCase ):
'''simple docstring'''
__a : Any = params
__a : Optional[Any] = np.array(__UpperCamelCase )
__a : Union[str, Any] = np.array([len(__UpperCamelCase ) for t in data] )
self.check()
self.remove_long_sequences()
self.remove_empty_sequences()
self.remove_unknown_sequences()
self.check()
self.print_statistics()
def __getitem__( self , __UpperCamelCase ):
'''simple docstring'''
return (self.token_ids[index], self.lengths[index])
def __len__( self ):
'''simple docstring'''
return len(self.lengths )
def __lowerCamelCase ( self ):
'''simple docstring'''
assert len(self.token_ids ) == len(self.lengths )
assert all(self.lengths[i] == len(self.token_ids[i] ) for i in range(len(self.lengths ) ) )
def __lowerCamelCase ( self ):
        '''Split any sequence longer than max_model_input_size into chunks, re-adding the cls/sep (or bos/eos) special tokens.'''
__a : Tuple = self.params.max_model_input_size
__a : Union[str, Any] = self.lengths > max_len
logger.info(f"""Splitting {sum(__UpperCamelCase )} too long sequences.""" )
def divide_chunks(__UpperCamelCase , __UpperCamelCase ):
return [l[i : i + n] for i in range(0 , len(__UpperCamelCase ) , __UpperCamelCase )]
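        # e.g. divide_chunks([1, 2, 3, 4, 5], 2) -> [[1, 2], [3, 4], [5]]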
__a : int = []
__a : Union[str, Any] = []
if self.params.mlm:
__a , __a : Any = self.params.special_tok_ids["""cls_token"""], self.params.special_tok_ids["""sep_token"""]
else:
__a , __a : str = self.params.special_tok_ids["""bos_token"""], self.params.special_tok_ids["""eos_token"""]
for seq_, len_ in zip(self.token_ids , self.lengths ):
assert (seq_[0] == cls_id) and (seq_[-1] == sep_id), seq_
if len_ <= max_len:
new_tok_ids.append(seq_ )
new_lengths.append(len_ )
else:
__a : Any = []
for sub_s in divide_chunks(seq_ , max_len - 2 ):
if sub_s[0] != cls_id:
__a : int = np.insert(__UpperCamelCase , 0 , __UpperCamelCase )
if sub_s[-1] != sep_id:
__a : str = np.insert(__UpperCamelCase , len(__UpperCamelCase ) , __UpperCamelCase )
assert len(__UpperCamelCase ) <= max_len
assert (sub_s[0] == cls_id) and (sub_s[-1] == sep_id), sub_s
sub_seqs.append(__UpperCamelCase )
new_tok_ids.extend(__UpperCamelCase )
new_lengths.extend([len(__UpperCamelCase ) for l in sub_seqs] )
__a : Dict = np.array(__UpperCamelCase )
__a : Tuple = np.array(__UpperCamelCase )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : List[str] = len(self )
__a : List[str] = self.lengths > 11
__a : int = self.token_ids[indices]
__a : Union[str, Any] = self.lengths[indices]
__a : Any = len(self )
logger.info(f"""Remove {init_size - new_size} too short (<=11 tokens) sequences.""" )
def __lowerCamelCase ( self ):
'''simple docstring'''
if "unk_token" not in self.params.special_tok_ids:
return
else:
__a : List[str] = self.params.special_tok_ids["""unk_token"""]
__a : str = len(self )
__a : str = np.array([np.count_nonzero(a == unk_token_id ) for a in self.token_ids] )
__a : Optional[Any] = (unk_occs / self.lengths) < 0.5
__a : List[str] = self.token_ids[indices]
__a : Optional[int] = self.lengths[indices]
__a : Any = len(self )
logger.info(f"""Remove {init_size - new_size} sequences with a high level of unknown tokens (50%).""" )
def __lowerCamelCase ( self ):
'''simple docstring'''
if not self.params.is_master:
return
logger.info(f"""{len(self )} sequences""" )
# data_len = sum(self.lengths)
# nb_unique_tokens = len(Counter(list(chain(*self.token_ids))))
# logger.info(f'{data_len} tokens ({nb_unique_tokens} unique)')
# unk_idx = self.params.special_tok_ids['unk_token']
# nb_unknown = sum([(t==unk_idx).sum() for t in self.token_ids])
# logger.info(f'{nb_unknown} unknown tokens (covering {100*nb_unknown/data_len:.2f}% of the data)')
def __lowerCamelCase ( self , __UpperCamelCase ):
'''simple docstring'''
__a : List[str] = [t[0] for t in batch]
__a : str = [t[1] for t in batch]
assert len(__UpperCamelCase ) == len(__UpperCamelCase )
# Max for paddings
__a : Optional[int] = max(__UpperCamelCase )
# Pad token ids
if self.params.mlm:
__a : int = self.params.special_tok_ids["""pad_token"""]
else:
__a : Tuple = self.params.special_tok_ids["""unk_token"""]
__a : Any = [list(t.astype(__UpperCamelCase ) ) + [pad_idx] * (max_seq_len_ - len(__UpperCamelCase )) for t in token_ids]
assert len(tk_ ) == len(__UpperCamelCase )
assert all(len(__UpperCamelCase ) == max_seq_len_ for t in tk_ )
__a : Any = torch.tensor(tk_ ) # (bs, max_seq_len_)
__a : Optional[Any] = torch.tensor(__UpperCamelCase ) # (bs)
return tk_t, lg_t | 697 |
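# A minimal usage sketch (hypothetical names: upstream this class is called
# LmSeqsDataset and the collate method above is called batch_sequences):
#   dataset = LmSeqsDataset(params, data)
#   loader = torch.utils.data.DataLoader(dataset, batch_size=32,
#                                        collate_fn=dataset.batch_sequences)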
'''simple docstring'''
import argparse
import logging
import os
import time
import timeit
import datasets
import numpy as np
import pycuda.autoinit # noqa: F401
import pycuda.driver as cuda
import tensorrt as trt
import torch
from absl import logging as absl_logging
from accelerate import Accelerator
from datasets import load_dataset, load_metric
from torch.utils.data import DataLoader
from utils_qa import postprocess_qa_predictions
import transformers
from transformers import AutoTokenizer, EvalPrediction, default_data_collator, set_seed
from transformers.trainer_pt_utils import nested_concat, nested_truncate
__SCREAMING_SNAKE_CASE : Optional[int] = trt.Logger(trt.Logger.WARNING)
__SCREAMING_SNAKE_CASE : Tuple = absl_logging.get_absl_logger()
absl_logger.setLevel(logging.WARNING)
__SCREAMING_SNAKE_CASE : Any = logging.getLogger(__name__)
__SCREAMING_SNAKE_CASE : int = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--onnx_model_path',
default=None,
type=str,
required=True,
help='Path to ONNX model: ',
)
parser.add_argument(
'--output_dir',
default=None,
type=str,
required=True,
help='The output directory where the model checkpoints and predictions will be written.',
)
# Other parameters
parser.add_argument(
'--tokenizer_name',
default='',
type=str,
required=True,
help='Pretrained tokenizer name or path if not the same as model_name',
)
parser.add_argument(
'--version_2_with_negative',
action='store_true',
help='If true, the SQuAD examples contain some that do not have an answer.',
)
parser.add_argument(
'--null_score_diff_threshold',
type=float,
default=0.0,
help='If null_score - best_non_null is greater than the threshold predict null.',
)
parser.add_argument(
'--max_seq_length',
default=384,
type=int,
help=(
'The maximum total input sequence length after WordPiece tokenization. Sequences '
'longer than this will be truncated, and sequences shorter than this will be padded.'
),
)
parser.add_argument(
'--doc_stride',
default=128,
type=int,
help='When splitting up a long document into chunks, how much stride to take between chunks.',
)
parser.add_argument('--per_device_eval_batch_size', default=8, type=int, help='Batch size per GPU/CPU for evaluation.')
parser.add_argument(
'--n_best_size',
default=20,
type=int,
help='The total number of n-best predictions to generate in the nbest_predictions.json output file.',
)
parser.add_argument(
'--max_answer_length',
default=30,
type=int,
help=(
'The maximum length of an answer that can be generated. This is needed because the start '
'and end predictions are not conditioned on one another.'
),
)
parser.add_argument('--seed', type=int, default=42, help='random seed for initialization')
parser.add_argument(
'--dataset_name',
type=str,
default=None,
required=True,
help='The name of the dataset to use (via the datasets library).',
)
parser.add_argument(
'--dataset_config_name',
type=str,
default=None,
help='The configuration name of the dataset to use (via the datasets library).',
)
parser.add_argument(
    '--preprocessing_num_workers', type=int, default=4, help='The number of processes to use for preprocessing.'
)
parser.add_argument('--overwrite_cache', action='store_true', help='Overwrite the cached training and evaluation sets')
parser.add_argument(
'--fp16',
action='store_true',
help='Whether to use 16-bit (mixed) precision instead of 32-bit',
)
parser.add_argument(
'--int8',
action='store_true',
help='Whether to use INT8',
)
__SCREAMING_SNAKE_CASE : Optional[int] = parser.parse_args()
if args.tokenizer_name:
__SCREAMING_SNAKE_CASE : str = AutoTokenizer.from_pretrained(args.tokenizer_name, use_fast=True)
else:
raise ValueError(
'You are instantiating a new tokenizer from scratch. This is not supported by this script.'
'You can do it from another script, save it, and load it from here, using --tokenizer_name.'
)
logger.info('Training/evaluation parameters %s', args)
__SCREAMING_SNAKE_CASE : List[Any] = args.per_device_eval_batch_size
__SCREAMING_SNAKE_CASE : int = (args.eval_batch_size, args.max_seq_length)
# TRT Engine properties
__SCREAMING_SNAKE_CASE : Optional[Any] = True
__SCREAMING_SNAKE_CASE : Tuple = 'temp_engine/bert-fp32.engine'
if args.fpaa:
__SCREAMING_SNAKE_CASE : Dict = 'temp_engine/bert-fp16.engine'
if args.inta:
__SCREAMING_SNAKE_CASE : Tuple = 'temp_engine/bert-int8.engine'
# import ONNX file
if not os.path.exists('temp_engine'):
os.makedirs('temp_engine')
__SCREAMING_SNAKE_CASE : Optional[Any] = 1 << (int)(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)
with trt.Builder(TRT_LOGGER) as builder, builder.create_network(EXPLICIT_BATCH) as network, trt.OnnxParser(
network, TRT_LOGGER
) as parser:
with open(args.onnx_model_path, 'rb') as model:
if not parser.parse(model.read()):
for error in range(parser.num_errors):
print(parser.get_error(error))
# Query input names and shapes from parsed TensorRT network
__SCREAMING_SNAKE_CASE : List[Any] = [network.get_input(i) for i in range(network.num_inputs)]
__SCREAMING_SNAKE_CASE : List[Any] = [_input.name for _input in network_inputs] # ex: ["actual_input1"]
with builder.create_builder_config() as config:
__SCREAMING_SNAKE_CASE : Tuple = 1 << 50
if STRICT_TYPES:
config.set_flag(trt.BuilderFlag.STRICT_TYPES)
if args.fpaa:
config.set_flag(trt.BuilderFlag.FPaa)
if args.inta:
config.set_flag(trt.BuilderFlag.INTa)
__SCREAMING_SNAKE_CASE : Dict = builder.create_optimization_profile()
config.add_optimization_profile(profile)
for i in range(len(input_names)):
profile.set_shape(input_names[i], INPUT_SHAPE, INPUT_SHAPE, INPUT_SHAPE)
__SCREAMING_SNAKE_CASE : Union[str, Any] = builder.build_engine(network, config)
# serialize_engine and store in file (can be directly loaded and deserialized):
with open(engine_name, 'wb') as f:
f.write(engine.serialize())
def model_infer ( inputs , context , d_inputs , h_output0 , h_output1 , d_output0 , d_output1 , stream ):
    input_ids = np.asarray(inputs["""input_ids"""] , dtype=np.int32 )
    attention_mask = np.asarray(inputs["""attention_mask"""] , dtype=np.int32 )
    token_type_ids = np.asarray(inputs["""token_type_ids"""] , dtype=np.int32 )
    # Copy inputs host -> device
    cuda.memcpy_htod_async(d_inputs[0] , input_ids.ravel() , stream )
    cuda.memcpy_htod_async(d_inputs[1] , attention_mask.ravel() , stream )
    cuda.memcpy_htod_async(d_inputs[2] , token_type_ids.ravel() , stream )
    # start time
    start_time = time.time()
    # Run inference
    context.execute_async(
        bindings=[int(d_inp ) for d_inp in d_inputs] + [int(d_output0 ), int(d_output1 )] , stream_handle=stream.handle )
    # Transfer predictions back from GPU
    cuda.memcpy_dtoh_async(h_output0 , d_output0 , stream )
    cuda.memcpy_dtoh_async(h_output1 , d_output1 , stream )
    # Synchronize the stream and take time
    stream.synchronize()
    # end time
    end_time = time.time()
    infer_time = end_time - start_time
    outputs = (h_output0, h_output1)
    # print(outputs)
    return outputs, infer_time
# Initialize the accelerator. We will let the accelerator handle device placement for us in this example.
__SCREAMING_SNAKE_CASE : Optional[Any] = Accelerator()
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
datefmt='%m/%d/%Y %H:%M:%S',
level=logging.INFO,
)
# Setup logging, we only want one process per machine to log things on the screen.
# accelerator.is_local_main_process is only True for one process per machine.
logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR)
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_info()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
# If passed along, set the training seed now.
if args.seed is not None:
set_seed(args.seed)
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
if args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
__SCREAMING_SNAKE_CASE : List[str] = load_dataset(args.dataset_name, args.dataset_config_name)
else:
raise ValueError('Evaluation requires a dataset name')
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Preprocessing the datasets.
# Preprocessing is slighlty different for training and evaluation.
__SCREAMING_SNAKE_CASE : int = raw_datasets['validation'].column_names
__SCREAMING_SNAKE_CASE : Tuple = 'question' if 'question' in column_names else column_names[0]
__SCREAMING_SNAKE_CASE : List[Any] = 'context' if 'context' in column_names else column_names[1]
__SCREAMING_SNAKE_CASE : Tuple = 'answers' if 'answers' in column_names else column_names[2]
# Padding side determines if we do (question|context) or (context|question).
__SCREAMING_SNAKE_CASE : Tuple = tokenizer.padding_side == 'right'
if args.max_seq_length > tokenizer.model_max_length:
logger.warning(
f'''The max_seq_length passed ({args.max_seq_length}) is larger than the maximum length for the'''
f'''model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.'''
)
__SCREAMING_SNAKE_CASE : Dict = min(args.max_seq_length, tokenizer.model_max_length)
def prepare_validation_features ( examples ):
    # Some of the questions have lots of whitespace on the left, which is not useful and will make the
    # truncation of the context fail (the tokenized question will take a lot of space). So we remove that
    # left whitespace
    examples[question_column_name] = [q.lstrip() for q in examples[question_column_name]]
    # Tokenize our examples with truncation and maybe padding, but keep the overflows using a stride. This results
    # in one example possibly giving several features when a context is long, each of those features having a
    # context that overlaps a bit the context of the previous feature.
    tokenized_examples = tokenizer(
        examples[question_column_name if pad_on_right else context_column_name] , examples[context_column_name if pad_on_right else question_column_name] , truncation="""only_second""" if pad_on_right else """only_first""" , max_length=max_seq_length , stride=args.doc_stride , return_overflowing_tokens=True , return_offsets_mapping=True , padding="""max_length""" , )
    # Since one example might give us several features if it has a long context, we need a map from a feature to
    # its corresponding example. This key gives us just that.
    sample_mapping = tokenized_examples.pop("""overflow_to_sample_mapping""" )
    # For evaluation, we will need to convert our predictions to substrings of the context, so we keep the
    # corresponding example_id and we will store the offset mappings.
    tokenized_examples["""example_id"""] = []
    for i in range(len(tokenized_examples["""input_ids"""] ) ):
        # Grab the sequence corresponding to that example (to know what is the context and what is the question).
        sequence_ids = tokenized_examples.sequence_ids(i )
        context_index = 1 if pad_on_right else 0
        # One example can give several spans, this is the index of the example containing this span of text.
        sample_index = sample_mapping[i]
        tokenized_examples["""example_id"""].append(examples["""id"""][sample_index] )
        # Set to None the offset_mapping that are not part of the context so it's easy to determine if a token
        # position is part of the context or not.
        tokenized_examples["""offset_mapping"""][i] = [
(o if sequence_ids[k] == context_index else None)
for k, o in enumerate(tokenized_examples["""offset_mapping"""][i] )
]
return tokenized_examples
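# With max_seq_length=384 and doc_stride=128, a ~800-token context yields several
# overlapping features, each mapped back to its source example via example_id.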
__SCREAMING_SNAKE_CASE : int = raw_datasets['validation']
# Validation Feature Creation
__SCREAMING_SNAKE_CASE : Union[str, Any] = eval_examples.map(
prepare_validation_features,
batched=True,
num_proc=args.preprocessing_num_workers,
remove_columns=column_names,
load_from_cache_file=not args.overwrite_cache,
desc='Running tokenizer on validation dataset',
)
__SCREAMING_SNAKE_CASE : List[Any] = default_data_collator
__SCREAMING_SNAKE_CASE : Union[str, Any] = eval_dataset.remove_columns(['example_id', 'offset_mapping'])
__SCREAMING_SNAKE_CASE : List[str] = DataLoader(
eval_dataset_for_model, collate_fn=data_collator, batch_size=args.per_device_eval_batch_size
)
def post_processing_function ( examples , features , predictions , stage="""eval""" ):
    # Post-processing: we match the start logits and end logits to answers in the original context.
    predictions = postprocess_qa_predictions(
        examples=examples , features=features , predictions=predictions , version_2_with_negative=args.version_2_with_negative , n_best_size=args.n_best_size , max_answer_length=args.max_answer_length , null_score_diff_threshold=args.null_score_diff_threshold , output_dir=args.output_dir , prefix=stage , )
    # Format the result to the format the metric expects.
    if args.version_2_with_negative:
        formatted_predictions = [
            {"""id""": k, """prediction_text""": v, """no_answer_probability""": 0.0} for k, v in predictions.items()
        ]
    else:
        formatted_predictions = [{"""id""": k, """prediction_text""": v} for k, v in predictions.items()]
    references = [{"""id""": ex["""id"""], """answers""": ex[answer_column_name]} for ex in examples]
    return EvalPrediction(predictions=formatted_predictions , label_ids=references )
metric = load_metric('squad_v2' if args.version_2_with_negative else 'squad')
# Evaluation!
logger.info('Loading ONNX model %s for evaluation', args.onnx_model_path)
with open(engine_name, 'rb') as f, trt.Runtime(TRT_LOGGER) as runtime, runtime.deserialize_cuda_engine(
f.read()
) as engine, engine.create_execution_context() as context:
# setup for TRT inferrence
for i in range(len(input_names)):
context.set_binding_shape(i, INPUT_SHAPE)
assert context.all_binding_shapes_specified
    def binding_nbytes ( binding ):
        return trt.volume(engine.get_binding_shape(binding ) ) * engine.get_binding_dtype(binding ).itemsize
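    # e.g. an int32 binding of shape (8, 384) occupies 8 * 384 * 4 = 12_288 bytes.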
# Allocate device memory for inputs and outputs.
    d_inputs = [cuda.mem_alloc(binding_nbytes(binding)) for binding in engine if engine.binding_is_input(binding)]
    # Allocate output buffers
    h_output0 = cuda.pagelocked_empty(tuple(context.get_binding_shape(3)), dtype=np.float32)
    h_output1 = cuda.pagelocked_empty(tuple(context.get_binding_shape(4)), dtype=np.float32)
    d_output0 = cuda.mem_alloc(h_output0.nbytes)
    d_output1 = cuda.mem_alloc(h_output1.nbytes)
# Create a stream in which to copy inputs/outputs and run inference.
    stream = cuda.Stream()
# Evaluation
logger.info('***** Running Evaluation *****')
logger.info(f''' Num examples = {len(eval_dataset)}''')
logger.info(f''' Batch size = {args.per_device_eval_batch_size}''')
    total_time = 0.0
    niter = 0
    start_time = timeit.default_timer()
    all_preds = None
for step, batch in enumerate(eval_dataloader):
        outputs , infer_time = model_infer(batch, context, d_inputs, h_output0, h_output1, d_output0, d_output1, stream)
total_time += infer_time
niter += 1
        start_logits , end_logits = outputs
        start_logits = torch.tensor(start_logits)
        end_logits = torch.tensor(end_logits)
        # necessary to pad predictions and labels for being gathered
        start_logits = accelerator.pad_across_processes(start_logits, dim=1, pad_index=-100)
        end_logits = accelerator.pad_across_processes(end_logits, dim=1, pad_index=-100)
        logits = (accelerator.gather(start_logits).cpu().numpy(), accelerator.gather(end_logits).cpu().numpy())
        all_preds = logits if all_preds is None else nested_concat(all_preds, logits, padding_index=-100)
if all_preds is not None:
        all_preds = nested_truncate(all_preds, len(eval_dataset))
    evalTime = timeit.default_timer() - start_time
logger.info(' Evaluation done in total %f secs (%f sec per example)', evalTime, evalTime / len(eval_dataset))
# Inference time from TRT
logger.info('Average Inference Time = {:.3f} ms'.format(total_time * 1_000 / niter))
logger.info('Total Inference Time = {:.3f} ms'.format(total_time * 1_000))
logger.info('Total Number of Inference = %d', niter)
    prediction = post_processing_function(eval_examples, eval_dataset, all_preds)
    eval_metric = metric.compute(predictions=prediction.predictions, references=prediction.label_ids)
logger.info(f'''Evaluation metrics: {eval_metric}''') | 697 | 1 |
'''Project Euler 94: sum the perimeters of almost-equilateral triangles with integral sides and area, up to a perimeter limit.'''
def solution ( max_perimeter = 1_0**9 ) -> int:
    prev_value = 1
    value = 2
    perimeters_sum = 0
    i = 0
    perimeter = 0
    while perimeter <= max_perimeter:
        perimeters_sum += perimeter
        prev_value += 2 * value
        value += prev_value
        perimeter = 2 * value + 2 if i % 2 == 0 else 2 * value - 2
i += 1
return perimeters_sum
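# The published Project Euler #94 result for the default 10**9 limit is 518408346.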
if __name__ == "__main__":
print(f'''{solution() = }''') | 697 |
'''simple docstring'''
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNetaDModel
from ...schedulers import KarrasVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class SCREAMING_SNAKE_CASE__ ( __UpperCamelCase ):
lowercase__ = 42
lowercase__ = 42
def __init__( self , __UpperCamelCase , __UpperCamelCase ):
'''simple docstring'''
super().__init__()
self.register_modules(unet=__UpperCamelCase , scheduler=__UpperCamelCase )
@torch.no_grad()
def __call__( self , __UpperCamelCase = 1 , __UpperCamelCase = 50 , __UpperCamelCase = None , __UpperCamelCase = "pil" , __UpperCamelCase = True , **__UpperCamelCase , ):
'''simple docstring'''
__a : int = self.unet.config.sample_size
__a : Optional[int] = (batch_size, 3, img_size, img_size)
__a : Union[str, Any] = self.unet
# sample x_0 ~ N(0, sigma_0^2 * I)
__a : Dict = randn_tensor(__UpperCamelCase , generator=__UpperCamelCase , device=self.device ) * self.scheduler.init_noise_sigma
self.scheduler.set_timesteps(__UpperCamelCase )
for t in self.progress_bar(self.scheduler.timesteps ):
# here sigma_t == t_i from the paper
__a : Dict = self.scheduler.schedule[t]
__a : Any = self.scheduler.schedule[t - 1] if t > 0 else 0
# 1. Select temporarily increased noise level sigma_hat
# 2. Add new noise to move from sample_i to sample_hat
__a , __a : Tuple = self.scheduler.add_noise_to_input(__UpperCamelCase , __UpperCamelCase , generator=__UpperCamelCase )
# 3. Predict the noise residual given the noise magnitude `sigma_hat`
# The model inputs and output are adjusted by following eq. (213) in [1].
__a : List[Any] = (sigma_hat / 2) * model((sample_hat + 1) / 2 , sigma_hat / 2 ).sample
# 4. Evaluate dx/dt at sigma_hat
# 5. Take Euler step from sigma to sigma_prev
__a : str = self.scheduler.step(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
if sigma_prev != 0:
# 6. Apply 2nd order correction
# The model inputs and output are adjusted by following eq. (213) in [1].
__a : Union[str, Any] = (sigma_prev / 2) * model((step_output.prev_sample + 1) / 2 , sigma_prev / 2 ).sample
__a : Tuple = self.scheduler.step_correct(
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , step_output.prev_sample , step_output["""derivative"""] , )
__a : Tuple = step_output.prev_sample
__a : Optional[Any] = (sample / 2 + 0.5).clamp(0 , 1 )
__a : Dict = sample.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
__a : List[Any] = self.numpy_to_pil(__UpperCamelCase )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=__UpperCamelCase ) | 697 | 1 |
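# A minimal usage sketch (the checkpoint id is an assumption; any UNet2D noise
# model paired with a KarrasVeScheduler should work the same way):
#   unet = UNetaDModel.from_pretrained("google/ncsnpp-celebahq-256")  # assumed id
#   pipe = SCREAMING_SNAKE_CASE__(unet=unet, scheduler=KarrasVeScheduler())
#   image = pipe(batch_size=1, num_inference_steps=50).images[0]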
'''simple docstring'''
def _snake_case ( lowercase ) -> bool:
if not isinstance(lowercase , lowercase ):
raise ValueError("""check_bouncy() accepts only integer arguments""" )
__a : str = str(lowercase )
__a : Any = """""".join(sorted(lowercase ) )
return sorted_str_n != str_n and sorted_str_n[::-1] != str_n
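# Illustrative behaviour (sketch): check_bouncy(155349) -> True because its digits
# neither increase nor decrease monotonically, while check_bouncy(1234) and
# check_bouncy(4321) -> False.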
def _snake_case ( lowercase = 9_9 ) -> int:
if not 0 < percent < 1_0_0:
raise ValueError("""solution() only accepts values from 0 to 100""" )
__a : List[str] = 0
__a : Union[str, Any] = 1
while True:
if check_bouncy(lowercase ):
bouncy_num += 1
if (bouncy_num / num) * 1_0_0 >= percent:
return num
num += 1
if __name__ == "__main__":
from doctest import testmod
testmod()
print(f'''{solution(99)}''') | 697 | 1 |
'''simple docstring'''
from collections import defaultdict
from typing import Optional
from ..image_utils import load_image
from ..utils import (
add_end_docstrings,
is_torch_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_MASK_GENERATION_MAPPING
__SCREAMING_SNAKE_CASE : List[str] = logging.get_logger(__name__)
@add_end_docstrings(__UpperCamelCase )
class SCREAMING_SNAKE_CASE__ ( __UpperCamelCase ):
def __init__( self , **__UpperCamelCase ):
'''simple docstring'''
super().__init__(**__UpperCamelCase )
requires_backends(self , """vision""" )
requires_backends(self , """torch""" )
if self.framework != "pt":
raise ValueError(f"""The {self.__class__} is only available in PyTorch.""" )
self.check_model_type(__UpperCamelCase )
def __lowerCamelCase ( self , **__UpperCamelCase ):
'''simple docstring'''
__a : Optional[int] = {}
__a : Optional[Any] = {}
__a : List[str] = {}
# preprocess args
if "points_per_batch" in kwargs:
__a : str = kwargs["""points_per_batch"""]
if "points_per_crop" in kwargs:
__a : Union[str, Any] = kwargs["""points_per_crop"""]
if "crops_n_layers" in kwargs:
__a : str = kwargs["""crops_n_layers"""]
if "crop_overlap_ratio" in kwargs:
__a : Optional[Any] = kwargs["""crop_overlap_ratio"""]
if "crop_n_points_downscale_factor" in kwargs:
__a : List[str] = kwargs["""crop_n_points_downscale_factor"""]
# postprocess args
if "pred_iou_thresh" in kwargs:
__a : int = kwargs["""pred_iou_thresh"""]
if "stability_score_offset" in kwargs:
__a : Dict = kwargs["""stability_score_offset"""]
if "mask_threshold" in kwargs:
__a : int = kwargs["""mask_threshold"""]
if "stability_score_thresh" in kwargs:
__a : Optional[Any] = kwargs["""stability_score_thresh"""]
if "crops_nms_thresh" in kwargs:
__a : List[Any] = kwargs["""crops_nms_thresh"""]
if "output_rle_mask" in kwargs:
__a : Optional[int] = kwargs["""output_rle_mask"""]
if "output_bboxes_mask" in kwargs:
__a : Optional[Any] = kwargs["""output_bboxes_mask"""]
return preprocess_kwargs, forward_params, postprocess_kwargs
def __call__( self , __UpperCamelCase , *__UpperCamelCase , __UpperCamelCase=None , __UpperCamelCase=None , **__UpperCamelCase ):
'''simple docstring'''
return super().__call__(__UpperCamelCase , *__UpperCamelCase , num_workers=__UpperCamelCase , batch_size=__UpperCamelCase , **__UpperCamelCase )
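    # Usage sketch (model id and input file are illustrative):
    #   generator = pipeline("mask-generation", model="facebook/sam-vit-base")
    #   outputs = generator("image.png", points_per_batch=64)
    #   masks = outputs["masks"]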
def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase=64 , __UpperCamelCase = 0 , __UpperCamelCase = 512 / 1500 , __UpperCamelCase = 32 , __UpperCamelCase = 1 , ):
'''simple docstring'''
__a : Optional[Any] = load_image(__UpperCamelCase )
__a : Optional[Any] = self.image_processor.size["""longest_edge"""]
__a , __a , __a , __a : List[Any] = self.image_processor.generate_crop_boxes(
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
__a : Optional[int] = self.image_processor(images=__UpperCamelCase , return_tensors="""pt""" )
with self.device_placement():
if self.framework == "pt":
__a : Any = self.get_inference_context()
with inference_context():
__a : Dict = self._ensure_tensor_on_device(__UpperCamelCase , device=self.device )
__a : Optional[Any] = self.model.get_image_embeddings(model_inputs.pop("""pixel_values""" ) )
__a : str = image_embeddings
__a : List[Any] = grid_points.shape[1]
__a : Dict = points_per_batch if points_per_batch is not None else n_points
if points_per_batch <= 0:
raise ValueError(
"""Cannot have points_per_batch<=0. Must be >=1 to returned batched outputs. """
"""To return all points at once, set points_per_batch to None""" )
for i in range(0 , __UpperCamelCase , __UpperCamelCase ):
__a : Union[str, Any] = grid_points[:, i : i + points_per_batch, :, :]
__a : Dict = input_labels[:, i : i + points_per_batch]
__a : str = i == n_points - points_per_batch
yield {
"input_points": batched_points,
"input_labels": labels,
"input_boxes": crop_boxes,
"is_last": is_last,
**model_inputs,
}
def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase=0.8_8 , __UpperCamelCase=0.9_5 , __UpperCamelCase=0 , __UpperCamelCase=1 , ):
'''simple docstring'''
__a : List[str] = model_inputs.pop("""input_boxes""" )
__a : Any = model_inputs.pop("""is_last""" )
__a : Dict = model_inputs.pop("""original_sizes""" ).tolist()
__a : Any = model_inputs.pop("""reshaped_input_sizes""" ).tolist()
__a : str = self.model(**__UpperCamelCase )
# post processing happens here in order to avoid CPU GPU copies of ALL the masks
__a : Dict = model_outputs["""pred_masks"""]
__a : int = self.image_processor.post_process_masks(
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , binarize=__UpperCamelCase )
__a : List[str] = model_outputs["""iou_scores"""]
__a , __a , __a : Tuple = self.image_processor.filter_masks(
masks[0] , iou_scores[0] , original_sizes[0] , input_boxes[0] , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , )
return {
"masks": masks,
"is_last": is_last,
"boxes": boxes,
"iou_scores": iou_scores,
}
def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase=False , __UpperCamelCase=False , __UpperCamelCase=0.7 , ):
'''simple docstring'''
__a : Dict = []
__a : List[Any] = []
__a : Union[str, Any] = []
for model_output in model_outputs:
all_scores.append(model_output.pop("""iou_scores""" ) )
all_masks.extend(model_output.pop("""masks""" ) )
all_boxes.append(model_output.pop("""boxes""" ) )
__a : Optional[Any] = torch.cat(__UpperCamelCase )
__a : Optional[Any] = torch.cat(__UpperCamelCase )
__a , __a , __a , __a : int = self.image_processor.post_process_for_mask_generation(
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
__a : int = defaultdict(__UpperCamelCase )
for output in model_outputs:
for k, v in output.items():
extra[k].append(__UpperCamelCase )
__a : int = {}
if output_rle_mask:
__a : Optional[Any] = rle_mask
if output_bboxes_mask:
__a : Optional[int] = bounding_boxes
return {"masks": output_masks, "scores": iou_scores, **optional, **extra} | 697 |
'''simple docstring'''
import argparse
import torch
from transformers import GPTaConfig, GPTaModel, load_tf_weights_in_gpta
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def _snake_case ( lowercase , lowercase , lowercase ) -> Any:
# Construct model
if gpta_config_file == "":
__a : Dict = GPTaConfig()
else:
__a : Optional[Any] = GPTaConfig.from_json_file(lowercase )
__a : Union[str, Any] = GPTaModel(lowercase )
# Load weights from numpy
load_tf_weights_in_gpta(lowercase , lowercase , lowercase )
    # Save the PyTorch model
__a : Optional[int] = pytorch_dump_folder_path + """/""" + WEIGHTS_NAME
__a : Dict = pytorch_dump_folder_path + """/""" + CONFIG_NAME
print(F"""Save PyTorch model to {pytorch_weights_dump_path}""" )
torch.save(model.state_dict() , lowercase )
print(F"""Save configuration file to {pytorch_config_dump_path}""" )
with open(lowercase , """w""" , encoding="""utf-8""" ) as f:
f.write(config.to_json_string() )
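# Example invocation (script name and all paths are hypothetical):
#   python convert_gpt2_original_tf_checkpoint_to_pytorch.py \
#       --gpt2_checkpoint_path /ckpts/gpt2/model.ckpt \
#       --pytorch_dump_folder_path /ckpts/gpt2-pytorch \
#       --gpt2_config_file /ckpts/gpt2/config.json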
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE : Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--gpt2_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--gpt2_config_file',
default='',
type=str,
help=(
'An optional config json file corresponding to the pre-trained OpenAI model. \n'
'This specifies the model architecture.'
),
)
__SCREAMING_SNAKE_CASE : Dict = parser.parse_args()
convert_gpta_checkpoint_to_pytorch(args.gpta_checkpoint_path, args.gpta_config_file, args.pytorch_dump_folder_path) | 697 | 1 |
'''simple docstring'''
import os
from itertools import chain
from random import randrange, shuffle
import pytest
from .sola import PokerHand
__SCREAMING_SNAKE_CASE : List[str] = (
'4S 3H 2C 7S 5H',
'9D 8H 2C 6S 7H',
'2D 6D 9D TH 7D',
'TC 8C 2S JH 6C',
'JH 8S TH AH QH',
'TS KS 5S 9S AC',
'KD 6S 9D TH AD',
'KS 8D 4D 9S 4S', # pair
'8C 4S KH JS 4D', # pair
'QH 8H KD JH 8S', # pair
'KC 4H KS 2H 8D', # pair
'KD 4S KC 3H 8S', # pair
'AH 8S AS KC JH', # pair
'3H 4C 4H 3S 2H', # 2 pairs
'5S 5D 2C KH KH', # 2 pairs
'3C KH 5D 5S KH', # 2 pairs
'AS 3C KH AD KH', # 2 pairs
'7C 7S 3S 7H 5S', # 3 of a kind
'7C 7S KH 2H 7H', # 3 of a kind
'AC KH QH AH AS', # 3 of a kind
'2H 4D 3C AS 5S', # straight (low ace)
'3C 5C 4C 2C 6H', # straight
'6S 8S 7S 5H 9H', # straight
'JS QS 9H TS KH', # straight
'QC KH TS JS AH', # straight (high ace)
'8C 9C 5C 3C TC', # flush
'3S 8S 9S 5S KS', # flush
'4C 5C 9C 8C KC', # flush
'JH 8H AH KH QH', # flush
'3D 2H 3H 2C 2D', # full house
'2H 2C 3S 3H 3D', # full house
'KH KC 3S 3H 3D', # full house
'JC 6H JS JD JH', # 4 of a kind
'JC 7H JS JD JH', # 4 of a kind
'JC KH JS JD JH', # 4 of a kind
'2S AS 4S 5S 3S', # straight flush (low ace)
'2D 6D 3D 4D 5D', # straight flush
'5C 6C 3C 7C 4C', # straight flush
'JH 9H TH KH QH', # straight flush
'JH AH TH KH QH', # royal flush (high ace straight flush)
)
__SCREAMING_SNAKE_CASE : Optional[Any] = (
('2H 3H 4H 5H 6H', 'KS AS TS QS JS', 'Loss'),
('2H 3H 4H 5H 6H', 'AS AD AC AH JD', 'Win'),
('AS AH 2H AD AC', 'JS JD JC JH 3D', 'Win'),
('2S AH 2H AS AC', 'JS JD JC JH AD', 'Loss'),
('2S AH 2H AS AC', '2H 3H 5H 6H 7H', 'Win'),
('AS 3S 4S 8S 2S', '2H 3H 5H 6H 7H', 'Win'),
('2H 3H 5H 6H 7H', '2S 3H 4H 5S 6C', 'Win'),
('2S 3H 4H 5S 6C', '3D 4C 5H 6H 2S', 'Tie'),
('2S 3H 4H 5S 6C', 'AH AC 5H 6H AS', 'Win'),
('2S 2H 4H 5S 4C', 'AH AC 5H 6H AS', 'Loss'),
('2S 2H 4H 5S 4C', 'AH AC 5H 6H 7S', 'Win'),
('6S AD 7H 4S AS', 'AH AC 5H 6H 7S', 'Loss'),
('2S AH 4H 5S KC', 'AH AC 5H 6H 7S', 'Loss'),
('2S 3H 6H 7S 9C', '7H 3C TH 6H 9S', 'Loss'),
('4S 5H 6H TS AC', '3S 5H 6H TS AC', 'Win'),
('2S AH 4H 5S 6C', 'AD 4C 5H 6H 2C', 'Tie'),
('AS AH 3H AD AC', 'AS AH 2H AD AC', 'Win'),
('AH AC 5H 5C QS', 'AH AC 5H 5C KS', 'Loss'),
('AH AC 5H 5C QS', 'KH KC 5H 5C QS', 'Win'),
('7C 7S KH 2H 7H', '3C 3S AH 2H 3H', 'Win'),
('3C 3S AH 2H 3H', '7C 7S KH 2H 7H', 'Loss'),
('6H 5H 4H 3H 2H', '5H 4H 3H 2H AH', 'Win'),
('5H 4H 3H 2H AH', '5H 4H 3H 2H AH', 'Tie'),
('5H 4H 3H 2H AH', '6H 5H 4H 3H 2H', 'Loss'),
('AH AD KS KC AC', 'AH KD KH AC KC', 'Win'),
('2H 4D 3C AS 5S', '2H 4D 3C 6S 5S', 'Loss'),
('2H 3S 3C 3H 2S', '3S 3C 2S 2H 2D', 'Win'),
('4D 6D 5D 2D JH', '3S 8S 3H TC KH', 'Loss'),
('4S 6C 8S 3S 7S', 'AD KS 2D 7D 7C', 'Loss'),
('6S 4C 7H 8C 3H', '5H JC AH 9D 9C', 'Loss'),
('9D 9H JH TC QH', '3C 2S JS 5C 7H', 'Win'),
('2H TC 8S AD 9S', '4H TS 7H 2C 5C', 'Win'),
('9D 3S 2C 7S 7C', 'JC TD 3C TC 9H', 'Loss'),
)
__SCREAMING_SNAKE_CASE : Tuple = (
('2H 3H 4H 5H 6H', True),
('AS AH 2H AD AC', False),
('2H 3H 5H 6H 7H', True),
('KS AS TS QS JS', True),
('8H 9H QS JS TH', False),
('AS 3S 4S 8S 2S', True),
)
__SCREAMING_SNAKE_CASE : Dict = (
('2H 3H 4H 5H 6H', True),
('AS AH 2H AD AC', False),
('2H 3H 5H 6H 7H', False),
('KS AS TS QS JS', True),
('8H 9H QS JS TH', True),
)
__SCREAMING_SNAKE_CASE : Optional[int] = (
('2H 4D 3C AS 5S', True, [5, 4, 3, 2, 14]),
('2H 5D 3C AS 5S', False, [14, 5, 5, 3, 2]),
('JH QD KC AS TS', False, [14, 13, 12, 11, 10]),
('9D 3S 2C 7S 7C', False, [9, 7, 7, 3, 2]),
)
__SCREAMING_SNAKE_CASE : int = (
('JH AH TH KH QH', 0),
('JH 9H TH KH QH', 0),
('JC KH JS JD JH', 7),
('KH KC 3S 3H 3D', 6),
('8C 9C 5C 3C TC', 0),
('JS QS 9H TS KH', 0),
('7C 7S KH 2H 7H', 3),
('3C KH 5D 5S KH', 2),
('QH 8H KD JH 8S', 1),
('2D 6D 9D TH 7D', 0),
)
__SCREAMING_SNAKE_CASE : int = (
('JH AH TH KH QH', 23),
('JH 9H TH KH QH', 22),
('JC KH JS JD JH', 21),
('KH KC 3S 3H 3D', 20),
('8C 9C 5C 3C TC', 19),
('JS QS 9H TS KH', 18),
('7C 7S KH 2H 7H', 17),
('3C KH 5D 5S KH', 16),
('QH 8H KD JH 8S', 15),
('2D 6D 9D TH 7D', 14),
)
def _snake_case ( ) -> List[str]:
__a , __a : List[Any] = randrange(len(lowercase ) ), randrange(len(lowercase ) )
__a : int = ["""Loss""", """Tie""", """Win"""][(play >= oppo) + (play > oppo)]
__a , __a : int = SORTED_HANDS[play], SORTED_HANDS[oppo]
return hand, other, expected
def _snake_case ( lowercase = 1_0_0 ) -> Any:
return (generate_random_hand() for _ in range(lowercase ))
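# Sketch of a generated tuple: ("JC KH JS JD JH", "KH KC 3S 3H 3D", "Win") --
# two hands drawn from SORTED_HANDS plus the expected comparison result, where a
# higher index in the sorted list corresponds to a stronger hand.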
@pytest.mark.parametrize("""hand, expected""" , lowercase )
def _snake_case ( lowercase , lowercase ) -> int:
assert PokerHand(lowercase )._is_flush() == expected
@pytest.mark.parametrize("""hand, expected""" , lowercase )
def _snake_case ( lowercase , lowercase ) -> Any:
assert PokerHand(lowercase )._is_straight() == expected
@pytest.mark.parametrize("""hand, expected, card_values""" , lowercase )
def _snake_case ( lowercase , lowercase , lowercase ) -> List[str]:
__a : Union[str, Any] = PokerHand(lowercase )
assert player._is_five_high_straight() == expected
assert player._card_values == card_values
@pytest.mark.parametrize("""hand, expected""" , lowercase )
def _snake_case ( lowercase , lowercase ) -> Optional[int]:
assert PokerHand(lowercase )._is_same_kind() == expected
@pytest.mark.parametrize("""hand, expected""" , lowercase )
def _snake_case ( lowercase , lowercase ) -> Union[str, Any]:
assert PokerHand(lowercase )._hand_type == expected
@pytest.mark.parametrize("""hand, other, expected""" , lowercase )
def _snake_case ( lowercase , lowercase , lowercase ) -> Optional[int]:
assert PokerHand(lowercase ).compare_with(PokerHand(lowercase ) ) == expected
@pytest.mark.parametrize("""hand, other, expected""" , generate_random_hands() )
def _snake_case ( lowercase , lowercase , lowercase ) -> int:
assert PokerHand(lowercase ).compare_with(PokerHand(lowercase ) ) == expected
def _snake_case ( ) -> Union[str, Any]:
__a : Tuple = [PokerHand(lowercase ) for hand in SORTED_HANDS]
__a : Optional[int] = poker_hands.copy()
shuffle(lowercase )
__a : List[str] = chain(sorted(lowercase ) )
for index, hand in enumerate(lowercase ):
assert hand == poker_hands[index]
def _snake_case ( ) -> List[str]:
# Test that five high straights are compared correctly.
__a : Optional[int] = [PokerHand("""2D AC 3H 4H 5S""" ), PokerHand("""2S 3H 4H 5S 6C""" )]
pokerhands.sort(reverse=lowercase )
assert pokerhands[0].__str__() == "2S 3H 4H 5S 6C"
def _snake_case ( ) -> List[str]:
# Multiple calls to five_high_straight function should still return True
# and shouldn't mutate the list in every call other than the first.
__a : Dict = PokerHand("""2C 4S AS 3D 5C""" )
__a : Dict = True
__a : Optional[int] = [5, 4, 3, 2, 1_4]
for _ in range(1_0 ):
assert pokerhand._is_five_high_straight() == expected
assert pokerhand._card_values == expected_card_values
def _snake_case ( ) -> Dict:
# Problem number 54 from Project Euler
# Testing from poker_hands.txt file
__a : Tuple = 0
__a : int = os.path.abspath(os.path.dirname(lowercase ) )
__a : Union[str, Any] = os.path.join(lowercase , """poker_hands.txt""" )
with open(lowercase ) as file_hand:
for line in file_hand:
__a : Union[str, Any] = line[:1_4].strip()
__a : Optional[Any] = line[1_5:].strip()
__a , __a : List[str] = PokerHand(lowercase ), PokerHand(lowercase )
__a : str = player.compare_with(lowercase )
if output == "Win":
answer += 1
assert answer == 3_7_6 | 697 |
'''simple docstring'''
import unittest
from transformers import (
MODEL_FOR_OBJECT_DETECTION_MAPPING,
AutoFeatureExtractor,
AutoModelForObjectDetection,
ObjectDetectionPipeline,
is_vision_available,
pipeline,
)
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_pytesseract,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class SCREAMING_SNAKE_CASE__ :
@staticmethod
def __lowerCamelCase ( *__UpperCamelCase , **__UpperCamelCase ):
'''simple docstring'''
pass
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
lowercase__ = MODEL_FOR_OBJECT_DETECTION_MAPPING
def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ):
'''simple docstring'''
__a : Optional[Any] = ObjectDetectionPipeline(model=__UpperCamelCase , image_processor=__UpperCamelCase )
return object_detector, ["./tests/fixtures/tests_samples/COCO/000000039769.png"]
def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase ):
'''simple docstring'''
__a : List[str] = object_detector("""./tests/fixtures/tests_samples/COCO/000000039769.png""" , threshold=0.0 )
self.assertGreater(len(__UpperCamelCase ) , 0 )
for detected_object in outputs:
self.assertEqual(
__UpperCamelCase , {
"""score""": ANY(__UpperCamelCase ),
"""label""": ANY(__UpperCamelCase ),
"""box""": {"""xmin""": ANY(__UpperCamelCase ), """ymin""": ANY(__UpperCamelCase ), """xmax""": ANY(__UpperCamelCase ), """ymax""": ANY(__UpperCamelCase )},
} , )
import datasets
__a : Optional[int] = datasets.load_dataset("""hf-internal-testing/fixtures_image_utils""" , """image""" , split="""test""" )
__a : Tuple = [
Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ),
"""http://images.cocodataset.org/val2017/000000039769.jpg""",
# RGBA
dataset[0]["""file"""],
# LA
dataset[1]["""file"""],
# L
dataset[2]["""file"""],
]
__a : Any = object_detector(__UpperCamelCase , threshold=0.0 )
self.assertEqual(len(__UpperCamelCase ) , len(__UpperCamelCase ) )
for outputs in batch_outputs:
self.assertGreater(len(__UpperCamelCase ) , 0 )
for detected_object in outputs:
self.assertEqual(
__UpperCamelCase , {
"""score""": ANY(__UpperCamelCase ),
"""label""": ANY(__UpperCamelCase ),
"""box""": {"""xmin""": ANY(__UpperCamelCase ), """ymin""": ANY(__UpperCamelCase ), """xmax""": ANY(__UpperCamelCase ), """ymax""": ANY(__UpperCamelCase )},
} , )
@require_tf
@unittest.skip("""Object detection not implemented in TF""" )
def __lowerCamelCase ( self ):
'''simple docstring'''
pass
@require_torch
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Optional[Any] = """hf-internal-testing/tiny-detr-mobilenetsv3"""
__a : Dict = AutoModelForObjectDetection.from_pretrained(__UpperCamelCase )
__a : Optional[Any] = AutoFeatureExtractor.from_pretrained(__UpperCamelCase )
__a : str = ObjectDetectionPipeline(model=__UpperCamelCase , feature_extractor=__UpperCamelCase )
__a : Optional[int] = object_detector("""http://images.cocodataset.org/val2017/000000039769.jpg""" , threshold=0.0 )
self.assertEqual(
nested_simplify(__UpperCamelCase , decimals=4 ) , [
{"""score""": 0.3_3_7_6, """label""": """LABEL_0""", """box""": {"""xmin""": 159, """ymin""": 120, """xmax""": 480, """ymax""": 359}},
{"""score""": 0.3_3_7_6, """label""": """LABEL_0""", """box""": {"""xmin""": 159, """ymin""": 120, """xmax""": 480, """ymax""": 359}},
] , )
__a : Union[str, Any] = object_detector(
[
"""http://images.cocodataset.org/val2017/000000039769.jpg""",
"""http://images.cocodataset.org/val2017/000000039769.jpg""",
] , threshold=0.0 , )
self.assertEqual(
nested_simplify(__UpperCamelCase , decimals=4 ) , [
[
{"""score""": 0.3_3_7_6, """label""": """LABEL_0""", """box""": {"""xmin""": 159, """ymin""": 120, """xmax""": 480, """ymax""": 359}},
{"""score""": 0.3_3_7_6, """label""": """LABEL_0""", """box""": {"""xmin""": 159, """ymin""": 120, """xmax""": 480, """ymax""": 359}},
],
[
{"""score""": 0.3_3_7_6, """label""": """LABEL_0""", """box""": {"""xmin""": 159, """ymin""": 120, """xmax""": 480, """ymax""": 359}},
{"""score""": 0.3_3_7_6, """label""": """LABEL_0""", """box""": {"""xmin""": 159, """ymin""": 120, """xmax""": 480, """ymax""": 359}},
],
] , )
@require_torch
@slow
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : str = """facebook/detr-resnet-50"""
__a : Dict = AutoModelForObjectDetection.from_pretrained(__UpperCamelCase )
__a : int = AutoFeatureExtractor.from_pretrained(__UpperCamelCase )
__a : int = ObjectDetectionPipeline(model=__UpperCamelCase , feature_extractor=__UpperCamelCase )
__a : Any = object_detector("""http://images.cocodataset.org/val2017/000000039769.jpg""" )
self.assertEqual(
nested_simplify(__UpperCamelCase , decimals=4 ) , [
{"""score""": 0.9_9_8_2, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 70, """xmax""": 175, """ymax""": 117}},
{"""score""": 0.9_9_6_0, """label""": """remote""", """box""": {"""xmin""": 333, """ymin""": 72, """xmax""": 368, """ymax""": 187}},
{"""score""": 0.9_9_5_5, """label""": """couch""", """box""": {"""xmin""": 0, """ymin""": 1, """xmax""": 639, """ymax""": 473}},
{"""score""": 0.9_9_8_8, """label""": """cat""", """box""": {"""xmin""": 13, """ymin""": 52, """xmax""": 314, """ymax""": 470}},
{"""score""": 0.9_9_8_7, """label""": """cat""", """box""": {"""xmin""": 345, """ymin""": 23, """xmax""": 640, """ymax""": 368}},
] , )
__a : Optional[Any] = object_detector(
[
"""http://images.cocodataset.org/val2017/000000039769.jpg""",
"""http://images.cocodataset.org/val2017/000000039769.jpg""",
] )
self.assertEqual(
nested_simplify(__UpperCamelCase , decimals=4 ) , [
[
{"""score""": 0.9_9_8_2, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 70, """xmax""": 175, """ymax""": 117}},
{"""score""": 0.9_9_6_0, """label""": """remote""", """box""": {"""xmin""": 333, """ymin""": 72, """xmax""": 368, """ymax""": 187}},
{"""score""": 0.9_9_5_5, """label""": """couch""", """box""": {"""xmin""": 0, """ymin""": 1, """xmax""": 639, """ymax""": 473}},
{"""score""": 0.9_9_8_8, """label""": """cat""", """box""": {"""xmin""": 13, """ymin""": 52, """xmax""": 314, """ymax""": 470}},
{"""score""": 0.9_9_8_7, """label""": """cat""", """box""": {"""xmin""": 345, """ymin""": 23, """xmax""": 640, """ymax""": 368}},
],
[
{"""score""": 0.9_9_8_2, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 70, """xmax""": 175, """ymax""": 117}},
{"""score""": 0.9_9_6_0, """label""": """remote""", """box""": {"""xmin""": 333, """ymin""": 72, """xmax""": 368, """ymax""": 187}},
{"""score""": 0.9_9_5_5, """label""": """couch""", """box""": {"""xmin""": 0, """ymin""": 1, """xmax""": 639, """ymax""": 473}},
{"""score""": 0.9_9_8_8, """label""": """cat""", """box""": {"""xmin""": 13, """ymin""": 52, """xmax""": 314, """ymax""": 470}},
{"""score""": 0.9_9_8_7, """label""": """cat""", """box""": {"""xmin""": 345, """ymin""": 23, """xmax""": 640, """ymax""": 368}},
],
] , )
@require_torch
@slow
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : int = """facebook/detr-resnet-50"""
__a : Optional[int] = pipeline("""object-detection""" , model=__UpperCamelCase )
__a : Optional[int] = object_detector("""http://images.cocodataset.org/val2017/000000039769.jpg""" )
self.assertEqual(
nested_simplify(__UpperCamelCase , decimals=4 ) , [
{"""score""": 0.9_9_8_2, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 70, """xmax""": 175, """ymax""": 117}},
{"""score""": 0.9_9_6_0, """label""": """remote""", """box""": {"""xmin""": 333, """ymin""": 72, """xmax""": 368, """ymax""": 187}},
{"""score""": 0.9_9_5_5, """label""": """couch""", """box""": {"""xmin""": 0, """ymin""": 1, """xmax""": 639, """ymax""": 473}},
{"""score""": 0.9_9_8_8, """label""": """cat""", """box""": {"""xmin""": 13, """ymin""": 52, """xmax""": 314, """ymax""": 470}},
{"""score""": 0.9_9_8_7, """label""": """cat""", """box""": {"""xmin""": 345, """ymin""": 23, """xmax""": 640, """ymax""": 368}},
] , )
__a : List[str] = object_detector(
[
"""http://images.cocodataset.org/val2017/000000039769.jpg""",
"""http://images.cocodataset.org/val2017/000000039769.jpg""",
] )
self.assertEqual(
nested_simplify(__UpperCamelCase , decimals=4 ) , [
[
{"""score""": 0.9_9_8_2, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 70, """xmax""": 175, """ymax""": 117}},
{"""score""": 0.9_9_6_0, """label""": """remote""", """box""": {"""xmin""": 333, """ymin""": 72, """xmax""": 368, """ymax""": 187}},
{"""score""": 0.9_9_5_5, """label""": """couch""", """box""": {"""xmin""": 0, """ymin""": 1, """xmax""": 639, """ymax""": 473}},
{"""score""": 0.9_9_8_8, """label""": """cat""", """box""": {"""xmin""": 13, """ymin""": 52, """xmax""": 314, """ymax""": 470}},
{"""score""": 0.9_9_8_7, """label""": """cat""", """box""": {"""xmin""": 345, """ymin""": 23, """xmax""": 640, """ymax""": 368}},
],
[
{"""score""": 0.9_9_8_2, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 70, """xmax""": 175, """ymax""": 117}},
{"""score""": 0.9_9_6_0, """label""": """remote""", """box""": {"""xmin""": 333, """ymin""": 72, """xmax""": 368, """ymax""": 187}},
{"""score""": 0.9_9_5_5, """label""": """couch""", """box""": {"""xmin""": 0, """ymin""": 1, """xmax""": 639, """ymax""": 473}},
{"""score""": 0.9_9_8_8, """label""": """cat""", """box""": {"""xmin""": 13, """ymin""": 52, """xmax""": 314, """ymax""": 470}},
{"""score""": 0.9_9_8_7, """label""": """cat""", """box""": {"""xmin""": 345, """ymin""": 23, """xmax""": 640, """ymax""": 368}},
],
] , )
@require_torch
@slow
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Union[str, Any] = 0.9_9_8_5
__a : Union[str, Any] = """facebook/detr-resnet-50"""
__a : Optional[int] = pipeline("""object-detection""" , model=__UpperCamelCase )
__a : Union[str, Any] = object_detector("""http://images.cocodataset.org/val2017/000000039769.jpg""" , threshold=__UpperCamelCase )
self.assertEqual(
nested_simplify(__UpperCamelCase , decimals=4 ) , [
{"""score""": 0.9_9_8_8, """label""": """cat""", """box""": {"""xmin""": 13, """ymin""": 52, """xmax""": 314, """ymax""": 470}},
{"""score""": 0.9_9_8_7, """label""": """cat""", """box""": {"""xmin""": 345, """ymin""": 23, """xmax""": 640, """ymax""": 368}},
] , )
@require_torch
@require_pytesseract
@slow
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : str = """Narsil/layoutlmv3-finetuned-funsd"""
__a : List[Any] = 0.9_9_9_3
__a : Dict = pipeline("""object-detection""" , model=__UpperCamelCase , threshold=__UpperCamelCase )
__a : List[str] = object_detector(
"""https://huggingface.co/spaces/impira/docquery/resolve/2359223c1837a7587402bda0f2643382a6eefeab/invoice.png""" )
self.assertEqual(
nested_simplify(__UpperCamelCase , decimals=4 ) , [
{"""score""": 0.9_9_9_3, """label""": """I-ANSWER""", """box""": {"""xmin""": 294, """ymin""": 254, """xmax""": 343, """ymax""": 264}},
{"""score""": 0.9_9_9_3, """label""": """I-ANSWER""", """box""": {"""xmin""": 294, """ymin""": 254, """xmax""": 343, """ymax""": 264}},
] , ) | 697 | 1 |
'''simple docstring'''
import unittest
import numpy as np
import torch
from .utils_summarization import build_mask, compute_token_type_ids, process_story, truncate_or_pad
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Optional[int] = 10
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Union[str, Any] = [1, 2, 3, 4]
__a : Union[str, Any] = [1, 2, 3, 4, 0, 0, 0, 0, 0, 0]
self.assertEqual(truncate_or_pad(__UpperCamelCase , self.block_size , 0 ) , __UpperCamelCase )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Optional[Any] = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
__a : Dict = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
self.assertEqual(truncate_or_pad(__UpperCamelCase , self.block_size , 0 ) , __UpperCamelCase )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Tuple = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]
__a : List[Any] = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
self.assertEqual(truncate_or_pad(__UpperCamelCase , self.block_size , 0 ) , __UpperCamelCase )
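    # The three cases above pin down truncate_or_pad: sequences shorter than
    # block_size are right-padded with the pad value (here 0), sequences of exactly
    # block_size pass through unchanged, and longer ones are truncated to block_size.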
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Union[str, Any] = """It was the year of Our Lord one thousand seven hundred and
seventy-five.\n\nSpiritual revelations were conceded to England at that
favoured period, as at this."""
__a , __a : str = process_story(__UpperCamelCase )
self.assertEqual(__UpperCamelCase , [] )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Tuple = """"""
__a , __a : List[Any] = process_story(__UpperCamelCase )
self.assertEqual(__UpperCamelCase , [] )
self.assertEqual(__UpperCamelCase , [] )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : List[Any] = (
"""It was the year of Our Lord one thousand seven hundred and """
"""seventy-five\n\nSpiritual revelations were conceded to England """
"""at that favoured period, as at this.\n@highlight\n\nIt was the best of times"""
)
__a , __a : Optional[int] = process_story(__UpperCamelCase )
__a : Any = [
"""It was the year of Our Lord one thousand seven hundred and seventy-five.""",
"""Spiritual revelations were conceded to England at that favoured period, as at this.""",
]
self.assertEqual(__UpperCamelCase , __UpperCamelCase )
__a : Dict = ["""It was the best of times."""]
self.assertEqual(__UpperCamelCase , __UpperCamelCase )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Dict = torch.tensor([1, 2, 3, 4] )
__a : Union[str, Any] = torch.tensor([1, 1, 1, 1] )
np.testing.assert_array_equal(build_mask(__UpperCamelCase , 0 ).numpy() , expected.numpy() )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : str = torch.tensor([1, 2, 3, 4, 23, 23, 23] )
__a : int = torch.tensor([1, 1, 1, 1, 0, 0, 0] )
np.testing.assert_array_equal(build_mask(__UpperCamelCase , 23 ).numpy() , expected.numpy() )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Union[str, Any] = torch.tensor([8, 2, 3, 4, 1, 1, 1] )
__a : List[Any] = torch.tensor([1, 1, 1, 1, 0, 0, 0] )
np.testing.assert_array_equal(build_mask(__UpperCamelCase , 1 ).numpy() , expected.numpy() )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : List[Any] = 101
__a : str = torch.tensor([[1, 2, 3, 4, 5, 6], [1, 2, 3, 101, 5, 6], [1, 101, 3, 4, 101, 6]] )
__a : Optional[Any] = torch.tensor([[1, 1, 1, 1, 1, 1], [1, 1, 1, 0, 0, 0], [1, 0, 0, 0, 1, 1]] )
__a : Union[str, Any] = compute_token_type_ids(__UpperCamelCase , __UpperCamelCase )
np.testing.assert_array_equal(__UpperCamelCase , __UpperCamelCase ) | 697 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
__SCREAMING_SNAKE_CASE : List[str] = {
'configuration_blenderbot_small': [
'BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP',
'BlenderbotSmallConfig',
'BlenderbotSmallOnnxConfig',
],
'tokenization_blenderbot_small': ['BlenderbotSmallTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE : Union[str, Any] = ['BlenderbotSmallTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE : List[str] = [
'BLENDERBOT_SMALL_PRETRAINED_MODEL_ARCHIVE_LIST',
'BlenderbotSmallForCausalLM',
'BlenderbotSmallForConditionalGeneration',
'BlenderbotSmallModel',
'BlenderbotSmallPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE : Optional[int] = [
'TFBlenderbotSmallForConditionalGeneration',
'TFBlenderbotSmallModel',
'TFBlenderbotSmallPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE : Optional[Any] = [
'FlaxBlenderbotSmallForConditionalGeneration',
'FlaxBlenderbotSmallModel',
'FlaxBlenderbotSmallPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_blenderbot_small import (
BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlenderbotSmallConfig,
BlenderbotSmallOnnxConfig,
)
from .tokenization_blenderbot_small import BlenderbotSmallTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_blenderbot_small_fast import BlenderbotSmallTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blenderbot_small import (
BLENDERBOT_SMALL_PRETRAINED_MODEL_ARCHIVE_LIST,
BlenderbotSmallForCausalLM,
BlenderbotSmallForConditionalGeneration,
BlenderbotSmallModel,
BlenderbotSmallPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blenderbot_small import (
TFBlenderbotSmallForConditionalGeneration,
TFBlenderbotSmallModel,
TFBlenderbotSmallPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_blenderbot_small import (
FlaxBlenderbotSmallForConditionalGeneration,
FlaxBlenderbotSmallModel,
FlaxBlenderbotSmallPreTrainedModel,
)
else:
import sys
__SCREAMING_SNAKE_CASE : List[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) | 697 | 1 |
'''simple docstring'''
from itertools import product
def _snake_case ( lowercase , lowercase ) -> list[int]:
__a : Optional[int] = sides_number
__a : Union[str, Any] = max_face_number * dice_number
__a : Optional[Any] = [0] * (max_total + 1)
__a : Dict = 1
__a : str = range(lowercase , max_face_number + 1 )
for dice_numbers in product(lowercase , repeat=lowercase ):
__a : int = sum(lowercase )
totals_frequencies[total] += 1
return totals_frequencies
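# Worked example (sketch): total_frequency_distribution(sides_number=2, dice_number=2)
# returns [0, 0, 1, 2, 1] -- totals 2, 3 and 4 occur 1, 2 and 1 time(s) respectively.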
def _snake_case ( ) -> float:
__a : Tuple = total_frequency_distribution(
sides_number=4 , dice_number=9 )
__a : Union[str, Any] = total_frequency_distribution(
sides_number=6 , dice_number=6 )
__a : str = 0
__a : Dict = 9
__a : str = 4 * 9
__a : Any = 6
for peter_total in range(lowercase , max_peter_total + 1 ):
peter_wins_count += peter_totals_frequencies[peter_total] * sum(
colin_totals_frequencies[min_colin_total:peter_total] )
__a : str = (4**9) * (6**6)
__a : List[Any] = peter_wins_count / total_games_number
__a : List[Any] = round(lowercase , ndigits=7 )
return rounded_peter_win_probability
if __name__ == "__main__":
print(f'''{solution() = }''') | 697 |
'''simple docstring'''
import numpy as np
import torch
from torch.utils.data import Dataset
from utils import logger
class SCREAMING_SNAKE_CASE__ ( __UpperCamelCase ):
def __init__( self , __UpperCamelCase , __UpperCamelCase ):
'''simple docstring'''
__a : Any = params
__a : Optional[Any] = np.array(__UpperCamelCase )
__a : Union[str, Any] = np.array([len(__UpperCamelCase ) for t in data] )
self.check()
self.remove_long_sequences()
self.remove_empty_sequences()
self.remove_unknown_sequences()
self.check()
self.print_statistics()
def __getitem__( self , __UpperCamelCase ):
'''simple docstring'''
return (self.token_ids[index], self.lengths[index])
def __len__( self ):
'''simple docstring'''
return len(self.lengths )
def __lowerCamelCase ( self ):
'''simple docstring'''
assert len(self.token_ids ) == len(self.lengths )
assert all(self.lengths[i] == len(self.token_ids[i] ) for i in range(len(self.lengths ) ) )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Tuple = self.params.max_model_input_size
__a : Union[str, Any] = self.lengths > max_len
logger.info(f"""Splitting {sum(__UpperCamelCase )} too long sequences.""" )
def divide_chunks(__UpperCamelCase , __UpperCamelCase ):
return [l[i : i + n] for i in range(0 , len(__UpperCamelCase ) , __UpperCamelCase )]
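        # e.g. divide_chunks([1, 2, 3, 4, 5], 2) -> [[1, 2], [3, 4], [5]] (sketch)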
__a : int = []
__a : Union[str, Any] = []
if self.params.mlm:
__a , __a : Any = self.params.special_tok_ids["""cls_token"""], self.params.special_tok_ids["""sep_token"""]
else:
__a , __a : str = self.params.special_tok_ids["""bos_token"""], self.params.special_tok_ids["""eos_token"""]
for seq_, len_ in zip(self.token_ids , self.lengths ):
assert (seq_[0] == cls_id) and (seq_[-1] == sep_id), seq_
if len_ <= max_len:
new_tok_ids.append(seq_ )
new_lengths.append(len_ )
else:
__a : Any = []
for sub_s in divide_chunks(seq_ , max_len - 2 ):
if sub_s[0] != cls_id:
__a : int = np.insert(__UpperCamelCase , 0 , __UpperCamelCase )
if sub_s[-1] != sep_id:
__a : str = np.insert(__UpperCamelCase , len(__UpperCamelCase ) , __UpperCamelCase )
assert len(__UpperCamelCase ) <= max_len
assert (sub_s[0] == cls_id) and (sub_s[-1] == sep_id), sub_s
sub_seqs.append(__UpperCamelCase )
new_tok_ids.extend(__UpperCamelCase )
new_lengths.extend([len(__UpperCamelCase ) for l in sub_seqs] )
__a : Dict = np.array(__UpperCamelCase )
__a : Tuple = np.array(__UpperCamelCase )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : List[str] = len(self )
__a : List[str] = self.lengths > 11
__a : int = self.token_ids[indices]
__a : Union[str, Any] = self.lengths[indices]
__a : Any = len(self )
logger.info(f"""Remove {init_size - new_size} too short (<=11 tokens) sequences.""" )
def __lowerCamelCase ( self ):
'''simple docstring'''
if "unk_token" not in self.params.special_tok_ids:
return
else:
__a : List[str] = self.params.special_tok_ids["""unk_token"""]
__a : str = len(self )
__a : str = np.array([np.count_nonzero(a == unk_token_id ) for a in self.token_ids] )
__a : Optional[Any] = (unk_occs / self.lengths) < 0.5
__a : List[str] = self.token_ids[indices]
__a : Optional[int] = self.lengths[indices]
__a : Any = len(self )
logger.info(f"""Remove {init_size - new_size} sequences with a high level of unknown tokens (50%).""" )
def __lowerCamelCase ( self ):
'''simple docstring'''
if not self.params.is_master:
return
logger.info(f"""{len(self )} sequences""" )
# data_len = sum(self.lengths)
# nb_unique_tokens = len(Counter(list(chain(*self.token_ids))))
# logger.info(f'{data_len} tokens ({nb_unique_tokens} unique)')
# unk_idx = self.params.special_tok_ids['unk_token']
# nb_unknown = sum([(t==unk_idx).sum() for t in self.token_ids])
# logger.info(f'{nb_unknown} unknown tokens (covering {100*nb_unknown/data_len:.2f}% of the data)')
def __lowerCamelCase ( self , __UpperCamelCase ):
'''simple docstring'''
__a : List[str] = [t[0] for t in batch]
__a : str = [t[1] for t in batch]
assert len(__UpperCamelCase ) == len(__UpperCamelCase )
# Max for paddings
__a : Optional[int] = max(__UpperCamelCase )
# Pad token ids
if self.params.mlm:
__a : int = self.params.special_tok_ids["""pad_token"""]
else:
__a : Tuple = self.params.special_tok_ids["""unk_token"""]
__a : Any = [list(t.astype(__UpperCamelCase ) ) + [pad_idx] * (max_seq_len_ - len(__UpperCamelCase )) for t in token_ids]
assert len(tk_ ) == len(__UpperCamelCase )
assert all(len(__UpperCamelCase ) == max_seq_len_ for t in tk_ )
__a : Any = torch.tensor(tk_ ) # (bs, max_seq_len_)
__a : Optional[Any] = torch.tensor(__UpperCamelCase ) # (bs)
return tk_t, lg_t | 697 | 1 |
'''simple docstring'''
from math import factorial, radians
def _snake_case ( lowercase , lowercase = 1_8 , lowercase = 1_0 ) -> float:
__a : Union[str, Any] = angle_in_degrees - ((angle_in_degrees // 3_6_0.0) * 3_6_0.0)
# Converting from degrees to radians
__a : Optional[Any] = radians(lowercase )
__a : Union[str, Any] = angle_in_radians
__a : Optional[int] = 3
__a : Optional[int] = -1
for _ in range(lowercase ):
result += (b * (angle_in_radians**a)) / factorial(lowercase )
__a : Tuple = -b # One positive term and the next will be negative and so on...
a += 2 # Increased by 2 for every term.
return round(lowercase , lowercase )
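# Illustrative values (sketch): a call with angle 90 returns 1.0 and with angle 30
# returns 0.5, matching math.sin to the default 10 decimal places.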
if __name__ == "__main__":
__import__('doctest').testmod() | 697 |
'''simple docstring'''
from pathlib import PurePosixPath
from typing import Optional
import fsspec
from fsspec import AbstractFileSystem
from huggingface_hub.hf_api import DatasetInfo
from ..utils.file_utils import get_authentication_headers_for_url
from ..utils.hub import hf_hub_url
class SCREAMING_SNAKE_CASE__ ( __UpperCamelCase ):
lowercase__ = ""
lowercase__ = "hf-legacy" # "hf://"" is reserved for hffs
def __init__( self , __UpperCamelCase = None , __UpperCamelCase = None , **__UpperCamelCase , ):
'''simple docstring'''
super().__init__(self , **__UpperCamelCase )
__a : int = repo_info
__a : int = token
__a : Any = None
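    # Usage sketch (assumes the "hf-legacy" protocol has been registered with
    # fsspec and that repo_info comes from e.g. HfApi().dataset_info(...)):
    #   fs = fsspec.filesystem("hf-legacy", repo_info=info, token=token)
    #   with fs.open("data/train.csv") as f:
    #       ...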
def __lowerCamelCase ( self ):
'''simple docstring'''
if self.dir_cache is None:
__a : Union[str, Any] = {}
for hf_file in self.repo_info.siblings:
# TODO(QL): add sizes
__a : List[str] = {
"""name""": hf_file.rfilename,
"""size""": None,
"""type""": """file""",
}
self.dir_cache.update(
{
str(__UpperCamelCase ): {"""name""": str(__UpperCamelCase ), """size""": None, """type""": """directory"""}
for d in list(PurePosixPath(hf_file.rfilename ).parents )[:-1]
} )
def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase = "rb" , **__UpperCamelCase , ):
'''simple docstring'''
if not isinstance(self.repo_info , __UpperCamelCase ):
raise NotImplementedError(f"""Open is only implemented for dataset repositories, but got {self.repo_info}""" )
__a : Any = hf_hub_url(self.repo_info.id , __UpperCamelCase , revision=self.repo_info.sha )
return fsspec.open(
__UpperCamelCase , mode=__UpperCamelCase , headers=get_authentication_headers_for_url(__UpperCamelCase , use_auth_token=self.token ) , client_kwargs={"""trust_env""": True} , ).open()
def __lowerCamelCase ( self , __UpperCamelCase , **__UpperCamelCase ):
'''simple docstring'''
self._get_dirs()
__a : str = self._strip_protocol(__UpperCamelCase )
if path in self.dir_cache:
return self.dir_cache[path]
else:
raise FileNotFoundError(__UpperCamelCase )
def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase=False , **__UpperCamelCase ):
'''simple docstring'''
self._get_dirs()
__a : int = PurePosixPath(path.strip("""/""" ) )
__a : List[str] = {}
for p, f in self.dir_cache.items():
__a : str = PurePosixPath(p.strip("""/""" ) )
__a : Optional[int] = p.parent
if root == path:
__a : List[str] = f
__a : str = list(paths.values() )
if detail:
return out
else:
return sorted(f["""name"""] for f in out ) | 697 | 1 |
'''simple docstring'''
from argparse import ArgumentParser, Namespace
from typing import Any, List, Optional
from ..pipelines import Pipeline, get_supported_tasks, pipeline
from ..utils import logging
from . import BaseTransformersCLICommand
try:
from fastapi import Body, FastAPI, HTTPException
from fastapi.routing import APIRoute
from pydantic import BaseModel
from starlette.responses import JSONResponse
from uvicorn import run
__SCREAMING_SNAKE_CASE : Optional[int] = True
except (ImportError, AttributeError):
__SCREAMING_SNAKE_CASE : Tuple = object
def _snake_case ( *lowercase , **lowercase ) -> Tuple:
pass
__SCREAMING_SNAKE_CASE : str = False
__SCREAMING_SNAKE_CASE : Optional[Any] = logging.get_logger('transformers-cli/serving')
def _snake_case ( lowercase ) -> Dict:
__a : Optional[Any] = pipeline(
task=args.task , model=args.model if args.model else None , config=args.config , tokenizer=args.tokenizer , device=args.device , )
return ServeCommand(lowercase , args.host , args.port , args.workers )
class SCREAMING_SNAKE_CASE__ ( __UpperCamelCase ):
lowercase__ = 42
class SCREAMING_SNAKE_CASE__ ( __UpperCamelCase ):
lowercase__ = 42
lowercase__ = 42
class SCREAMING_SNAKE_CASE__ ( __UpperCamelCase ):
lowercase__ = 42
class SCREAMING_SNAKE_CASE__ ( __UpperCamelCase ):
lowercase__ = 42
class SCREAMING_SNAKE_CASE__ ( __UpperCamelCase ):
@staticmethod
def __lowerCamelCase ( __UpperCamelCase ):
'''simple docstring'''
__a : Tuple = parser.add_parser(
"""serve""" , help="""CLI tool to run inference requests through REST and GraphQL endpoints.""" )
serve_parser.add_argument(
"""--task""" , type=__UpperCamelCase , choices=get_supported_tasks() , help="""The task to run the pipeline on""" , )
serve_parser.add_argument("""--host""" , type=__UpperCamelCase , default="""localhost""" , help="""Interface the server will listen on.""" )
serve_parser.add_argument("""--port""" , type=__UpperCamelCase , default=8888 , help="""Port the serving will listen to.""" )
serve_parser.add_argument("""--workers""" , type=__UpperCamelCase , default=1 , help="""Number of http workers""" )
serve_parser.add_argument("""--model""" , type=__UpperCamelCase , help="""Model's name or path to stored model.""" )
serve_parser.add_argument("""--config""" , type=__UpperCamelCase , help="""Model's config name or path to stored model.""" )
serve_parser.add_argument("""--tokenizer""" , type=__UpperCamelCase , help="""Tokenizer name to use.""" )
serve_parser.add_argument(
"""--device""" , type=__UpperCamelCase , default=-1 , help="""Indicate the device to run onto, -1 indicates CPU, >= 0 indicates GPU (default: -1)""" , )
serve_parser.set_defaults(func=__UpperCamelCase )
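    # Example CLI invocation (sketch; the task must be one of get_supported_tasks()):
    #   transformers-cli serve --task text-classification --host localhost --port 8888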
def __init__( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ):
'''simple docstring'''
__a : Any = pipeline
__a : List[str] = host
__a : Optional[Any] = port
__a : Dict = workers
if not _serve_dependencies_installed:
raise RuntimeError(
"""Using serve command requires FastAPI and uvicorn. """
"""Please install transformers with [serving]: pip install \"transformers[serving]\"."""
"""Or install FastAPI and uvicorn separately.""" )
else:
logger.info(f"""Serving model over {host}:{port}""" )
__a : Tuple = FastAPI(
routes=[
APIRoute(
"""/""" , self.model_info , response_model=__UpperCamelCase , response_class=__UpperCamelCase , methods=["""GET"""] , ),
APIRoute(
"""/tokenize""" , self.tokenize , response_model=__UpperCamelCase , response_class=__UpperCamelCase , methods=["""POST"""] , ),
APIRoute(
"""/detokenize""" , self.detokenize , response_model=__UpperCamelCase , response_class=__UpperCamelCase , methods=["""POST"""] , ),
APIRoute(
"""/forward""" , self.forward , response_model=__UpperCamelCase , response_class=__UpperCamelCase , methods=["""POST"""] , ),
] , timeout=600 , )
def __lowerCamelCase ( self ):
'''simple docstring'''
run(self._app , host=self.host , port=self.port , workers=self.workers )
def __lowerCamelCase ( self ):
'''simple docstring'''
return ServeModelInfoResult(infos=vars(self._pipeline.model.config ) )
def __lowerCamelCase ( self , __UpperCamelCase = Body(__UpperCamelCase , embed=__UpperCamelCase ) , __UpperCamelCase = Body(__UpperCamelCase , embed=__UpperCamelCase ) ):
'''simple docstring'''
try:
__a : Optional[Any] = self._pipeline.tokenizer.tokenize(__UpperCamelCase )
if return_ids:
__a : List[str] = self._pipeline.tokenizer.convert_tokens_to_ids(__UpperCamelCase )
return ServeTokenizeResult(tokens=__UpperCamelCase , tokens_ids=__UpperCamelCase )
else:
return ServeTokenizeResult(tokens=__UpperCamelCase )
except Exception as e:
raise HTTPException(status_code=500 , detail={"""model""": """""", """error""": str(__UpperCamelCase )} )
def __lowerCamelCase ( self , __UpperCamelCase = Body(__UpperCamelCase , embed=__UpperCamelCase ) , __UpperCamelCase = Body(__UpperCamelCase , embed=__UpperCamelCase ) , __UpperCamelCase = Body(__UpperCamelCase , embed=__UpperCamelCase ) , ):
'''simple docstring'''
try:
__a : str = self._pipeline.tokenizer.decode(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
return ServeDeTokenizeResult(model="""""" , text=__UpperCamelCase )
except Exception as e:
raise HTTPException(status_code=500 , detail={"""model""": """""", """error""": str(__UpperCamelCase )} )
async def __lowerCamelCase ( self , __UpperCamelCase=Body(__UpperCamelCase , embed=__UpperCamelCase ) ):
'''simple docstring'''
if len(__UpperCamelCase ) == 0:
return ServeForwardResult(output=[] , attention=[] )
try:
# Forward through the model
__a : Dict = self._pipeline(__UpperCamelCase )
return ServeForwardResult(output=__UpperCamelCase )
except Exception as e:
raise HTTPException(500 , {"""error""": str(__UpperCamelCase )} ) | 697 |
'''simple docstring'''
import inspect
import unittest
from transformers import DPTConfig
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MODEL_MAPPING, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTModel
from transformers.models.dpt.modeling_dpt import DPT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class SCREAMING_SNAKE_CASE__ :
def __init__( self , __UpperCamelCase , __UpperCamelCase=2 , __UpperCamelCase=32 , __UpperCamelCase=16 , __UpperCamelCase=3 , __UpperCamelCase=True , __UpperCamelCase=True , __UpperCamelCase=32 , __UpperCamelCase=4 , __UpperCamelCase=[0, 1, 2, 3] , __UpperCamelCase=4 , __UpperCamelCase=37 , __UpperCamelCase="gelu" , __UpperCamelCase=0.1 , __UpperCamelCase=0.1 , __UpperCamelCase=0.0_2 , __UpperCamelCase=3 , __UpperCamelCase=[1, 384, 24, 24] , __UpperCamelCase=True , __UpperCamelCase=None , ):
'''simple docstring'''
__a : List[str] = parent
__a : Tuple = batch_size
__a : str = image_size
__a : int = patch_size
__a : Dict = num_channels
__a : int = is_training
__a : Dict = use_labels
__a : Union[str, Any] = hidden_size
__a : Dict = num_hidden_layers
__a : Dict = backbone_out_indices
__a : Optional[int] = num_attention_heads
__a : List[str] = intermediate_size
__a : Optional[Any] = hidden_act
__a : Dict = hidden_dropout_prob
__a : Tuple = attention_probs_dropout_prob
__a : Any = initializer_range
__a : Any = num_labels
__a : Optional[Any] = backbone_featmap_shape
__a : List[Any] = scope
__a : List[str] = is_hybrid
# sequence length of DPT = num_patches + 1 (we add 1 for the [CLS] token)
__a : Union[str, Any] = (image_size // patch_size) ** 2
__a : List[str] = num_patches + 1
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Dict = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__a : Union[str, Any] = None
if self.use_labels:
__a : str = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
__a : Tuple = self.get_config()
return config, pixel_values, labels
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : List[str] = {
"""global_padding""": """same""",
"""layer_type""": """bottleneck""",
"""depths""": [3, 4, 9],
"""out_features""": ["""stage1""", """stage2""", """stage3"""],
"""embedding_dynamic_padding""": True,
"""hidden_sizes""": [96, 192, 384, 768],
"""num_groups""": 2,
}
return DPTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , backbone_out_indices=self.backbone_out_indices , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__UpperCamelCase , initializer_range=self.initializer_range , is_hybrid=self.is_hybrid , backbone_config=__UpperCamelCase , backbone_featmap_shape=self.backbone_featmap_shape , )
def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ):
'''simple docstring'''
__a : Optional[Any] = DPTModel(config=__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
__a : List[str] = model(__UpperCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ):
'''simple docstring'''
__a : List[str] = self.num_labels
__a : Union[str, Any] = DPTForDepthEstimation(__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
__a : Tuple = model(__UpperCamelCase )
self.parent.assertEqual(result.predicted_depth.shape , (self.batch_size, self.image_size, self.image_size) )
def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ):
'''simple docstring'''
__a : Dict = self.num_labels
__a : Tuple = DPTForSemanticSegmentation(__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
__a : str = model(__UpperCamelCase , labels=__UpperCamelCase )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size, self.image_size) )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Optional[int] = self.prepare_config_and_inputs()
__a , __a , __a : Tuple = config_and_inputs
__a : List[str] = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class SCREAMING_SNAKE_CASE__ ( __UpperCamelCase , __UpperCamelCase , unittest.TestCase ):
lowercase__ = (DPTModel, DPTForDepthEstimation, DPTForSemanticSegmentation) if is_torch_available() else ()
lowercase__ = (
{
"depth-estimation": DPTForDepthEstimation,
"feature-extraction": DPTModel,
"image-segmentation": DPTForSemanticSegmentation,
}
if is_torch_available()
else {}
)
lowercase__ = False
lowercase__ = False
lowercase__ = False
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Optional[int] = DPTModelTester(self )
__a : List[Any] = ConfigTester(self , config_class=__UpperCamelCase , has_text_modality=__UpperCamelCase , hidden_size=37 )
def __lowerCamelCase ( self ):
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason="""DPT does not use inputs_embeds""" )
def __lowerCamelCase ( self ):
'''simple docstring'''
pass
def __lowerCamelCase ( self ):
'''simple docstring'''
__a , __a : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__a : str = model_class(__UpperCamelCase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
__a : Any = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__UpperCamelCase , nn.Linear ) )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a , __a : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__a : Any = model_class(__UpperCamelCase )
__a : List[str] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__a : int = [*signature.parameters.keys()]
__a : List[str] = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , __UpperCamelCase )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__UpperCamelCase )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_depth_estimation(*__UpperCamelCase )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*__UpperCamelCase )
def __lowerCamelCase ( self ):
'''simple docstring'''
for model_class in self.all_model_classes:
if model_class.__name__ == "DPTForDepthEstimation":
continue
__a , __a : Dict = self.model_tester.prepare_config_and_inputs_for_common()
__a : List[Any] = True
if model_class in get_values(__UpperCamelCase ):
continue
__a : str = model_class(__UpperCamelCase )
model.to(__UpperCamelCase )
model.train()
__a : Union[str, Any] = self._prepare_for_class(__UpperCamelCase , __UpperCamelCase , return_labels=__UpperCamelCase )
__a : List[Any] = model(**__UpperCamelCase ).loss
loss.backward()
def __lowerCamelCase ( self ):
'''simple docstring'''
for model_class in self.all_model_classes:
if model_class.__name__ == "DPTForDepthEstimation":
continue
__a , __a : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
__a : Any = False
__a : Dict = True
if model_class in get_values(__UpperCamelCase ) or not model_class.supports_gradient_checkpointing:
continue
__a : Any = model_class(__UpperCamelCase )
model.to(__UpperCamelCase )
model.gradient_checkpointing_enable()
model.train()
__a : List[str] = self._prepare_for_class(__UpperCamelCase , __UpperCamelCase , return_labels=__UpperCamelCase )
__a : Dict = model(**__UpperCamelCase ).loss
loss.backward()
def __lowerCamelCase ( self ):
'''simple docstring'''
__a , __a : Any = self.model_tester.prepare_config_and_inputs_for_common()
__a : Any = _config_zero_init(__UpperCamelCase )
for model_class in self.all_model_classes:
__a : Any = model_class(config=__UpperCamelCase )
# Skip the check for the backbone
__a : Optional[Any] = []
for name, module in model.named_modules():
if module.__class__.__name__ == "DPTViTHybridEmbeddings":
__a : Optional[int] = [f"""{name}.{key}""" for key in module.state_dict().keys()]
break
for name, param in model.named_parameters():
if param.requires_grad:
if name in backbone_params:
continue
self.assertIn(
((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=f"""Parameter {name} of model {model_class} seems not properly initialized""" , )
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def __lowerCamelCase ( self ):
'''simple docstring'''
pass
@slow
def __lowerCamelCase ( self ):
'''simple docstring'''
for model_name in DPT_PRETRAINED_MODEL_ARCHIVE_LIST[1:]:
__a : int = DPTModel.from_pretrained(__UpperCamelCase )
self.assertIsNotNone(__UpperCamelCase )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a , __a : int = self.model_tester.prepare_config_and_inputs_for_common()
__a : Optional[int] = """add"""
with self.assertRaises(__UpperCamelCase ):
__a : int = DPTForDepthEstimation(__UpperCamelCase )
def _snake_case ( ) -> Any:
__a : Dict = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
@slow
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : int = DPTImageProcessor.from_pretrained("""Intel/dpt-hybrid-midas""" )
__a : int = DPTForDepthEstimation.from_pretrained("""Intel/dpt-hybrid-midas""" ).to(__UpperCamelCase )
__a : Union[str, Any] = prepare_img()
__a : Any = image_processor(images=__UpperCamelCase , return_tensors="""pt""" ).to(__UpperCamelCase )
# forward pass
with torch.no_grad():
__a : Optional[Any] = model(**__UpperCamelCase )
__a : int = outputs.predicted_depth
# verify the predicted depth
__a : Any = torch.Size((1, 384, 384) )
self.assertEqual(predicted_depth.shape , __UpperCamelCase )
__a : int = torch.tensor(
[[[5.6_4_3_7, 5.6_1_4_6, 5.6_5_1_1], [5.4_3_7_1, 5.5_6_4_9, 5.5_9_5_8], [5.5_2_1_5, 5.5_1_8_4, 5.5_2_9_3]]] ).to(__UpperCamelCase )
self.assertTrue(torch.allclose(outputs.predicted_depth[:3, :3, :3] / 100 , __UpperCamelCase , atol=1E-4 ) ) | 697 | 1 |
'''simple docstring'''
import enum
import shutil
import sys
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : Any = shutil.get_terminal_size()
__SCREAMING_SNAKE_CASE : List[Any] = {'UP': 'A', 'DOWN': 'B', 'RIGHT': 'C', 'LEFT': 'D'}
class SCREAMING_SNAKE_CASE__ ( enum.Enum ):
lowercase__ = 0
lowercase__ = 1
def _snake_case ( lowercase , lowercase="" ) -> Optional[int]:
sys.stdout.write(str(lowercase ) + end )
sys.stdout.flush()
def _snake_case ( lowercase , lowercase , lowercase="" ) -> Any:
forceWrite(F"""\u001b[{color}m{content}\u001b[0m""" , lowercase )
def _snake_case ( ) -> Union[str, Any]:
forceWrite("""\r""" )
def _snake_case ( lowercase , lowercase ) -> List[Any]:
forceWrite(F"""\033[{num_lines}{CURSOR_TO_CHAR[direction.upper()]}""" )
def _snake_case ( ) -> Optional[Any]:
forceWrite(""" """ * TERMINAL_WIDTH )
reset_cursor()
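def _demo_escape_codes() -> None:
    # Added sketch (illustrative only, not part of the original module):
    # cursor moves compose as "\033[<count><direction-char>", matching
    # CURSOR_TO_CHAR above, so moving up two lines emits "\033[2A".
    assert f"\033[2{CURSOR_TO_CHAR['UP']}" == "\033[2A"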
def _snake_case ( ) -> Any:
reset_cursor()
forceWrite("""-""" * TERMINAL_WIDTH ) | 697 |
'''simple docstring'''
import unittest
from .lib import (
Matrix,
Vector,
axpy,
square_zero_matrix,
unit_basis_vector,
zero_vector,
)
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Dict = Vector([1, 2, 3] )
self.assertEqual(x.component(0 ) , 1 )
self.assertEqual(x.component(2 ) , 3 )
__a : Optional[int] = Vector()
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Any = Vector([0, 0, 0, 0, 0, 1] )
self.assertEqual(str(__UpperCamelCase ) , """(0,0,0,0,0,1)""" )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Tuple = Vector([1, 2, 3, 4] )
self.assertEqual(len(__UpperCamelCase ) , 4 )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Optional[Any] = Vector([1, 2] )
__a : List[str] = Vector([1, 2, 3, 4, 5] )
__a : Optional[int] = Vector([0, 0, 0, 0, 0, 0, 0, 0, 0, 0] )
__a : Dict = Vector([1, -1, 1, -1, 2, -3, 4, -5] )
self.assertAlmostEqual(x.euclidean_length() , 2.2_3_6 , 3 )
self.assertAlmostEqual(y.euclidean_length() , 7.4_1_6 , 3 )
self.assertEqual(z.euclidean_length() , 0 )
self.assertAlmostEqual(w.euclidean_length() , 7.6_1_6 , 3 )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Dict = Vector([1, 2, 3] )
__a : Union[str, Any] = Vector([1, 1, 1] )
self.assertEqual((x + y).component(0 ) , 2 )
self.assertEqual((x + y).component(1 ) , 3 )
self.assertEqual((x + y).component(2 ) , 4 )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : List[str] = Vector([1, 2, 3] )
__a : Any = Vector([1, 1, 1] )
self.assertEqual((x - y).component(0 ) , 0 )
self.assertEqual((x - y).component(1 ) , 1 )
self.assertEqual((x - y).component(2 ) , 2 )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Tuple = Vector([1, 2, 3] )
__a : Optional[Any] = Vector([2, -1, 4] ) # for test of dot product
__a : Union[str, Any] = Vector([1, -2, -1] )
self.assertEqual(str(x * 3.0 ) , """(3.0,6.0,9.0)""" )
self.assertEqual((a * b) , 0 )
def __lowerCamelCase ( self ):
'''simple docstring'''
self.assertEqual(str(zero_vector(10 ) ).count("""0""" ) , 10 )
def __lowerCamelCase ( self ):
'''simple docstring'''
self.assertEqual(str(unit_basis_vector(3 , 1 ) ) , """(0,1,0)""" )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Dict = Vector([1, 2, 3] )
__a : Optional[int] = Vector([1, 0, 1] )
self.assertEqual(str(axpy(2 , __UpperCamelCase , __UpperCamelCase ) ) , """(3,4,7)""" )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : int = Vector([1, 0, 0, 0, 0, 0] )
__a : Any = x.copy()
self.assertEqual(str(__UpperCamelCase ) , str(__UpperCamelCase ) )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Union[str, Any] = Vector([1, 0, 0] )
x.change_component(0 , 0 )
x.change_component(1 , 1 )
self.assertEqual(str(__UpperCamelCase ) , """(0,1,0)""" )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Dict = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
self.assertEqual("""|1,2,3|\n|2,4,5|\n|6,7,8|\n""" , str(__UpperCamelCase ) )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : List[Any] = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
__a : List[Any] = [[-3, -14, -10], [-5, -10, -5], [-2, -1, 0]]
for x in range(a.height() ):
for y in range(a.width() ):
self.assertEqual(minors[x][y] , a.minor(__UpperCamelCase , __UpperCamelCase ) )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : List[Any] = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
__a : Any = [[-3, 14, -10], [5, -10, 5], [-2, 1, 0]]
for x in range(a.height() ):
for y in range(a.width() ):
self.assertEqual(cofactors[x][y] , a.cofactor(__UpperCamelCase , __UpperCamelCase ) )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : List[Any] = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
self.assertEqual(-5 , a.determinant() )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Any = Matrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]] , 3 , 3 )
__a : List[Any] = Vector([1, 2, 3] )
self.assertEqual("""(14,32,50)""" , str(a * x ) )
self.assertEqual("""|2,4,6|\n|8,10,12|\n|14,16,18|\n""" , str(a * 2 ) )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : List[str] = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
a.change_component(0 , 2 , 5 )
self.assertEqual("""|1,2,5|\n|2,4,5|\n|6,7,8|\n""" , str(__UpperCamelCase ) )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Tuple = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
        self.assertAlmostEqual(7 , a.component(2 , 1 ) , delta=0.0_1 )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Dict = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
__a : Union[str, Any] = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]] , 3 , 3 )
self.assertEqual("""|2,4,10|\n|4,8,10|\n|12,14,18|\n""" , str(a + b ) )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : List[Any] = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
__a : List[str] = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]] , 3 , 3 )
self.assertEqual("""|0,0,-4|\n|0,0,0|\n|0,0,-2|\n""" , str(a - b ) )
def __lowerCamelCase ( self ):
'''simple docstring'''
self.assertEqual(
"""|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n""" , str(square_zero_matrix(5 ) ) , )
if __name__ == "__main__":
unittest.main() | 697 | 1 |
'''simple docstring'''
import re
def _snake_case ( lowercase ) -> str:
if len(re.findall("""[ATCG]""" , lowercase ) ) != len(lowercase ):
raise ValueError("""Invalid Strand""" )
return dna.translate(dna.maketrans("""ATCG""" , """TAGC""" ) )
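def _demo_complement() -> None:
    # Added sketch (illustrative only): str.maketrans builds the base-pairing
    # table once and str.translate applies it in a single pass, so the
    # complement of "ATCG" is "TAGC".
    assert "ATCG".translate(str.maketrans("ATCG", "TAGC")) == "TAGC"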
if __name__ == "__main__":
import doctest
doctest.testmod() | 697 |
'''simple docstring'''
import os
from itertools import chain
from random import randrange, shuffle
import pytest
from .sola import PokerHand
__SCREAMING_SNAKE_CASE : List[str] = (
'4S 3H 2C 7S 5H',
'9D 8H 2C 6S 7H',
'2D 6D 9D TH 7D',
'TC 8C 2S JH 6C',
'JH 8S TH AH QH',
'TS KS 5S 9S AC',
'KD 6S 9D TH AD',
'KS 8D 4D 9S 4S', # pair
'8C 4S KH JS 4D', # pair
'QH 8H KD JH 8S', # pair
'KC 4H KS 2H 8D', # pair
'KD 4S KC 3H 8S', # pair
'AH 8S AS KC JH', # pair
'3H 4C 4H 3S 2H', # 2 pairs
'5S 5D 2C KH KH', # 2 pairs
'3C KH 5D 5S KH', # 2 pairs
'AS 3C KH AD KH', # 2 pairs
'7C 7S 3S 7H 5S', # 3 of a kind
'7C 7S KH 2H 7H', # 3 of a kind
'AC KH QH AH AS', # 3 of a kind
'2H 4D 3C AS 5S', # straight (low ace)
'3C 5C 4C 2C 6H', # straight
'6S 8S 7S 5H 9H', # straight
'JS QS 9H TS KH', # straight
'QC KH TS JS AH', # straight (high ace)
'8C 9C 5C 3C TC', # flush
'3S 8S 9S 5S KS', # flush
'4C 5C 9C 8C KC', # flush
'JH 8H AH KH QH', # flush
'3D 2H 3H 2C 2D', # full house
'2H 2C 3S 3H 3D', # full house
'KH KC 3S 3H 3D', # full house
'JC 6H JS JD JH', # 4 of a kind
'JC 7H JS JD JH', # 4 of a kind
'JC KH JS JD JH', # 4 of a kind
'2S AS 4S 5S 3S', # straight flush (low ace)
'2D 6D 3D 4D 5D', # straight flush
'5C 6C 3C 7C 4C', # straight flush
'JH 9H TH KH QH', # straight flush
'JH AH TH KH QH', # royal flush (high ace straight flush)
)
__SCREAMING_SNAKE_CASE : Optional[Any] = (
('2H 3H 4H 5H 6H', 'KS AS TS QS JS', 'Loss'),
('2H 3H 4H 5H 6H', 'AS AD AC AH JD', 'Win'),
('AS AH 2H AD AC', 'JS JD JC JH 3D', 'Win'),
('2S AH 2H AS AC', 'JS JD JC JH AD', 'Loss'),
('2S AH 2H AS AC', '2H 3H 5H 6H 7H', 'Win'),
('AS 3S 4S 8S 2S', '2H 3H 5H 6H 7H', 'Win'),
('2H 3H 5H 6H 7H', '2S 3H 4H 5S 6C', 'Win'),
('2S 3H 4H 5S 6C', '3D 4C 5H 6H 2S', 'Tie'),
('2S 3H 4H 5S 6C', 'AH AC 5H 6H AS', 'Win'),
('2S 2H 4H 5S 4C', 'AH AC 5H 6H AS', 'Loss'),
('2S 2H 4H 5S 4C', 'AH AC 5H 6H 7S', 'Win'),
('6S AD 7H 4S AS', 'AH AC 5H 6H 7S', 'Loss'),
('2S AH 4H 5S KC', 'AH AC 5H 6H 7S', 'Loss'),
('2S 3H 6H 7S 9C', '7H 3C TH 6H 9S', 'Loss'),
('4S 5H 6H TS AC', '3S 5H 6H TS AC', 'Win'),
('2S AH 4H 5S 6C', 'AD 4C 5H 6H 2C', 'Tie'),
('AS AH 3H AD AC', 'AS AH 2H AD AC', 'Win'),
('AH AC 5H 5C QS', 'AH AC 5H 5C KS', 'Loss'),
('AH AC 5H 5C QS', 'KH KC 5H 5C QS', 'Win'),
('7C 7S KH 2H 7H', '3C 3S AH 2H 3H', 'Win'),
('3C 3S AH 2H 3H', '7C 7S KH 2H 7H', 'Loss'),
('6H 5H 4H 3H 2H', '5H 4H 3H 2H AH', 'Win'),
('5H 4H 3H 2H AH', '5H 4H 3H 2H AH', 'Tie'),
('5H 4H 3H 2H AH', '6H 5H 4H 3H 2H', 'Loss'),
('AH AD KS KC AC', 'AH KD KH AC KC', 'Win'),
('2H 4D 3C AS 5S', '2H 4D 3C 6S 5S', 'Loss'),
('2H 3S 3C 3H 2S', '3S 3C 2S 2H 2D', 'Win'),
('4D 6D 5D 2D JH', '3S 8S 3H TC KH', 'Loss'),
('4S 6C 8S 3S 7S', 'AD KS 2D 7D 7C', 'Loss'),
('6S 4C 7H 8C 3H', '5H JC AH 9D 9C', 'Loss'),
('9D 9H JH TC QH', '3C 2S JS 5C 7H', 'Win'),
('2H TC 8S AD 9S', '4H TS 7H 2C 5C', 'Win'),
('9D 3S 2C 7S 7C', 'JC TD 3C TC 9H', 'Loss'),
)
__SCREAMING_SNAKE_CASE : Tuple = (
('2H 3H 4H 5H 6H', True),
('AS AH 2H AD AC', False),
('2H 3H 5H 6H 7H', True),
('KS AS TS QS JS', True),
('8H 9H QS JS TH', False),
('AS 3S 4S 8S 2S', True),
)
__SCREAMING_SNAKE_CASE : Dict = (
('2H 3H 4H 5H 6H', True),
('AS AH 2H AD AC', False),
('2H 3H 5H 6H 7H', False),
('KS AS TS QS JS', True),
('8H 9H QS JS TH', True),
)
__SCREAMING_SNAKE_CASE : Optional[int] = (
('2H 4D 3C AS 5S', True, [5, 4, 3, 2, 14]),
('2H 5D 3C AS 5S', False, [14, 5, 5, 3, 2]),
('JH QD KC AS TS', False, [14, 13, 12, 11, 10]),
('9D 3S 2C 7S 7C', False, [9, 7, 7, 3, 2]),
)
__SCREAMING_SNAKE_CASE : int = (
('JH AH TH KH QH', 0),
('JH 9H TH KH QH', 0),
('JC KH JS JD JH', 7),
('KH KC 3S 3H 3D', 6),
('8C 9C 5C 3C TC', 0),
('JS QS 9H TS KH', 0),
('7C 7S KH 2H 7H', 3),
('3C KH 5D 5S KH', 2),
('QH 8H KD JH 8S', 1),
('2D 6D 9D TH 7D', 0),
)
__SCREAMING_SNAKE_CASE : int = (
('JH AH TH KH QH', 23),
('JH 9H TH KH QH', 22),
('JC KH JS JD JH', 21),
('KH KC 3S 3H 3D', 20),
('8C 9C 5C 3C TC', 19),
('JS QS 9H TS KH', 18),
('7C 7S KH 2H 7H', 17),
('3C KH 5D 5S KH', 16),
('QH 8H KD JH 8S', 15),
('2D 6D 9D TH 7D', 14),
)
def _snake_case ( ) -> List[str]:
    __a , __a : List[Any] = randrange(len(SORTED_HANDS ) ), randrange(len(SORTED_HANDS ) )
__a : int = ["""Loss""", """Tie""", """Win"""][(play >= oppo) + (play > oppo)]
__a , __a : int = SORTED_HANDS[play], SORTED_HANDS[oppo]
return hand, other, expected
def _snake_case ( lowercase = 1_0_0 ) -> Any:
return (generate_random_hand() for _ in range(lowercase ))
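def _demo_compare_with() -> None:
    # Added sketch (illustrative only): PokerHand.compare_with reports
    # "Win", "Loss" or "Tie" from the caller's point of view, so a royal
    # flush beats a lesser straight flush.
    assert PokerHand("JH AH TH KH QH").compare_with(PokerHand("JH 9H TH KH QH")) == "Win"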
@pytest.mark.parametrize("""hand, expected""" , lowercase )
def _snake_case ( lowercase , lowercase ) -> int:
assert PokerHand(lowercase )._is_flush() == expected
@pytest.mark.parametrize("""hand, expected""" , lowercase )
def _snake_case ( lowercase , lowercase ) -> Any:
assert PokerHand(lowercase )._is_straight() == expected
@pytest.mark.parametrize("""hand, expected, card_values""" , lowercase )
def _snake_case ( lowercase , lowercase , lowercase ) -> List[str]:
__a : Union[str, Any] = PokerHand(lowercase )
assert player._is_five_high_straight() == expected
assert player._card_values == card_values
@pytest.mark.parametrize("""hand, expected""" , lowercase )
def _snake_case ( lowercase , lowercase ) -> Optional[int]:
assert PokerHand(lowercase )._is_same_kind() == expected
@pytest.mark.parametrize("""hand, expected""" , lowercase )
def _snake_case ( lowercase , lowercase ) -> Union[str, Any]:
assert PokerHand(lowercase )._hand_type == expected
@pytest.mark.parametrize("""hand, other, expected""" , lowercase )
def _snake_case ( lowercase , lowercase , lowercase ) -> Optional[int]:
assert PokerHand(lowercase ).compare_with(PokerHand(lowercase ) ) == expected
@pytest.mark.parametrize("""hand, other, expected""" , generate_random_hands() )
def _snake_case ( lowercase , lowercase , lowercase ) -> int:
assert PokerHand(lowercase ).compare_with(PokerHand(lowercase ) ) == expected
def _snake_case ( ) -> Union[str, Any]:
__a : Tuple = [PokerHand(lowercase ) for hand in SORTED_HANDS]
__a : Optional[int] = poker_hands.copy()
shuffle(lowercase )
__a : List[str] = chain(sorted(lowercase ) )
for index, hand in enumerate(lowercase ):
assert hand == poker_hands[index]
def _snake_case ( ) -> List[str]:
# Test that five high straights are compared correctly.
__a : Optional[int] = [PokerHand("""2D AC 3H 4H 5S""" ), PokerHand("""2S 3H 4H 5S 6C""" )]
pokerhands.sort(reverse=lowercase )
assert pokerhands[0].__str__() == "2S 3H 4H 5S 6C"
def _snake_case ( ) -> List[str]:
# Multiple calls to five_high_straight function should still return True
# and shouldn't mutate the list in every call other than the first.
__a : Dict = PokerHand("""2C 4S AS 3D 5C""" )
__a : Dict = True
__a : Optional[int] = [5, 4, 3, 2, 1_4]
for _ in range(1_0 ):
assert pokerhand._is_five_high_straight() == expected
assert pokerhand._card_values == expected_card_values
def _snake_case ( ) -> Dict:
# Problem number 54 from Project Euler
# Testing from poker_hands.txt file
__a : Tuple = 0
    __a : int = os.path.abspath(os.path.dirname(__file__ ) )
__a : Union[str, Any] = os.path.join(lowercase , """poker_hands.txt""" )
with open(lowercase ) as file_hand:
for line in file_hand:
__a : Union[str, Any] = line[:1_4].strip()
__a : Optional[Any] = line[1_5:].strip()
__a , __a : List[str] = PokerHand(lowercase ), PokerHand(lowercase )
__a : str = player.compare_with(lowercase )
if output == "Win":
answer += 1
assert answer == 3_7_6 | 697 | 1 |
'''simple docstring'''
import unittest
from pathlib import Path
from tempfile import TemporaryDirectory
from transformers import AutoConfig, TFAutoModel, is_tensorflow_text_available, is_tf_available
from transformers.models.bert.tokenization_bert import BertTokenizer
from transformers.testing_utils import require_tensorflow_text, require_tf, slow
if is_tf_available():
import tensorflow as tf
if is_tensorflow_text_available():
from transformers.models.bert import TFBertTokenizer
__SCREAMING_SNAKE_CASE : Union[str, Any] = ['bert-base-uncased', 'bert-base-cased']
__SCREAMING_SNAKE_CASE : Optional[Any] = 'hf-internal-testing/tiny-bert-tf-only'
if is_tf_available():
class SCREAMING_SNAKE_CASE__ ( tf.keras.Model ):
def __init__( self , __UpperCamelCase ):
'''simple docstring'''
super().__init__()
__a : Dict = tokenizer
__a : Optional[int] = AutoConfig.from_pretrained(__UpperCamelCase )
__a : Union[str, Any] = TFAutoModel.from_config(__UpperCamelCase )
def __lowerCamelCase ( self , __UpperCamelCase ):
'''simple docstring'''
__a : Any = self.tokenizer(__UpperCamelCase )
__a : Tuple = self.bert(**__UpperCamelCase )
return out["pooler_output"]
@require_tf
@require_tensorflow_text
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
def __lowerCamelCase ( self ):
'''simple docstring'''
super().setUp()
__a : Union[str, Any] = [
BertTokenizer.from_pretrained(__UpperCamelCase ) for checkpoint in (TOKENIZER_CHECKPOINTS * 2)
] # repeat for when fast_bert_tokenizer=false
__a : Union[str, Any] = [TFBertTokenizer.from_pretrained(__UpperCamelCase ) for checkpoint in TOKENIZER_CHECKPOINTS] + [
TFBertTokenizer.from_pretrained(__UpperCamelCase , use_fast_bert_tokenizer=__UpperCamelCase )
for checkpoint in TOKENIZER_CHECKPOINTS
]
assert len(self.tokenizers ) == len(self.tf_tokenizers )
__a : Optional[int] = [
"""This is a straightforward English test sentence.""",
"""This one has some weird characters\rto\nsee\r\nif those\u00E9break things.""",
"""Now we're going to add some Chinese: 一 二 三 一二三""",
"""And some much more rare Chinese: 齉 堃 齉堃""",
"""Je vais aussi écrire en français pour tester les accents""",
"""Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ""",
]
__a : Dict = list(zip(self.test_sentences , self.test_sentences[::-1] ) )
def __lowerCamelCase ( self ):
'''simple docstring'''
for tokenizer, tf_tokenizer in zip(self.tokenizers , self.tf_tokenizers ):
for test_inputs in (self.test_sentences, self.paired_sentences):
__a : List[str] = tokenizer(__UpperCamelCase , return_tensors="""tf""" , padding="""longest""" )
__a : Any = tf_tokenizer(__UpperCamelCase )
for key in python_outputs.keys():
self.assertTrue(tf.reduce_all(python_outputs[key].shape == tf_outputs[key].shape ) )
self.assertTrue(tf.reduce_all(tf.cast(python_outputs[key] , tf.intaa ) == tf_outputs[key] ) )
@slow
def __lowerCamelCase ( self ):
'''simple docstring'''
for tf_tokenizer in self.tf_tokenizers:
__a : Optional[Any] = tf_tokenizer(self.paired_sentences )
__a : Union[str, Any] = tf_tokenizer(
text=[sentence[0] for sentence in self.paired_sentences] , text_pair=[sentence[1] for sentence in self.paired_sentences] , )
for key in merged_outputs.keys():
self.assertTrue(tf.reduce_all(tf.cast(merged_outputs[key] , tf.intaa ) == separated_outputs[key] ) )
@slow
def __lowerCamelCase ( self ):
'''simple docstring'''
for tf_tokenizer in self.tf_tokenizers:
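            # tracing the tokenizer through tf.function compiles it into a
            # graph; its outputs must match the eager results exactly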
__a : int = tf.function(__UpperCamelCase )
for test_inputs in (self.test_sentences, self.paired_sentences):
__a : Union[str, Any] = tf.constant(__UpperCamelCase )
__a : int = compiled_tokenizer(__UpperCamelCase )
__a : List[Any] = tf_tokenizer(__UpperCamelCase )
for key in eager_outputs.keys():
self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key] ) )
@slow
def __lowerCamelCase ( self ):
'''simple docstring'''
for tf_tokenizer in self.tf_tokenizers:
__a : Dict = ModelToSave(tokenizer=__UpperCamelCase )
__a : Any = tf.convert_to_tensor(self.test_sentences )
__a : Optional[Any] = model(__UpperCamelCase ) # Build model with some sample inputs
with TemporaryDirectory() as tempdir:
__a : Optional[Any] = Path(__UpperCamelCase ) / """saved.model"""
model.save(__UpperCamelCase )
__a : Optional[int] = tf.keras.models.load_model(__UpperCamelCase )
__a : str = loaded_model(__UpperCamelCase )
# We may see small differences because the loaded model is compiled, so we need an epsilon for the test
self.assertLessEqual(tf.reduce_max(tf.abs(out - loaded_output ) ) , 1E-5 ) | 697 |
'''simple docstring'''
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__SCREAMING_SNAKE_CASE : Optional[Any] = {'configuration_focalnet': ['FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP', 'FocalNetConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE : List[Any] = [
'FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST',
'FocalNetForImageClassification',
'FocalNetForMaskedImageModeling',
'FocalNetBackbone',
'FocalNetModel',
'FocalNetPreTrainedModel',
]
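# _LazyModule (installed below) resolves the names above only on first
# attribute access, so importing this package stays cheap until a symbol
# that needs torch is actually used.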
if TYPE_CHECKING:
from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_focalnet import (
FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
FocalNetPreTrainedModel,
)
else:
import sys
__SCREAMING_SNAKE_CASE : Optional[int] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) | 697 | 1 |
'''simple docstring'''
import argparse
import json
from tqdm import tqdm
def _snake_case ( ) -> Dict:
__a : str = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--src_path""" , type=lowercase , default="""biencoder-nq-dev.json""" , help="""Path to raw DPR training data""" , )
parser.add_argument(
"""--evaluation_set""" , type=lowercase , help="""where to store parsed evaluation_set file""" , )
parser.add_argument(
"""--gold_data_path""" , type=lowercase , help="""where to store parsed gold_data_path file""" , )
__a : Union[str, Any] = parser.parse_args()
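    # each question goes on its own line of evaluation_set, and the
    # tab-joined titles of its positive contexts go on the matching line
    # of gold_data_path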
with open(args.src_path , """r""" ) as src_file, open(args.evaluation_set , """w""" ) as eval_file, open(
args.gold_data_path , """w""" ) as gold_file:
__a : List[str] = json.load(lowercase )
for dpr_record in tqdm(lowercase ):
__a : Dict = dpr_record["""question"""]
__a : Dict = [context["""title"""] for context in dpr_record["""positive_ctxs"""]]
eval_file.write(question + """\n""" )
gold_file.write("""\t""".join(lowercase ) + """\n""" )
if __name__ == "__main__":
main() | 697 |
'''simple docstring'''
from __future__ import annotations
import bisect
def _snake_case ( lowercase , lowercase , lowercase = 0 , lowercase = -1 ) -> int:
if hi < 0:
__a : Union[str, Any] = len(lowercase )
while lo < hi:
__a : List[str] = lo + (hi - lo) // 2
if sorted_collection[mid] < item:
__a : int = mid + 1
else:
__a : int = mid
return lo
def _snake_case ( lowercase , lowercase , lowercase = 0 , lowercase = -1 ) -> int:
if hi < 0:
__a : Any = len(lowercase )
while lo < hi:
__a : Any = lo + (hi - lo) // 2
if sorted_collection[mid] <= item:
__a : List[str] = mid + 1
else:
__a : Any = mid
return lo
def _snake_case ( lowercase , lowercase , lowercase = 0 , lowercase = -1 ) -> None:
sorted_collection.insert(bisect_left(lowercase , lowercase , lowercase , lowercase ) , lowercase )
def _snake_case ( lowercase , lowercase , lowercase = 0 , lowercase = -1 ) -> None:
sorted_collection.insert(bisect_right(lowercase , lowercase , lowercase , lowercase ) , lowercase )
def _snake_case ( lowercase , lowercase ) -> int | None:
__a : Dict = 0
__a : Any = len(lowercase ) - 1
while left <= right:
__a : str = left + (right - left) // 2
__a : List[Any] = sorted_collection[midpoint]
if current_item == item:
return midpoint
elif item < current_item:
__a : Optional[Any] = midpoint - 1
else:
__a : Optional[int] = midpoint + 1
return None
def _snake_case ( lowercase , lowercase ) -> int | None:
__a : Optional[int] = bisect.bisect_left(lowercase , lowercase )
if index != len(lowercase ) and sorted_collection[index] == item:
return index
return None
def _snake_case ( lowercase , lowercase , lowercase , lowercase ) -> int | None:
if right < left:
return None
__a : Any = left + (right - left) // 2
if sorted_collection[midpoint] == item:
return midpoint
elif sorted_collection[midpoint] > item:
return binary_search_by_recursion(lowercase , lowercase , lowercase , midpoint - 1 )
else:
return binary_search_by_recursion(lowercase , lowercase , midpoint + 1 , lowercase )
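def _demo_bisect_semantics() -> None:
    # Added sketch (illustrative only, not part of the original module): the
    # stdlib ``bisect`` helpers exhibit the same left/right insertion-point
    # semantics that the hand-rolled loops above implement.
    demo = [1, 3, 3, 3, 7]
    assert bisect.bisect_left(demo, 3) == 1  # first index where 3 could go
    assert bisect.bisect_right(demo, 3) == 4  # index just past the run of 3s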
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE : List[Any] = input('Enter numbers separated by comma:\n').strip()
__SCREAMING_SNAKE_CASE : Optional[Any] = sorted(int(item) for item in user_input.split(','))
__SCREAMING_SNAKE_CASE : List[str] = int(input('Enter a single number to be found in the list:\n'))
__SCREAMING_SNAKE_CASE : Optional[int] = binary_search(collection, target)
if result is None:
print(f'''{target} was not found in {collection}.''')
else:
print(f'''{target} was found at position {result} in {collection}.''') | 697 | 1 |
'''simple docstring'''
import os
from pathlib import Path
def _snake_case ( ) -> str:
from torch.utils.cpp_extension import load
    __a : Union[str, Any] = Path(__file__ ).resolve().parent.parent.parent / """kernels""" / """deformable_detr"""
__a : Any = [
root / filename
for filename in [
"""vision.cpp""",
os.path.join("""cpu""" , """ms_deform_attn_cpu.cpp""" ),
os.path.join("""cuda""" , """ms_deform_attn_cuda.cu""" ),
]
]
load(
"""MultiScaleDeformableAttention""" , lowercase , with_cuda=lowercase , extra_include_paths=[str(lowercase )] , extra_cflags=["""-DWITH_CUDA=1"""] , extra_cuda_cflags=[
"""-DCUDA_HAS_FP16=1""",
"""-D__CUDA_NO_HALF_OPERATORS__""",
"""-D__CUDA_NO_HALF_CONVERSIONS__""",
"""-D__CUDA_NO_HALF2_OPERATORS__""",
] , )
import MultiScaleDeformableAttention as MSDA
return MSDA | 697 |
'''simple docstring'''
from itertools import product
def _snake_case ( lowercase , lowercase ) -> list[int]:
__a : Optional[int] = sides_number
__a : Union[str, Any] = max_face_number * dice_number
__a : Optional[Any] = [0] * (max_total + 1)
__a : Dict = 1
__a : str = range(lowercase , max_face_number + 1 )
for dice_numbers in product(lowercase , repeat=lowercase ):
__a : int = sum(lowercase )
totals_frequencies[total] += 1
return totals_frequencies
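def _demo_two_dice_frequencies() -> None:
    # Added sketch (illustrative only): with two 6-sided dice there are
    # 6 ** 2 = 36 equally likely rolls and a total of 7 occurs 6 times,
    # which is exactly what the product-based counting above tallies.
    counts = [0] * 13
    for roll in product(range(1, 7), repeat=2):
        counts[sum(roll)] += 1
    assert sum(counts) == 36
    assert counts[7] == 6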
def _snake_case ( ) -> float:
__a : Tuple = total_frequency_distribution(
sides_number=4 , dice_number=9 )
__a : Union[str, Any] = total_frequency_distribution(
sides_number=6 , dice_number=6 )
__a : str = 0
__a : Dict = 9
__a : str = 4 * 9
__a : Any = 6
for peter_total in range(lowercase , max_peter_total + 1 ):
peter_wins_count += peter_totals_frequencies[peter_total] * sum(
colin_totals_frequencies[min_colin_total:peter_total] )
__a : str = (4**9) * (6**6)
__a : List[Any] = peter_wins_count / total_games_number
__a : List[Any] = round(lowercase , ndigits=7 )
return rounded_peter_win_probability
if __name__ == "__main__":
print(f'''{solution() = }''') | 697 | 1 |
'''simple docstring'''
import pickle
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, XGLMTokenizer, XGLMTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
__SCREAMING_SNAKE_CASE : int = get_tests_dir('fixtures/test_sentencepiece.model')
@require_sentencepiece
@require_tokenizers
class SCREAMING_SNAKE_CASE__ ( __UpperCamelCase , unittest.TestCase ):
lowercase__ = XGLMTokenizer
lowercase__ = XGLMTokenizerFast
lowercase__ = True
lowercase__ = True
def __lowerCamelCase ( self ):
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
__a : Tuple = XGLMTokenizer(__UpperCamelCase , keep_accents=__UpperCamelCase )
tokenizer.save_pretrained(self.tmpdirname )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Optional[int] = """<pad>"""
__a : Union[str, Any] = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(__UpperCamelCase ) , __UpperCamelCase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(__UpperCamelCase ) , __UpperCamelCase )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Any = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """<s>""" )
self.assertEqual(vocab_keys[1] , """<pad>""" )
self.assertEqual(len(__UpperCamelCase ) , 1008 )
def __lowerCamelCase ( self ):
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size , 1008 )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : List[str] = XGLMTokenizer(__UpperCamelCase , keep_accents=__UpperCamelCase )
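        # SentencePiece marks tokens that begin a new word with the "▁"
        # (U+2581) prefix, which the expected token sequences below rely on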
__a : Dict = tokenizer.tokenize("""This is a test""" )
self.assertListEqual(__UpperCamelCase , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(__UpperCamelCase ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
__a : Any = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
__UpperCamelCase , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
] , )
__a : Optional[int] = tokenizer.convert_tokens_to_ids(__UpperCamelCase )
self.assertListEqual(
__UpperCamelCase , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
__a : str = tokenizer.convert_ids_to_tokens(__UpperCamelCase )
self.assertListEqual(
__UpperCamelCase , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""<unk>""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""<unk>""",
""".""",
] , )
@cached_property
def __lowerCamelCase ( self ):
'''simple docstring'''
return XGLMTokenizer.from_pretrained("""facebook/xglm-564M""" )
def __lowerCamelCase ( self ):
'''simple docstring'''
with tempfile.NamedTemporaryFile() as f:
shutil.copyfile(__UpperCamelCase , f.name )
__a : List[Any] = XGLMTokenizer(f.name , keep_accents=__UpperCamelCase )
__a : str = pickle.dumps(__UpperCamelCase )
pickle.loads(__UpperCamelCase )
def __lowerCamelCase ( self ):
'''simple docstring'''
if not self.test_rust_tokenizer:
return
__a : Dict = self.get_tokenizer()
__a : int = self.get_rust_tokenizer()
__a : List[str] = """I was born in 92000, and this is falsé."""
__a : List[Any] = tokenizer.tokenize(__UpperCamelCase )
__a : Optional[Any] = rust_tokenizer.tokenize(__UpperCamelCase )
self.assertListEqual(__UpperCamelCase , __UpperCamelCase )
__a : Tuple = tokenizer.encode(__UpperCamelCase , add_special_tokens=__UpperCamelCase )
__a : Tuple = rust_tokenizer.encode(__UpperCamelCase , add_special_tokens=__UpperCamelCase )
self.assertListEqual(__UpperCamelCase , __UpperCamelCase )
__a : List[Any] = self.get_rust_tokenizer()
__a : Optional[int] = tokenizer.encode(__UpperCamelCase )
__a : Union[str, Any] = rust_tokenizer.encode(__UpperCamelCase )
self.assertListEqual(__UpperCamelCase , __UpperCamelCase )
@slow
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Union[str, Any] = """Hello World!"""
__a : List[Any] = [2, 3_1227, 4447, 35]
self.assertListEqual(__UpperCamelCase , self.big_tokenizer.encode(__UpperCamelCase ) )
@slow
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : List[str] = (
"""This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"""
""" add words that should not exsist and be tokenized to unk, such as saoneuhaoesuth"""
)
# fmt: off
__a : List[str] = [2, 1018, 67, 11, 1988, 2617, 5631, 278, 11, 3407, 48, 7_1630, 2_8085, 4, 3234, 157, 13, 6, 5, 6, 4, 3526, 768, 15, 659, 57, 298, 3983, 864, 129, 21, 6, 5, 1_3675, 377, 652, 7580, 1_0341, 155, 2817, 422, 1666, 7, 1674, 53, 113, 20_2277, 1_7892, 33, 60, 87, 4, 3234, 157, 61, 2667, 5_2376, 19, 88, 23, 735]
# fmt: on
self.assertListEqual(__UpperCamelCase , self.big_tokenizer.encode(__UpperCamelCase ) )
@slow
def __lowerCamelCase ( self ):
'''simple docstring'''
        # fmt: off
        __a : Dict = {
"""input_ids""": [[2, 10_8825, 1163, 15, 8_8010, 473, 1_5898, 157, 1_3672, 1857, 312, 8, 23_8021, 1163, 53, 1_3672, 1857, 312, 8, 5_3283, 18_2396, 8, 1_8566, 16, 3_6733, 4101, 8, 230, 24_4017, 12_2553, 7, 15, 13_2597, 4, 293, 1_2511, 7610, 4, 3414, 13_2597, 9, 4, 3_2361, 362, 4, 734, 2_8512, 3_2569, 18, 4, 3_2361, 2_6096, 1_4982, 73, 1_8715, 2_1433, 23_5261, 15, 492, 1_2427, 16, 53, 1_8715, 2_1433, 6_5454, 15, 2_3659, 563, 16, 278, 597, 2843, 595, 7931, 18_2396, 6_4186, 22, 886, 595, 13_2981, 53, 2_5540, 3449, 4_3982, 3_9901, 5951, 878, 330, 4, 2_7694, 8_0269, 312, 53, 6517, 1_1780, 611, 2_0408, 5], [2, 6, 13_2597, 67, 4_2897, 33, 592, 8, 16_3729, 2_5540, 361, 13_6997, 10_9514, 17_3230, 7, 501, 60, 10_2913, 196, 5631, 235, 6_3243, 473, 6, 23_1757, 74, 5277, 7905, 53, 3095, 3_7317, 22, 454, 18_3874, 5], [2, 268, 3_1298, 4_6530, 6, 13_2935, 4_3831, 7, 597, 32, 24, 3688, 9865, 5]],
"""attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]
} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=__UpperCamelCase , model_name="""facebook/xglm-564M""" , padding=__UpperCamelCase , ) | 697 |
'''simple docstring'''
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import DiffusionPipeline
from diffusers.models import AutoencoderKL, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import logging
__SCREAMING_SNAKE_CASE : Union[str, Any] = logging.get_logger(__name__) # pylint: disable=invalid-name
class SCREAMING_SNAKE_CASE__ ( __UpperCamelCase ):
def __init__( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , ):
'''simple docstring'''
super().__init__()
self.register_modules(
vae=__UpperCamelCase , text_encoder=__UpperCamelCase , tokenizer=__UpperCamelCase , unet=__UpperCamelCase , scheduler=__UpperCamelCase , safety_checker=__UpperCamelCase , feature_extractor=__UpperCamelCase , )
def __lowerCamelCase ( self , __UpperCamelCase = "auto" ):
'''simple docstring'''
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
__a : Union[str, Any] = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(__UpperCamelCase )
def __lowerCamelCase ( self ):
'''simple docstring'''
self.enable_attention_slicing(__UpperCamelCase )
@torch.no_grad()
def __call__( self , __UpperCamelCase , __UpperCamelCase = 512 , __UpperCamelCase = 512 , __UpperCamelCase = 50 , __UpperCamelCase = 7.5 , __UpperCamelCase = None , __UpperCamelCase = 1 , __UpperCamelCase = 0.0 , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = "pil" , __UpperCamelCase = True , __UpperCamelCase = None , __UpperCamelCase = 1 , __UpperCamelCase = None , **__UpperCamelCase , ):
'''simple docstring'''
if isinstance(__UpperCamelCase , __UpperCamelCase ):
__a : Union[str, Any] = 1
elif isinstance(__UpperCamelCase , __UpperCamelCase ):
__a : Tuple = len(__UpperCamelCase )
else:
raise ValueError(f"""`prompt` has to be of type `str` or `list` but is {type(__UpperCamelCase )}""" )
if height % 8 != 0 or width % 8 != 0:
raise ValueError(f"""`height` and `width` have to be divisible by 8 but are {height} and {width}.""" )
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(__UpperCamelCase , __UpperCamelCase ) or callback_steps <= 0)
):
raise ValueError(
f"""`callback_steps` has to be a positive integer but is {callback_steps} of type"""
f""" {type(__UpperCamelCase )}.""" )
# get prompt text embeddings
__a : Tuple = self.tokenizer(
__UpperCamelCase , padding="""max_length""" , max_length=self.tokenizer.model_max_length , return_tensors="""pt""" , )
__a : Union[str, Any] = text_inputs.input_ids
if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
__a : str = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
logger.warning(
"""The following part of your input was truncated because CLIP can only handle sequences up to"""
f""" {self.tokenizer.model_max_length} tokens: {removed_text}""" )
__a : Optional[int] = text_input_ids[:, : self.tokenizer.model_max_length]
if text_embeddings is None:
__a : int = self.text_encoder(text_input_ids.to(self.device ) )[0]
# duplicate text embeddings for each generation per prompt, using mps friendly method
__a , __a , __a : Union[str, Any] = text_embeddings.shape
__a : Optional[Any] = text_embeddings.repeat(1 , __UpperCamelCase , 1 )
__a : Union[str, Any] = text_embeddings.view(bs_embed * num_images_per_prompt , __UpperCamelCase , -1 )
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
__a : Any = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
__a : List[str]
if negative_prompt is None:
__a : Optional[Any] = [""""""]
elif type(__UpperCamelCase ) is not type(__UpperCamelCase ):
raise TypeError(
f"""`negative_prompt` should be the same type to `prompt`, but got {type(__UpperCamelCase )} !="""
f""" {type(__UpperCamelCase )}.""" )
elif isinstance(__UpperCamelCase , __UpperCamelCase ):
__a : Any = [negative_prompt]
elif batch_size != len(__UpperCamelCase ):
raise ValueError(
f"""`negative_prompt`: {negative_prompt} has batch size {len(__UpperCamelCase )}, but `prompt`:"""
f""" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"""
""" the batch size of `prompt`.""" )
else:
__a : Tuple = negative_prompt
__a : Any = text_input_ids.shape[-1]
__a : List[str] = self.tokenizer(
__UpperCamelCase , padding="""max_length""" , max_length=__UpperCamelCase , truncation=__UpperCamelCase , return_tensors="""pt""" , )
__a : str = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
__a : List[str] = uncond_embeddings.shape[1]
__a : List[Any] = uncond_embeddings.repeat(__UpperCamelCase , __UpperCamelCase , 1 )
__a : Tuple = uncond_embeddings.view(batch_size * num_images_per_prompt , __UpperCamelCase , -1 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
__a : List[Any] = torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
__a : Tuple = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
__a : List[Any] = (batch_size * num_images_per_prompt, self.unet.config.in_channels, 64, 64)
__a : int = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not exist on mps
__a : Any = torch.randn(
__UpperCamelCase , generator=__UpperCamelCase , device="""cpu""" , dtype=__UpperCamelCase ).to(self.device )
__a : Optional[Any] = torch.randn(__UpperCamelCase , generator=__UpperCamelCase , device="""cpu""" , dtype=__UpperCamelCase ).to(
self.device )
else:
__a : Optional[int] = torch.randn(
__UpperCamelCase , generator=__UpperCamelCase , device=self.device , dtype=__UpperCamelCase )
__a : str = torch.randn(__UpperCamelCase , generator=__UpperCamelCase , device=self.device , dtype=__UpperCamelCase )
else:
if latents_reference.shape != latents_shape:
raise ValueError(f"""Unexpected latents shape, got {latents.shape}, expected {latents_shape}""" )
__a : Optional[Any] = latents_reference.to(self.device )
__a : str = latents.to(self.device )
# This is the key part of the pipeline where we
# try to ensure that the generated images w/ the same seed
# but different sizes actually result in similar images
__a : List[str] = (latents_shape[3] - latents_shape_reference[3]) // 2
__a : int = (latents_shape[2] - latents_shape_reference[2]) // 2
__a : int = latents_shape_reference[3] if dx >= 0 else latents_shape_reference[3] + 2 * dx
__a : Tuple = latents_shape_reference[2] if dy >= 0 else latents_shape_reference[2] + 2 * dy
__a : Optional[Any] = 0 if dx < 0 else dx
__a : Optional[Any] = 0 if dy < 0 else dy
__a : Optional[int] = max(-dx , 0 )
__a : Optional[Any] = max(-dy , 0 )
__a : Optional[int] = latents_reference[:, :, dy : dy + h, dx : dx + w]
# set timesteps
self.scheduler.set_timesteps(__UpperCamelCase )
# Some schedulers like PNDM have timesteps as arrays
# It's more optimized to move all timesteps to correct device beforehand
__a : Dict = self.scheduler.timesteps.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
__a : Any = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
__a : List[Any] = """eta""" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
__a : Optional[Any] = {}
if accepts_eta:
__a : Union[str, Any] = eta
for i, t in enumerate(self.progress_bar(__UpperCamelCase ) ):
# expand the latents if we are doing classifier free guidance
__a : List[str] = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
__a : Tuple = self.scheduler.scale_model_input(__UpperCamelCase , __UpperCamelCase )
# predict the noise residual
__a : Union[str, Any] = self.unet(__UpperCamelCase , __UpperCamelCase , encoder_hidden_states=__UpperCamelCase ).sample
# perform guidance
if do_classifier_free_guidance:
__a , __a : List[str] = noise_pred.chunk(2 )
__a : Optional[int] = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# compute the previous noisy sample x_t -> x_t-1
__a : List[Any] = self.scheduler.step(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , **__UpperCamelCase ).prev_sample
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
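        # undo the VAE's latent scaling (Stable Diffusion's 0.18215 factor)
        # before decoding the latents back to pixel space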
__a : Optional[Any] = 1 / 0.1_8_2_1_5 * latents
__a : Optional[int] = self.vae.decode(__UpperCamelCase ).sample
__a : List[str] = (image / 2 + 0.5).clamp(0 , 1 )
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
__a : int = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if self.safety_checker is not None:
__a : List[str] = self.feature_extractor(self.numpy_to_pil(__UpperCamelCase ) , return_tensors="""pt""" ).to(
self.device )
__a , __a : int = self.safety_checker(
images=__UpperCamelCase , clip_input=safety_checker_input.pixel_values.to(text_embeddings.dtype ) )
else:
__a : Optional[int] = None
if output_type == "pil":
__a : str = self.numpy_to_pil(__UpperCamelCase )
if not return_dict:
return (image, has_nsfw_concept)
return StableDiffusionPipelineOutput(images=__UpperCamelCase , nsfw_content_detected=__UpperCamelCase ) | 697 | 1 |
'''simple docstring'''
import argparse
import torch
from transformers import GPTaConfig, GPTaModel, load_tf_weights_in_gpta
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def _snake_case ( lowercase , lowercase , lowercase ) -> Any:
# Construct model
if gpta_config_file == "":
__a : Dict = GPTaConfig()
else:
__a : Optional[Any] = GPTaConfig.from_json_file(lowercase )
__a : Union[str, Any] = GPTaModel(lowercase )
# Load weights from numpy
load_tf_weights_in_gpta(lowercase , lowercase , lowercase )
# Save pytorch-model
__a : Optional[int] = pytorch_dump_folder_path + """/""" + WEIGHTS_NAME
__a : Dict = pytorch_dump_folder_path + """/""" + CONFIG_NAME
print(F"""Save PyTorch model to {pytorch_weights_dump_path}""" )
torch.save(model.state_dict() , lowercase )
print(F"""Save configuration file to {pytorch_config_dump_path}""" )
with open(lowercase , """w""" , encoding="""utf-8""" ) as f:
f.write(config.to_json_string() )
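# Example invocation (hypothetical script name and paths, for illustration):
#   python convert_gpt2_checkpoint.py \
#       --gpt2_checkpoint_path ./gpt2-ckpt \
#       --gpt2_config_file ./config.json \
#       --pytorch_dump_folder_path ./pytorch-out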
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE : Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--gpt2_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--gpt2_config_file',
default='',
type=str,
help=(
'An optional config json file corresponding to the pre-trained OpenAI model. \n'
'This specifies the model architecture.'
),
)
__SCREAMING_SNAKE_CASE : Dict = parser.parse_args()
convert_gpta_checkpoint_to_pytorch(args.gpta_checkpoint_path, args.gpta_config_file, args.pytorch_dump_folder_path) | 697 |
'''simple docstring'''
import numpy as np
from PIL import Image
def _snake_case ( lowercase , lowercase , lowercase ) -> np.ndarray:
__a : Any = np.array(lowercase )
if arr.shape[0] != arr.shape[1]:
raise ValueError("""The input array is not a square matrix""" )
__a : Union[str, Any] = 0
__a : Dict = 0
__a : Optional[Any] = 0
__a : Tuple = 0
# compute the shape of the output matrix
__a : Optional[int] = (arr.shape[0] - size) // stride + 1
# initialize the output matrix with zeros of shape maxpool_shape
__a : int = np.zeros((maxpool_shape, maxpool_shape) )
while i < arr.shape[0]:
if i + size > arr.shape[0]:
# if the end of the matrix is reached, break
break
while j < arr.shape[1]:
# if the end of the matrix is reached, break
if j + size > arr.shape[1]:
break
# compute the maximum of the pooling matrix
__a : Optional[Any] = np.max(arr[i : i + size, j : j + size] )
# shift the pooling matrix by stride of column pixels
j += stride
mat_j += 1
# shift the pooling matrix by stride of row pixels
i += stride
mat_i += 1
# reset the column index to 0
__a : Optional[Any] = 0
__a : str = 0
return updated_arr
def _snake_case ( lowercase , lowercase , lowercase ) -> np.ndarray:
__a : int = np.array(lowercase )
if arr.shape[0] != arr.shape[1]:
raise ValueError("""The input array is not a square matrix""" )
__a : int = 0
__a : Optional[Any] = 0
__a : str = 0
__a : List[Any] = 0
# compute the shape of the output matrix
__a : int = (arr.shape[0] - size) // stride + 1
# initialize the output matrix with zeros of shape avgpool_shape
__a : Optional[int] = np.zeros((avgpool_shape, avgpool_shape) )
while i < arr.shape[0]:
# if the end of the matrix is reached, break
if i + size > arr.shape[0]:
break
while j < arr.shape[1]:
# if the end of the matrix is reached, break
if j + size > arr.shape[1]:
break
# compute the average of the pooling matrix
__a : Any = int(np.average(arr[i : i + size, j : j + size] ) )
# shift the pooling matrix by stride of column pixels
j += stride
mat_j += 1
# shift the pooling matrix by stride of row pixels
i += stride
mat_i += 1
# reset the column index to 0
__a : str = 0
__a : List[Any] = 0
return updated_arr
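def _demo_pooling_shape() -> None:
    # Added sketch (illustrative only): the output side length used above is
    # (n - size) // stride + 1, so a 4x4 input pooled with size=2, stride=2
    # yields a 2x2 result; the top-left window [[0, 1], [4, 5]] has max 5
    # and average 2.5.
    arr = np.arange(16).reshape(4, 4)
    size, stride = 2, 2
    assert (arr.shape[0] - size) // stride + 1 == 2
    assert np.max(arr[0:size, 0:size]) == 5
    assert np.average(arr[0:size, 0:size]) == 2.5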
# Main Function
if __name__ == "__main__":
from doctest import testmod
testmod(name='avgpooling', verbose=True)
# Loading the image
__SCREAMING_SNAKE_CASE : str = Image.open('path_to_image')
# Converting the image to numpy array and maxpooling, displaying the result
# Ensure that the image is a square matrix
Image.fromarray(maxpooling(np.array(image), size=3, stride=2)).show()
# Converting the image to numpy array and averagepooling, displaying the result
# Ensure that the image is a square matrix
Image.fromarray(avgpooling(np.array(image), size=3, stride=2)).show() | 697 | 1 |
'''simple docstring'''
def _snake_case ( lowercase = 5_0 ) -> int:
__a : Union[str, Any] = [1] * (length + 1)
for row_length in range(3 , length + 1 ):
for block_length in range(3 , row_length + 1 ):
for block_start in range(row_length - block_length ):
ways_number[row_length] += ways_number[
row_length - block_start - block_length - 1
]
ways_number[row_length] += 1
return ways_number[length]
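def _demo_small_rows() -> None:
    # Added sketch (illustrative only): short rows can be enumerated by hand
    # and match the recurrence above -- a length-3 row has 2 fillings (empty,
    # or one length-3 block) and a length-4 row has 4.
    assert _snake_case(3) == 2
    assert _snake_case(4) == 4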
if __name__ == "__main__":
print(f'''{solution() = }''') | 697 |
'''simple docstring'''
import qiskit
def _snake_case ( lowercase , lowercase ) -> qiskit.result.counts.Counts:
__a : Any = qiskit.Aer.get_backend("""aer_simulator""" )
# Create a Quantum Circuit acting on the q register
__a : str = qiskit.QuantumCircuit(lowercase , lowercase )
# Map the quantum measurement to the classical bits
circuit.measure([0] , [0] )
# Execute the circuit on the simulator
__a : Any = qiskit.execute(lowercase , lowercase , shots=1_0_0_0 )
# Return the histogram data of the results of the experiment.
return job.result().get_counts(lowercase )
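def _demo_circuit_shape() -> None:
    # Added sketch (illustrative only): a one-qubit, one-classical-bit
    # circuit with a single measurement carries exactly one instruction.
    demo = qiskit.QuantumCircuit(1, 1)
    demo.measure([0], [0])
    assert len(demo.data) == 1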
if __name__ == "__main__":
print(f'''Total count for various states are: {single_qubit_measure(1, 1)}''') | 697 | 1 |
'''simple docstring'''
import inspect
import unittest
from transformers import DPTConfig
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MODEL_MAPPING, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTModel
from transformers.models.dpt.modeling_dpt import DPT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class SCREAMING_SNAKE_CASE__ :
def __init__( self , __UpperCamelCase , __UpperCamelCase=2 , __UpperCamelCase=32 , __UpperCamelCase=16 , __UpperCamelCase=3 , __UpperCamelCase=True , __UpperCamelCase=True , __UpperCamelCase=32 , __UpperCamelCase=4 , __UpperCamelCase=[0, 1, 2, 3] , __UpperCamelCase=4 , __UpperCamelCase=37 , __UpperCamelCase="gelu" , __UpperCamelCase=0.1 , __UpperCamelCase=0.1 , __UpperCamelCase=0.0_2 , __UpperCamelCase=3 , __UpperCamelCase=[1, 384, 24, 24] , __UpperCamelCase=True , __UpperCamelCase=None , ):
'''simple docstring'''
__a : List[str] = parent
__a : Tuple = batch_size
__a : str = image_size
__a : int = patch_size
__a : Dict = num_channels
__a : int = is_training
__a : Dict = use_labels
__a : Union[str, Any] = hidden_size
__a : Dict = num_hidden_layers
__a : Dict = backbone_out_indices
__a : Optional[int] = num_attention_heads
__a : List[str] = intermediate_size
__a : Optional[Any] = hidden_act
__a : Dict = hidden_dropout_prob
__a : Tuple = attention_probs_dropout_prob
__a : Any = initializer_range
__a : Any = num_labels
__a : Optional[Any] = backbone_featmap_shape
__a : List[Any] = scope
__a : List[str] = is_hybrid
# sequence length of DPT = num_patches + 1 (we add 1 for the [CLS] token)
__a : Union[str, Any] = (image_size // patch_size) ** 2
__a : List[str] = num_patches + 1
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Dict = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__a : Union[str, Any] = None
if self.use_labels:
__a : str = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
__a : Tuple = self.get_config()
return config, pixel_values, labels
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : List[str] = {
"""global_padding""": """same""",
"""layer_type""": """bottleneck""",
"""depths""": [3, 4, 9],
"""out_features""": ["""stage1""", """stage2""", """stage3"""],
"""embedding_dynamic_padding""": True,
"""hidden_sizes""": [96, 192, 384, 768],
"""num_groups""": 2,
}
return DPTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , backbone_out_indices=self.backbone_out_indices , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__UpperCamelCase , initializer_range=self.initializer_range , is_hybrid=self.is_hybrid , backbone_config=__UpperCamelCase , backbone_featmap_shape=self.backbone_featmap_shape , )
def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ):
'''simple docstring'''
__a : Optional[Any] = DPTModel(config=__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
__a : List[str] = model(__UpperCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ):
'''simple docstring'''
__a : List[str] = self.num_labels
__a : Union[str, Any] = DPTForDepthEstimation(__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
__a : Tuple = model(__UpperCamelCase )
self.parent.assertEqual(result.predicted_depth.shape , (self.batch_size, self.image_size, self.image_size) )
def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ):
'''simple docstring'''
__a : Dict = self.num_labels
__a : Tuple = DPTForSemanticSegmentation(__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
__a : str = model(__UpperCamelCase , labels=__UpperCamelCase )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size, self.image_size) )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Optional[int] = self.prepare_config_and_inputs()
__a , __a , __a : Tuple = config_and_inputs
__a : List[str] = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class SCREAMING_SNAKE_CASE__ ( __UpperCamelCase , __UpperCamelCase , unittest.TestCase ):
lowercase__ = (DPTModel, DPTForDepthEstimation, DPTForSemanticSegmentation) if is_torch_available() else ()
lowercase__ = (
{
"depth-estimation": DPTForDepthEstimation,
"feature-extraction": DPTModel,
"image-segmentation": DPTForSemanticSegmentation,
}
if is_torch_available()
else {}
)
lowercase__ = False
lowercase__ = False
lowercase__ = False
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Optional[int] = DPTModelTester(self )
__a : List[Any] = ConfigTester(self , config_class=__UpperCamelCase , has_text_modality=__UpperCamelCase , hidden_size=37 )
def __lowerCamelCase ( self ):
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason="""DPT does not use inputs_embeds""" )
def __lowerCamelCase ( self ):
'''simple docstring'''
pass
def __lowerCamelCase ( self ):
'''simple docstring'''
__a , __a : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__a : str = model_class(__UpperCamelCase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
__a : Any = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__UpperCamelCase , nn.Linear ) )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a , __a : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__a : Any = model_class(__UpperCamelCase )
__a : List[str] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__a : int = [*signature.parameters.keys()]
__a : List[str] = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , __UpperCamelCase )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__UpperCamelCase )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_depth_estimation(*__UpperCamelCase )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*__UpperCamelCase )
def __lowerCamelCase ( self ):
'''simple docstring'''
for model_class in self.all_model_classes:
if model_class.__name__ == "DPTForDepthEstimation":
continue
__a , __a : Dict = self.model_tester.prepare_config_and_inputs_for_common()
__a : List[Any] = True
if model_class in get_values(__UpperCamelCase ):
continue
__a : str = model_class(__UpperCamelCase )
model.to(__UpperCamelCase )
model.train()
__a : Union[str, Any] = self._prepare_for_class(__UpperCamelCase , __UpperCamelCase , return_labels=__UpperCamelCase )
__a : List[Any] = model(**__UpperCamelCase ).loss
loss.backward()
def __lowerCamelCase ( self ):
'''simple docstring'''
for model_class in self.all_model_classes:
if model_class.__name__ == "DPTForDepthEstimation":
continue
__a , __a : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
__a : Any = False
__a : Dict = True
if model_class in get_values(__UpperCamelCase ) or not model_class.supports_gradient_checkpointing:
continue
__a : Any = model_class(__UpperCamelCase )
model.to(__UpperCamelCase )
model.gradient_checkpointing_enable()
model.train()
__a : List[str] = self._prepare_for_class(__UpperCamelCase , __UpperCamelCase , return_labels=__UpperCamelCase )
__a : Dict = model(**__UpperCamelCase ).loss
loss.backward()
def __lowerCamelCase ( self ):
'''simple docstring'''
__a , __a : Any = self.model_tester.prepare_config_and_inputs_for_common()
__a : Any = _config_zero_init(__UpperCamelCase )
for model_class in self.all_model_classes:
__a : Any = model_class(config=__UpperCamelCase )
# Skip the check for the backbone
__a : Optional[Any] = []
for name, module in model.named_modules():
if module.__class__.__name__ == "DPTViTHybridEmbeddings":
__a : Optional[int] = [f"""{name}.{key}""" for key in module.state_dict().keys()]
break
for name, param in model.named_parameters():
if param.requires_grad:
if name in backbone_params:
continue
self.assertIn(
((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=f"""Parameter {name} of model {model_class} seems not properly initialized""" , )
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def __lowerCamelCase ( self ):
'''simple docstring'''
pass
@slow
def __lowerCamelCase ( self ):
'''simple docstring'''
for model_name in DPT_PRETRAINED_MODEL_ARCHIVE_LIST[1:]:
__a : int = DPTModel.from_pretrained(__UpperCamelCase )
self.assertIsNotNone(__UpperCamelCase )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a , __a : int = self.model_tester.prepare_config_and_inputs_for_common()
__a : Optional[int] = """add"""
with self.assertRaises(__UpperCamelCase ):
__a : int = DPTForDepthEstimation(__UpperCamelCase )
def _snake_case ( ) -> Any:
__a : Dict = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
@slow
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : int = DPTImageProcessor.from_pretrained("""Intel/dpt-hybrid-midas""" )
__a : int = DPTForDepthEstimation.from_pretrained("""Intel/dpt-hybrid-midas""" ).to(__UpperCamelCase )
__a : Union[str, Any] = prepare_img()
__a : Any = image_processor(images=__UpperCamelCase , return_tensors="""pt""" ).to(__UpperCamelCase )
# forward pass
with torch.no_grad():
__a : Optional[Any] = model(**__UpperCamelCase )
__a : int = outputs.predicted_depth
# verify the predicted depth
__a : Any = torch.Size((1, 384, 384) )
self.assertEqual(predicted_depth.shape , __UpperCamelCase )
__a : int = torch.tensor(
[[[5.6_4_3_7, 5.6_1_4_6, 5.6_5_1_1], [5.4_3_7_1, 5.5_6_4_9, 5.5_9_5_8], [5.5_2_1_5, 5.5_1_8_4, 5.5_2_9_3]]] ).to(__UpperCamelCase )
        self.assertTrue(torch.allclose(outputs.predicted_depth[:3, :3, :3] / 100 , __UpperCamelCase , atol=1E-4 ) )
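# Run sketch: the @slow integration test above only executes when
# transformers' RUN_SLOW flag is set (test-file path is illustrative):
#   RUN_SLOW=1 python -m pytest tests/models/dpt/test_modeling_dpt_hybrid.py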
'''simple docstring'''
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
WavaVecaConformerConfig,
WavaVecaConformerForCTC,
WavaVecaConformerForPreTraining,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
__SCREAMING_SNAKE_CASE : str = logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE : Any = {
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.linear_k': 'encoder.layers.*.self_attn.linear_k',
'self_attn.linear_v': 'encoder.layers.*.self_attn.linear_v',
'self_attn.linear_q': 'encoder.layers.*.self_attn.linear_q',
'self_attn.pos_bias_u': 'encoder.layers.*.self_attn.pos_bias_u',
'self_attn.pos_bias_v': 'encoder.layers.*.self_attn.pos_bias_v',
'self_attn.linear_out': 'encoder.layers.*.self_attn.linear_out',
'self_attn.linear_pos': 'encoder.layers.*.self_attn.linear_pos',
'self_attn.rotary_emb': 'encoder.embed_positions',
'self_attn_layer_norm': 'encoder.layers.*.self_attn_layer_norm',
'conv_module.pointwise_conv1': 'encoder.layers.*.conv_module.pointwise_conv1',
'conv_module.pointwise_conv2': 'encoder.layers.*.conv_module.pointwise_conv2',
'conv_module.depthwise_conv': 'encoder.layers.*.conv_module.depthwise_conv',
'conv_module.batch_norm': 'encoder.layers.*.conv_module.batch_norm',
'conv_module.layer_norm': 'encoder.layers.*.conv_module.layer_norm',
'ffn1.w_1': 'encoder.layers.*.ffn1.intermediate_dense',
'ffn1.w_2': 'encoder.layers.*.ffn1.output_dense',
'ffn1.layer_norm': 'encoder.layers.*.ffn1_layer_norm',
'ffn2.w_1': 'encoder.layers.*.ffn2.intermediate_dense',
'ffn2.w_2': 'encoder.layers.*.ffn2.output_dense',
'ffn2.layer_norm': 'encoder.layers.*.ffn2_layer_norm',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'quantizer.weight_proj': 'quantizer.weight_proj',
'quantizer.vars': 'quantizer.codevectors',
'project_q': 'project_q',
'final_proj': 'project_hid',
'w2v_encoder.proj': 'lm_head',
'mask_emb': 'masked_spec_embed',
}
__SCREAMING_SNAKE_CASE : Optional[Any] = [
'lm_head',
'quantizer.weight_proj',
'quantizer.codevectors',
'project_q',
'project_hid',
]
def _snake_case ( lowercase , lowercase , lowercase , lowercase , lowercase ) -> List[Any]:
for attribute in key.split(""".""" ):
__a : str = getattr(lowercase , lowercase )
if weight_type is not None:
__a : Dict = getattr(lowercase , lowercase ).shape
else:
__a : Dict = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
F"""Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be"""
F""" {value.shape} for {full_name}""" )
if weight_type == "weight":
__a : Any = value
elif weight_type == "weight_g":
__a : int = value
elif weight_type == "weight_v":
__a : int = value
elif weight_type == "bias":
__a : List[Any] = value
elif weight_type == "running_mean":
__a : Union[str, Any] = value
elif weight_type == "running_var":
__a : Tuple = value
elif weight_type == "num_batches_tracked":
__a : Optional[int] = value
elif weight_type == "inv_freq":
__a : List[str] = value
else:
__a : List[str] = value
logger.info(F"""{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.""" )
def _snake_case ( lowercase , lowercase , lowercase ) -> Dict:
__a : Dict = []
__a : Dict = fairseq_model.state_dict()
__a : Tuple = hf_model.wavaveca_conformer.feature_extractor
for name, value in fairseq_dict.items():
__a : int = False
if "conv_layers" in name:
load_conv_layer(
lowercase , lowercase , lowercase , lowercase , hf_model.config.feat_extract_norm == """group""" , )
__a : List[Any] = True
else:
for key, mapped_key in MAPPING.items():
__a : Optional[int] = """wav2vec2_conformer.""" + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split("""w2v_model.""" )[-1] == name.split(""".""" )[0]:
__a : str = True
if "*" in mapped_key:
__a : Optional[int] = name.split(lowercase )[0].split(""".""" )[-2]
__a : List[Any] = mapped_key.replace("""*""" , lowercase )
if "pos_bias_u" in name:
__a : Union[str, Any] = None
elif "pos_bias_v" in name:
__a : List[Any] = None
elif "weight_g" in name:
__a : List[Any] = """weight_g"""
elif "weight_v" in name:
__a : List[Any] = """weight_v"""
elif "bias" in name:
__a : Optional[int] = """bias"""
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
__a : str = """weight"""
elif "running_mean" in name:
__a : List[str] = """running_mean"""
elif "inv_freq" in name:
__a : Dict = """inv_freq"""
elif "running_var" in name:
__a : Union[str, Any] = """running_var"""
elif "num_batches_tracked" in name:
__a : int = """num_batches_tracked"""
else:
__a : Optional[int] = None
set_recursively(lowercase , lowercase , lowercase , lowercase , lowercase )
continue
if not is_used:
unused_weights.append(lowercase )
logger.warning(F"""Unused weights: {unused_weights}""" )
def _snake_case ( lowercase , lowercase , lowercase , lowercase , lowercase ) -> List[str]:
__a : Optional[Any] = full_name.split("""conv_layers.""" )[-1]
__a : Union[str, Any] = name.split(""".""" )
__a : Optional[Any] = int(items[0] )
__a : int = int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" )
__a : Dict = value
logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" )
__a : str = value
logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.""" )
__a : Dict = value
logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.""" )
__a : Union[str, Any] = value
logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
else:
unused_weights.append(lowercase )
@torch.no_grad()
def _snake_case ( lowercase , lowercase , lowercase=None , lowercase=None , lowercase=True ) -> Optional[Any]:
if config_path is not None:
__a : Any = WavaVecaConformerConfig.from_pretrained(lowercase , hidden_act="""swish""" )
else:
__a : Optional[int] = WavaVecaConformerConfig()
if "rope" in checkpoint_path:
__a : Optional[Any] = """rotary"""
if is_finetuned:
if dict_path:
__a : List[Any] = Dictionary.load(lowercase )
            # important: change bos & pad token id since the CTC symbol is <pad> and
            # not <s> as in fairseq
__a : int = target_dict.pad_index
__a : List[str] = target_dict.bos_index
__a : str = target_dict.eos_index
__a : Dict = len(target_dict.symbols )
__a : Any = os.path.join(lowercase , """vocab.json""" )
if not os.path.isdir(lowercase ):
logger.error("""--pytorch_dump_folder_path ({}) should be a directory""".format(lowercase ) )
return
os.makedirs(lowercase , exist_ok=lowercase )
__a : Dict = target_dict.indices
# fairseq has the <pad> and <s> switched
__a : Optional[Any] = 0
__a : List[Any] = 1
with open(lowercase , """w""" , encoding="""utf-8""" ) as vocab_handle:
json.dump(lowercase , lowercase )
__a : int = WavaVecaCTCTokenizer(
lowercase , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token="""|""" , do_lower_case=lowercase , )
__a : Optional[int] = True if config.feat_extract_norm == """layer""" else False
__a : Dict = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=1_6_0_0_0 , padding_value=0 , do_normalize=lowercase , return_attention_mask=lowercase , )
__a : str = WavaVecaProcessor(feature_extractor=lowercase , tokenizer=lowercase )
processor.save_pretrained(lowercase )
__a : List[str] = WavaVecaConformerForCTC(lowercase )
else:
__a : Optional[int] = WavaVecaConformerForPreTraining(lowercase )
if is_finetuned:
__a , __a , __a : Dict = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={"""data""": """/""".join(dict_path.split("""/""" )[:-1] )} )
else:
__a : Optional[int] = argparse.Namespace(task="""audio_pretraining""" )
__a : Tuple = fairseq.tasks.setup_task(lowercase )
__a , __a , __a : int = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] , task=lowercase )
__a : Any = model[0].eval()
recursively_load_weights(lowercase , lowercase , not is_finetuned )
hf_wavavec.save_pretrained(lowercase )
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE : Dict = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--not_finetuned', action='store_true', help='Whether the model to convert is a fine-tuned model or not'
)
__SCREAMING_SNAKE_CASE : int = parser.parse_args()
convert_wavaveca_conformer_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
    )
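# Invocation sketch (paths are placeholders):
#   python convert_wav2vec2_conformer_original_pytorch_checkpoint_to_pytorch.py \
#       --checkpoint_path /path/to/fairseq_checkpoint.pt \
#       --pytorch_dump_folder_path /path/to/hf_model \
#       --not_finetuned
# For a fine-tuned CTC checkpoint, drop --not_finetuned and pass --dict_path;
# the script then also writes a tokenizer + feature-extractor processor.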
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_deformable_detr import DeformableDetrImageProcessor
__SCREAMING_SNAKE_CASE : Optional[int] = logging.get_logger(__name__)
class SCREAMING_SNAKE_CASE__ ( __UpperCamelCase ):
def __init__( self , *__UpperCamelCase , **__UpperCamelCase ):
'''simple docstring'''
warnings.warn(
"""The class DeformableDetrFeatureExtractor is deprecated and will be removed in version 5 of Transformers."""
""" Please use DeformableDetrImageProcessor instead.""" , __UpperCamelCase , )
        super().__init__(*__UpperCamelCase , **__UpperCamelCase )
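# Migration sketch: the deprecated class above merely subclasses the new image
# processor, so switching the import silences the warning without changing
# behavior (checkpoint name is illustrative):
#   from transformers import DeformableDetrImageProcessor
#   processor = DeformableDetrImageProcessor.from_pretrained("SenseTime/deformable-detr")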
'''simple docstring'''
import warnings
from functools import wraps
from typing import Callable
def _snake_case ( lowercase ) -> Callable:
@wraps(lowercase )
def _inner_fn(*lowercase , **lowercase ):
warnings.warn(
(F"""'{fn.__name__}' is experimental and might be subject to breaking changes in the future.""") , lowercase , )
return fn(*lowercase , **lowercase )
    return _inner_fn
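# Usage sketch: in this dump the decorated function and the warning category
# are both mangled to parameter placeholders; with the original names restored
# (the utility is `experimental` in huggingface_hub), usage looks like:
#   @experimental
#   def beta_feature():
#       ...
#   beta_feature()  # warns "'beta_feature' is experimental ..." then runs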
'''simple docstring'''
from __future__ import annotations
def _snake_case ( lowercase ) -> bool:
__a : Any = len(lowercase )
# We need to create solution object to save path.
__a : Union[str, Any] = [[0 for _ in range(lowercase )] for _ in range(lowercase )]
__a : Tuple = run_maze(lowercase , 0 , 0 , lowercase )
if solved:
print("""\n""".join(str(lowercase ) for row in solutions ) )
else:
print("""No solution exists!""" )
return solved
def _snake_case ( lowercase , lowercase , lowercase , lowercase ) -> bool:
__a : List[Any] = len(lowercase )
# Final check point.
if i == j == (size - 1):
__a : Tuple = 1
return True
__a : Tuple = (not i < 0) and (not j < 0) # Check lower bounds
__a : Dict = (i < size) and (j < size) # Check upper bounds
if lower_flag and upper_flag:
# check for already visited and block points.
__a : List[str] = (not solutions[i][j]) and (not maze[i][j])
if block_flag:
# check visited
__a : List[Any] = 1
# check for directions
if (
run_maze(lowercase , i + 1 , lowercase , lowercase )
or run_maze(lowercase , lowercase , j + 1 , lowercase )
or run_maze(lowercase , i - 1 , lowercase , lowercase )
or run_maze(lowercase , lowercase , j - 1 , lowercase )
):
return True
__a : Union[str, Any] = 0
return False
return False
if __name__ == "__main__":
import doctest
    doctest.testmod()
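# Usage sketch: both functions above are mangled to the same name
# `_snake_case`; with distinct names restored (say `solve_maze` and
# `run_maze`), a 0 = free / 1 = blocked grid is solved from the top-left to
# the bottom-right corner:
#   maze = [
#       [0, 1, 0],
#       [0, 0, 0],
#       [1, 1, 0],
#   ]
#   solve_maze(maze)  # prints the 0/1 path matrix and returns True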
'''simple docstring'''
from typing import List, Optional, Union
import numpy as np
from ....audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function
from ....feature_extraction_sequence_utils import SequenceFeatureExtractor
from ....feature_extraction_utils import BatchFeature
from ....file_utils import PaddingStrategy, TensorType
from ....utils import logging
__SCREAMING_SNAKE_CASE : Tuple = logging.get_logger(__name__)
class SCREAMING_SNAKE_CASE__ ( __UpperCamelCase ):
lowercase__ = ["input_features", "attention_mask"]
def __init__( self , __UpperCamelCase=80 , __UpperCamelCase=1_6000 , __UpperCamelCase=0.0 , __UpperCamelCase=10 , __UpperCamelCase=25 , __UpperCamelCase="hamming_window" , __UpperCamelCase=3_2_7_6_8.0 , __UpperCamelCase=0.9_7 , __UpperCamelCase=1.0 , __UpperCamelCase=True , __UpperCamelCase=True , __UpperCamelCase=False , **__UpperCamelCase , ):
'''simple docstring'''
super().__init__(feature_size=__UpperCamelCase , sampling_rate=__UpperCamelCase , padding_value=__UpperCamelCase , **__UpperCamelCase )
__a : List[str] = feature_size
__a : List[str] = sampling_rate
__a : int = padding_value
__a : Any = hop_length
__a : int = win_length
__a : Tuple = frame_signal_scale
__a : Union[str, Any] = preemphasis_coeff
__a : List[str] = mel_floor
__a : Union[str, Any] = normalize_means
__a : Optional[Any] = normalize_vars
__a : Optional[Any] = win_function
__a : Union[str, Any] = return_attention_mask
__a : List[Any] = win_length * sampling_rate // 1000
__a : List[Any] = hop_length * sampling_rate // 1000
__a : Optional[Any] = optimal_fft_length(self.sample_size )
__a : Any = (self.n_fft // 2) + 1
def __lowerCamelCase ( self , __UpperCamelCase ):
'''simple docstring'''
if self.win_function == "hamming_window":
__a : str = window_function(window_length=self.sample_size , name=self.win_function , periodic=__UpperCamelCase )
else:
__a : Dict = window_function(window_length=self.sample_size , name=self.win_function )
__a : Optional[Any] = mel_filter_bank(
num_frequency_bins=self.n_freqs , num_mel_filters=self.feature_size , min_frequency=0.0 , max_frequency=self.sampling_rate / 2.0 , sampling_rate=self.sampling_rate , )
__a : Any = spectrogram(
one_waveform * self.frame_signal_scale , window=__UpperCamelCase , frame_length=self.sample_size , hop_length=self.sample_stride , fft_length=self.n_fft , center=__UpperCamelCase , preemphasis=self.preemphasis_coeff , mel_filters=__UpperCamelCase , mel_floor=self.mel_floor , log_mel="""log""" , )
return msfc_features.T
def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ):
'''simple docstring'''
if self.normalize_means:
__a : int = x[:input_length].mean(axis=0 )
__a : str = np.subtract(__UpperCamelCase , __UpperCamelCase )
if self.normalize_vars:
__a : Dict = x[:input_length].std(axis=0 )
__a : Dict = np.divide(__UpperCamelCase , __UpperCamelCase )
if input_length < x.shape[0]:
__a : Union[str, Any] = padding_value
# make sure array is in float32
__a : Any = x.astype(np.floataa )
return x
def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase = None ):
'''simple docstring'''
__a : Tuple = attention_mask.sum(-1 ) if attention_mask is not None else [x.shape[0] for x in input_features]
return [self._normalize_one(__UpperCamelCase , __UpperCamelCase , self.padding_value ) for x, n in zip(__UpperCamelCase , __UpperCamelCase )]
def __call__( self , __UpperCamelCase , __UpperCamelCase = False , __UpperCamelCase = None , __UpperCamelCase = False , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , **__UpperCamelCase , ):
'''simple docstring'''
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
f"""The model corresponding to this feature extractor: {self} was trained using a sampling rate of"""
f""" {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with"""
f""" {self.sampling_rate} and not {sampling_rate}.""" )
else:
logger.warning(
"""It is strongly recommended to pass the ``sampling_rate`` argument to this function. """
"""Failing to do so can result in silent errors that might be hard to debug.""" )
__a : Tuple = isinstance(__UpperCamelCase , np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(f"""Only mono-channel audio is supported for input to {self}""" )
__a : Tuple = is_batched_numpy or (
isinstance(__UpperCamelCase , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
__a : Tuple = [np.asarray(__UpperCamelCase , dtype=np.floataa ) for speech in raw_speech]
elif not is_batched and not isinstance(__UpperCamelCase , np.ndarray ):
__a : List[str] = np.asarray(__UpperCamelCase , dtype=np.floataa )
elif isinstance(__UpperCamelCase , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
__a : str = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
__a : Any = [raw_speech]
# extract fbank features
__a : str = [self._extract_mfsc_features(__UpperCamelCase ) for one_waveform in raw_speech]
# convert into correct format for padding
__a : Optional[Any] = BatchFeature({"""input_features""": features} )
__a : Any = self.pad(
__UpperCamelCase , padding=__UpperCamelCase , max_length=__UpperCamelCase , truncation=__UpperCamelCase , pad_to_multiple_of=__UpperCamelCase , return_attention_mask=__UpperCamelCase , **__UpperCamelCase , )
# make sure list is in array format
__a : int = padded_inputs.get("""input_features""" )
if isinstance(input_features[0] , __UpperCamelCase ):
__a : Union[str, Any] = [np.asarray(__UpperCamelCase , dtype=np.floataa ) for feature in input_features]
__a : List[str] = padded_inputs.get("""attention_mask""" )
if attention_mask is not None:
__a : Optional[int] = [np.asarray(__UpperCamelCase , dtype=np.intaa ) for array in attention_mask]
if self.normalize_means or self.normalize_vars:
__a : Optional[Any] = (
np.array(__UpperCamelCase , dtype=np.intaa )
if self._get_padding_strategies(__UpperCamelCase , max_length=__UpperCamelCase ) is not PaddingStrategy.DO_NOT_PAD
and padding
else None
)
__a : int = self.normalize(
padded_inputs["""input_features"""] , attention_mask=__UpperCamelCase )
if return_tensors is not None:
__a : List[Any] = padded_inputs.convert_to_tensors(__UpperCamelCase )
        return padded_inputs
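# Usage sketch: this extractor appears to correspond to transformers'
# MCTCTFeatureExtractor (80-dim MFSC features, Hamming window); class and
# argument names are mangled above. Assuming the real class:
#   import numpy as np
#   from transformers import MCTCTFeatureExtractor
#   fe = MCTCTFeatureExtractor(feature_size=80, sampling_rate=16000, padding_value=0.0)
#   speech = np.zeros(16000, dtype=np.float32)  # 1 s of silence
#   feats = fe(speech, sampling_rate=16000, return_tensors="np")
#   feats["input_features"].shape  # (1, num_frames, 80)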
'''simple docstring'''
import argparse
import torch
from transformers import OpenAIGPTConfig, OpenAIGPTModel, load_tf_weights_in_openai_gpt
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def _snake_case ( lowercase , lowercase , lowercase ) -> Union[str, Any]:
# Construct model
if openai_config_file == "":
__a : str = OpenAIGPTConfig()
else:
__a : Optional[Any] = OpenAIGPTConfig.from_json_file(lowercase )
__a : Tuple = OpenAIGPTModel(lowercase )
# Load weights from numpy
load_tf_weights_in_openai_gpt(lowercase , lowercase , lowercase )
# Save pytorch-model
__a : int = pytorch_dump_folder_path + """/""" + WEIGHTS_NAME
__a : Tuple = pytorch_dump_folder_path + """/""" + CONFIG_NAME
print(F"""Save PyTorch model to {pytorch_weights_dump_path}""" )
torch.save(model.state_dict() , lowercase )
print(F"""Save configuration file to {pytorch_config_dump_path}""" )
with open(lowercase , """w""" , encoding="""utf-8""" ) as f:
f.write(config.to_json_string() )
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE : Tuple = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--openai_checkpoint_folder_path',
default=None,
type=str,
required=True,
help='Path to the TensorFlow checkpoint path.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--openai_config_file',
default='',
type=str,
help=(
'An optional config json file corresponding to the pre-trained OpenAI model. \n'
'This specifies the model architecture.'
),
)
__SCREAMING_SNAKE_CASE : List[str] = parser.parse_args()
convert_openai_checkpoint_to_pytorch(
args.openai_checkpoint_folder_path, args.openai_config_file, args.pytorch_dump_folder_path
    )
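# Invocation sketch (paths are placeholders):
#   python convert_openai_original_tf_checkpoint_to_pytorch.py \
#       --openai_checkpoint_folder_path /path/to/openai/weights \
#       --pytorch_dump_folder_path /path/to/output
# --openai_config_file is optional; when omitted, the stock OpenAIGPTConfig
# defaults are used, as in the function above.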
'''simple docstring'''
__SCREAMING_SNAKE_CASE : int = 9.80_665
def _snake_case ( lowercase , lowercase , lowercase = g ) -> float:
if fluid_density <= 0:
raise ValueError("""Impossible fluid density""" )
if volume < 0:
raise ValueError("""Impossible Object volume""" )
if gravity <= 0:
raise ValueError("""Impossible Gravity""" )
return fluid_density * gravity * volume
if __name__ == "__main__":
import doctest
# run doctest
    doctest.testmod()
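# Worked example: for water (rho ~ 997 kg/m^3) and a fully submerged 0.5 m^3
# object under standard gravity, the buoyant force F = rho * g * V is
#   997 * 9.80665 * 0.5  # ~4888.6 N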
'''simple docstring'''
import math
from typing import Optional
import numpy as np
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__SCREAMING_SNAKE_CASE : Optional[int] = logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE : Optional[Any] = {
'facebook/encodec_24khz': 'https://huggingface.co/facebook/encodec_24khz/resolve/main/config.json',
'facebook/encodec_48khz': 'https://huggingface.co/facebook/encodec_48khz/resolve/main/config.json',
}
class SCREAMING_SNAKE_CASE__ ( __UpperCamelCase ):
lowercase__ = "encodec"
def __init__( self , __UpperCamelCase=[1.5, 3.0, 6.0, 1_2.0, 2_4.0] , __UpperCamelCase=2_4000 , __UpperCamelCase=1 , __UpperCamelCase=False , __UpperCamelCase=None , __UpperCamelCase=None , __UpperCamelCase=128 , __UpperCamelCase=32 , __UpperCamelCase=1 , __UpperCamelCase=[8, 5, 4, 2] , __UpperCamelCase="weight_norm" , __UpperCamelCase=7 , __UpperCamelCase=7 , __UpperCamelCase=3 , __UpperCamelCase=2 , __UpperCamelCase=True , __UpperCamelCase="reflect" , __UpperCamelCase=2 , __UpperCamelCase=2 , __UpperCamelCase=1.0 , __UpperCamelCase=1024 , __UpperCamelCase=None , __UpperCamelCase=True , **__UpperCamelCase , ):
'''simple docstring'''
__a : Dict = target_bandwidths
__a : str = sampling_rate
__a : Tuple = audio_channels
__a : List[Any] = normalize
__a : Tuple = chunk_length_s
__a : Optional[Any] = overlap
__a : Dict = hidden_size
__a : Optional[int] = num_filters
__a : Optional[Any] = num_residual_layers
__a : int = upsampling_ratios
__a : Union[str, Any] = norm_type
__a : Union[str, Any] = kernel_size
__a : Union[str, Any] = last_kernel_size
__a : List[Any] = residual_kernel_size
__a : int = dilation_growth_rate
__a : Union[str, Any] = use_causal_conv
__a : int = pad_mode
__a : str = compress
__a : Tuple = num_lstm_layers
__a : str = trim_right_ratio
__a : List[Any] = codebook_size
__a : Tuple = codebook_dim if codebook_dim is not None else hidden_size
__a : Dict = use_conv_shortcut
if self.norm_type not in ["weight_norm", "time_group_norm"]:
raise ValueError(
f"""self.norm_type must be one of `\"weight_norm\"`, `\"time_group_norm\"`), got {self.norm_type}""" )
super().__init__(**__UpperCamelCase )
@property
def __lowerCamelCase ( self ):
'''simple docstring'''
if self.chunk_length_s is None:
return None
else:
return int(self.chunk_length_s * self.sampling_rate )
@property
def __lowerCamelCase ( self ):
'''simple docstring'''
if self.chunk_length_s is None or self.overlap is None:
return None
else:
return max(1 , int((1.0 - self.overlap) * self.chunk_length ) )
@property
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Optional[int] = np.prod(self.upsampling_ratios )
return math.ceil(self.sampling_rate / hop_length )
@property
def __lowerCamelCase ( self ):
'''simple docstring'''
        return int(1000 * self.target_bandwidths[-1] // (self.frame_rate * 10) )
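# Worked example: with the 24 kHz defaults above, hop_length =
# prod([8, 5, 4, 2]) = 320 samples, so frame_rate = ceil(24000 / 320) = 75
# frames/s. Using the real transformers class this dump corresponds to:
#   from transformers import EncodecConfig
#   EncodecConfig().frame_rate  # 75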
'''simple docstring'''
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DetrImageProcessor
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
def __init__( self , __UpperCamelCase , __UpperCamelCase=7 , __UpperCamelCase=3 , __UpperCamelCase=30 , __UpperCamelCase=400 , __UpperCamelCase=True , __UpperCamelCase=None , __UpperCamelCase=True , __UpperCamelCase=1 / 255 , __UpperCamelCase=True , __UpperCamelCase=[0.5, 0.5, 0.5] , __UpperCamelCase=[0.5, 0.5, 0.5] , __UpperCamelCase=True , ):
'''simple docstring'''
__a : List[Any] = size if size is not None else {"""shortest_edge""": 18, """longest_edge""": 1333}
__a : Dict = parent
__a : Union[str, Any] = batch_size
__a : Optional[int] = num_channels
__a : Dict = min_resolution
__a : List[Any] = max_resolution
__a : int = do_resize
__a : str = size
__a : Optional[Any] = do_rescale
__a : Optional[Any] = rescale_factor
__a : str = do_normalize
__a : Any = image_mean
__a : Optional[Any] = image_std
__a : Dict = do_pad
def __lowerCamelCase ( self ):
'''simple docstring'''
return {
"do_resize": self.do_resize,
"size": self.size,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_pad": self.do_pad,
}
def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase=False ):
'''simple docstring'''
if not batched:
__a : Union[str, Any] = image_inputs[0]
if isinstance(__UpperCamelCase , Image.Image ):
__a , __a : Tuple = image.size
else:
__a , __a : Tuple = image.shape[1], image.shape[2]
if w < h:
__a : Optional[int] = int(self.size["""shortest_edge"""] * h / w )
__a : Tuple = self.size["""shortest_edge"""]
elif w > h:
__a : Optional[Any] = self.size["""shortest_edge"""]
__a : Any = int(self.size["""shortest_edge"""] * w / h )
else:
__a : Any = self.size["""shortest_edge"""]
__a : Optional[int] = self.size["""shortest_edge"""]
else:
__a : Any = []
for image in image_inputs:
__a , __a : Any = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
__a : List[Any] = max(__UpperCamelCase , key=lambda __UpperCamelCase : item[0] )[0]
__a : Optional[Any] = max(__UpperCamelCase , key=lambda __UpperCamelCase : item[1] )[1]
return expected_height, expected_width
@require_torch
@require_vision
class SCREAMING_SNAKE_CASE__ ( __UpperCamelCase , unittest.TestCase ):
lowercase__ = DetrImageProcessor if is_vision_available() else None
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : str = DetrImageProcessingTester(self )
@property
def __lowerCamelCase ( self ):
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Optional[int] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__UpperCamelCase , """image_mean""" ) )
self.assertTrue(hasattr(__UpperCamelCase , """image_std""" ) )
self.assertTrue(hasattr(__UpperCamelCase , """do_normalize""" ) )
self.assertTrue(hasattr(__UpperCamelCase , """do_rescale""" ) )
self.assertTrue(hasattr(__UpperCamelCase , """rescale_factor""" ) )
self.assertTrue(hasattr(__UpperCamelCase , """do_resize""" ) )
self.assertTrue(hasattr(__UpperCamelCase , """size""" ) )
self.assertTrue(hasattr(__UpperCamelCase , """do_pad""" ) )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Optional[Any] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"""shortest_edge""": 18, """longest_edge""": 1333} )
self.assertEqual(image_processor.do_pad , __UpperCamelCase )
__a : List[Any] = self.image_processing_class.from_dict(
self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=__UpperCamelCase )
self.assertEqual(image_processor.size , {"""shortest_edge""": 42, """longest_edge""": 84} )
self.assertEqual(image_processor.do_pad , __UpperCamelCase )
def __lowerCamelCase ( self ):
'''simple docstring'''
pass
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__a : Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__UpperCamelCase )
for image in image_inputs:
self.assertIsInstance(__UpperCamelCase , Image.Image )
# Test not batched input
__a : Optional[Any] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
__a , __a : Any = self.image_processor_tester.get_expected_values(__UpperCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
__a , __a : Optional[int] = self.image_processor_tester.get_expected_values(__UpperCamelCase , batched=__UpperCamelCase )
__a : Any = image_processing(__UpperCamelCase , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Dict = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__a : List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__UpperCamelCase , numpify=__UpperCamelCase )
for image in image_inputs:
self.assertIsInstance(__UpperCamelCase , np.ndarray )
# Test not batched input
__a : Dict = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
__a , __a : Any = self.image_processor_tester.get_expected_values(__UpperCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
__a : List[str] = image_processing(__UpperCamelCase , return_tensors="""pt""" ).pixel_values
__a , __a : str = self.image_processor_tester.get_expected_values(__UpperCamelCase , batched=__UpperCamelCase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Any = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__a : Union[str, Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__UpperCamelCase , torchify=__UpperCamelCase )
for image in image_inputs:
self.assertIsInstance(__UpperCamelCase , torch.Tensor )
# Test not batched input
__a : Any = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
__a , __a : Any = self.image_processor_tester.get_expected_values(__UpperCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
__a : List[str] = image_processing(__UpperCamelCase , return_tensors="""pt""" ).pixel_values
__a , __a : Any = self.image_processor_tester.get_expected_values(__UpperCamelCase , batched=__UpperCamelCase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : List[Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
with open("""./tests/fixtures/tests_samples/COCO/coco_annotations.txt""" , """r""" ) as f:
__a : Dict = json.loads(f.read() )
__a : Optional[int] = {"""image_id""": 3_9769, """annotations""": target}
# encode them
__a : List[str] = DetrImageProcessor.from_pretrained("""facebook/detr-resnet-50""" )
__a : Tuple = image_processing(images=__UpperCamelCase , annotations=__UpperCamelCase , return_tensors="""pt""" )
# verify pixel values
__a : Union[str, Any] = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding["""pixel_values"""].shape , __UpperCamelCase )
__a : List[str] = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] )
self.assertTrue(torch.allclose(encoding["""pixel_values"""][0, 0, 0, :3] , __UpperCamelCase , atol=1E-4 ) )
# verify area
__a : List[Any] = torch.tensor([5_8_8_7.9_6_0_0, 1_1_2_5_0.2_0_6_1, 4_8_9_3_5_3.8_4_3_8, 8_3_7_1_2_2.7_5_0_0, 1_4_7_9_6_7.5_1_5_6, 1_6_5_7_3_2.3_4_3_8] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""area"""] , __UpperCamelCase ) )
# verify boxes
__a : Optional[int] = torch.Size([6, 4] )
self.assertEqual(encoding["""labels"""][0]["""boxes"""].shape , __UpperCamelCase )
__a : Any = torch.tensor([0.5_5_0_3, 0.2_7_6_5, 0.0_6_0_4, 0.2_2_1_5] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""boxes"""][0] , __UpperCamelCase , atol=1E-3 ) )
# verify image_id
__a : Union[str, Any] = torch.tensor([3_9769] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""image_id"""] , __UpperCamelCase ) )
# verify is_crowd
__a : List[Any] = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""iscrowd"""] , __UpperCamelCase ) )
# verify class_labels
__a : Any = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""class_labels"""] , __UpperCamelCase ) )
# verify orig_size
__a : Any = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""orig_size"""] , __UpperCamelCase ) )
# verify size
__a : str = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""size"""] , __UpperCamelCase ) )
@slow
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : List[Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
with open("""./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt""" , """r""" ) as f:
__a : Tuple = json.loads(f.read() )
__a : str = {"""file_name""": """000000039769.png""", """image_id""": 3_9769, """segments_info""": target}
__a : int = pathlib.Path("""./tests/fixtures/tests_samples/COCO/coco_panoptic""" )
# encode them
__a : List[str] = DetrImageProcessor.from_pretrained("""facebook/detr-resnet-50-panoptic""" )
__a : Tuple = image_processing(images=__UpperCamelCase , annotations=__UpperCamelCase , masks_path=__UpperCamelCase , return_tensors="""pt""" )
# verify pixel values
__a : List[str] = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding["""pixel_values"""].shape , __UpperCamelCase )
__a : Any = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] )
self.assertTrue(torch.allclose(encoding["""pixel_values"""][0, 0, 0, :3] , __UpperCamelCase , atol=1E-4 ) )
# verify area
__a : Optional[Any] = torch.tensor([1_4_7_9_7_9.6_8_7_5, 1_6_5_5_2_7.0_4_6_9, 4_8_4_6_3_8.5_9_3_8, 1_1_2_9_2.9_3_7_5, 5_8_7_9.6_5_6_2, 7_6_3_4.1_1_4_7] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""area"""] , __UpperCamelCase ) )
# verify boxes
__a : Optional[Any] = torch.Size([6, 4] )
self.assertEqual(encoding["""labels"""][0]["""boxes"""].shape , __UpperCamelCase )
__a : List[str] = torch.tensor([0.2_6_2_5, 0.5_4_3_7, 0.4_6_8_8, 0.8_6_2_5] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""boxes"""][0] , __UpperCamelCase , atol=1E-3 ) )
# verify image_id
__a : List[str] = torch.tensor([3_9769] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""image_id"""] , __UpperCamelCase ) )
# verify is_crowd
__a : Optional[int] = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""iscrowd"""] , __UpperCamelCase ) )
# verify class_labels
__a : Optional[int] = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""class_labels"""] , __UpperCamelCase ) )
# verify masks
__a : Union[str, Any] = 82_2873
self.assertEqual(encoding["""labels"""][0]["""masks"""].sum().item() , __UpperCamelCase )
# verify orig_size
__a : str = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""orig_size"""] , __UpperCamelCase ) )
# verify size
__a : List[Any] = torch.tensor([800, 1066] )
        self.assertTrue(torch.allclose(encoding["""labels"""][0]["""size"""] , __UpperCamelCase ) )
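# Annotation sketch: the slow tests above feed COCO-style targets, roughly
#   target = {"image_id": 39769,
#             "annotations": [{"bbox": [x, y, w, h], "category_id": 75,
#                              "area": 5887.96, "iscrowd": 0}, ...]}
# and the processor converts each box to normalized (cx, cy, w, h), which is
# what the `boxes` assertions check.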
'''simple docstring'''
__SCREAMING_SNAKE_CASE : Optional[int] = {
'meter': 'm',
'kilometer': 'km',
'megametre': 'Mm',
'gigametre': 'Gm',
'terametre': 'Tm',
'petametre': 'Pm',
'exametre': 'Em',
'zettametre': 'Zm',
'yottametre': 'Ym',
}
# Exponent of the factor(meter)
__SCREAMING_SNAKE_CASE : str = {
'm': 0,
'km': 3,
'Mm': 6,
'Gm': 9,
'Tm': 12,
'Pm': 15,
'Em': 18,
'Zm': 21,
'Ym': 24,
}
def _snake_case ( lowercase , lowercase , lowercase ) -> float:
__a : Optional[int] = from_type.lower().strip("""s""" )
__a : Optional[int] = to_type.lower().strip("""s""" )
__a : str = UNIT_SYMBOL.get(lowercase , lowercase )
__a : Optional[Any] = UNIT_SYMBOL.get(lowercase , lowercase )
if from_sanitized not in METRIC_CONVERSION:
__a : Optional[int] = (
F"""Invalid 'from_type' value: {from_type!r}.\n"""
F"""Conversion abbreviations are: {", ".join(lowercase )}"""
)
raise ValueError(lowercase )
if to_sanitized not in METRIC_CONVERSION:
__a : Optional[Any] = (
F"""Invalid 'to_type' value: {to_type!r}.\n"""
F"""Conversion abbreviations are: {", ".join(lowercase )}"""
)
raise ValueError(lowercase )
__a : Tuple = METRIC_CONVERSION[from_sanitized]
__a : Dict = METRIC_CONVERSION[to_sanitized]
__a : List[str] = 1
if from_exponent > to_exponent:
__a : Optional[Any] = from_exponent - to_exponent
else:
__a : Union[str, Any] = -(to_exponent - from_exponent)
return value * pow(1_0 , lowercase )
if __name__ == "__main__":
from doctest import testmod
    testmod()
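# Worked example: converting 4 meters to kilometers looks up the exponents
# ('m' -> 0, 'km' -> 3), so the factor is 10 ** -(3 - 0):
#   4 * 10 ** -3  # 0.004 km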
'''simple docstring'''
import argparse
import logging
import os
import time
import timeit
import datasets
import numpy as np
import pycuda.autoinit # noqa: F401
import pycuda.driver as cuda
import tensorrt as trt
import torch
from absl import logging as absl_logging
from accelerate import Accelerator
from datasets import load_dataset, load_metric
from torch.utils.data import DataLoader
from utils_qa import postprocess_qa_predictions
import transformers
from transformers import AutoTokenizer, EvalPrediction, default_data_collator, set_seed
from transformers.trainer_pt_utils import nested_concat, nested_truncate
__SCREAMING_SNAKE_CASE : Optional[int] = trt.Logger(trt.Logger.WARNING)
__SCREAMING_SNAKE_CASE : Tuple = absl_logging.get_absl_logger()
absl_logger.setLevel(logging.WARNING)
__SCREAMING_SNAKE_CASE : Any = logging.getLogger(__name__)
__SCREAMING_SNAKE_CASE : int = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--onnx_model_path',
default=None,
type=str,
required=True,
help='Path to ONNX model: ',
)
parser.add_argument(
'--output_dir',
default=None,
type=str,
required=True,
help='The output directory where the model checkpoints and predictions will be written.',
)
# Other parameters
parser.add_argument(
'--tokenizer_name',
default='',
type=str,
required=True,
help='Pretrained tokenizer name or path if not the same as model_name',
)
parser.add_argument(
'--version_2_with_negative',
action='store_true',
help='If true, the SQuAD examples contain some that do not have an answer.',
)
parser.add_argument(
'--null_score_diff_threshold',
type=float,
default=0.0,
help='If null_score - best_non_null is greater than the threshold predict null.',
)
parser.add_argument(
'--max_seq_length',
default=384,
type=int,
help=(
'The maximum total input sequence length after WordPiece tokenization. Sequences '
'longer than this will be truncated, and sequences shorter than this will be padded.'
),
)
parser.add_argument(
'--doc_stride',
default=128,
type=int,
help='When splitting up a long document into chunks, how much stride to take between chunks.',
)
parser.add_argument('--per_device_eval_batch_size', default=8, type=int, help='Batch size per GPU/CPU for evaluation.')
parser.add_argument(
'--n_best_size',
default=20,
type=int,
help='The total number of n-best predictions to generate in the nbest_predictions.json output file.',
)
parser.add_argument(
'--max_answer_length',
default=30,
type=int,
help=(
'The maximum length of an answer that can be generated. This is needed because the start '
'and end predictions are not conditioned on one another.'
),
)
parser.add_argument('--seed', type=int, default=42, help='random seed for initialization')
parser.add_argument(
'--dataset_name',
type=str,
default=None,
required=True,
help='The name of the dataset to use (via the datasets library).',
)
parser.add_argument(
'--dataset_config_name',
type=str,
default=None,
help='The configuration name of the dataset to use (via the datasets library).',
)
parser.add_argument(
    '--preprocessing_num_workers', type=int, default=4, help='The number of processes to use for the preprocessing.'
)
parser.add_argument('--overwrite_cache', action='store_true', help='Overwrite the cached training and evaluation sets')
parser.add_argument(
'--fp16',
action='store_true',
help='Whether to use 16-bit (mixed) precision instead of 32-bit',
)
parser.add_argument(
'--int8',
action='store_true',
help='Whether to use INT8',
)
__SCREAMING_SNAKE_CASE : Optional[int] = parser.parse_args()
if args.tokenizer_name:
__SCREAMING_SNAKE_CASE : str = AutoTokenizer.from_pretrained(args.tokenizer_name, use_fast=True)
else:
raise ValueError(
        'You are instantiating a new tokenizer from scratch. This is not supported by this script. '
'You can do it from another script, save it, and load it from here, using --tokenizer_name.'
)
logger.info('Training/evaluation parameters %s', args)
__SCREAMING_SNAKE_CASE : List[Any] = args.per_device_eval_batch_size
__SCREAMING_SNAKE_CASE : int = (args.eval_batch_size, args.max_seq_length)
# TRT Engine properties
__SCREAMING_SNAKE_CASE : Optional[Any] = True
__SCREAMING_SNAKE_CASE : Tuple = 'temp_engine/bert-fp32.engine'
if args.fpaa:
__SCREAMING_SNAKE_CASE : Dict = 'temp_engine/bert-fp16.engine'
if args.inta:
__SCREAMING_SNAKE_CASE : Tuple = 'temp_engine/bert-int8.engine'
# import ONNX file
if not os.path.exists('temp_engine'):
os.makedirs('temp_engine')
__SCREAMING_SNAKE_CASE : Optional[Any] = 1 << (int)(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)
with trt.Builder(TRT_LOGGER) as builder, builder.create_network(EXPLICIT_BATCH) as network, trt.OnnxParser(
network, TRT_LOGGER
) as parser:
with open(args.onnx_model_path, 'rb') as model:
if not parser.parse(model.read()):
for error in range(parser.num_errors):
print(parser.get_error(error))
# Query input names and shapes from parsed TensorRT network
__SCREAMING_SNAKE_CASE : List[Any] = [network.get_input(i) for i in range(network.num_inputs)]
__SCREAMING_SNAKE_CASE : List[Any] = [_input.name for _input in network_inputs] # ex: ["actual_input1"]
with builder.create_builder_config() as config:
__SCREAMING_SNAKE_CASE : Tuple = 1 << 50
if STRICT_TYPES:
config.set_flag(trt.BuilderFlag.STRICT_TYPES)
if args.fpaa:
config.set_flag(trt.BuilderFlag.FPaa)
if args.inta:
config.set_flag(trt.BuilderFlag.INTa)
__SCREAMING_SNAKE_CASE : Dict = builder.create_optimization_profile()
config.add_optimization_profile(profile)
for i in range(len(input_names)):
profile.set_shape(input_names[i], INPUT_SHAPE, INPUT_SHAPE, INPUT_SHAPE)
__SCREAMING_SNAKE_CASE : Union[str, Any] = builder.build_engine(network, config)
# serialize_engine and store in file (can be directly loaded and deserialized):
with open(engine_name, 'wb') as f:
f.write(engine.serialize())
def _snake_case ( lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase ) -> List[Any]:
__a : Dict = np.asarray(inputs["""input_ids"""] , dtype=np.intaa )
__a : List[Any] = np.asarray(inputs["""attention_mask"""] , dtype=np.intaa )
__a : str = np.asarray(inputs["""token_type_ids"""] , dtype=np.intaa )
# Copy inputs
cuda.memcpy_htod_async(d_inputs[0] , input_ids.ravel() , lowercase )
cuda.memcpy_htod_async(d_inputs[1] , attention_mask.ravel() , lowercase )
cuda.memcpy_htod_async(d_inputs[2] , token_type_ids.ravel() , lowercase )
# start time
__a : Optional[Any] = time.time()
# Run inference
context.execute_async(
bindings=[int(lowercase ) for d_inp in d_inputs] + [int(lowercase ), int(lowercase )] , stream_handle=stream.handle )
# Transfer predictions back from GPU
cuda.memcpy_dtoh_async(lowercase , lowercase , lowercase )
cuda.memcpy_dtoh_async(lowercase , lowercase , lowercase )
# Synchronize the stream and take time
stream.synchronize()
# end time
__a : str = time.time()
__a : Any = end_time - start_time
__a : Optional[int] = (h_outputa, h_outputa)
# print(outputs)
return outputs, infer_time
# Initialize the accelerator. We will let the accelerator handle device placement for us in this example.
__SCREAMING_SNAKE_CASE : Optional[Any] = Accelerator()
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
datefmt='%m/%d/%Y %H:%M:%S',
level=logging.INFO,
)
# Setup logging, we only want one process per machine to log things on the screen.
# accelerator.is_local_main_process is only True for one process per machine.
logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR)
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_info()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
# If passed along, set the training seed now.
if args.seed is not None:
set_seed(args.seed)
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
if args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
__SCREAMING_SNAKE_CASE : List[str] = load_dataset(args.dataset_name, args.dataset_config_name)
else:
raise ValueError('Evaluation requires a dataset name')
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Preprocessing the datasets.
# Preprocessing is slightly different for training and evaluation.
__SCREAMING_SNAKE_CASE : int = raw_datasets['validation'].column_names
__SCREAMING_SNAKE_CASE : Tuple = 'question' if 'question' in column_names else column_names[0]
__SCREAMING_SNAKE_CASE : List[Any] = 'context' if 'context' in column_names else column_names[1]
__SCREAMING_SNAKE_CASE : Tuple = 'answers' if 'answers' in column_names else column_names[2]
# Padding side determines if we do (question|context) or (context|question).
__SCREAMING_SNAKE_CASE : Tuple = tokenizer.padding_side == 'right'
if args.max_seq_length > tokenizer.model_max_length:
logger.warning(
f'''The max_seq_length passed ({args.max_seq_length}) is larger than the maximum length for the'''
f'''model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.'''
)
__SCREAMING_SNAKE_CASE : Dict = min(args.max_seq_length, tokenizer.model_max_length)
def _snake_case ( lowercase ) -> Tuple:
    # Some of the questions have lots of whitespace on the left, which is not useful and will make the
    # truncation of the context fail (the tokenized question will take a lot of space). So we remove that
    # left whitespace.
__a : Optional[Any] = [q.lstrip() for q in examples[question_column_name]]
# Tokenize our examples with truncation and maybe padding, but keep the overflows using a stride. This results
    # in one example possibly giving several features when a context is long, each of those features having a
# context that overlaps a bit the context of the previous feature.
__a : Optional[int] = tokenizer(
examples[question_column_name if pad_on_right else context_column_name] , examples[context_column_name if pad_on_right else question_column_name] , truncation="""only_second""" if pad_on_right else """only_first""" , max_length=lowercase , stride=args.doc_stride , return_overflowing_tokens=lowercase , return_offsets_mapping=lowercase , padding="""max_length""" , )
# Since one example might give us several features if it has a long context, we need a map from a feature to
# its corresponding example. This key gives us just that.
__a : Optional[Any] = tokenized_examples.pop("""overflow_to_sample_mapping""" )
# For evaluation, we will need to convert our predictions to substrings of the context, so we keep the
# corresponding example_id and we will store the offset mappings.
__a : Optional[Any] = []
for i in range(len(tokenized_examples["""input_ids"""] ) ):
# Grab the sequence corresponding to that example (to know what is the context and what is the question).
__a : Dict = tokenized_examples.sequence_ids(lowercase )
__a : Optional[Any] = 1 if pad_on_right else 0
# One example can give several spans, this is the index of the example containing this span of text.
__a : Union[str, Any] = sample_mapping[i]
tokenized_examples["example_id"].append(examples["""id"""][sample_index] )
# Set to None the offset_mapping that are not part of the context so it's easy to determine if a token
# position is part of the context or not.
__a : int = [
(o if sequence_ids[k] == context_index else None)
for k, o in enumerate(tokenized_examples["""offset_mapping"""][i] )
]
return tokenized_examples
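# Feature-shape sketch: a long context split into two overlapping windows
# yields two features for one example; overflow_to_sample_mapping is then
# [0, 0], and each feature's offset_mapping keeps (start, end) character
# spans only for context tokens (question/special positions become None).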
__SCREAMING_SNAKE_CASE : int = raw_datasets['validation']
# Validation Feature Creation
__SCREAMING_SNAKE_CASE : Union[str, Any] = eval_examples.map(
prepare_validation_features,
batched=True,
num_proc=args.preprocessing_num_workers,
remove_columns=column_names,
load_from_cache_file=not args.overwrite_cache,
desc='Running tokenizer on validation dataset',
)
__SCREAMING_SNAKE_CASE : List[Any] = default_data_collator
__SCREAMING_SNAKE_CASE : Union[str, Any] = eval_dataset.remove_columns(['example_id', 'offset_mapping'])
__SCREAMING_SNAKE_CASE : List[str] = DataLoader(
eval_dataset_for_model, collate_fn=data_collator, batch_size=args.per_device_eval_batch_size
)
def _snake_case ( lowercase , lowercase , lowercase , lowercase="eval" ) -> Any:
# Post-processing: we match the start logits and end logits to answers in the original context.
__a : List[str] = postprocess_qa_predictions(
examples=lowercase , features=lowercase , predictions=lowercase , version_2_with_negative=args.version_2_with_negative , n_best_size=args.n_best_size , max_answer_length=args.max_answer_length , null_score_diff_threshold=args.null_score_diff_threshold , output_dir=args.output_dir , prefix=lowercase , )
# Format the result to the format the metric expects.
if args.version_2_with_negative:
__a : List[str] = [
{"""id""": k, """prediction_text""": v, """no_answer_probability""": 0.0} for k, v in predictions.items()
]
else:
__a : List[str] = [{"""id""": k, """prediction_text""": v} for k, v in predictions.items()]
__a : Optional[Any] = [{"""id""": ex["""id"""], """answers""": ex[answer_column_name]} for ex in examples]
return EvalPrediction(predictions=lowercase , label_ids=lowercase )
__SCREAMING_SNAKE_CASE : List[Any] = load_metric('squad_v2' if args.version_2_with_negative else 'squad')
# Evaluation!
logger.info('Loading ONNX model %s for evaluation', args.onnx_model_path)
with open(engine_name, 'rb') as f, trt.Runtime(TRT_LOGGER) as runtime, runtime.deserialize_cuda_engine(
f.read()
) as engine, engine.create_execution_context() as context:
# setup for TRT inference
for i in range(len(input_names)):
context.set_binding_shape(i, INPUT_SHAPE)
assert context.all_binding_shapes_specified
def _snake_case ( lowercase ) -> Optional[int]:
return trt.volume(engine.get_binding_shape(lowercase ) ) * engine.get_binding_dtype(lowercase ).itemsize
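# Hypothetical worked example (not from the source): for a binding of shape (8, 384)
# holding 4-byte elements, binding_nbytes returns 8 * 384 * 4 == 12288 bytes, which is
# the amount cuda.mem_alloc reserves below for that binding.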
# Allocate device memory for inputs and outputs.
__SCREAMING_SNAKE_CASE : List[str] = [cuda.mem_alloc(binding_nbytes(binding)) for binding in engine if engine.binding_is_input(binding)]
# Allocate output buffer
__SCREAMING_SNAKE_CASE : str = cuda.pagelocked_empty(tuple(context.get_binding_shape(3)), dtype=np.floataa)
__SCREAMING_SNAKE_CASE : Union[str, Any] = cuda.pagelocked_empty(tuple(context.get_binding_shape(4)), dtype=np.floataa)
__SCREAMING_SNAKE_CASE : str = cuda.mem_alloc(h_outputa.nbytes)
__SCREAMING_SNAKE_CASE : Tuple = cuda.mem_alloc(h_outputa.nbytes)
# Create a stream in which to copy inputs/outputs and run inference.
__SCREAMING_SNAKE_CASE : Tuple = cuda.Stream()
# Evaluation
logger.info('***** Running Evaluation *****')
logger.info(f''' Num examples = {len(eval_dataset)}''')
logger.info(f''' Batch size = {args.per_device_eval_batch_size}''')
__SCREAMING_SNAKE_CASE : Union[str, Any] = 0.0
__SCREAMING_SNAKE_CASE : str = 0
__SCREAMING_SNAKE_CASE : str = timeit.default_timer()
__SCREAMING_SNAKE_CASE : Dict = None
for step, batch in enumerate(eval_dataloader):
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : Union[str, Any] = model_infer(batch, context, d_inputs, h_outputa, h_outputa, d_outputa, d_outputa, stream)
total_time += infer_time
niter += 1
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : Optional[Any] = outputs
__SCREAMING_SNAKE_CASE : Union[str, Any] = torch.tensor(start_logits)
__SCREAMING_SNAKE_CASE : Tuple = torch.tensor(end_logits)
# necessary to pad predictions and labels so they can be gathered across processes
__SCREAMING_SNAKE_CASE : Optional[int] = accelerator.pad_across_processes(start_logits, dim=1, pad_index=-100)
__SCREAMING_SNAKE_CASE : Dict = accelerator.pad_across_processes(end_logits, dim=1, pad_index=-100)
__SCREAMING_SNAKE_CASE : List[str] = (accelerator.gather(start_logits).cpu().numpy(), accelerator.gather(end_logits).cpu().numpy())
__SCREAMING_SNAKE_CASE : List[str] = logits if all_preds is None else nested_concat(all_preds, logits, padding_index=-100)
if all_preds is not None:
__SCREAMING_SNAKE_CASE : Tuple = nested_truncate(all_preds, len(eval_dataset))
__SCREAMING_SNAKE_CASE : str = timeit.default_timer() - start_time
logger.info(' Evaluation done in total %f secs (%f sec per example)', evalTime, evalTime / len(eval_dataset))
# Inference time from TRT
logger.info('Average Inference Time = {:.3f} ms'.format(total_time * 1_000 / niter))
logger.info('Total Inference Time = {:.3f} ms'.format(total_time * 1_000))
logger.info('Total Number of Inference = %d', niter)
__SCREAMING_SNAKE_CASE : Optional[int] = post_processing_function(eval_examples, eval_dataset, all_preds)
__SCREAMING_SNAKE_CASE : List[Any] = metric.compute(predictions=prediction.predictions, references=prediction.label_ids)
logger.info(f'''Evaluation metrics: {eval_metric}''') | 697 | 1 |
'''simple docstring'''
import argparse
import requests
import torch
# pip3 install salesforce-lavis
# I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis_float32 (there's also the fix_lavis branch)
# also note: to convert Vicuna checkpoints, we had to include /home/niels/python_projects/checkpoints/FastChat/vicuna-7b in lavis/configs/models/blip2/blip2_instruct_vicuna7b.yaml
# same for Vicuna-13b
from lavis.models import load_model_and_preprocess
from PIL import Image
from transformers import (
AutoTokenizer,
BlipImageProcessor,
InstructBlipConfig,
InstructBlipForConditionalGeneration,
InstructBlipProcessor,
InstructBlipQFormerConfig,
InstructBlipVisionConfig,
LlamaConfig,
LlamaTokenizerFast,
TaConfig,
TaTokenizerFast,
)
from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
def _snake_case ( ) -> List[Any]:
__a : Optional[int] = """https://raw.githubusercontent.com/salesforce/LAVIS/main/docs/_static/Confusing-Pictures.jpg"""
__a : int = Image.open(requests.get(lowercase , stream=lowercase ).raw ).convert("""RGB""" )
return image
def _snake_case ( lowercase ) -> Dict:
__a : Optional[Any] = []
# fmt: off
# vision encoder
rename_keys.append(("""visual_encoder.cls_token""", """vision_model.embeddings.class_embedding""") )
rename_keys.append(("""visual_encoder.pos_embed""", """vision_model.embeddings.position_embedding""") )
rename_keys.append(("""visual_encoder.patch_embed.proj.weight""", """vision_model.embeddings.patch_embedding.weight""") )
rename_keys.append(("""visual_encoder.patch_embed.proj.bias""", """vision_model.embeddings.patch_embedding.bias""") )
rename_keys.append(("""ln_vision.weight""", """vision_model.post_layernorm.weight""") )
rename_keys.append(("""ln_vision.bias""", """vision_model.post_layernorm.bias""") )
for i in range(config.vision_config.num_hidden_layers ):
rename_keys.append((F"""visual_encoder.blocks.{i}.norm1.weight""", F"""vision_model.encoder.layers.{i}.layer_norm1.weight""") )
rename_keys.append((F"""visual_encoder.blocks.{i}.norm1.bias""", F"""vision_model.encoder.layers.{i}.layer_norm1.bias""") )
rename_keys.append((F"""visual_encoder.blocks.{i}.norm2.weight""", F"""vision_model.encoder.layers.{i}.layer_norm2.weight""") )
rename_keys.append((F"""visual_encoder.blocks.{i}.norm2.bias""", F"""vision_model.encoder.layers.{i}.layer_norm2.bias""") )
rename_keys.append((F"""visual_encoder.blocks.{i}.attn.qkv.weight""", F"""vision_model.encoder.layers.{i}.self_attn.qkv.weight""") )
rename_keys.append((F"""visual_encoder.blocks.{i}.attn.proj.weight""", F"""vision_model.encoder.layers.{i}.self_attn.projection.weight""",) )
rename_keys.append((F"""visual_encoder.blocks.{i}.attn.proj.bias""", F"""vision_model.encoder.layers.{i}.self_attn.projection.bias""") )
rename_keys.append((F"""visual_encoder.blocks.{i}.mlp.fc1.weight""", F"""vision_model.encoder.layers.{i}.mlp.fc1.weight""") )
rename_keys.append((F"""visual_encoder.blocks.{i}.mlp.fc1.bias""", F"""vision_model.encoder.layers.{i}.mlp.fc1.bias""") )
rename_keys.append((F"""visual_encoder.blocks.{i}.mlp.fc2.weight""", F"""vision_model.encoder.layers.{i}.mlp.fc2.weight""") )
rename_keys.append((F"""visual_encoder.blocks.{i}.mlp.fc2.bias""", F"""vision_model.encoder.layers.{i}.mlp.fc2.bias""") )
# QFormer
rename_keys.append(("""Qformer.bert.embeddings.LayerNorm.weight""", """qformer.embeddings.layernorm.weight""") )
rename_keys.append(("""Qformer.bert.embeddings.LayerNorm.bias""", """qformer.embeddings.layernorm.bias""") )
# fmt: on
return rename_keys
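# Illustrative example (not from the source): for i == 0 the loop above emits pairs such as
# ("visual_encoder.blocks.0.norm1.weight", "vision_model.encoder.layers.0.layer_norm1.weight")
# which rename_key below uses to move tensors between the two state-dict layouts.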
def _snake_case ( lowercase , lowercase , lowercase ) -> str:
__a : List[Any] = dct.pop(lowercase )
__a : Dict = val
def _snake_case ( lowercase , lowercase ) -> Dict:
for i in range(config.vision_config.num_hidden_layers ):
# read in original q and v biases
__a : Union[str, Any] = state_dict.pop(F"""visual_encoder.blocks.{i}.attn.q_bias""" )
__a : Union[str, Any] = state_dict.pop(F"""visual_encoder.blocks.{i}.attn.v_bias""" )
# next, set bias in the state dict
__a : Any = torch.cat((q_bias, torch.zeros_like(lowercase , requires_grad=lowercase ), v_bias) )
__a : List[Any] = qkv_bias
def _snake_case ( lowercase ) -> Optional[int]:
__a : int = 3_6_4 if """coco""" in model_name else 2_2_4
__a : Union[str, Any] = InstructBlipVisionConfig(image_size=lowercase ).to_dict()
# make sure the models have proper bos_token_id and eos_token_id set (important for generation)
# seems like flan-T5 models don't have bos_token_id properly set?
if "t5-xl" in model_name:
__a : str = TaConfig.from_pretrained("""google/flan-t5-xl""" , dense_act_fn="""gelu""" , bos_token_id=1 ).to_dict()
elif "t5-xxl" in model_name:
__a : Optional[Any] = TaConfig.from_pretrained("""google/flan-t5-xxl""" , dense_act_fn="""gelu""" , bos_token_id=1 ).to_dict()
elif "vicuna-7b" in model_name:
__a : List[Any] = LlamaConfig.from_pretrained("""decapoda-research/llama-7b-hf""" , vocab_size=3_2_0_0_1 ).to_dict()
elif "vicuna-13b" in model_name:
__a : Dict = LlamaConfig.from_pretrained("""decapoda-research/llama-13b-hf""" , vocab_size=3_2_0_0_1 ).to_dict()
else:
raise ValueError("""Model name not supported""" )
# the authors add one special "[DEC]" token to the vocab of Q-Former, hence vocab size = 30522 + 1
__a : List[Any] = InstructBlipQFormerConfig(vocab_size=3_0_5_2_3 ).to_dict()
__a : List[Any] = InstructBlipConfig(vision_config=lowercase , text_config=lowercase , qformer_config=lowercase )
return config, image_size
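# Example outcome (illustrative): for model_name == "instructblip-flan-t5-xl", the name
# does not contain "coco", so image_size stays 224 and the text backbone config comes
# from google/flan-t5-xl; only "coco" variants would use 364x364 inputs.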
@torch.no_grad()
def _snake_case ( lowercase , lowercase=None , lowercase=False ) -> List[Any]:
__a : Any = AutoTokenizer.from_pretrained("""bert-base-uncased""" , truncation_side="""left""" )
qformer_tokenizer.add_special_tokens({"""bos_token""": """[DEC]"""} )
if "t5" in model_name:
__a : str = TaTokenizerFast.from_pretrained("""google/flan-t5-xl""" , truncation_side="""left""" )
elif "vicuna" in model_name:
# the following was used in the original implementation:
# tokenizer = LlamaTokenizer.from_pretrained("huggyllama/llama-7b", use_fast=False, truncation_side="left")
# tokenizer.add_special_tokens({"pad_token": "[PAD]"})
# tokenizer.add_special_tokens({"bos_token": "</s>"})
# tokenizer.add_special_tokens({"eos_token": "</s>"})
# tokenizer.add_special_tokens({"unk_token": "</s>"})
__a : int = LlamaTokenizerFast.from_pretrained(
"""huggyllama/llama-7b""" , truncation_side="""left""" , bos_token="""</s>""" , unk_token="""</s>""" )
tokenizer.add_special_tokens({"""pad_token""": """[PAD]"""} )
__a , __a : List[str] = get_blipa_config(lowercase )
__a : Any = InstructBlipForConditionalGeneration(lowercase ).eval()
__a : Optional[int] = {
"""instructblip-vicuna-7b""": ("""blip2_vicuna_instruct""", """vicuna7b"""),
"""instructblip-vicuna-13b""": ("""blip2_vicuna_instruct""", """vicuna13b"""),
"""instructblip-flan-t5-xl""": ("""blip2_t5_instruct""", """flant5xl"""),
"""instructblip-flan-t5-xxl""": ("""blip2_t5_instruct""", """flant5xxl"""),
}
__a , __a : Union[str, Any] = model_name_to_original[model_name]
# load original model
print("""Loading original model...""" )
__a : Union[str, Any] = """cuda:1""" if torch.cuda.is_available() else """cpu"""
__a : int = """cuda:2""" if torch.cuda.is_available() else """cpu"""
__a , __a , __a : Union[str, Any] = load_model_and_preprocess(
name=lowercase , model_type=lowercase , is_eval=lowercase , device=lowercase )
original_model.eval()
print("""Done!""" )
# update state dict keys
__a : Optional[Any] = original_model.state_dict()
__a : List[Any] = create_rename_keys(lowercase )
for src, dest in rename_keys:
rename_key(lowercase , lowercase , lowercase )
# some keys can be renamed efficiently
for key, val in state_dict.copy().items():
__a : Any = state_dict.pop(lowercase )
if key.startswith("""Qformer.bert""" ):
__a : List[Any] = key.replace("""Qformer.bert""" , """qformer""" )
if "attention.self" in key:
__a : List[Any] = key.replace("""self""" , """attention""" )
if "llm_proj" in key:
__a : Optional[Any] = key.replace("""llm_proj""" , """language_projection""" )
if "t5_proj" in key:
__a : Tuple = key.replace("""t5_proj""" , """language_projection""" )
if key.startswith("""llm_model""" ):
__a : Dict = key.replace("""llm_model""" , """language_model""" )
if key.startswith("""t5""" ):
__a : str = key.replace("""t5""" , """language""" )
__a : List[str] = val
# read in qv biases
read_in_q_v_bias(lowercase , lowercase )
# note: weights get loaded in torch.float32 by default
hf_model.load_state_dict(lowercase , strict=lowercase )
__a : Optional[Any] = load_demo_image()
__a : Dict = """What is unusual about this image?"""
# create processor
__a : Any = BlipImageProcessor(
size={"""height""": image_size, """width""": image_size} , image_mean=lowercase , image_std=lowercase )
__a : int = InstructBlipProcessor(
image_processor=lowercase , tokenizer=lowercase , qformer_tokenizer=lowercase , )
__a : Tuple = processor(images=lowercase , text=lowercase , return_tensors="""pt""" ).to(lowercase )
# make sure processor creates exact same pixel values
__a : Tuple = vis_processors["""eval"""](lowercase ).unsqueeze(0 ).to(lowercase )
__a : Tuple = inputs.pixel_values
assert torch.allclose(original_pixel_values.to(pixel_values.device ) , lowercase )
original_model.to(lowercase )
hf_model.to(lowercase )
with torch.no_grad():
if "vicuna" in model_name:
__a : str = original_model({"""image""": original_pixel_values, """text_input""": [prompt]} ).logits
__a : List[Any] = hf_model(**lowercase ).logits
else:
__a : Any = original_model(
{"""image""": original_pixel_values, """text_input""": [prompt], """text_output""": ["""\n"""]} ).logits
__a : Union[str, Any] = tokenizer("""\n""" , return_tensors="""pt""" ).input_ids.to(lowercase )
__a : List[str] = label_input_ids.masked_fill(label_input_ids == tokenizer.pad_token_id , -1_0_0 )
__a : List[str] = hf_model(**lowercase , labels=lowercase ).logits
print("""First values of original logits:""" , original_logits[0, :3, :3] )
print("""First values of HF logits:""" , logits[0, :3, :3] )
# assert values
assert original_logits.shape == logits.shape
__a : Optional[int] = 1E-4 if """vicuna""" in model_name else 1E-5
assert torch.allclose(original_logits.to(logits.device ) , lowercase , atol=lowercase )
print("""Looks ok!""" )
print("""Generating with original model...""" )
__a : Optional[int] = original_model.generate({"""image""": original_pixel_values, """prompt""": prompt} , num_beams=5 )
# important: we need to cast the weights of the HF model to the appropriate type
print("""Generating with HF model...""" )
__a : Tuple = hf_model.generate(
**lowercase , do_sample=lowercase , num_beams=5 , max_length=2_5_6 , min_length=1 , top_p=0.9 , repetition_penalty=1.5 , length_penalty=1.0 , temperature=1 , )
if "vicuna" in model_name:
# convert output id 0 to 2 (eos_token_id)
# TODO add this in the generate method?
__a : str = 2
print("""Original generation:""" , lowercase )
__a : Union[str, Any] = processor.batch_decode(lowercase , skip_special_tokens=lowercase )
__a : List[str] = [text.strip() for text in output_text]
print("""HF generation:""" , lowercase )
if pytorch_dump_folder_path is not None:
processor.save_pretrained(lowercase )
hf_model.save_pretrained(lowercase )
if push_to_hub:
processor.push_to_hub(F"""Salesforce/{model_name}""" )
hf_model.push_to_hub(F"""Salesforce/{model_name}""" )
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE : Tuple = argparse.ArgumentParser()
__SCREAMING_SNAKE_CASE : List[str] = [
'instructblip-vicuna-7b',
'instructblip-vicuna-13b',
'instructblip-flan-t5-xl',
'instructblip-flan-t5-xxl',
]
parser.add_argument(
'--model_name',
default='instructblip-flan-t5-xl',
choices=choices,
type=str,
help='Name of the model to convert (one of the choices above)',
)
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Whether to push the model and processor to the hub after converting',
)
__SCREAMING_SNAKE_CASE : Any = parser.parse_args()
convert_blipa_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub) | 697 |
'''simple docstring'''
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNetaDModel
from ...schedulers import KarrasVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class SCREAMING_SNAKE_CASE__ ( __UpperCamelCase ):
lowercase__ = 42
lowercase__ = 42
def __init__( self , __UpperCamelCase , __UpperCamelCase ):
'''simple docstring'''
super().__init__()
self.register_modules(unet=__UpperCamelCase , scheduler=__UpperCamelCase )
@torch.no_grad()
def __call__( self , __UpperCamelCase = 1 , __UpperCamelCase = 50 , __UpperCamelCase = None , __UpperCamelCase = "pil" , __UpperCamelCase = True , **__UpperCamelCase , ):
'''simple docstring'''
__a : int = self.unet.config.sample_size
__a : Optional[int] = (batch_size, 3, img_size, img_size)
__a : Union[str, Any] = self.unet
# sample x_0 ~ N(0, sigma_0^2 * I)
__a : Dict = randn_tensor(__UpperCamelCase , generator=__UpperCamelCase , device=self.device ) * self.scheduler.init_noise_sigma
self.scheduler.set_timesteps(__UpperCamelCase )
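# Rough shape of the sampling loop below (illustrative summary, not from the source):
# self.scheduler.schedule holds a decreasing noise schedule sigma_{t_i}; each iteration
# perturbs the sample to a slightly higher noise level sigma_hat, takes an Euler step
# from sigma_hat toward sigma_prev, and, while sigma_prev != 0, applies a second-order
# (Heun-like) correction.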
for t in self.progress_bar(self.scheduler.timesteps ):
# here sigma_t == t_i from the paper
__a : Dict = self.scheduler.schedule[t]
__a : Any = self.scheduler.schedule[t - 1] if t > 0 else 0
# 1. Select temporarily increased noise level sigma_hat
# 2. Add new noise to move from sample_i to sample_hat
__a , __a : Tuple = self.scheduler.add_noise_to_input(__UpperCamelCase , __UpperCamelCase , generator=__UpperCamelCase )
# 3. Predict the noise residual given the noise magnitude `sigma_hat`
# The model inputs and output are adjusted by following eq. (213) in [1].
__a : List[Any] = (sigma_hat / 2) * model((sample_hat + 1) / 2 , sigma_hat / 2 ).sample
# 4. Evaluate dx/dt at sigma_hat
# 5. Take Euler step from sigma to sigma_prev
__a : str = self.scheduler.step(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
if sigma_prev != 0:
# 6. Apply 2nd order correction
# The model inputs and output are adjusted by following eq. (213) in [1].
__a : Union[str, Any] = (sigma_prev / 2) * model((step_output.prev_sample + 1) / 2 , sigma_prev / 2 ).sample
__a : Tuple = self.scheduler.step_correct(
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , step_output.prev_sample , step_output["""derivative"""] , )
__a : Tuple = step_output.prev_sample
__a : Optional[Any] = (sample / 2 + 0.5).clamp(0 , 1 )
__a : Dict = sample.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
__a : List[Any] = self.numpy_to_pil(__UpperCamelCase )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=__UpperCamelCase ) | 697 | 1 |
'''simple docstring'''
from multiprocessing import Lock, Pipe, Process
# lock used to ensure that two processes do not access a pipe at the same time
__SCREAMING_SNAKE_CASE : int = Lock()
def _snake_case ( lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase ) -> Dict:
global process_lock
# we perform n swaps since after n swaps we know we are sorted
# we *could* stop early if we are sorted already, but it takes as long to
# find out we are sorted as it does to sort the list with this algorithm
# (the swap count is hard-coded to 10 here, matching the 10-element example list in main())
for i in range(0 , 1_0 ):
if (i + position) % 2 == 0 and r_send is not None:
# send your value to your right neighbor
process_lock.acquire()
r_send[1].send(lowercase )
process_lock.release()
# receive your right neighbor's value
process_lock.acquire()
__a : Optional[int] = rr_cv[0].recv()
process_lock.release()
# take the lower value since you are on the left
__a : int = min(lowercase , lowercase )
elif (i + position) % 2 != 0 and l_send is not None:
# send your value to your left neighbor
process_lock.acquire()
l_send[1].send(lowercase )
process_lock.release()
# receive your left neighbor's value
process_lock.acquire()
__a : str = lr_cv[0].recv()
process_lock.release()
# take the higher value since you are on the right
__a : List[str] = max(lowercase , lowercase )
# after all swaps are performed, send the values back to main
result_pipe[1].send(lowercase )
def _snake_case ( lowercase ) -> Optional[int]:
__a : List[Any] = []
__a : List[Any] = []
# initialize the list of pipes where the values will be retrieved
for _ in arr:
result_pipe.append(Pipe() )
# creates the processes
# the first and last process only have one neighbor so they are made outside
# of the loop
__a : Optional[int] = Pipe()
__a : Optional[Any] = Pipe()
process_array_.append(
Process(
target=lowercase , args=(0, arr[0], None, temp_rs, None, temp_rr, result_pipe[0]) , ) )
__a : str = temp_rs
__a : Tuple = temp_rr
for i in range(1 , len(lowercase ) - 1 ):
__a : Dict = Pipe()
__a : Dict = Pipe()
process_array_.append(
Process(
target=lowercase , args=(i, arr[i], temp_ls, temp_rs, temp_lr, temp_rr, result_pipe[i]) , ) )
__a : Optional[Any] = temp_rs
__a : Optional[Any] = temp_rr
process_array_.append(
Process(
target=lowercase , args=(
len(lowercase ) - 1,
arr[len(lowercase ) - 1],
temp_ls,
None,
temp_lr,
None,
result_pipe[len(lowercase ) - 1],
) , ) )
# start the processes
for p in process_array_:
p.start()
# wait for the processes to end and write their values to the list
for p in range(0 , len(lowercase ) ):
__a : List[str] = result_pipe[p][0].recv()
process_array_[p].join()
return arr
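# Illustrative usage (not from the source):
#   odd_even_transposition([3, 1, 2])  ->  [1, 2, 3]
# every list element gets its own process, and neighbours exchange values through the
# pipes created above during up to 10 alternating odd/even phases.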
def _snake_case ( ) -> List[str]:
__a : Dict = list(range(1_0 , 0 , -1 ) )
print("""Initial List""" )
print(*lowercase )
__a : Optional[int] = odd_even_transposition(lowercase )
print("""Sorted List\n""" )
print(*lowercase )
if __name__ == "__main__":
main() | 697 |
'''simple docstring'''
def _snake_case ( lowercase ) -> bool:
if not isinstance(lowercase , lowercase ):
raise ValueError("""check_bouncy() accepts only integer arguments""" )
__a : str = str(lowercase )
__a : Any = """""".join(sorted(lowercase ) )
return sorted_str_n != str_n and sorted_str_n[::-1] != str_n
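# Worked example (illustrative): for n = 155349 the digits sorted ascending give
# "134559" and descending give "955431"; neither equals "155349", so the number is
# bouncy. Monotone numbers such as 1234 (increasing) or 4321 (decreasing) are not.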
def _snake_case ( lowercase = 9_9 ) -> int:
if not 0 < percent < 1_0_0:
raise ValueError("""solution() only accepts values from 0 to 100""" )
__a : List[str] = 0
__a : Union[str, Any] = 1
while True:
if check_bouncy(lowercase ):
bouncy_num += 1
if (bouncy_num / num) * 1_0_0 >= percent:
return num
num += 1
if __name__ == "__main__":
from doctest import testmod
testmod()
print(f'''{solution(99)}''') | 697 | 1 |
'''simple docstring'''
import random
import unittest
import torch
from diffusers import IFImgaImgSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class SCREAMING_SNAKE_CASE__ ( __UpperCamelCase , __UpperCamelCase , unittest.TestCase ):
lowercase__ = IFImgaImgSuperResolutionPipeline
lowercase__ = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"width", "height"}
lowercase__ = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"original_image"} )
lowercase__ = PipelineTesterMixin.required_optional_params - {"latents"}
def __lowerCamelCase ( self ):
'''simple docstring'''
return self._get_superresolution_dummy_components()
def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase=0 ):
'''simple docstring'''
if str(__UpperCamelCase ).startswith("""mps""" ):
__a : Optional[Any] = torch.manual_seed(__UpperCamelCase )
else:
__a : Optional[int] = torch.Generator(device=__UpperCamelCase ).manual_seed(__UpperCamelCase )
__a : Any = floats_tensor((1, 3, 32, 32) , rng=random.Random(__UpperCamelCase ) ).to(__UpperCamelCase )
__a : List[str] = floats_tensor((1, 3, 16, 16) , rng=random.Random(__UpperCamelCase ) ).to(__UpperCamelCase )
__a : str = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""image""": image,
"""original_image""": original_image,
"""generator""": generator,
"""num_inference_steps""": 2,
"""output_type""": """numpy""",
}
return inputs
@unittest.skipIf(
torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , )
def __lowerCamelCase ( self ):
'''simple docstring'''
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )
def __lowerCamelCase ( self ):
'''simple docstring'''
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != """cuda""" , reason="""float16 requires CUDA""" )
def __lowerCamelCase ( self ):
'''simple docstring'''
super().test_save_load_floataa(expected_max_diff=1E-1 )
def __lowerCamelCase ( self ):
'''simple docstring'''
self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 )
def __lowerCamelCase ( self ):
'''simple docstring'''
self._test_save_load_local()
def __lowerCamelCase ( self ):
'''simple docstring'''
self._test_inference_batch_single_identical(
expected_max_diff=1E-2 , ) | 697 |
'''simple docstring'''
import argparse
import torch
from transformers import GPTaConfig, GPTaModel, load_tf_weights_in_gpta
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def _snake_case ( lowercase , lowercase , lowercase ) -> Any:
# Construct model
if gpta_config_file == "":
__a : Dict = GPTaConfig()
else:
__a : Optional[Any] = GPTaConfig.from_json_file(lowercase )
__a : Union[str, Any] = GPTaModel(lowercase )
# Load weights from numpy
load_tf_weights_in_gpta(lowercase , lowercase , lowercase )
# Save pytorch-model
__a : Optional[int] = pytorch_dump_folder_path + """/""" + WEIGHTS_NAME
__a : Dict = pytorch_dump_folder_path + """/""" + CONFIG_NAME
print(F"""Save PyTorch model to {pytorch_weights_dump_path}""" )
torch.save(model.state_dict() , lowercase )
print(F"""Save configuration file to {pytorch_config_dump_path}""" )
with open(lowercase , """w""" , encoding="""utf-8""" ) as f:
f.write(config.to_json_string() )
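# Example invocation (hypothetical script name and paths):
#   python convert_gpt2_checkpoint.py \
#       --gpt2_checkpoint_path ./gpt2/model.ckpt \
#       --pytorch_dump_folder_path ./gpt2-pytorch
# The output folder then contains pytorch_model.bin (WEIGHTS_NAME) and config.json (CONFIG_NAME).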
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE : Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--gpt2_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--gpt2_config_file',
default='',
type=str,
help=(
'An optional config json file corresponding to the pre-trained OpenAI model. \n'
'This specifies the model architecture.'
),
)
__SCREAMING_SNAKE_CASE : Dict = parser.parse_args()
convert_gpta_checkpoint_to_pytorch(args.gpta_checkpoint_path, args.gpta_config_file, args.pytorch_dump_folder_path) | 697 | 1 |
'''simple docstring'''
from typing import Dict, Optional
import numpy as np
import datasets
__SCREAMING_SNAKE_CASE : List[str] = '\nIoU is the area of overlap between the predicted segmentation and the ground truth divided by the area of union\nbetween the predicted segmentation and the ground truth. For binary (two classes) or multi-class segmentation,\nthe mean IoU of the image is calculated by taking the IoU of each class and averaging them.\n'
__SCREAMING_SNAKE_CASE : Tuple = '\nArgs:\n predictions (`List[ndarray]`):\n List of predicted segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.\n references (`List[ndarray]`):\n List of ground truth segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.\n num_labels (`int`):\n Number of classes (categories).\n ignore_index (`int`):\n Index that will be ignored during evaluation.\n nan_to_num (`int`, *optional*):\n If specified, NaN values will be replaced by the number defined by the user.\n label_map (`dict`, *optional*):\n If specified, dictionary mapping old label indices to new label indices.\n reduce_labels (`bool`, *optional*, defaults to `False`):\n Whether or not to reduce all label values of segmentation maps by 1. Usually used for datasets where 0 is used for background,\n and background itself is not included in all classes of a dataset (e.g. ADE20k). The background label will be replaced by 255.\n\nReturns:\n `Dict[str, float | ndarray]` comprising various elements:\n - *mean_iou* (`float`):\n Mean Intersection-over-Union (IoU averaged over all categories).\n - *mean_accuracy* (`float`):\n Mean accuracy (averaged over all categories).\n - *overall_accuracy* (`float`):\n Overall accuracy on all images.\n - *per_category_accuracy* (`ndarray` of shape `(num_labels,)`):\n Per category accuracy.\n - *per_category_iou* (`ndarray` of shape `(num_labels,)`):\n Per category IoU.\n\nExamples:\n\n >>> import numpy as np\n\n >>> mean_iou = datasets.load_metric("mean_iou")\n\n >>> # suppose one has 3 different segmentation maps predicted\n >>> predicted_1 = np.array([[1, 2], [3, 4], [5, 255]])\n >>> actual_1 = np.array([[0, 3], [5, 4], [6, 255]])\n\n >>> predicted_2 = np.array([[2, 7], [9, 2], [3, 6]])\n >>> actual_2 = np.array([[1, 7], [9, 2], [3, 6]])\n\n >>> predicted_3 = np.array([[2, 2, 3], [8, 2, 4], [3, 255, 2]])\n >>> actual_3 = np.array([[1, 2, 2], [8, 2, 1], [3, 255, 1]])\n\n >>> predicted = [predicted_1, predicted_2, predicted_3]\n >>> ground_truth = [actual_1, actual_2, actual_3]\n\n >>> results = mean_iou.compute(predictions=predicted, references=ground_truth, num_labels=10, ignore_index=255, reduce_labels=False)\n >>> print(results) # doctest: +NORMALIZE_WHITESPACE\n {\'mean_iou\': 0.47750000000000004, \'mean_accuracy\': 0.5916666666666666, \'overall_accuracy\': 0.5263157894736842, \'per_category_iou\': array([0. , 0. , 0.375, 0.4 , 0.5 , 0. , 0.5 , 1. , 1. , 1. ]), \'per_category_accuracy\': array([0. , 0. , 0.75 , 0.66666667, 1. , 0. , 0.5 , 1. , 1. , 1. ])}\n'
__SCREAMING_SNAKE_CASE : int = '\\n@software{MMSegmentation_Contributors_OpenMMLab_Semantic_Segmentation_2020,\nauthor = {{MMSegmentation Contributors}},\nlicense = {Apache-2.0},\nmonth = {7},\ntitle = {{OpenMMLab Semantic Segmentation Toolbox and Benchmark}},\nurl = {https://github.com/open-mmlab/mmsegmentation},\nyear = {2020}\n}'
def _snake_case ( lowercase , lowercase , lowercase , lowercase , lowercase = None , lowercase = False , ) -> Any:
if label_map is not None:
for old_id, new_id in label_map.items():
__a : List[str] = new_id
# turn into Numpy arrays
__a : Any = np.array(lowercase )
__a : str = np.array(lowercase )
if reduce_labels:
__a : Tuple = 2_5_5
__a : Optional[int] = label - 1
__a : Union[str, Any] = 2_5_5
__a : Union[str, Any] = label != ignore_index
__a : Any = np.not_equal(lowercase , lowercase )
__a : Optional[Any] = pred_label[mask]
__a : Optional[int] = np.array(lowercase )[mask]
__a : Any = pred_label[pred_label == label]
__a : Optional[Any] = np.histogram(lowercase , bins=lowercase , range=(0, num_labels - 1) )[0]
__a : Tuple = np.histogram(lowercase , bins=lowercase , range=(0, num_labels - 1) )[0]
__a : str = np.histogram(lowercase , bins=lowercase , range=(0, num_labels - 1) )[0]
__a : str = area_pred_label + area_label - area_intersect
return area_intersect, area_union, area_pred_label, area_label
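# Worked example (illustrative, not from the source): with num_labels == 2,
#   pred_label = [[0, 1], [1, 1]]   and   label = [[0, 1], [0, 1]]
# class 0: intersect 1, union 2 -> IoU 0.5 ; class 1: intersect 2, union 3 -> IoU ~0.667.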
def _snake_case ( lowercase , lowercase , lowercase , lowercase , lowercase = None , lowercase = False , ) -> Tuple:
__a : List[Any] = np.zeros((num_labels,) , dtype=np.floataa )
__a : Optional[Any] = np.zeros((num_labels,) , dtype=np.floataa )
__a : int = np.zeros((num_labels,) , dtype=np.floataa )
__a : str = np.zeros((num_labels,) , dtype=np.floataa )
for result, gt_seg_map in zip(lowercase , lowercase ):
__a , __a , __a , __a : str = intersect_and_union(
lowercase , lowercase , lowercase , lowercase , lowercase , lowercase )
total_area_intersect += area_intersect
total_area_union += area_union
total_area_pred_label += area_pred_label
total_area_label += area_label
return total_area_intersect, total_area_union, total_area_pred_label, total_area_label
def _snake_case ( lowercase , lowercase , lowercase , lowercase , lowercase = None , lowercase = None , lowercase = False , ) -> str:
__a , __a , __a , __a : Dict = total_intersect_and_union(
lowercase , lowercase , lowercase , lowercase , lowercase , lowercase )
# compute metrics
__a : Tuple = {}
__a : str = total_area_intersect.sum() / total_area_label.sum()
__a : str = total_area_intersect / total_area_union
__a : Optional[Any] = total_area_intersect / total_area_label
__a : List[str] = np.nanmean(lowercase )
__a : str = np.nanmean(lowercase )
__a : Optional[Any] = all_acc
__a : List[str] = iou
__a : Any = acc
if nan_to_num is not None:
__a : List[Any] = {metric: np.nan_to_num(lowercase , nan=lowercase ) for metric, metric_value in metrics.items()}
return metrics
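# Continuing the toy example above (illustrative): mean_iou would be
# (0.5 + 2/3) / 2 ~ 0.583 and overall_accuracy 3/4 = 0.75, since three of the four
# pixels are predicted correctly.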
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class SCREAMING_SNAKE_CASE__ ( datasets.Metric ):
def __lowerCamelCase ( self ):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
# 1st Seq - height dim, 2nd - width dim
{
"""predictions""": datasets.Sequence(datasets.Sequence(datasets.Value("""uint16""" ) ) ),
"""references""": datasets.Sequence(datasets.Sequence(datasets.Value("""uint16""" ) ) ),
} ) , reference_urls=[
"""https://github.com/open-mmlab/mmsegmentation/blob/71c201b1813267d78764f306a297ca717827c4bf/mmseg/core/evaluation/metrics.py"""
] , )
def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = False , ):
'''simple docstring'''
__a : Any = mean_iou(
results=__UpperCamelCase , gt_seg_maps=__UpperCamelCase , num_labels=__UpperCamelCase , ignore_index=__UpperCamelCase , nan_to_num=__UpperCamelCase , label_map=__UpperCamelCase , reduce_labels=__UpperCamelCase , )
return iou_result | 697 |
'''simple docstring'''
import unittest
from transformers import (
MODEL_FOR_OBJECT_DETECTION_MAPPING,
AutoFeatureExtractor,
AutoModelForObjectDetection,
ObjectDetectionPipeline,
is_vision_available,
pipeline,
)
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_pytesseract,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class SCREAMING_SNAKE_CASE__ :
@staticmethod
def __lowerCamelCase ( *__UpperCamelCase , **__UpperCamelCase ):
'''simple docstring'''
pass
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
lowercase__ = MODEL_FOR_OBJECT_DETECTION_MAPPING
def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ):
'''simple docstring'''
__a : Optional[Any] = ObjectDetectionPipeline(model=__UpperCamelCase , image_processor=__UpperCamelCase )
return object_detector, ["./tests/fixtures/tests_samples/COCO/000000039769.png"]
def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase ):
'''simple docstring'''
__a : List[str] = object_detector("""./tests/fixtures/tests_samples/COCO/000000039769.png""" , threshold=0.0 )
self.assertGreater(len(__UpperCamelCase ) , 0 )
for detected_object in outputs:
self.assertEqual(
__UpperCamelCase , {
"""score""": ANY(__UpperCamelCase ),
"""label""": ANY(__UpperCamelCase ),
"""box""": {"""xmin""": ANY(__UpperCamelCase ), """ymin""": ANY(__UpperCamelCase ), """xmax""": ANY(__UpperCamelCase ), """ymax""": ANY(__UpperCamelCase )},
} , )
import datasets
__a : Optional[int] = datasets.load_dataset("""hf-internal-testing/fixtures_image_utils""" , """image""" , split="""test""" )
__a : Tuple = [
Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ),
"""http://images.cocodataset.org/val2017/000000039769.jpg""",
# RGBA
dataset[0]["""file"""],
# LA
dataset[1]["""file"""],
# L
dataset[2]["""file"""],
]
__a : Any = object_detector(__UpperCamelCase , threshold=0.0 )
self.assertEqual(len(__UpperCamelCase ) , len(__UpperCamelCase ) )
for outputs in batch_outputs:
self.assertGreater(len(__UpperCamelCase ) , 0 )
for detected_object in outputs:
self.assertEqual(
__UpperCamelCase , {
"""score""": ANY(__UpperCamelCase ),
"""label""": ANY(__UpperCamelCase ),
"""box""": {"""xmin""": ANY(__UpperCamelCase ), """ymin""": ANY(__UpperCamelCase ), """xmax""": ANY(__UpperCamelCase ), """ymax""": ANY(__UpperCamelCase )},
} , )
@require_tf
@unittest.skip("""Object detection not implemented in TF""" )
def __lowerCamelCase ( self ):
'''simple docstring'''
pass
@require_torch
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Optional[Any] = """hf-internal-testing/tiny-detr-mobilenetsv3"""
__a : Dict = AutoModelForObjectDetection.from_pretrained(__UpperCamelCase )
__a : Optional[Any] = AutoFeatureExtractor.from_pretrained(__UpperCamelCase )
__a : str = ObjectDetectionPipeline(model=__UpperCamelCase , feature_extractor=__UpperCamelCase )
__a : Optional[int] = object_detector("""http://images.cocodataset.org/val2017/000000039769.jpg""" , threshold=0.0 )
self.assertEqual(
nested_simplify(__UpperCamelCase , decimals=4 ) , [
{"""score""": 0.3_3_7_6, """label""": """LABEL_0""", """box""": {"""xmin""": 159, """ymin""": 120, """xmax""": 480, """ymax""": 359}},
{"""score""": 0.3_3_7_6, """label""": """LABEL_0""", """box""": {"""xmin""": 159, """ymin""": 120, """xmax""": 480, """ymax""": 359}},
] , )
__a : Union[str, Any] = object_detector(
[
"""http://images.cocodataset.org/val2017/000000039769.jpg""",
"""http://images.cocodataset.org/val2017/000000039769.jpg""",
] , threshold=0.0 , )
self.assertEqual(
nested_simplify(__UpperCamelCase , decimals=4 ) , [
[
{"""score""": 0.3_3_7_6, """label""": """LABEL_0""", """box""": {"""xmin""": 159, """ymin""": 120, """xmax""": 480, """ymax""": 359}},
{"""score""": 0.3_3_7_6, """label""": """LABEL_0""", """box""": {"""xmin""": 159, """ymin""": 120, """xmax""": 480, """ymax""": 359}},
],
[
{"""score""": 0.3_3_7_6, """label""": """LABEL_0""", """box""": {"""xmin""": 159, """ymin""": 120, """xmax""": 480, """ymax""": 359}},
{"""score""": 0.3_3_7_6, """label""": """LABEL_0""", """box""": {"""xmin""": 159, """ymin""": 120, """xmax""": 480, """ymax""": 359}},
],
] , )
@require_torch
@slow
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : str = """facebook/detr-resnet-50"""
__a : Dict = AutoModelForObjectDetection.from_pretrained(__UpperCamelCase )
__a : int = AutoFeatureExtractor.from_pretrained(__UpperCamelCase )
__a : int = ObjectDetectionPipeline(model=__UpperCamelCase , feature_extractor=__UpperCamelCase )
__a : Any = object_detector("""http://images.cocodataset.org/val2017/000000039769.jpg""" )
self.assertEqual(
nested_simplify(__UpperCamelCase , decimals=4 ) , [
{"""score""": 0.9_9_8_2, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 70, """xmax""": 175, """ymax""": 117}},
{"""score""": 0.9_9_6_0, """label""": """remote""", """box""": {"""xmin""": 333, """ymin""": 72, """xmax""": 368, """ymax""": 187}},
{"""score""": 0.9_9_5_5, """label""": """couch""", """box""": {"""xmin""": 0, """ymin""": 1, """xmax""": 639, """ymax""": 473}},
{"""score""": 0.9_9_8_8, """label""": """cat""", """box""": {"""xmin""": 13, """ymin""": 52, """xmax""": 314, """ymax""": 470}},
{"""score""": 0.9_9_8_7, """label""": """cat""", """box""": {"""xmin""": 345, """ymin""": 23, """xmax""": 640, """ymax""": 368}},
] , )
__a : Optional[Any] = object_detector(
[
"""http://images.cocodataset.org/val2017/000000039769.jpg""",
"""http://images.cocodataset.org/val2017/000000039769.jpg""",
] )
self.assertEqual(
nested_simplify(__UpperCamelCase , decimals=4 ) , [
[
{"""score""": 0.9_9_8_2, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 70, """xmax""": 175, """ymax""": 117}},
{"""score""": 0.9_9_6_0, """label""": """remote""", """box""": {"""xmin""": 333, """ymin""": 72, """xmax""": 368, """ymax""": 187}},
{"""score""": 0.9_9_5_5, """label""": """couch""", """box""": {"""xmin""": 0, """ymin""": 1, """xmax""": 639, """ymax""": 473}},
{"""score""": 0.9_9_8_8, """label""": """cat""", """box""": {"""xmin""": 13, """ymin""": 52, """xmax""": 314, """ymax""": 470}},
{"""score""": 0.9_9_8_7, """label""": """cat""", """box""": {"""xmin""": 345, """ymin""": 23, """xmax""": 640, """ymax""": 368}},
],
[
{"""score""": 0.9_9_8_2, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 70, """xmax""": 175, """ymax""": 117}},
{"""score""": 0.9_9_6_0, """label""": """remote""", """box""": {"""xmin""": 333, """ymin""": 72, """xmax""": 368, """ymax""": 187}},
{"""score""": 0.9_9_5_5, """label""": """couch""", """box""": {"""xmin""": 0, """ymin""": 1, """xmax""": 639, """ymax""": 473}},
{"""score""": 0.9_9_8_8, """label""": """cat""", """box""": {"""xmin""": 13, """ymin""": 52, """xmax""": 314, """ymax""": 470}},
{"""score""": 0.9_9_8_7, """label""": """cat""", """box""": {"""xmin""": 345, """ymin""": 23, """xmax""": 640, """ymax""": 368}},
],
] , )
@require_torch
@slow
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : int = """facebook/detr-resnet-50"""
__a : Optional[int] = pipeline("""object-detection""" , model=__UpperCamelCase )
__a : Optional[int] = object_detector("""http://images.cocodataset.org/val2017/000000039769.jpg""" )
self.assertEqual(
nested_simplify(__UpperCamelCase , decimals=4 ) , [
{"""score""": 0.9_9_8_2, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 70, """xmax""": 175, """ymax""": 117}},
{"""score""": 0.9_9_6_0, """label""": """remote""", """box""": {"""xmin""": 333, """ymin""": 72, """xmax""": 368, """ymax""": 187}},
{"""score""": 0.9_9_5_5, """label""": """couch""", """box""": {"""xmin""": 0, """ymin""": 1, """xmax""": 639, """ymax""": 473}},
{"""score""": 0.9_9_8_8, """label""": """cat""", """box""": {"""xmin""": 13, """ymin""": 52, """xmax""": 314, """ymax""": 470}},
{"""score""": 0.9_9_8_7, """label""": """cat""", """box""": {"""xmin""": 345, """ymin""": 23, """xmax""": 640, """ymax""": 368}},
] , )
__a : List[str] = object_detector(
[
"""http://images.cocodataset.org/val2017/000000039769.jpg""",
"""http://images.cocodataset.org/val2017/000000039769.jpg""",
] )
self.assertEqual(
nested_simplify(__UpperCamelCase , decimals=4 ) , [
[
{"""score""": 0.9_9_8_2, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 70, """xmax""": 175, """ymax""": 117}},
{"""score""": 0.9_9_6_0, """label""": """remote""", """box""": {"""xmin""": 333, """ymin""": 72, """xmax""": 368, """ymax""": 187}},
{"""score""": 0.9_9_5_5, """label""": """couch""", """box""": {"""xmin""": 0, """ymin""": 1, """xmax""": 639, """ymax""": 473}},
{"""score""": 0.9_9_8_8, """label""": """cat""", """box""": {"""xmin""": 13, """ymin""": 52, """xmax""": 314, """ymax""": 470}},
{"""score""": 0.9_9_8_7, """label""": """cat""", """box""": {"""xmin""": 345, """ymin""": 23, """xmax""": 640, """ymax""": 368}},
],
[
{"""score""": 0.9_9_8_2, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 70, """xmax""": 175, """ymax""": 117}},
{"""score""": 0.9_9_6_0, """label""": """remote""", """box""": {"""xmin""": 333, """ymin""": 72, """xmax""": 368, """ymax""": 187}},
{"""score""": 0.9_9_5_5, """label""": """couch""", """box""": {"""xmin""": 0, """ymin""": 1, """xmax""": 639, """ymax""": 473}},
{"""score""": 0.9_9_8_8, """label""": """cat""", """box""": {"""xmin""": 13, """ymin""": 52, """xmax""": 314, """ymax""": 470}},
{"""score""": 0.9_9_8_7, """label""": """cat""", """box""": {"""xmin""": 345, """ymin""": 23, """xmax""": 640, """ymax""": 368}},
],
] , )
@require_torch
@slow
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Union[str, Any] = 0.9_9_8_5
__a : Union[str, Any] = """facebook/detr-resnet-50"""
__a : Optional[int] = pipeline("""object-detection""" , model=__UpperCamelCase )
__a : Union[str, Any] = object_detector("""http://images.cocodataset.org/val2017/000000039769.jpg""" , threshold=__UpperCamelCase )
self.assertEqual(
nested_simplify(__UpperCamelCase , decimals=4 ) , [
{"""score""": 0.9_9_8_8, """label""": """cat""", """box""": {"""xmin""": 13, """ymin""": 52, """xmax""": 314, """ymax""": 470}},
{"""score""": 0.9_9_8_7, """label""": """cat""", """box""": {"""xmin""": 345, """ymin""": 23, """xmax""": 640, """ymax""": 368}},
] , )
@require_torch
@require_pytesseract
@slow
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : str = """Narsil/layoutlmv3-finetuned-funsd"""
__a : List[Any] = 0.9_9_9_3
__a : Dict = pipeline("""object-detection""" , model=__UpperCamelCase , threshold=__UpperCamelCase )
__a : List[str] = object_detector(
"""https://huggingface.co/spaces/impira/docquery/resolve/2359223c1837a7587402bda0f2643382a6eefeab/invoice.png""" )
self.assertEqual(
nested_simplify(__UpperCamelCase , decimals=4 ) , [
{"""score""": 0.9_9_9_3, """label""": """I-ANSWER""", """box""": {"""xmin""": 294, """ymin""": 254, """xmax""": 343, """ymax""": 264}},
{"""score""": 0.9_9_9_3, """label""": """I-ANSWER""", """box""": {"""xmin""": 294, """ymin""": 254, """xmax""": 343, """ymax""": 264}},
] , ) | 697 | 1 |
'''simple docstring'''
import random
from .binary_exp_mod import bin_exp_mod
def _snake_case ( lowercase , lowercase=1_0_0_0 ) -> List[str]:
if n < 2:
return False
if n % 2 == 0:
return n == 2
# this means n is odd
__a : List[str] = n - 1
__a : Any = 0
while d % 2 == 0:
d //= 2  # floor division keeps d an integer for bin_exp_mod
exp += 1
# n - 1 = d * (2 ** exp)
__a : Dict = 0
while count < prec:
__a : List[str] = random.randint(2 , n - 1 )
__a : Optional[Any] = bin_exp_mod(lowercase , lowercase , lowercase )
if b != 1:
__a : Any = True
for _ in range(lowercase ):
if b == n - 1:
__a : List[Any] = False
break
__a : Any = b * b
b %= n
if flag:
return False
count += 1
return True
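# Worked decomposition (illustrative): for n = 13, n - 1 = 12 = 3 * 2**2, so the loop
# above yields d = 3 and exp = 2; for every base a, a**3 % 13 is either 1 or reaches
# 12 (== n - 1) while squaring, so 13 passes each Miller-Rabin round.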
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE : Union[str, Any] = abs(int(input('Enter bound : ').strip()))
print('Here\'s the list of primes:')
print(', '.join(str(i) for i in range(n + 1) if is_prime_big(i))) | 697 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
__SCREAMING_SNAKE_CASE : List[str] = {
'configuration_blenderbot_small': [
'BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP',
'BlenderbotSmallConfig',
'BlenderbotSmallOnnxConfig',
],
'tokenization_blenderbot_small': ['BlenderbotSmallTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE : Union[str, Any] = ['BlenderbotSmallTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE : List[str] = [
'BLENDERBOT_SMALL_PRETRAINED_MODEL_ARCHIVE_LIST',
'BlenderbotSmallForCausalLM',
'BlenderbotSmallForConditionalGeneration',
'BlenderbotSmallModel',
'BlenderbotSmallPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE : Optional[int] = [
'TFBlenderbotSmallForConditionalGeneration',
'TFBlenderbotSmallModel',
'TFBlenderbotSmallPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE : Optional[Any] = [
'FlaxBlenderbotSmallForConditionalGeneration',
'FlaxBlenderbotSmallModel',
'FlaxBlenderbotSmallPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_blenderbot_small import (
BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlenderbotSmallConfig,
BlenderbotSmallOnnxConfig,
)
from .tokenization_blenderbot_small import BlenderbotSmallTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_blenderbot_small_fast import BlenderbotSmallTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blenderbot_small import (
BLENDERBOT_SMALL_PRETRAINED_MODEL_ARCHIVE_LIST,
BlenderbotSmallForCausalLM,
BlenderbotSmallForConditionalGeneration,
BlenderbotSmallModel,
BlenderbotSmallPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blenderbot_small import (
TFBlenderbotSmallForConditionalGeneration,
TFBlenderbotSmallModel,
TFBlenderbotSmallPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_blenderbot_small import (
FlaxBlenderbotSmallForConditionalGeneration,
FlaxBlenderbotSmallModel,
FlaxBlenderbotSmallPreTrainedModel,
)
else:
import sys
__SCREAMING_SNAKE_CASE : List[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) | 697 | 1 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class SCREAMING_SNAKE_CASE__ ( metaclass=__UpperCamelCase ):
lowercase__ = ["transformers", "torch", "note_seq"]
def __init__( self , *__UpperCamelCase , **__UpperCamelCase ):
'''simple docstring'''
requires_backends(self , ["""transformers""", """torch""", """note_seq"""] )
@classmethod
def __lowerCamelCase ( cls , *__UpperCamelCase , **__UpperCamelCase ):
'''simple docstring'''
requires_backends(cls , ["""transformers""", """torch""", """note_seq"""] )
@classmethod
def __lowerCamelCase ( cls , *__UpperCamelCase , **__UpperCamelCase ):
'''simple docstring'''
requires_backends(cls , ["""transformers""", """torch""", """note_seq"""] ) | 697 |
'''simple docstring'''
import numpy as np
import torch
from torch.utils.data import Dataset
from utils import logger
class SCREAMING_SNAKE_CASE__ ( __UpperCamelCase ):
def __init__( self , __UpperCamelCase , __UpperCamelCase ):
'''simple docstring'''
__a : Any = params
__a : Optional[Any] = np.array(__UpperCamelCase )
__a : Union[str, Any] = np.array([len(__UpperCamelCase ) for t in data] )
self.check()
self.remove_long_sequences()
self.remove_empty_sequences()
self.remove_unknown_sequences()
self.check()
self.print_statistics()
def __getitem__( self , __UpperCamelCase ):
'''simple docstring'''
return (self.token_ids[index], self.lengths[index])
def __len__( self ):
'''simple docstring'''
return len(self.lengths )
def __lowerCamelCase ( self ):
'''simple docstring'''
assert len(self.token_ids ) == len(self.lengths )
assert all(self.lengths[i] == len(self.token_ids[i] ) for i in range(len(self.lengths ) ) )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Tuple = self.params.max_model_input_size
__a : Union[str, Any] = self.lengths > max_len
logger.info(f"""Splitting {sum(__UpperCamelCase )} too long sequences.""" )
def divide_chunks(__UpperCamelCase , __UpperCamelCase ):
return [l[i : i + n] for i in range(0 , len(__UpperCamelCase ) , __UpperCamelCase )]
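# Intended behaviour (illustrative; the un-obfuscated helper splits a list into chunks
# of at most n items):
#   divide_chunks([1, 2, 3, 4, 5, 6, 7], 3)  ->  [[1, 2, 3], [4, 5, 6], [7]]
# Below, chunks of at most max_len - 2 tokens are produced so that a cls/bos and a
# sep/eos token can be re-attached to every piece.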
__a : int = []
__a : Union[str, Any] = []
if self.params.mlm:
__a , __a : Any = self.params.special_tok_ids["""cls_token"""], self.params.special_tok_ids["""sep_token"""]
else:
__a , __a : str = self.params.special_tok_ids["""bos_token"""], self.params.special_tok_ids["""eos_token"""]
for seq_, len_ in zip(self.token_ids , self.lengths ):
assert (seq_[0] == cls_id) and (seq_[-1] == sep_id), seq_
if len_ <= max_len:
new_tok_ids.append(seq_ )
new_lengths.append(len_ )
else:
__a : Any = []
for sub_s in divide_chunks(seq_ , max_len - 2 ):
if sub_s[0] != cls_id:
__a : int = np.insert(__UpperCamelCase , 0 , __UpperCamelCase )
if sub_s[-1] != sep_id:
__a : str = np.insert(__UpperCamelCase , len(__UpperCamelCase ) , __UpperCamelCase )
assert len(__UpperCamelCase ) <= max_len
assert (sub_s[0] == cls_id) and (sub_s[-1] == sep_id), sub_s
sub_seqs.append(__UpperCamelCase )
new_tok_ids.extend(__UpperCamelCase )
new_lengths.extend([len(__UpperCamelCase ) for l in sub_seqs] )
__a : Dict = np.array(__UpperCamelCase )
__a : Tuple = np.array(__UpperCamelCase )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : List[str] = len(self )
__a : List[str] = self.lengths > 11
__a : int = self.token_ids[indices]
__a : Union[str, Any] = self.lengths[indices]
__a : Any = len(self )
logger.info(f"""Remove {init_size - new_size} too short (<=11 tokens) sequences.""" )
def __lowerCamelCase ( self ):
'''simple docstring'''
if "unk_token" not in self.params.special_tok_ids:
return
else:
__a : List[str] = self.params.special_tok_ids["""unk_token"""]
__a : str = len(self )
__a : str = np.array([np.count_nonzero(a == unk_token_id ) for a in self.token_ids] )
__a : Optional[Any] = (unk_occs / self.lengths) < 0.5
__a : List[str] = self.token_ids[indices]
__a : Optional[int] = self.lengths[indices]
__a : Any = len(self )
logger.info(f"""Remove {init_size - new_size} sequences with a high level of unknown tokens (50%).""" )
def __lowerCamelCase ( self ):
'''simple docstring'''
if not self.params.is_master:
return
logger.info(f"""{len(self )} sequences""" )
# data_len = sum(self.lengths)
# nb_unique_tokens = len(Counter(list(chain(*self.token_ids))))
# logger.info(f'{data_len} tokens ({nb_unique_tokens} unique)')
# unk_idx = self.params.special_tok_ids['unk_token']
# nb_unknown = sum([(t==unk_idx).sum() for t in self.token_ids])
# logger.info(f'{nb_unknown} unknown tokens (covering {100*nb_unknown/data_len:.2f}% of the data)')
def __lowerCamelCase ( self , __UpperCamelCase ):
'''simple docstring'''
__a : List[str] = [t[0] for t in batch]
__a : str = [t[1] for t in batch]
assert len(__UpperCamelCase ) == len(__UpperCamelCase )
# Max for paddings
__a : Optional[int] = max(__UpperCamelCase )
# Pad token ids
if self.params.mlm:
__a : int = self.params.special_tok_ids["""pad_token"""]
else:
__a : Tuple = self.params.special_tok_ids["""unk_token"""]
__a : Any = [list(t.astype(__UpperCamelCase ) ) + [pad_idx] * (max_seq_len_ - len(__UpperCamelCase )) for t in token_ids]
assert len(tk_ ) == len(__UpperCamelCase )
assert all(len(__UpperCamelCase ) == max_seq_len_ for t in tk_ )
__a : Any = torch.tensor(tk_ ) # (bs, max_seq_len_)
__a : Optional[Any] = torch.tensor(__UpperCamelCase ) # (bs)
return tk_t, lg_t | 697 | 1 |
'''simple docstring'''
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
WavaVecaConformerConfig,
WavaVecaConformerForCTC,
WavaVecaConformerForPreTraining,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
__SCREAMING_SNAKE_CASE : str = logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE : Any = {
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.linear_k': 'encoder.layers.*.self_attn.linear_k',
'self_attn.linear_v': 'encoder.layers.*.self_attn.linear_v',
'self_attn.linear_q': 'encoder.layers.*.self_attn.linear_q',
'self_attn.pos_bias_u': 'encoder.layers.*.self_attn.pos_bias_u',
'self_attn.pos_bias_v': 'encoder.layers.*.self_attn.pos_bias_v',
'self_attn.linear_out': 'encoder.layers.*.self_attn.linear_out',
'self_attn.linear_pos': 'encoder.layers.*.self_attn.linear_pos',
'self_attn.rotary_emb': 'encoder.embed_positions',
'self_attn_layer_norm': 'encoder.layers.*.self_attn_layer_norm',
'conv_module.pointwise_conv1': 'encoder.layers.*.conv_module.pointwise_conv1',
'conv_module.pointwise_conv2': 'encoder.layers.*.conv_module.pointwise_conv2',
'conv_module.depthwise_conv': 'encoder.layers.*.conv_module.depthwise_conv',
'conv_module.batch_norm': 'encoder.layers.*.conv_module.batch_norm',
'conv_module.layer_norm': 'encoder.layers.*.conv_module.layer_norm',
'ffn1.w_1': 'encoder.layers.*.ffn1.intermediate_dense',
'ffn1.w_2': 'encoder.layers.*.ffn1.output_dense',
'ffn1.layer_norm': 'encoder.layers.*.ffn1_layer_norm',
'ffn2.w_1': 'encoder.layers.*.ffn2.intermediate_dense',
'ffn2.w_2': 'encoder.layers.*.ffn2.output_dense',
'ffn2.layer_norm': 'encoder.layers.*.ffn2_layer_norm',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'quantizer.weight_proj': 'quantizer.weight_proj',
'quantizer.vars': 'quantizer.codevectors',
'project_q': 'project_q',
'final_proj': 'project_hid',
'w2v_encoder.proj': 'lm_head',
'mask_emb': 'masked_spec_embed',
}
__SCREAMING_SNAKE_CASE : Optional[Any] = [
'lm_head',
'quantizer.weight_proj',
'quantizer.codevectors',
'project_q',
'project_hid',
]
def _snake_case ( lowercase , lowercase , lowercase , lowercase , lowercase ) -> List[Any]:
for attribute in key.split(""".""" ):
__a : str = getattr(lowercase , lowercase )
if weight_type is not None:
__a : Dict = getattr(lowercase , lowercase ).shape
else:
__a : Dict = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
F"""Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be"""
F""" {value.shape} for {full_name}""" )
if weight_type == "weight":
__a : Any = value
elif weight_type == "weight_g":
__a : int = value
elif weight_type == "weight_v":
__a : int = value
elif weight_type == "bias":
__a : List[Any] = value
elif weight_type == "running_mean":
__a : Union[str, Any] = value
elif weight_type == "running_var":
__a : Tuple = value
elif weight_type == "num_batches_tracked":
__a : Optional[int] = value
elif weight_type == "inv_freq":
__a : List[str] = value
else:
__a : List[str] = value
logger.info(F"""{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.""" )
def _snake_case ( lowercase , lowercase , lowercase ) -> Dict:
__a : Dict = []
__a : Dict = fairseq_model.state_dict()
__a : Tuple = hf_model.wavaveca_conformer.feature_extractor
for name, value in fairseq_dict.items():
__a : int = False
if "conv_layers" in name:
load_conv_layer(
lowercase , lowercase , lowercase , lowercase , hf_model.config.feat_extract_norm == """group""" , )
__a : List[Any] = True
else:
for key, mapped_key in MAPPING.items():
__a : Optional[int] = """wav2vec2_conformer.""" + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split("""w2v_model.""" )[-1] == name.split(""".""" )[0]:
__a : str = True
if "*" in mapped_key:
__a : Optional[int] = name.split(lowercase )[0].split(""".""" )[-2]
__a : List[Any] = mapped_key.replace("""*""" , lowercase )
if "pos_bias_u" in name:
__a : Union[str, Any] = None
elif "pos_bias_v" in name:
__a : List[Any] = None
elif "weight_g" in name:
__a : List[Any] = """weight_g"""
elif "weight_v" in name:
__a : List[Any] = """weight_v"""
elif "bias" in name:
__a : Optional[int] = """bias"""
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
__a : str = """weight"""
elif "running_mean" in name:
__a : List[str] = """running_mean"""
elif "inv_freq" in name:
__a : Dict = """inv_freq"""
elif "running_var" in name:
__a : Union[str, Any] = """running_var"""
elif "num_batches_tracked" in name:
__a : int = """num_batches_tracked"""
else:
__a : Optional[int] = None
set_recursively(lowercase , lowercase , lowercase , lowercase , lowercase )
continue
if not is_used:
unused_weights.append(lowercase )
logger.warning(F"""Unused weights: {unused_weights}""" )
def _snake_case ( lowercase , lowercase , lowercase , lowercase , lowercase ) -> List[str]:
__a : Optional[Any] = full_name.split("""conv_layers.""" )[-1]
__a : Union[str, Any] = name.split(""".""" )
__a : Optional[Any] = int(items[0] )
__a : int = int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" )
__a : Dict = value
logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" )
__a : str = value
logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.""" )
__a : Dict = value
logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.""" )
__a : Union[str, Any] = value
logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
else:
unused_weights.append(lowercase )
@torch.no_grad()
def _snake_case ( lowercase , lowercase , lowercase=None , lowercase=None , lowercase=True ) -> Optional[Any]:
if config_path is not None:
__a : Any = WavaVecaConformerConfig.from_pretrained(lowercase , hidden_act="""swish""" )
else:
__a : Optional[int] = WavaVecaConformerConfig()
if "rope" in checkpoint_path:
__a : Optional[Any] = """rotary"""
if is_finetuned:
if dict_path:
__a : List[Any] = Dictionary.load(lowercase )
# important change bos & pad token id since CTC symbol is <pad> and
# not <s> as in fairseq
__a : int = target_dict.pad_index
__a : List[str] = target_dict.bos_index
__a : str = target_dict.eos_index
__a : Dict = len(target_dict.symbols )
__a : Any = os.path.join(lowercase , """vocab.json""" )
if not os.path.isdir(lowercase ):
logger.error("""--pytorch_dump_folder_path ({}) should be a directory""".format(lowercase ) )
return
os.makedirs(lowercase , exist_ok=lowercase )
__a : Dict = target_dict.indices
# fairseq has the <pad> and <s> switched
__a : Optional[Any] = 0
__a : List[Any] = 1
with open(lowercase , """w""" , encoding="""utf-8""" ) as vocab_handle:
json.dump(lowercase , lowercase )
__a : int = WavaVecaCTCTokenizer(
lowercase , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token="""|""" , do_lower_case=lowercase , )
__a : Optional[int] = True if config.feat_extract_norm == """layer""" else False
__a : Dict = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=1_6_0_0_0 , padding_value=0 , do_normalize=lowercase , return_attention_mask=lowercase , )
__a : str = WavaVecaProcessor(feature_extractor=lowercase , tokenizer=lowercase )
processor.save_pretrained(lowercase )
__a : List[str] = WavaVecaConformerForCTC(lowercase )
else:
__a : Optional[int] = WavaVecaConformerForPreTraining(lowercase )
if is_finetuned:
__a , __a , __a : Dict = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={"""data""": """/""".join(dict_path.split("""/""" )[:-1] )} )
else:
__a : Optional[int] = argparse.Namespace(task="""audio_pretraining""" )
__a : Tuple = fairseq.tasks.setup_task(lowercase )
__a , __a , __a : int = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] , task=lowercase )
__a : Any = model[0].eval()
recursively_load_weights(lowercase , lowercase , not is_finetuned )
hf_wavavec.save_pretrained(lowercase )
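# Example invocation of this script (all paths are hypothetical):
#   python <this_script>.py --checkpoint_path /path/to/checkpoint.pt \
#       --pytorch_dump_folder_path /path/to/output --config_path /path/to/config.json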
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE : Dict = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--not_finetuned', action='store_true', help='Whether the model to convert is a fine-tuned model or not'
)
__SCREAMING_SNAKE_CASE : int = parser.parse_args()
convert_wavaveca_conformer_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
) | 697 |
'''simple docstring'''
from pathlib import PurePosixPath
from typing import Optional
import fsspec
from fsspec import AbstractFileSystem
from huggingface_hub.hf_api import DatasetInfo
from ..utils.file_utils import get_authentication_headers_for_url
from ..utils.hub import hf_hub_url
class SCREAMING_SNAKE_CASE__ ( __UpperCamelCase ):
lowercase__ = ""
lowercase__ = "hf-legacy" # "hf://"" is reserved for hffs
def __init__( self , __UpperCamelCase = None , __UpperCamelCase = None , **__UpperCamelCase , ):
'''simple docstring'''
super().__init__(self , **__UpperCamelCase )
__a : int = repo_info
__a : int = token
__a : Any = None
def __lowerCamelCase ( self ):
'''simple docstring'''
if self.dir_cache is None:
__a : Union[str, Any] = {}
for hf_file in self.repo_info.siblings:
# TODO(QL): add sizes
__a : List[str] = {
"""name""": hf_file.rfilename,
"""size""": None,
"""type""": """file""",
}
self.dir_cache.update(
{
                        str(d ): {"""name""": str(d ), """size""": None, """type""": """directory"""}
for d in list(PurePosixPath(hf_file.rfilename ).parents )[:-1]
} )
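        # For a hypothetical repo containing one sibling "data/train.csv", the
        # cache would hold {"data/train.csv": {..., "type": "file"}} plus a
        # {"data": {..., "type": "directory"}} entry for its parent.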
def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase = "rb" , **__UpperCamelCase , ):
'''simple docstring'''
if not isinstance(self.repo_info , __UpperCamelCase ):
raise NotImplementedError(f"""Open is only implemented for dataset repositories, but got {self.repo_info}""" )
__a : Any = hf_hub_url(self.repo_info.id , __UpperCamelCase , revision=self.repo_info.sha )
return fsspec.open(
__UpperCamelCase , mode=__UpperCamelCase , headers=get_authentication_headers_for_url(__UpperCamelCase , use_auth_token=self.token ) , client_kwargs={"""trust_env""": True} , ).open()
def __lowerCamelCase ( self , __UpperCamelCase , **__UpperCamelCase ):
'''simple docstring'''
self._get_dirs()
__a : str = self._strip_protocol(__UpperCamelCase )
if path in self.dir_cache:
return self.dir_cache[path]
else:
raise FileNotFoundError(__UpperCamelCase )
def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase=False , **__UpperCamelCase ):
'''simple docstring'''
self._get_dirs()
__a : int = PurePosixPath(path.strip("""/""" ) )
__a : List[str] = {}
for p, f in self.dir_cache.items():
__a : str = PurePosixPath(p.strip("""/""" ) )
__a : Optional[int] = p.parent
if root == path:
__a : List[str] = f
__a : str = list(paths.values() )
if detail:
return out
else:
return sorted(f["""name"""] for f in out ) | 697 | 1 |
'''simple docstring'''
import copy
import inspect
import unittest
from transformers import PretrainedConfig, SwiftFormerConfig
from transformers.testing_utils import (
require_torch,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import SwiftFormerForImageClassification, SwiftFormerModel
from transformers.models.swiftformer.modeling_swiftformer import SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class SCREAMING_SNAKE_CASE__ :
def __init__( self , __UpperCamelCase , __UpperCamelCase=13 , __UpperCamelCase=3 , __UpperCamelCase=True , __UpperCamelCase=True , __UpperCamelCase=0.1 , __UpperCamelCase=0.1 , __UpperCamelCase=224 , __UpperCamelCase=1000 , __UpperCamelCase=[3, 3, 6, 4] , __UpperCamelCase=[48, 56, 112, 220] , ):
'''simple docstring'''
__a : str = parent
__a : Union[str, Any] = batch_size
__a : Any = num_channels
__a : List[str] = is_training
__a : str = use_labels
__a : str = hidden_dropout_prob
__a : List[Any] = attention_probs_dropout_prob
__a : Dict = num_labels
__a : Optional[int] = image_size
__a : List[str] = layer_depths
__a : int = embed_dims
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : List[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__a : Dict = None
if self.use_labels:
__a : str = ids_tensor([self.batch_size] , self.num_labels )
__a : Optional[int] = self.get_config()
return config, pixel_values, labels
def __lowerCamelCase ( self ):
'''simple docstring'''
return SwiftFormerConfig(
            depths=self.layer_depths ,
            embed_dims=self.embed_dims ,
            mlp_ratio=4 ,
            downsamples=[True, True, True, True] ,
            hidden_act="""gelu""" ,
            num_labels=self.num_labels ,
            down_patch_size=3 ,
            down_stride=2 ,
            down_pad=1 ,
            drop_rate=0.0 ,
            drop_path_rate=0.0 ,
            use_layer_scale=__UpperCamelCase ,
            layer_scale_init_value=1E-5 , )
def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ):
'''simple docstring'''
__a : Any = SwiftFormerModel(config=__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
__a : List[Any] = model(__UpperCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.embed_dims[-1], 7, 7) )
def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ):
'''simple docstring'''
__a : Any = self.num_labels
__a : int = SwiftFormerForImageClassification(__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
__a : str = model(__UpperCamelCase , labels=__UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
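        # a second pass with a fresh model and newly sampled images checks that
        # the logits shape depends only on the config, not on the inputs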
__a : Any = SwiftFormerForImageClassification(__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
__a : Dict = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__a : Optional[int] = model(__UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __lowerCamelCase ( self ):
'''simple docstring'''
((__a) , (__a) , (__a)) : Optional[Any] = self.prepare_config_and_inputs()
__a : int = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class SCREAMING_SNAKE_CASE__ ( __UpperCamelCase , __UpperCamelCase , unittest.TestCase ):
lowercase__ = (SwiftFormerModel, SwiftFormerForImageClassification) if is_torch_available() else ()
lowercase__ = (
{"feature-extraction": SwiftFormerModel, "image-classification": SwiftFormerForImageClassification}
if is_torch_available()
else {}
)
lowercase__ = False
lowercase__ = False
lowercase__ = False
lowercase__ = False
lowercase__ = False
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Tuple = SwiftFormerModelTester(self )
__a : Dict = ConfigTester(
self , config_class=__UpperCamelCase , has_text_modality=__UpperCamelCase , hidden_size=37 , num_attention_heads=12 , num_hidden_layers=12 , )
def __lowerCamelCase ( self ):
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason="""SwiftFormer does not use inputs_embeds""" )
def __lowerCamelCase ( self ):
'''simple docstring'''
pass
def __lowerCamelCase ( self ):
'''simple docstring'''
__a , __a : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__a : Union[str, Any] = model_class(__UpperCamelCase )
__a : Dict = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__UpperCamelCase , nn.Linear ) )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a , __a : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__a : Dict = model_class(__UpperCamelCase )
__a : List[Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__a : Union[str, Any] = [*signature.parameters.keys()]
__a : Any = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , __UpperCamelCase )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__UpperCamelCase )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__UpperCamelCase )
@slow
def __lowerCamelCase ( self ):
'''simple docstring'''
for model_name in SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__a : Any = SwiftFormerModel.from_pretrained(__UpperCamelCase )
self.assertIsNotNone(__UpperCamelCase )
@unittest.skip(reason="""SwiftFormer does not output attentions""" )
def __lowerCamelCase ( self ):
'''simple docstring'''
pass
def __lowerCamelCase ( self ):
'''simple docstring'''
def check_hidden_states_output(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ):
__a : Any = model_class(__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
with torch.no_grad():
__a : Union[str, Any] = model(**self._prepare_for_class(__UpperCamelCase , __UpperCamelCase ) )
__a : List[Any] = outputs.hidden_states
__a : List[str] = 8
self.assertEqual(len(__UpperCamelCase ) , __UpperCamelCase ) # TODO
# SwiftFormer's feature maps are of shape (batch_size, embed_dims, height, width)
# with the width and height being successively divided by 2, after every 2 blocks
for i in range(len(__UpperCamelCase ) ):
self.assertEqual(
hidden_states[i].shape , torch.Size(
[
self.model_tester.batch_size,
self.model_tester.embed_dims[i // 2],
(self.model_tester.image_size // 4) // 2 ** (i // 2),
(self.model_tester.image_size // 4) // 2 ** (i // 2),
] ) , )
__a , __a : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__a : Optional[int] = True
check_hidden_states_output(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__a : Optional[int] = True
check_hidden_states_output(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
def __lowerCamelCase ( self ):
'''simple docstring'''
def _config_zero_init(__UpperCamelCase ):
__a : Optional[Any] = copy.deepcopy(__UpperCamelCase )
for key in configs_no_init.__dict__.keys():
if "_range" in key or "_std" in key or "initializer_factor" in key or "layer_scale" in key:
setattr(__UpperCamelCase , __UpperCamelCase , 1E-10 )
if isinstance(getattr(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) , __UpperCamelCase ):
__a : Optional[Any] = _config_zero_init(getattr(__UpperCamelCase , __UpperCamelCase ) )
setattr(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
return configs_no_init
__a , __a : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
__a : Any = _config_zero_init(__UpperCamelCase )
for model_class in self.all_model_classes:
__a : Any = model_class(config=__UpperCamelCase )
for name, param in model.named_parameters():
if param.requires_grad:
self.assertIn(
((param.data.mean() * 1E9) / 1E9).round().item() , [0.0, 1.0] , msg=f"""Parameter {name} of model {model_class} seems not properly initialized""" , )
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def __lowerCamelCase ( self ):
'''simple docstring'''
pass
def _snake_case ( ) -> Optional[Any]:
__a : Optional[int] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
@cached_property
def __lowerCamelCase ( self ):
'''simple docstring'''
return ViTImageProcessor.from_pretrained("""MBZUAI/swiftformer-xs""" ) if is_vision_available() else None
@slow
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : str = SwiftFormerForImageClassification.from_pretrained("""MBZUAI/swiftformer-xs""" ).to(__UpperCamelCase )
__a : int = self.default_image_processor
__a : List[str] = prepare_img()
__a : Any = image_processor(images=__UpperCamelCase , return_tensors="""pt""" ).to(__UpperCamelCase )
# forward pass
with torch.no_grad():
__a : Union[str, Any] = model(**__UpperCamelCase )
# verify the logits
__a : List[str] = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , __UpperCamelCase )
__a : List[Any] = torch.tensor([[-2.1703E00, 2.1107E00, -2.0811E00]] ).to(__UpperCamelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __UpperCamelCase , atol=1E-4 ) ) | 697 |
'''simple docstring'''
import inspect
import unittest
from transformers import DPTConfig
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MODEL_MAPPING, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTModel
from transformers.models.dpt.modeling_dpt import DPT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class SCREAMING_SNAKE_CASE__ :
def __init__( self , __UpperCamelCase , __UpperCamelCase=2 , __UpperCamelCase=32 , __UpperCamelCase=16 , __UpperCamelCase=3 , __UpperCamelCase=True , __UpperCamelCase=True , __UpperCamelCase=32 , __UpperCamelCase=4 , __UpperCamelCase=[0, 1, 2, 3] , __UpperCamelCase=4 , __UpperCamelCase=37 , __UpperCamelCase="gelu" , __UpperCamelCase=0.1 , __UpperCamelCase=0.1 , __UpperCamelCase=0.0_2 , __UpperCamelCase=3 , __UpperCamelCase=[1, 384, 24, 24] , __UpperCamelCase=True , __UpperCamelCase=None , ):
'''simple docstring'''
__a : List[str] = parent
__a : Tuple = batch_size
__a : str = image_size
__a : int = patch_size
__a : Dict = num_channels
__a : int = is_training
__a : Dict = use_labels
__a : Union[str, Any] = hidden_size
__a : Dict = num_hidden_layers
__a : Dict = backbone_out_indices
__a : Optional[int] = num_attention_heads
__a : List[str] = intermediate_size
__a : Optional[Any] = hidden_act
__a : Dict = hidden_dropout_prob
__a : Tuple = attention_probs_dropout_prob
__a : Any = initializer_range
__a : Any = num_labels
__a : Optional[Any] = backbone_featmap_shape
__a : List[Any] = scope
__a : List[str] = is_hybrid
# sequence length of DPT = num_patches + 1 (we add 1 for the [CLS] token)
__a : Union[str, Any] = (image_size // patch_size) ** 2
__a : List[str] = num_patches + 1
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Dict = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__a : Union[str, Any] = None
if self.use_labels:
__a : str = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
__a : Tuple = self.get_config()
return config, pixel_values, labels
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : List[str] = {
"""global_padding""": """same""",
"""layer_type""": """bottleneck""",
"""depths""": [3, 4, 9],
"""out_features""": ["""stage1""", """stage2""", """stage3"""],
"""embedding_dynamic_padding""": True,
"""hidden_sizes""": [96, 192, 384, 768],
"""num_groups""": 2,
}
return DPTConfig(
            image_size=self.image_size ,
            patch_size=self.patch_size ,
            num_channels=self.num_channels ,
            hidden_size=self.hidden_size ,
            num_hidden_layers=self.num_hidden_layers ,
            backbone_out_indices=self.backbone_out_indices ,
            num_attention_heads=self.num_attention_heads ,
            intermediate_size=self.intermediate_size ,
            hidden_act=self.hidden_act ,
            hidden_dropout_prob=self.hidden_dropout_prob ,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob ,
            is_decoder=__UpperCamelCase ,
            initializer_range=self.initializer_range ,
            is_hybrid=self.is_hybrid ,
            backbone_config=__UpperCamelCase ,
            backbone_featmap_shape=self.backbone_featmap_shape , )
def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ):
'''simple docstring'''
__a : Optional[Any] = DPTModel(config=__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
__a : List[str] = model(__UpperCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ):
'''simple docstring'''
__a : List[str] = self.num_labels
__a : Union[str, Any] = DPTForDepthEstimation(__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
__a : Tuple = model(__UpperCamelCase )
self.parent.assertEqual(result.predicted_depth.shape , (self.batch_size, self.image_size, self.image_size) )
def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ):
'''simple docstring'''
__a : Dict = self.num_labels
__a : Tuple = DPTForSemanticSegmentation(__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
__a : str = model(__UpperCamelCase , labels=__UpperCamelCase )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size, self.image_size) )
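    # note: semantic segmentation asserts num_labels logits per pixel, whereas
    # the depth-estimation check above expects a single value per pixel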
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Optional[int] = self.prepare_config_and_inputs()
__a , __a , __a : Tuple = config_and_inputs
__a : List[str] = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class SCREAMING_SNAKE_CASE__ ( __UpperCamelCase , __UpperCamelCase , unittest.TestCase ):
lowercase__ = (DPTModel, DPTForDepthEstimation, DPTForSemanticSegmentation) if is_torch_available() else ()
lowercase__ = (
{
"depth-estimation": DPTForDepthEstimation,
"feature-extraction": DPTModel,
"image-segmentation": DPTForSemanticSegmentation,
}
if is_torch_available()
else {}
)
lowercase__ = False
lowercase__ = False
lowercase__ = False
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Optional[int] = DPTModelTester(self )
__a : List[Any] = ConfigTester(self , config_class=__UpperCamelCase , has_text_modality=__UpperCamelCase , hidden_size=37 )
def __lowerCamelCase ( self ):
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason="""DPT does not use inputs_embeds""" )
def __lowerCamelCase ( self ):
'''simple docstring'''
pass
def __lowerCamelCase ( self ):
'''simple docstring'''
__a , __a : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__a : str = model_class(__UpperCamelCase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
__a : Any = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__UpperCamelCase , nn.Linear ) )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a , __a : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__a : Any = model_class(__UpperCamelCase )
__a : List[str] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__a : int = [*signature.parameters.keys()]
__a : List[str] = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , __UpperCamelCase )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__UpperCamelCase )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_depth_estimation(*__UpperCamelCase )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*__UpperCamelCase )
def __lowerCamelCase ( self ):
'''simple docstring'''
for model_class in self.all_model_classes:
if model_class.__name__ == "DPTForDepthEstimation":
continue
__a , __a : Dict = self.model_tester.prepare_config_and_inputs_for_common()
__a : List[Any] = True
if model_class in get_values(__UpperCamelCase ):
continue
__a : str = model_class(__UpperCamelCase )
model.to(__UpperCamelCase )
model.train()
__a : Union[str, Any] = self._prepare_for_class(__UpperCamelCase , __UpperCamelCase , return_labels=__UpperCamelCase )
__a : List[Any] = model(**__UpperCamelCase ).loss
loss.backward()
def __lowerCamelCase ( self ):
'''simple docstring'''
for model_class in self.all_model_classes:
if model_class.__name__ == "DPTForDepthEstimation":
continue
__a , __a : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
__a : Any = False
__a : Dict = True
if model_class in get_values(__UpperCamelCase ) or not model_class.supports_gradient_checkpointing:
continue
__a : Any = model_class(__UpperCamelCase )
model.to(__UpperCamelCase )
model.gradient_checkpointing_enable()
model.train()
__a : List[str] = self._prepare_for_class(__UpperCamelCase , __UpperCamelCase , return_labels=__UpperCamelCase )
__a : Dict = model(**__UpperCamelCase ).loss
loss.backward()
def __lowerCamelCase ( self ):
'''simple docstring'''
__a , __a : Any = self.model_tester.prepare_config_and_inputs_for_common()
__a : Any = _config_zero_init(__UpperCamelCase )
for model_class in self.all_model_classes:
__a : Any = model_class(config=__UpperCamelCase )
# Skip the check for the backbone
__a : Optional[Any] = []
for name, module in model.named_modules():
if module.__class__.__name__ == "DPTViTHybridEmbeddings":
__a : Optional[int] = [f"""{name}.{key}""" for key in module.state_dict().keys()]
break
for name, param in model.named_parameters():
if param.requires_grad:
if name in backbone_params:
continue
self.assertIn(
((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=f"""Parameter {name} of model {model_class} seems not properly initialized""" , )
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def __lowerCamelCase ( self ):
'''simple docstring'''
pass
@slow
def __lowerCamelCase ( self ):
'''simple docstring'''
for model_name in DPT_PRETRAINED_MODEL_ARCHIVE_LIST[1:]:
__a : int = DPTModel.from_pretrained(__UpperCamelCase )
self.assertIsNotNone(__UpperCamelCase )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a , __a : int = self.model_tester.prepare_config_and_inputs_for_common()
__a : Optional[int] = """add"""
with self.assertRaises(__UpperCamelCase ):
__a : int = DPTForDepthEstimation(__UpperCamelCase )
def _snake_case ( ) -> Any:
__a : Dict = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
@slow
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : int = DPTImageProcessor.from_pretrained("""Intel/dpt-hybrid-midas""" )
__a : int = DPTForDepthEstimation.from_pretrained("""Intel/dpt-hybrid-midas""" ).to(__UpperCamelCase )
__a : Union[str, Any] = prepare_img()
__a : Any = image_processor(images=__UpperCamelCase , return_tensors="""pt""" ).to(__UpperCamelCase )
# forward pass
with torch.no_grad():
__a : Optional[Any] = model(**__UpperCamelCase )
__a : int = outputs.predicted_depth
# verify the predicted depth
__a : Any = torch.Size((1, 384, 384) )
self.assertEqual(predicted_depth.shape , __UpperCamelCase )
__a : int = torch.tensor(
[[[5.6_4_3_7, 5.6_1_4_6, 5.6_5_1_1], [5.4_3_7_1, 5.5_6_4_9, 5.5_9_5_8], [5.5_2_1_5, 5.5_1_8_4, 5.5_2_9_3]]] ).to(__UpperCamelCase )
self.assertTrue(torch.allclose(outputs.predicted_depth[:3, :3, :3] / 100 , __UpperCamelCase , atol=1E-4 ) ) | 697 | 1 |
'''simple docstring'''
from timeit import timeit
def _snake_case ( lowercase ) -> int:
if number < 0:
raise ValueError("""the value of input must not be negative""" )
__a : Union[str, Any] = 0
while number:
number &= number - 1
result += 1
return result
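# Worked example for the routine above: `number &= number - 1` clears the
# lowest set bit on each pass, so 0b1011 -> 0b1010 -> 0b1000 -> 0b0000 takes
# three iterations and correctly returns 3.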
def _snake_case ( lowercase ) -> int:
if number < 0:
raise ValueError("""the value of input must not be negative""" )
__a : Optional[int] = 0
while number:
if number % 2 == 1:
result += 1
number >>= 1
return result
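# The modulo variant instead inspects one bit per iteration (for 0b1011 it sees
# 1, 1, 0, 1), so it always loops once per bit position rather than once per
# set bit, which is why the benchmark below often favours Kernighan's method.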
def _snake_case ( ) -> None:
def do_benchmark(lowercase ) -> None:
__a : Tuple = """import __main__ as z"""
print(F"""Benchmark when {number = }:""" )
print(F"""{get_set_bits_count_using_modulo_operator(lowercase ) = }""" )
__a : Any = timeit("""z.get_set_bits_count_using_modulo_operator(25)""" , setup=lowercase )
print(F"""timeit() runs in {timing} seconds""" )
print(F"""{get_set_bits_count_using_brian_kernighans_algorithm(lowercase ) = }""" )
__a : List[Any] = timeit(
"""z.get_set_bits_count_using_brian_kernighans_algorithm(25)""" , setup=lowercase , )
print(F"""timeit() runs in {timing} seconds""" )
for number in (2_5, 3_7, 5_8, 0):
do_benchmark(lowercase )
print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark() | 697 |
'''simple docstring'''
import unittest
from .lib import (
Matrix,
Vector,
axpy,
square_zero_matrix,
unit_basis_vector,
zero_vector,
)
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Dict = Vector([1, 2, 3] )
self.assertEqual(x.component(0 ) , 1 )
self.assertEqual(x.component(2 ) , 3 )
__a : Optional[int] = Vector()
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Any = Vector([0, 0, 0, 0, 0, 1] )
self.assertEqual(str(__UpperCamelCase ) , """(0,0,0,0,0,1)""" )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Tuple = Vector([1, 2, 3, 4] )
self.assertEqual(len(__UpperCamelCase ) , 4 )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Optional[Any] = Vector([1, 2] )
__a : List[str] = Vector([1, 2, 3, 4, 5] )
__a : Optional[int] = Vector([0, 0, 0, 0, 0, 0, 0, 0, 0, 0] )
__a : Dict = Vector([1, -1, 1, -1, 2, -3, 4, -5] )
self.assertAlmostEqual(x.euclidean_length() , 2.2_3_6 , 3 )
self.assertAlmostEqual(y.euclidean_length() , 7.4_1_6 , 3 )
self.assertEqual(z.euclidean_length() , 0 )
self.assertAlmostEqual(w.euclidean_length() , 7.6_1_6 , 3 )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Dict = Vector([1, 2, 3] )
__a : Union[str, Any] = Vector([1, 1, 1] )
self.assertEqual((x + y).component(0 ) , 2 )
self.assertEqual((x + y).component(1 ) , 3 )
self.assertEqual((x + y).component(2 ) , 4 )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : List[str] = Vector([1, 2, 3] )
__a : Any = Vector([1, 1, 1] )
self.assertEqual((x - y).component(0 ) , 0 )
self.assertEqual((x - y).component(1 ) , 1 )
self.assertEqual((x - y).component(2 ) , 2 )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Tuple = Vector([1, 2, 3] )
__a : Optional[Any] = Vector([2, -1, 4] ) # for test of dot product
__a : Union[str, Any] = Vector([1, -2, -1] )
self.assertEqual(str(x * 3.0 ) , """(3.0,6.0,9.0)""" )
self.assertEqual((a * b) , 0 )
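        # a * b = (2, -1, 4) . (1, -2, -1) = 2 + 2 - 4 = 0, i.e. a and b are orthogonal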
def __lowerCamelCase ( self ):
'''simple docstring'''
self.assertEqual(str(zero_vector(10 ) ).count("""0""" ) , 10 )
def __lowerCamelCase ( self ):
'''simple docstring'''
self.assertEqual(str(unit_basis_vector(3 , 1 ) ) , """(0,1,0)""" )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Dict = Vector([1, 2, 3] )
__a : Optional[int] = Vector([1, 0, 1] )
self.assertEqual(str(axpy(2 , __UpperCamelCase , __UpperCamelCase ) ) , """(3,4,7)""" )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : int = Vector([1, 0, 0, 0, 0, 0] )
__a : Any = x.copy()
self.assertEqual(str(__UpperCamelCase ) , str(__UpperCamelCase ) )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Union[str, Any] = Vector([1, 0, 0] )
x.change_component(0 , 0 )
x.change_component(1 , 1 )
self.assertEqual(str(__UpperCamelCase ) , """(0,1,0)""" )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Dict = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
self.assertEqual("""|1,2,3|\n|2,4,5|\n|6,7,8|\n""" , str(__UpperCamelCase ) )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : List[Any] = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
__a : List[Any] = [[-3, -14, -10], [-5, -10, -5], [-2, -1, 0]]
for x in range(a.height() ):
for y in range(a.width() ):
self.assertEqual(minors[x][y] , a.minor(__UpperCamelCase , __UpperCamelCase ) )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : List[Any] = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
__a : Any = [[-3, 14, -10], [5, -10, 5], [-2, 1, 0]]
for x in range(a.height() ):
for y in range(a.width() ):
self.assertEqual(cofactors[x][y] , a.cofactor(__UpperCamelCase , __UpperCamelCase ) )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : List[Any] = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
self.assertEqual(-5 , a.determinant() )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Any = Matrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]] , 3 , 3 )
__a : List[Any] = Vector([1, 2, 3] )
self.assertEqual("""(14,32,50)""" , str(a * x ) )
self.assertEqual("""|2,4,6|\n|8,10,12|\n|14,16,18|\n""" , str(a * 2 ) )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : List[str] = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
a.change_component(0 , 2 , 5 )
self.assertEqual("""|1,2,5|\n|2,4,5|\n|6,7,8|\n""" , str(__UpperCamelCase ) )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Tuple = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
self.assertEqual(7 , a.component(2 , 1 ) , 0.0_1 )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Dict = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
__a : Union[str, Any] = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]] , 3 , 3 )
self.assertEqual("""|2,4,10|\n|4,8,10|\n|12,14,18|\n""" , str(a + b ) )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : List[Any] = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
__a : List[str] = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]] , 3 , 3 )
self.assertEqual("""|0,0,-4|\n|0,0,0|\n|0,0,-2|\n""" , str(a - b ) )
def __lowerCamelCase ( self ):
'''simple docstring'''
self.assertEqual(
"""|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n""" , str(square_zero_matrix(5 ) ) , )
if __name__ == "__main__":
unittest.main() | 697 | 1 |
'''simple docstring'''
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working, minimal example of using Accelerate. It showcases
# how to correctly compute metrics on the validation dataset in a
# distributed system, and builds on the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
__SCREAMING_SNAKE_CASE : List[Any] = 16
__SCREAMING_SNAKE_CASE : List[str] = 32
def _snake_case ( lowercase , lowercase = 1_6 ) -> List[Any]:
__a : Union[str, Any] = AutoTokenizer.from_pretrained("""bert-base-cased""" )
__a : Tuple = load_dataset("""glue""" , """mrpc""" )
def tokenize_function(lowercase ):
# max_length=None => use the model max length (it's actually the default)
__a : Optional[Any] = tokenizer(examples["""sentence1"""] , examples["""sentence2"""] , truncation=lowercase , max_length=lowercase )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
__a : Union[str, Any] = datasets.map(
lowercase , batched=lowercase , remove_columns=["""idx""", """sentence1""", """sentence2"""] , )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
__a : Any = tokenized_datasets.rename_column("""label""" , """labels""" )
def collate_fn(lowercase ):
# On TPU it's best to pad everything to the same length or training will be very slow.
__a : Dict = 1_2_8 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
__a : Union[str, Any] = 1_6
elif accelerator.mixed_precision != "no":
__a : Optional[Any] = 8
else:
__a : int = None
return tokenizer.pad(
lowercase , padding="""longest""" , max_length=lowercase , pad_to_multiple_of=lowercase , return_tensors="""pt""" , )
# Instantiate dataloaders.
__a : str = DataLoader(
tokenized_datasets["""train"""] , shuffle=lowercase , collate_fn=lowercase , batch_size=lowercase )
__a : List[str] = DataLoader(
tokenized_datasets["""validation"""] , shuffle=lowercase , collate_fn=lowercase , batch_size=lowercase )
return train_dataloader, eval_dataloader
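# The helper above returns one train and one eval DataLoader; collate_fn pads
# each batch to its longest sequence, rounded up to a multiple of 8 (fp16/bf16)
# or 16 (fp8) so tensor-core kernels stay efficient.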
# For testing only
if os.environ.get('TESTING_MOCKED_DATALOADERS', None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
__SCREAMING_SNAKE_CASE : Union[str, Any] = mocked_dataloaders # noqa: F811
def _snake_case ( lowercase , lowercase ) -> Optional[Any]:
# For testing only
if os.environ.get("""TESTING_MOCKED_DATALOADERS""" , lowercase ) == "1":
__a : List[Any] = 2
# Initialize accelerator
__a : Optional[int] = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
__a : Union[str, Any] = config["""lr"""]
__a : Any = int(config["""num_epochs"""] )
__a : Optional[int] = int(config["""seed"""] )
__a : Any = int(config["""batch_size"""] )
__a : Tuple = evaluate.load("""glue""" , """mrpc""" )
# If the batch size is too big we use gradient accumulation
__a : Dict = 1
if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
__a : List[Any] = batch_size // MAX_GPU_BATCH_SIZE
__a : List[Any] = MAX_GPU_BATCH_SIZE
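        # e.g. a requested batch size of 64 against a 16-sample GPU limit becomes
        # 4 accumulation steps over micro-batches of 16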
set_seed(lowercase )
__a , __a : List[str] = get_dataloaders(lowercase , lowercase )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
__a : Dict = AutoModelForSequenceClassification.from_pretrained("""bert-base-cased""" , return_dict=lowercase )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation, otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
__a : str = model.to(accelerator.device )
# Instantiate optimizer
__a : Union[str, Any] = AdamW(params=model.parameters() , lr=lowercase )
# Instantiate scheduler
__a : Optional[int] = get_linear_schedule_with_warmup(
optimizer=lowercase , num_warmup_steps=1_0_0 , num_training_steps=(len(lowercase ) * num_epochs) // gradient_accumulation_steps , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
__a , __a , __a , __a , __a : Tuple = accelerator.prepare(
lowercase , lowercase , lowercase , lowercase , lowercase )
# Now we train the model
for epoch in range(lowercase ):
model.train()
for step, batch in enumerate(lowercase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
__a : str = model(**lowercase )
__a : Dict = outputs.loss
__a : Any = loss / gradient_accumulation_steps
accelerator.backward(lowercase )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
__a : Optional[Any] = 0
for step, batch in enumerate(lowercase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
__a : Optional[int] = model(**lowercase )
__a : Tuple = outputs.logits.argmax(dim=-1 )
__a , __a : Any = accelerator.gather((predictions, batch["""labels"""]) )
# New Code #
# First we check if it's a distributed system
if accelerator.use_distributed:
# Then see if we're on the last batch of our eval dataloader
if step == len(lowercase ) - 1:
# Last batch needs to be truncated on distributed systems as it contains additional samples
__a : Optional[Any] = predictions[: len(eval_dataloader.dataset ) - samples_seen]
__a : int = references[: len(eval_dataloader.dataset ) - samples_seen]
else:
# Otherwise we add the number of samples seen
samples_seen += references.shape[0]
# All of this can be avoided if you use `Accelerator.gather_for_metrics` instead of `Accelerator.gather`:
# accelerator.gather_for_metrics((predictions, batch["labels"]))
metric.add_batch(
predictions=lowercase , references=lowercase , )
__a : Union[str, Any] = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(F"""epoch {epoch}:""" , lowercase )
def _snake_case ( ) -> List[str]:
__a : Tuple = argparse.ArgumentParser(description="""Simple example of training script.""" )
parser.add_argument(
"""--mixed_precision""" , type=lowercase , default=lowercase , choices=["""no""", """fp16""", """bf16""", """fp8"""] , help="""Whether to use mixed precision. Choose"""
"""between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."""
"""and an Nvidia Ampere GPU.""" , )
parser.add_argument("""--cpu""" , action="""store_true""" , help="""If passed, will train on the CPU.""" )
__a : Optional[Any] = parser.parse_args()
__a : Union[str, Any] = {"""lr""": 2E-5, """num_epochs""": 3, """seed""": 4_2, """batch_size""": 1_6}
training_function(lowercase , lowercase )
if __name__ == "__main__":
main() | 697 |
'''simple docstring'''
import os
from itertools import chain
from random import randrange, shuffle
import pytest
from .sola import PokerHand
__SCREAMING_SNAKE_CASE : List[str] = (
'4S 3H 2C 7S 5H',
'9D 8H 2C 6S 7H',
'2D 6D 9D TH 7D',
'TC 8C 2S JH 6C',
'JH 8S TH AH QH',
'TS KS 5S 9S AC',
'KD 6S 9D TH AD',
'KS 8D 4D 9S 4S', # pair
'8C 4S KH JS 4D', # pair
'QH 8H KD JH 8S', # pair
'KC 4H KS 2H 8D', # pair
'KD 4S KC 3H 8S', # pair
'AH 8S AS KC JH', # pair
'3H 4C 4H 3S 2H', # 2 pairs
'5S 5D 2C KH KH', # 2 pairs
'3C KH 5D 5S KH', # 2 pairs
'AS 3C KH AD KH', # 2 pairs
'7C 7S 3S 7H 5S', # 3 of a kind
'7C 7S KH 2H 7H', # 3 of a kind
'AC KH QH AH AS', # 3 of a kind
'2H 4D 3C AS 5S', # straight (low ace)
'3C 5C 4C 2C 6H', # straight
'6S 8S 7S 5H 9H', # straight
'JS QS 9H TS KH', # straight
'QC KH TS JS AH', # straight (high ace)
'8C 9C 5C 3C TC', # flush
'3S 8S 9S 5S KS', # flush
'4C 5C 9C 8C KC', # flush
'JH 8H AH KH QH', # flush
'3D 2H 3H 2C 2D', # full house
'2H 2C 3S 3H 3D', # full house
'KH KC 3S 3H 3D', # full house
'JC 6H JS JD JH', # 4 of a kind
'JC 7H JS JD JH', # 4 of a kind
'JC KH JS JD JH', # 4 of a kind
'2S AS 4S 5S 3S', # straight flush (low ace)
'2D 6D 3D 4D 5D', # straight flush
'5C 6C 3C 7C 4C', # straight flush
'JH 9H TH KH QH', # straight flush
'JH AH TH KH QH', # royal flush (high ace straight flush)
)
__SCREAMING_SNAKE_CASE : Optional[Any] = (
('2H 3H 4H 5H 6H', 'KS AS TS QS JS', 'Loss'),
('2H 3H 4H 5H 6H', 'AS AD AC AH JD', 'Win'),
('AS AH 2H AD AC', 'JS JD JC JH 3D', 'Win'),
('2S AH 2H AS AC', 'JS JD JC JH AD', 'Loss'),
('2S AH 2H AS AC', '2H 3H 5H 6H 7H', 'Win'),
('AS 3S 4S 8S 2S', '2H 3H 5H 6H 7H', 'Win'),
('2H 3H 5H 6H 7H', '2S 3H 4H 5S 6C', 'Win'),
('2S 3H 4H 5S 6C', '3D 4C 5H 6H 2S', 'Tie'),
('2S 3H 4H 5S 6C', 'AH AC 5H 6H AS', 'Win'),
('2S 2H 4H 5S 4C', 'AH AC 5H 6H AS', 'Loss'),
('2S 2H 4H 5S 4C', 'AH AC 5H 6H 7S', 'Win'),
('6S AD 7H 4S AS', 'AH AC 5H 6H 7S', 'Loss'),
('2S AH 4H 5S KC', 'AH AC 5H 6H 7S', 'Loss'),
('2S 3H 6H 7S 9C', '7H 3C TH 6H 9S', 'Loss'),
('4S 5H 6H TS AC', '3S 5H 6H TS AC', 'Win'),
('2S AH 4H 5S 6C', 'AD 4C 5H 6H 2C', 'Tie'),
('AS AH 3H AD AC', 'AS AH 2H AD AC', 'Win'),
('AH AC 5H 5C QS', 'AH AC 5H 5C KS', 'Loss'),
('AH AC 5H 5C QS', 'KH KC 5H 5C QS', 'Win'),
('7C 7S KH 2H 7H', '3C 3S AH 2H 3H', 'Win'),
('3C 3S AH 2H 3H', '7C 7S KH 2H 7H', 'Loss'),
('6H 5H 4H 3H 2H', '5H 4H 3H 2H AH', 'Win'),
('5H 4H 3H 2H AH', '5H 4H 3H 2H AH', 'Tie'),
('5H 4H 3H 2H AH', '6H 5H 4H 3H 2H', 'Loss'),
('AH AD KS KC AC', 'AH KD KH AC KC', 'Win'),
('2H 4D 3C AS 5S', '2H 4D 3C 6S 5S', 'Loss'),
('2H 3S 3C 3H 2S', '3S 3C 2S 2H 2D', 'Win'),
('4D 6D 5D 2D JH', '3S 8S 3H TC KH', 'Loss'),
('4S 6C 8S 3S 7S', 'AD KS 2D 7D 7C', 'Loss'),
('6S 4C 7H 8C 3H', '5H JC AH 9D 9C', 'Loss'),
('9D 9H JH TC QH', '3C 2S JS 5C 7H', 'Win'),
('2H TC 8S AD 9S', '4H TS 7H 2C 5C', 'Win'),
('9D 3S 2C 7S 7C', 'JC TD 3C TC 9H', 'Loss'),
)
__SCREAMING_SNAKE_CASE : Tuple = (
('2H 3H 4H 5H 6H', True),
('AS AH 2H AD AC', False),
('2H 3H 5H 6H 7H', True),
('KS AS TS QS JS', True),
('8H 9H QS JS TH', False),
('AS 3S 4S 8S 2S', True),
)
__SCREAMING_SNAKE_CASE : Dict = (
('2H 3H 4H 5H 6H', True),
('AS AH 2H AD AC', False),
('2H 3H 5H 6H 7H', False),
('KS AS TS QS JS', True),
('8H 9H QS JS TH', True),
)
__SCREAMING_SNAKE_CASE : Optional[int] = (
('2H 4D 3C AS 5S', True, [5, 4, 3, 2, 14]),
('2H 5D 3C AS 5S', False, [14, 5, 5, 3, 2]),
('JH QD KC AS TS', False, [14, 13, 12, 11, 10]),
('9D 3S 2C 7S 7C', False, [9, 7, 7, 3, 2]),
)
__SCREAMING_SNAKE_CASE : int = (
('JH AH TH KH QH', 0),
('JH 9H TH KH QH', 0),
('JC KH JS JD JH', 7),
('KH KC 3S 3H 3D', 6),
('8C 9C 5C 3C TC', 0),
('JS QS 9H TS KH', 0),
('7C 7S KH 2H 7H', 3),
('3C KH 5D 5S KH', 2),
('QH 8H KD JH 8S', 1),
('2D 6D 9D TH 7D', 0),
)
__SCREAMING_SNAKE_CASE : int = (
('JH AH TH KH QH', 23),
('JH 9H TH KH QH', 22),
('JC KH JS JD JH', 21),
('KH KC 3S 3H 3D', 20),
('8C 9C 5C 3C TC', 19),
('JS QS 9H TS KH', 18),
('7C 7S KH 2H 7H', 17),
('3C KH 5D 5S KH', 16),
('QH 8H KD JH 8S', 15),
('2D 6D 9D TH 7D', 14),
)
def _snake_case ( ) -> List[str]:
__a , __a : List[Any] = randrange(len(lowercase ) ), randrange(len(lowercase ) )
__a : int = ["""Loss""", """Tie""", """Win"""][(play >= oppo) + (play > oppo)]
__a , __a : int = SORTED_HANDS[play], SORTED_HANDS[oppo]
return hand, other, expected
def _snake_case ( lowercase = 1_0_0 ) -> Any:
return (generate_random_hand() for _ in range(lowercase ))
@pytest.mark.parametrize("""hand, expected""" , lowercase )
def _snake_case ( lowercase , lowercase ) -> int:
assert PokerHand(lowercase )._is_flush() == expected
@pytest.mark.parametrize("""hand, expected""" , lowercase )
def _snake_case ( lowercase , lowercase ) -> Any:
assert PokerHand(lowercase )._is_straight() == expected
@pytest.mark.parametrize("""hand, expected, card_values""" , lowercase )
def _snake_case ( lowercase , lowercase , lowercase ) -> List[str]:
__a : Union[str, Any] = PokerHand(lowercase )
assert player._is_five_high_straight() == expected
assert player._card_values == card_values
@pytest.mark.parametrize("""hand, expected""" , lowercase )
def _snake_case ( lowercase , lowercase ) -> Optional[int]:
assert PokerHand(lowercase )._is_same_kind() == expected
@pytest.mark.parametrize("""hand, expected""" , lowercase )
def _snake_case ( lowercase , lowercase ) -> Union[str, Any]:
assert PokerHand(lowercase )._hand_type == expected
@pytest.mark.parametrize("""hand, other, expected""" , lowercase )
def _snake_case ( lowercase , lowercase , lowercase ) -> Optional[int]:
assert PokerHand(lowercase ).compare_with(PokerHand(lowercase ) ) == expected
@pytest.mark.parametrize("""hand, other, expected""" , generate_random_hands() )
def _snake_case ( lowercase , lowercase , lowercase ) -> int:
assert PokerHand(lowercase ).compare_with(PokerHand(lowercase ) ) == expected
def _snake_case ( ) -> Union[str, Any]:
__a : Tuple = [PokerHand(lowercase ) for hand in SORTED_HANDS]
__a : Optional[int] = poker_hands.copy()
shuffle(lowercase )
__a : List[str] = chain(sorted(lowercase ) )
for index, hand in enumerate(lowercase ):
assert hand == poker_hands[index]
def _snake_case ( ) -> List[str]:
# Test that five high straights are compared correctly.
__a : Optional[int] = [PokerHand("""2D AC 3H 4H 5S""" ), PokerHand("""2S 3H 4H 5S 6C""" )]
pokerhands.sort(reverse=lowercase )
assert pokerhands[0].__str__() == "2S 3H 4H 5S 6C"
def _snake_case ( ) -> List[str]:
    # Repeated calls to _is_five_high_straight should keep returning True and
    # must not mutate the card list again after the first call.
__a : Dict = PokerHand("""2C 4S AS 3D 5C""" )
__a : Dict = True
__a : Optional[int] = [5, 4, 3, 2, 1_4]
for _ in range(1_0 ):
assert pokerhand._is_five_high_straight() == expected
assert pokerhand._card_values == expected_card_values
def _snake_case ( ) -> Dict:
# Problem number 54 from Project Euler
# Testing from poker_hands.txt file
__a : Tuple = 0
__a : int = os.path.abspath(os.path.dirname(lowercase ) )
__a : Union[str, Any] = os.path.join(lowercase , """poker_hands.txt""" )
with open(lowercase ) as file_hand:
for line in file_hand:
__a : Union[str, Any] = line[:1_4].strip()
__a : Optional[Any] = line[1_5:].strip()
__a , __a : List[str] = PokerHand(lowercase ), PokerHand(lowercase )
__a : str = player.compare_with(lowercase )
if output == "Win":
answer += 1
assert answer == 3_7_6 | 697 | 1 |
'''simple docstring'''
from typing import Any
class SCREAMING_SNAKE_CASE__ :
def __init__( self , __UpperCamelCase ):
'''simple docstring'''
__a : List[Any] = data
__a : str = None
def __repr__( self ):
'''simple docstring'''
return f"""Node({self.data})"""
class SCREAMING_SNAKE_CASE__ :
def __init__( self ):
'''simple docstring'''
__a : List[Any] = None
def __iter__( self ):
'''simple docstring'''
__a : List[Any] = self.head
while node:
yield node.data
__a : Tuple = node.next
def __len__( self ):
'''simple docstring'''
return sum(1 for _ in self )
def __repr__( self ):
'''simple docstring'''
return "->".join([str(__UpperCamelCase ) for item in self] )
def __getitem__( self , __UpperCamelCase ):
'''simple docstring'''
if not 0 <= index < len(self ):
raise ValueError("""list index out of range.""" )
for i, node in enumerate(self ):
if i == index:
return node
return None
def __setitem__( self , __UpperCamelCase , __UpperCamelCase ):
'''simple docstring'''
if not 0 <= index < len(self ):
raise ValueError("""list index out of range.""" )
__a : Union[str, Any] = self.head
for _ in range(__UpperCamelCase ):
__a : Optional[Any] = current.next
__a : List[str] = data
def __lowerCamelCase ( self , __UpperCamelCase ):
'''simple docstring'''
self.insert_nth(len(self ) , __UpperCamelCase )
def __lowerCamelCase ( self , __UpperCamelCase ):
'''simple docstring'''
self.insert_nth(0 , __UpperCamelCase )
def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase ):
'''simple docstring'''
if not 0 <= index <= len(self ):
raise IndexError("""list index out of range""" )
__a : List[Any] = Node(__UpperCamelCase )
if self.head is None:
__a : Dict = new_node
elif index == 0:
__a : Dict = self.head # link new_node to head
__a : str = new_node
else:
__a : str = self.head
for _ in range(index - 1 ):
__a : Tuple = temp.next
__a : Tuple = temp.next
__a : Any = new_node
def __lowerCamelCase ( self ): # print every node data
'''simple docstring'''
print(self )
def __lowerCamelCase ( self ):
'''simple docstring'''
return self.delete_nth(0 )
def __lowerCamelCase ( self ): # delete from tail
'''simple docstring'''
return self.delete_nth(len(self ) - 1 )
def __lowerCamelCase ( self , __UpperCamelCase = 0 ):
'''simple docstring'''
if not 0 <= index <= len(self ) - 1: # test if index is valid
raise IndexError("""List index out of range.""" )
__a : Union[str, Any] = self.head # default first node
if index == 0:
__a : Optional[int] = self.head.next
else:
__a : str = self.head
for _ in range(index - 1 ):
__a : str = temp.next
__a : List[str] = temp.next
__a : str = temp.next.next
return delete_node.data
def __lowerCamelCase ( self ):
'''simple docstring'''
return self.head is None
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : List[Any] = None
__a : Tuple = self.head
while current:
# Store the current node's next node.
__a : Any = current.next
# Make the current node's next point backwards
__a : Union[str, Any] = prev
# Make the previous node be the current node
__a : List[str] = current
# Make the current node the next node (to progress iteration)
__a : Optional[Any] = next_node
# Return prev in order to put the head at the end
__a : Dict = prev
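        # e.g. 1->2->3 is rewired in place to 3->2->1, visiting each node once
        # (O(n) time, O(1) extra space)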
def _snake_case ( ) -> None:
__a : Any = LinkedList()
assert linked_list.is_empty() is True
assert str(lowercase ) == ""
try:
linked_list.delete_head()
raise AssertionError # This should not happen.
except IndexError:
assert True # This should happen.
try:
linked_list.delete_tail()
raise AssertionError # This should not happen.
except IndexError:
assert True # This should happen.
for i in range(1_0 ):
assert len(lowercase ) == i
linked_list.insert_nth(lowercase , i + 1 )
assert str(lowercase ) == "->".join(str(lowercase ) for i in range(1 , 1_1 ) )
linked_list.insert_head(0 )
linked_list.insert_tail(1_1 )
assert str(lowercase ) == "->".join(str(lowercase ) for i in range(0 , 1_2 ) )
assert linked_list.delete_head() == 0
assert linked_list.delete_nth(9 ) == 1_0
assert linked_list.delete_tail() == 1_1
assert len(lowercase ) == 9
assert str(lowercase ) == "->".join(str(lowercase ) for i in range(1 , 1_0 ) )
assert all(linked_list[i] == i + 1 for i in range(0 , 9 ) ) is True
for i in range(0 , 9 ):
__a : List[str] = -i
assert all(linked_list[i] == -i for i in range(0 , 9 ) ) is True
linked_list.reverse()
assert str(lowercase ) == "->".join(str(lowercase ) for i in range(-8 , 1 ) )
def _snake_case ( ) -> None:
__a : Tuple = [
-9,
1_0_0,
Node(7_7_3_4_5_1_1_2 ),
"""dlrow olleH""",
7,
5_5_5_5,
0,
-1_9_2.5_5_5_5_5,
"""Hello, world!""",
7_7.9,
Node(1_0 ),
None,
None,
1_2.2_0,
]
__a : Any = LinkedList()
for i in test_input:
linked_list.insert_tail(lowercase )
# Check if it's empty or not
assert linked_list.is_empty() is False
assert (
str(lowercase ) == "-9->100->Node(77345112)->dlrow olleH->7->5555->0->"
"-192.55555->Hello, world!->77.9->Node(10)->None->None->12.2"
)
# Delete the head
__a : Optional[int] = linked_list.delete_head()
assert result == -9
assert (
str(lowercase ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
"Hello, world!->77.9->Node(10)->None->None->12.2"
)
# Delete the tail
__a : Any = linked_list.delete_tail()
assert result == 1_2.2
assert (
str(lowercase ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
"Hello, world!->77.9->Node(10)->None->None"
)
# Delete a node in specific location in linked list
__a : Optional[int] = linked_list.delete_nth(1_0 )
assert result is None
assert (
str(lowercase ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
"Hello, world!->77.9->Node(10)->None"
)
# Add a Node instance to its head
linked_list.insert_head(Node("""Hello again, world!""" ) )
assert (
str(lowercase )
== "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
"7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None"
)
# Add None to its tail
linked_list.insert_tail(lowercase )
assert (
str(lowercase )
== "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
"7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None->None"
)
# Reverse the linked list
linked_list.reverse()
assert (
str(lowercase )
== "None->None->Node(10)->77.9->Hello, world!->-192.55555->0->5555->"
"7->dlrow olleH->Node(77345112)->100->Node(Hello again, world!)"
)
def _snake_case ( ) -> List[str]:
from doctest import testmod
testmod()
__a : Optional[Any] = LinkedList()
linked_list.insert_head(input("""Inserting 1st at head """ ).strip() )
linked_list.insert_head(input("""Inserting 2nd at head """ ).strip() )
print("""\nPrint list:""" )
linked_list.print_list()
linked_list.insert_tail(input("""\nInserting 1st at tail """ ).strip() )
linked_list.insert_tail(input("""Inserting 2nd at tail """ ).strip() )
print("""\nPrint list:""" )
linked_list.print_list()
print("""\nDelete head""" )
linked_list.delete_head()
print("""Delete tail""" )
linked_list.delete_tail()
print("""\nPrint list:""" )
linked_list.print_list()
print("""\nReverse linked list""" )
linked_list.reverse()
print("""\nPrint list:""" )
linked_list.print_list()
print("""\nString representation of linked list:""" )
print(lowercase )
print("""\nReading/changing Node data using indexing:""" )
print(F"""Element at Position 1: {linked_list[1]}""" )
__a : List[Any] = input("""Enter New Value: """ ).strip()
print("""New list:""" )
print(lowercase )
print(F"""length of linked_list is : {len(lowercase )}""" )
if __name__ == "__main__":
main() | 697 |
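# Illustrative sketch (descriptive names are hypothetical): the iterative,
# in-place reversal performed by LinkedList.reverse() above. Walk the list
# once, flipping each `next` pointer, and return the old tail as the new head.
class _SketchNode:
    def __init__(self, data):
        self.data = data
        self.next = None


def _sketch_reverse(head):
    prev = None
    current = head
    while current:
        next_node = current.next  # remember the remainder of the list
        current.next = prev  # flip the pointer backwards
        prev = current  # advance prev
        current = next_node  # advance current
    return prev  # the old tail is the new head


_head = _SketchNode(1)
_head.next = _SketchNode(2)
assert _sketch_reverse(_head).data == 2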
'''simple docstring'''
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__SCREAMING_SNAKE_CASE : Optional[Any] = {'configuration_focalnet': ['FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP', 'FocalNetConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE : List[Any] = [
'FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST',
'FocalNetForImageClassification',
'FocalNetForMaskedImageModeling',
'FocalNetBackbone',
'FocalNetModel',
'FocalNetPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_focalnet import (
FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
FocalNetPreTrainedModel,
)
else:
import sys
__SCREAMING_SNAKE_CASE : Optional[int] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) | 697 | 1 |
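# Illustrative sketch of the lazy-import pattern above, without the
# transformers `_LazyModule` helper; all names here are made up. Attribute
# access triggers the real import once, then caches the resolved object.
import importlib
import types


class _SketchLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # map each exported attribute to the module that defines it
        self._attr_to_module = {
            attr: module for module, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr):
        try:
            module_name = self._attr_to_module[attr]
        except KeyError:
            raise AttributeError(attr) from None
        value = getattr(importlib.import_module(module_name), attr)
        setattr(self, attr, value)  # cache so __getattr__ is not hit again
        return value


_lazy = _SketchLazyModule("sketch", {"json": ["dumps"]})
assert _lazy.dumps({"a": 1}) == '{"a": 1}'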
'''simple docstring'''
from __future__ import annotations
def _snake_case ( lowercase , lowercase = None , lowercase = None ) -> None:
if start is None:
__a : Dict = 0
if end is None:
__a : List[str] = len(lowercase ) - 1
if start >= end:
return
__a : int = (start + end) // 2
slowsort(lowercase , lowercase , lowercase )
slowsort(lowercase , mid + 1 , lowercase )
if sequence[end] < sequence[mid]:
__a , __a : List[Any] = sequence[mid], sequence[end]
slowsort(lowercase , lowercase , end - 1 )
if __name__ == "__main__":
from doctest import testmod
testmod() | 697 |
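# Minimal self-contained sketch of the slowsort scheme above, with readable
# (hypothetical) names: sort each half, move the larger boundary element to
# `end`, then recurse on everything except the now-correct last position.
def _sketch_slowsort(seq, start=0, end=None):
    if end is None:
        end = len(seq) - 1
    if start >= end:
        return
    mid = (start + end) // 2
    _sketch_slowsort(seq, start, mid)
    _sketch_slowsort(seq, mid + 1, end)
    if seq[end] < seq[mid]:
        seq[end], seq[mid] = seq[mid], seq[end]
    _sketch_slowsort(seq, start, end - 1)


_values = [3, 1, 2]
_sketch_slowsort(_values)
assert _values == [1, 2, 3]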
'''simple docstring'''
from __future__ import annotations
import bisect
def _snake_case ( lowercase , lowercase , lowercase = 0 , lowercase = -1 ) -> int:
if hi < 0:
__a : Union[str, Any] = len(lowercase )
while lo < hi:
__a : List[str] = lo + (hi - lo) // 2
if sorted_collection[mid] < item:
__a : int = mid + 1
else:
__a : int = mid
return lo
def _snake_case ( lowercase , lowercase , lowercase = 0 , lowercase = -1 ) -> int:
if hi < 0:
__a : Any = len(lowercase )
while lo < hi:
__a : Any = lo + (hi - lo) // 2
if sorted_collection[mid] <= item:
__a : List[str] = mid + 1
else:
__a : Any = mid
return lo
def _snake_case ( lowercase , lowercase , lowercase = 0 , lowercase = -1 ) -> None:
sorted_collection.insert(bisect_left(lowercase , lowercase , lowercase , lowercase ) , lowercase )
def _snake_case ( lowercase , lowercase , lowercase = 0 , lowercase = -1 ) -> None:
sorted_collection.insert(bisect_right(lowercase , lowercase , lowercase , lowercase ) , lowercase )
def _snake_case ( lowercase , lowercase ) -> int | None:
__a : Dict = 0
__a : Any = len(lowercase ) - 1
while left <= right:
__a : str = left + (right - left) // 2
__a : List[Any] = sorted_collection[midpoint]
if current_item == item:
return midpoint
elif item < current_item:
__a : Optional[Any] = midpoint - 1
else:
__a : Optional[int] = midpoint + 1
return None
def _snake_case ( lowercase , lowercase ) -> int | None:
__a : Optional[int] = bisect.bisect_left(lowercase , lowercase )
if index != len(lowercase ) and sorted_collection[index] == item:
return index
return None
def _snake_case ( lowercase , lowercase , lowercase , lowercase ) -> int | None:
if right < left:
return None
__a : Any = left + (right - left) // 2
if sorted_collection[midpoint] == item:
return midpoint
elif sorted_collection[midpoint] > item:
return binary_search_by_recursion(lowercase , lowercase , lowercase , midpoint - 1 )
else:
return binary_search_by_recursion(lowercase , lowercase , midpoint + 1 , lowercase )
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE : List[Any] = input('Enter numbers separated by comma:\n').strip()
__SCREAMING_SNAKE_CASE : Optional[Any] = sorted(int(item) for item in user_input.split(','))
__SCREAMING_SNAKE_CASE : List[str] = int(input('Enter a single number to be found in the list:\n'))
__SCREAMING_SNAKE_CASE : Optional[int] = binary_search(collection, target)
if result is None:
print(f'''{target} was not found in {collection}.''')
else:
print(f'''{target} was found at position {result} in {collection}.''') | 697 | 1 |
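# Illustrative note on the two bisection variants implemented above:
# bisect_left returns the first valid insertion point (before any equal
# elements), bisect_right the last (after them). A quick check with the
# standard library, which the second search helper above also calls:
import bisect as _bisect

_sorted_values = [1, 2, 2, 2, 3]
assert _bisect.bisect_left(_sorted_values, 2) == 1  # before the run of 2s
assert _bisect.bisect_right(_sorted_values, 2) == 4  # after the run of 2s
# membership test, mirroring the std-lib-based search above
_i = _bisect.bisect_left(_sorted_values, 2)
assert _i != len(_sorted_values) and _sorted_values[_i] == 2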
'''simple docstring'''
import os
import random
import sys
from . import cryptomath_module as cryptomath
from . import rabin_miller
__SCREAMING_SNAKE_CASE : str = 3
def _snake_case ( lowercase ) -> int:
print("""Generating primitive root of p""" )
while True:
__a : Dict = random.randrange(3 , lowercase )
if pow(lowercase , 2 , lowercase ) == 1:
continue
if pow(lowercase , lowercase , lowercase ) == 1:
continue
return g
def _snake_case ( lowercase ) -> tuple[tuple[int, int, int, int], tuple[int, int]]:
print("""Generating prime p...""" )
__a : List[str] = rabin_miller.generate_large_prime(lowercase ) # select large prime number.
__a : Dict = primitive_root(lowercase ) # one primitive root on modulo p.
__a : Optional[Any] = random.randrange(3 , lowercase ) # private_key -> have to be greater than 2 for safety.
__a : Tuple = cryptomath.find_mod_inverse(pow(lowercase , lowercase , lowercase ) , lowercase )
__a : Optional[Any] = (key_size, e_a, e_a, p)
__a : str = (key_size, d)
return public_key, private_key
def _snake_case ( lowercase , lowercase ) -> None:
if os.path.exists(F"""{name}_pubkey.txt""" ) or os.path.exists(F"""{name}_privkey.txt""" ):
print("""\nWARNING:""" )
print(
F"""\"{name}_pubkey.txt\" or \"{name}_privkey.txt\" already exists. \n"""
"""Use a different name or delete these files and re-run this program.""" )
sys.exit()
__a , __a : List[str] = generate_key(lowercase )
print(F"""\nWriting public key to file {name}_pubkey.txt...""" )
with open(F"""{name}_pubkey.txt""" , """w""" ) as fo:
fo.write(F"""{public_key[0]},{public_key[1]},{public_key[2]},{public_key[3]}""" )
print(F"""Writing private key to file {name}_privkey.txt...""" )
with open(F"""{name}_privkey.txt""" , """w""" ) as fo:
fo.write(F"""{private_key[0]},{private_key[1]}""" )
def _snake_case ( ) -> None:
print("""Making key files...""" )
make_key_files("""elgamal""" , 2_0_4_8 )
print("""Key files generation successful""" )
if __name__ == "__main__":
main() | 697 |
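# Toy-number sketch of the ElGamal key relation used above: the public
# component is the modular inverse of g**d (mod p), so their product is
# congruent to 1. Values below are illustrative, not cryptographic.
_p, _g, _d = 23, 5, 7  # toy prime, generator, private exponent
_e = pow(pow(_g, _d, _p), -1, _p)  # find_mod_inverse(g**d % p, p); Python 3.8+
assert (pow(_g, _d, _p) * _e) % _p == 1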
'''simple docstring'''
from itertools import product
def _snake_case ( lowercase , lowercase ) -> list[int]:
__a : Optional[int] = sides_number
__a : Union[str, Any] = max_face_number * dice_number
__a : Optional[Any] = [0] * (max_total + 1)
__a : Dict = 1
__a : str = range(lowercase , max_face_number + 1 )
for dice_numbers in product(lowercase , repeat=lowercase ):
__a : int = sum(lowercase )
totals_frequencies[total] += 1
return totals_frequencies
def _snake_case ( ) -> float:
__a : Tuple = total_frequency_distribution(
sides_number=4 , dice_number=9 )
__a : Union[str, Any] = total_frequency_distribution(
sides_number=6 , dice_number=6 )
__a : str = 0
__a : Dict = 9
__a : str = 4 * 9
__a : Any = 6
for peter_total in range(lowercase , max_peter_total + 1 ):
peter_wins_count += peter_totals_frequencies[peter_total] * sum(
colin_totals_frequencies[min_colin_total:peter_total] )
__a : str = (4**9) * (6**6)
__a : List[Any] = peter_wins_count / total_games_number
__a : List[Any] = round(lowercase , ndigits=7 )
return rounded_peter_win_probability
if __name__ == "__main__":
print(f'''{solution() = }''') | 697 | 1 |
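# Tiny sanity check of the frequency-distribution idea above: two 2-sided
# dice produce totals 2, 3, 3, 4, so the counts over totals 0..4 are below.
from itertools import product as _product

_frequencies = [0] * 5
for _roll in _product(range(1, 3), repeat=2):
    _frequencies[sum(_roll)] += 1
assert _frequencies == [0, 0, 1, 2, 1]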
'''simple docstring'''
import inspect
from typing import Optional, Union
import numpy as np
import PIL
import torch
from torch.nn import functional as F
from torchvision import transforms
from transformers import CLIPFeatureExtractor, CLIPModel, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
DPMSolverMultistepScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.utils import (
PIL_INTERPOLATION,
randn_tensor,
)
def _snake_case ( lowercase , lowercase , lowercase ) -> int:
if isinstance(lowercase , torch.Tensor ):
return image
elif isinstance(lowercase , PIL.Image.Image ):
__a : Any = [image]
if isinstance(image[0] , PIL.Image.Image ):
__a : Union[str, Any] = [np.array(i.resize((w, h) , resample=PIL_INTERPOLATION["""lanczos"""] ) )[None, :] for i in image]
__a : List[Any] = np.concatenate(lowercase , axis=0 )
__a : Any = np.array(lowercase ).astype(np.floataa ) / 2_5_5.0
__a : str = image.transpose(0 , 3 , 1 , 2 )
__a : Dict = 2.0 * image - 1.0
__a : str = torch.from_numpy(lowercase )
elif isinstance(image[0] , torch.Tensor ):
__a : Union[str, Any] = torch.cat(lowercase , dim=0 )
return image
def _snake_case ( lowercase , lowercase , lowercase , lowercase=0.9_9_9_5 ) -> Tuple:
if not isinstance(lowercase , np.ndarray ):
__a : str = True
__a : Optional[int] = va.device
__a : int = va.cpu().numpy()
__a : Optional[int] = va.cpu().numpy()
__a : str = np.sum(va * va / (np.linalg.norm(lowercase ) * np.linalg.norm(lowercase )) )
if np.abs(lowercase ) > DOT_THRESHOLD:
__a : List[str] = (1 - t) * va + t * va
else:
__a : List[str] = np.arccos(lowercase )
__a : List[str] = np.sin(lowercase )
__a : Optional[Any] = theta_a * t
__a : List[Any] = np.sin(lowercase )
__a : str = np.sin(theta_a - theta_t ) / sin_theta_a
__a : Optional[int] = sin_theta_t / sin_theta_a
__a : str = sa * va + sa * va
if inputs_are_torch:
__a : Tuple = torch.from_numpy(lowercase ).to(lowercase )
return va
def _snake_case ( lowercase , lowercase ) -> Dict:
__a : str = F.normalize(lowercase , dim=-1 )
__a : str = F.normalize(lowercase , dim=-1 )
return (x - y).norm(dim=-1 ).div(2 ).arcsin().pow(2 ).mul(2 )
def _snake_case ( lowercase , lowercase ) -> Optional[Any]:
for param in model.parameters():
__a : Optional[int] = value
class SCREAMING_SNAKE_CASE__ ( __UpperCamelCase ):
def __init__( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase=None , __UpperCamelCase=None , __UpperCamelCase=None , ):
'''simple docstring'''
super().__init__()
self.register_modules(
vae=__UpperCamelCase , text_encoder=__UpperCamelCase , clip_model=__UpperCamelCase , tokenizer=__UpperCamelCase , unet=__UpperCamelCase , scheduler=__UpperCamelCase , feature_extractor=__UpperCamelCase , coca_model=__UpperCamelCase , coca_tokenizer=__UpperCamelCase , coca_transform=__UpperCamelCase , )
__a : List[str] = (
feature_extractor.size
if isinstance(feature_extractor.size , __UpperCamelCase )
else feature_extractor.size["""shortest_edge"""]
)
__a : str = transforms.Normalize(mean=feature_extractor.image_mean , std=feature_extractor.image_std )
set_requires_grad(self.text_encoder , __UpperCamelCase )
set_requires_grad(self.clip_model , __UpperCamelCase )
def __lowerCamelCase ( self , __UpperCamelCase = "auto" ):
'''simple docstring'''
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
__a : Optional[Any] = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(__UpperCamelCase )
def __lowerCamelCase ( self ):
'''simple docstring'''
self.enable_attention_slicing(__UpperCamelCase )
def __lowerCamelCase ( self ):
'''simple docstring'''
set_requires_grad(self.vae , __UpperCamelCase )
def __lowerCamelCase ( self ):
'''simple docstring'''
set_requires_grad(self.vae , __UpperCamelCase )
def __lowerCamelCase ( self ):
'''simple docstring'''
set_requires_grad(self.unet , __UpperCamelCase )
def __lowerCamelCase ( self ):
'''simple docstring'''
set_requires_grad(self.unet , __UpperCamelCase )
def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ):
'''simple docstring'''
__a : int = min(int(num_inference_steps * strength ) , __UpperCamelCase )
__a : Union[str, Any] = max(num_inference_steps - init_timestep , 0 )
__a : Optional[int] = self.scheduler.timesteps[t_start:]
return timesteps, num_inference_steps - t_start
def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase=None ):
'''simple docstring'''
if not isinstance(__UpperCamelCase , torch.Tensor ):
raise ValueError(f"""`image` has to be of type `torch.Tensor` but is {type(__UpperCamelCase )}""" )
__a : str = image.to(device=__UpperCamelCase , dtype=__UpperCamelCase )
if isinstance(__UpperCamelCase , __UpperCamelCase ):
__a : Optional[int] = [
self.vae.encode(image[i : i + 1] ).latent_dist.sample(generator[i] ) for i in range(__UpperCamelCase )
]
__a : Optional[Any] = torch.cat(__UpperCamelCase , dim=0 )
else:
__a : int = self.vae.encode(__UpperCamelCase ).latent_dist.sample(__UpperCamelCase )
        # Hardcode 0.18215 because stable-diffusion-2-base does not have self.vae.config.scaling_factor
__a : Any = 0.1_8_2_1_5 * init_latents
__a : List[str] = init_latents.repeat_interleave(__UpperCamelCase , dim=0 )
__a : str = randn_tensor(init_latents.shape , generator=__UpperCamelCase , device=__UpperCamelCase , dtype=__UpperCamelCase )
# get latents
__a : Tuple = self.scheduler.add_noise(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
__a : List[str] = init_latents
return latents
def __lowerCamelCase ( self , __UpperCamelCase ):
'''simple docstring'''
__a : List[Any] = self.coca_transform(__UpperCamelCase ).unsqueeze(0 )
with torch.no_grad(), torch.cuda.amp.autocast():
__a : int = self.coca_model.generate(transformed_image.to(device=self.device , dtype=self.coca_model.dtype ) )
__a : Dict = self.coca_tokenizer.decode(generated[0].cpu().numpy() )
return generated.split("""<end_of_text>""" )[0].replace("""<start_of_text>""" , """""" ).rstrip(""" .,""" )
def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase ):
'''simple docstring'''
__a : Any = self.feature_extractor.preprocess(__UpperCamelCase )
__a : Optional[int] = torch.from_numpy(clip_image_input["""pixel_values"""][0] ).unsqueeze(0 ).to(self.device ).half()
__a : List[str] = self.clip_model.get_image_features(__UpperCamelCase )
__a : List[Any] = image_embeddings_clip / image_embeddings_clip.norm(p=2 , dim=-1 , keepdim=__UpperCamelCase )
__a : List[Any] = image_embeddings_clip.repeat_interleave(__UpperCamelCase , dim=0 )
return image_embeddings_clip
@torch.enable_grad()
def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , ):
'''simple docstring'''
__a : Union[str, Any] = latents.detach().requires_grad_()
__a : Any = self.scheduler.scale_model_input(__UpperCamelCase , __UpperCamelCase )
# predict the noise residual
__a : Any = self.unet(__UpperCamelCase , __UpperCamelCase , encoder_hidden_states=__UpperCamelCase ).sample
if isinstance(self.scheduler , (PNDMScheduler, DDIMScheduler, DPMSolverMultistepScheduler) ):
__a : Any = self.scheduler.alphas_cumprod[timestep]
__a : List[str] = 1 - alpha_prod_t
# compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
__a : List[str] = (latents - beta_prod_t ** 0.5 * noise_pred) / alpha_prod_t ** 0.5
__a : Optional[int] = torch.sqrt(__UpperCamelCase )
__a : Dict = pred_original_sample * (fac) + latents * (1 - fac)
elif isinstance(self.scheduler , __UpperCamelCase ):
__a : Optional[int] = self.scheduler.sigmas[index]
__a : Union[str, Any] = latents - sigma * noise_pred
else:
raise ValueError(f"""scheduler type {type(self.scheduler )} not supported""" )
        # Hardcode 0.18215 because stable-diffusion-2-base does not have self.vae.config.scaling_factor
__a : int = 1 / 0.1_8_2_1_5 * sample
__a : List[Any] = self.vae.decode(__UpperCamelCase ).sample
__a : int = (image / 2 + 0.5).clamp(0 , 1 )
__a : str = transforms.Resize(self.feature_extractor_size )(__UpperCamelCase )
__a : Any = self.normalize(__UpperCamelCase ).to(latents.dtype )
__a : List[str] = self.clip_model.get_image_features(__UpperCamelCase )
__a : Optional[Any] = image_embeddings_clip / image_embeddings_clip.norm(p=2 , dim=-1 , keepdim=__UpperCamelCase )
__a : Union[str, Any] = spherical_dist_loss(__UpperCamelCase , __UpperCamelCase ).mean() * clip_guidance_scale
__a : Dict = -torch.autograd.grad(__UpperCamelCase , __UpperCamelCase )[0]
if isinstance(self.scheduler , __UpperCamelCase ):
__a : List[Any] = latents.detach() + grads * (sigma**2)
__a : List[str] = noise_pred_original
else:
__a : Union[str, Any] = noise_pred_original - torch.sqrt(__UpperCamelCase ) * grads
return noise_pred, latents
@torch.no_grad()
def __call__( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = 512 , __UpperCamelCase = 512 , __UpperCamelCase = 0.6 , __UpperCamelCase = 50 , __UpperCamelCase = 7.5 , __UpperCamelCase = 1 , __UpperCamelCase = 0.0 , __UpperCamelCase = 100 , __UpperCamelCase = None , __UpperCamelCase = "pil" , __UpperCamelCase = True , __UpperCamelCase = 0.8 , __UpperCamelCase = 0.1 , __UpperCamelCase = 0.1 , ):
'''simple docstring'''
if isinstance(__UpperCamelCase , __UpperCamelCase ) and len(__UpperCamelCase ) != batch_size:
raise ValueError(f"""You have passed {batch_size} batch_size, but only {len(__UpperCamelCase )} generators.""" )
if height % 8 != 0 or width % 8 != 0:
raise ValueError(f"""`height` and `width` have to be divisible by 8 but are {height} and {width}.""" )
if isinstance(__UpperCamelCase , torch.Generator ) and batch_size > 1:
__a : Any = [generator] + [None] * (batch_size - 1)
__a : List[str] = [
("""model""", self.coca_model is None),
("""tokenizer""", self.coca_tokenizer is None),
("""transform""", self.coca_transform is None),
]
__a : Tuple = [x[0] for x in coca_is_none if x[1]]
__a : Any = """, """.join(__UpperCamelCase )
# generate prompts with coca model if prompt is None
if content_prompt is None:
if len(__UpperCamelCase ):
raise ValueError(
f"""Content prompt is None and CoCa [{coca_is_none_str}] is None."""
f"""Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline.""" )
__a : Any = self.get_image_description(__UpperCamelCase )
if style_prompt is None:
if len(__UpperCamelCase ):
raise ValueError(
f"""Style prompt is None and CoCa [{coca_is_none_str}] is None."""
f""" Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline.""" )
__a : List[str] = self.get_image_description(__UpperCamelCase )
# get prompt text embeddings for content and style
__a : Optional[Any] = self.tokenizer(
__UpperCamelCase , padding="""max_length""" , max_length=self.tokenizer.model_max_length , truncation=__UpperCamelCase , return_tensors="""pt""" , )
__a : Union[str, Any] = self.text_encoder(content_text_input.input_ids.to(self.device ) )[0]
__a : Optional[Any] = self.tokenizer(
__UpperCamelCase , padding="""max_length""" , max_length=self.tokenizer.model_max_length , truncation=__UpperCamelCase , return_tensors="""pt""" , )
__a : Any = self.text_encoder(style_text_input.input_ids.to(self.device ) )[0]
__a : List[str] = slerp(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
# duplicate text embeddings for each generation per prompt
__a : str = text_embeddings.repeat_interleave(__UpperCamelCase , dim=0 )
# set timesteps
__a : Optional[int] = """offset""" in set(inspect.signature(self.scheduler.set_timesteps ).parameters.keys() )
__a : Optional[int] = {}
if accepts_offset:
__a : Any = 1
self.scheduler.set_timesteps(__UpperCamelCase , **__UpperCamelCase )
# Some schedulers like PNDM have timesteps as arrays
# It's more optimized to move all timesteps to correct device beforehand
self.scheduler.timesteps.to(self.device )
__a , __a : int = self.get_timesteps(__UpperCamelCase , __UpperCamelCase , self.device )
__a : int = timesteps[:1].repeat(__UpperCamelCase )
# Preprocess image
__a : List[Any] = preprocess(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
__a : List[Any] = self.prepare_latents(
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , text_embeddings.dtype , self.device , __UpperCamelCase )
__a : Union[str, Any] = preprocess(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
__a : int = self.prepare_latents(
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , text_embeddings.dtype , self.device , __UpperCamelCase )
__a : Optional[Any] = slerp(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
if clip_guidance_scale > 0:
__a : Optional[Any] = self.get_clip_image_embeddings(__UpperCamelCase , __UpperCamelCase )
__a : int = self.get_clip_image_embeddings(__UpperCamelCase , __UpperCamelCase )
__a : str = slerp(
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
__a : str = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
__a : str = content_text_input.input_ids.shape[-1]
__a : Optional[Any] = self.tokenizer([""""""] , padding="""max_length""" , max_length=__UpperCamelCase , return_tensors="""pt""" )
__a : Optional[int] = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# duplicate unconditional embeddings for each generation per prompt
__a : Any = uncond_embeddings.repeat_interleave(__UpperCamelCase , dim=0 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
__a : Union[str, Any] = torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
__a : str = (batch_size, self.unet.config.in_channels, height // 8, width // 8)
__a : str = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not work reproducibly on mps
__a : Optional[int] = torch.randn(__UpperCamelCase , generator=__UpperCamelCase , device="""cpu""" , dtype=__UpperCamelCase ).to(
self.device )
else:
__a : int = torch.randn(__UpperCamelCase , generator=__UpperCamelCase , device=self.device , dtype=__UpperCamelCase )
else:
if latents.shape != latents_shape:
raise ValueError(f"""Unexpected latents shape, got {latents.shape}, expected {latents_shape}""" )
__a : Optional[Any] = latents.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
__a : Union[str, Any] = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        # eta (η) is only used with the DDIMScheduler; it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
__a : List[str] = """eta""" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
__a : Dict = {}
if accepts_eta:
__a : Union[str, Any] = eta
# check if the scheduler accepts generator
__a : int = """generator""" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
if accepts_generator:
__a : Union[str, Any] = generator
with self.progress_bar(total=__UpperCamelCase ):
for i, t in enumerate(__UpperCamelCase ):
# expand the latents if we are doing classifier free guidance
__a : Tuple = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
__a : Any = self.scheduler.scale_model_input(__UpperCamelCase , __UpperCamelCase )
# predict the noise residual
__a : Optional[int] = self.unet(__UpperCamelCase , __UpperCamelCase , encoder_hidden_states=__UpperCamelCase ).sample
# perform classifier free guidance
if do_classifier_free_guidance:
__a , __a : Tuple = noise_pred.chunk(2 )
__a : str = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# perform clip guidance
if clip_guidance_scale > 0:
__a : Any = (
text_embeddings.chunk(2 )[1] if do_classifier_free_guidance else text_embeddings
)
__a , __a : Any = self.cond_fn(
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , )
# compute the previous noisy sample x_t -> x_t-1
__a : str = self.scheduler.step(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , **__UpperCamelCase ).prev_sample
        # Hardcode 0.18215 because stable-diffusion-2-base does not have self.vae.config.scaling_factor
__a : int = 1 / 0.1_8_2_1_5 * latents
__a : Tuple = self.vae.decode(__UpperCamelCase ).sample
__a : str = (image / 2 + 0.5).clamp(0 , 1 )
__a : str = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
__a : Any = self.numpy_to_pil(__UpperCamelCase )
if not return_dict:
return (image, None)
return StableDiffusionPipelineOutput(images=__UpperCamelCase , nsfw_content_detected=__UpperCamelCase ) | 697 |
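# NumPy-only sketch (hypothetical names) of the spherical interpolation used
# above to blend the content/style text embeddings, latents, and CLIP image
# embeddings; nearly parallel inputs fall back to plain linear interpolation.
import numpy as np


def _sketch_slerp(t, v0, v1, dot_threshold=0.9995):
    dot = np.sum(v0 * v1 / (np.linalg.norm(v0) * np.linalg.norm(v1)))
    if np.abs(dot) > dot_threshold:
        return (1 - t) * v0 + t * v1  # vectors nearly parallel: lerp instead
    theta = np.arccos(dot)  # angle between the two vectors
    s0 = np.sin((1 - t) * theta) / np.sin(theta)
    s1 = np.sin(t * theta) / np.sin(theta)
    return s0 * v0 + s1 * v1


_mid = _sketch_slerp(0.5, np.array([1.0, 0.0]), np.array([0.0, 1.0]))
assert np.allclose(np.linalg.norm(_mid), 1.0)  # interpolant stays on the unit circle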
'''simple docstring'''
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import DiffusionPipeline
from diffusers.models import AutoencoderKL, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import logging
__SCREAMING_SNAKE_CASE : Union[str, Any] = logging.get_logger(__name__) # pylint: disable=invalid-name
class SCREAMING_SNAKE_CASE__ ( __UpperCamelCase ):
def __init__( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , ):
'''simple docstring'''
super().__init__()
self.register_modules(
vae=__UpperCamelCase , text_encoder=__UpperCamelCase , tokenizer=__UpperCamelCase , unet=__UpperCamelCase , scheduler=__UpperCamelCase , safety_checker=__UpperCamelCase , feature_extractor=__UpperCamelCase , )
def __lowerCamelCase ( self , __UpperCamelCase = "auto" ):
'''simple docstring'''
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
__a : Union[str, Any] = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(__UpperCamelCase )
def __lowerCamelCase ( self ):
'''simple docstring'''
self.enable_attention_slicing(__UpperCamelCase )
@torch.no_grad()
def __call__( self , __UpperCamelCase , __UpperCamelCase = 512 , __UpperCamelCase = 512 , __UpperCamelCase = 50 , __UpperCamelCase = 7.5 , __UpperCamelCase = None , __UpperCamelCase = 1 , __UpperCamelCase = 0.0 , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = "pil" , __UpperCamelCase = True , __UpperCamelCase = None , __UpperCamelCase = 1 , __UpperCamelCase = None , **__UpperCamelCase , ):
'''simple docstring'''
if isinstance(__UpperCamelCase , __UpperCamelCase ):
__a : Union[str, Any] = 1
elif isinstance(__UpperCamelCase , __UpperCamelCase ):
__a : Tuple = len(__UpperCamelCase )
else:
raise ValueError(f"""`prompt` has to be of type `str` or `list` but is {type(__UpperCamelCase )}""" )
if height % 8 != 0 or width % 8 != 0:
raise ValueError(f"""`height` and `width` have to be divisible by 8 but are {height} and {width}.""" )
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(__UpperCamelCase , __UpperCamelCase ) or callback_steps <= 0)
):
raise ValueError(
f"""`callback_steps` has to be a positive integer but is {callback_steps} of type"""
f""" {type(__UpperCamelCase )}.""" )
# get prompt text embeddings
__a : Tuple = self.tokenizer(
__UpperCamelCase , padding="""max_length""" , max_length=self.tokenizer.model_max_length , return_tensors="""pt""" , )
__a : Union[str, Any] = text_inputs.input_ids
if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
__a : str = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
logger.warning(
"""The following part of your input was truncated because CLIP can only handle sequences up to"""
f""" {self.tokenizer.model_max_length} tokens: {removed_text}""" )
__a : Optional[int] = text_input_ids[:, : self.tokenizer.model_max_length]
if text_embeddings is None:
__a : int = self.text_encoder(text_input_ids.to(self.device ) )[0]
# duplicate text embeddings for each generation per prompt, using mps friendly method
__a , __a , __a : Union[str, Any] = text_embeddings.shape
__a : Optional[Any] = text_embeddings.repeat(1 , __UpperCamelCase , 1 )
__a : Union[str, Any] = text_embeddings.view(bs_embed * num_images_per_prompt , __UpperCamelCase , -1 )
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
__a : Any = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
__a : List[str]
if negative_prompt is None:
__a : Optional[Any] = [""""""]
elif type(__UpperCamelCase ) is not type(__UpperCamelCase ):
raise TypeError(
f"""`negative_prompt` should be the same type to `prompt`, but got {type(__UpperCamelCase )} !="""
f""" {type(__UpperCamelCase )}.""" )
elif isinstance(__UpperCamelCase , __UpperCamelCase ):
__a : Any = [negative_prompt]
elif batch_size != len(__UpperCamelCase ):
raise ValueError(
f"""`negative_prompt`: {negative_prompt} has batch size {len(__UpperCamelCase )}, but `prompt`:"""
f""" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"""
""" the batch size of `prompt`.""" )
else:
__a : Tuple = negative_prompt
__a : Any = text_input_ids.shape[-1]
__a : List[str] = self.tokenizer(
__UpperCamelCase , padding="""max_length""" , max_length=__UpperCamelCase , truncation=__UpperCamelCase , return_tensors="""pt""" , )
__a : str = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
__a : List[str] = uncond_embeddings.shape[1]
__a : List[Any] = uncond_embeddings.repeat(__UpperCamelCase , __UpperCamelCase , 1 )
__a : Tuple = uncond_embeddings.view(batch_size * num_images_per_prompt , __UpperCamelCase , -1 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
__a : List[Any] = torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
__a : Tuple = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
__a : List[Any] = (batch_size * num_images_per_prompt, self.unet.config.in_channels, 64, 64)
__a : int = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not exist on mps
__a : Any = torch.randn(
__UpperCamelCase , generator=__UpperCamelCase , device="""cpu""" , dtype=__UpperCamelCase ).to(self.device )
__a : Optional[Any] = torch.randn(__UpperCamelCase , generator=__UpperCamelCase , device="""cpu""" , dtype=__UpperCamelCase ).to(
self.device )
else:
__a : Optional[int] = torch.randn(
__UpperCamelCase , generator=__UpperCamelCase , device=self.device , dtype=__UpperCamelCase )
__a : str = torch.randn(__UpperCamelCase , generator=__UpperCamelCase , device=self.device , dtype=__UpperCamelCase )
else:
if latents_reference.shape != latents_shape:
raise ValueError(f"""Unexpected latents shape, got {latents.shape}, expected {latents_shape}""" )
__a : Optional[Any] = latents_reference.to(self.device )
__a : str = latents.to(self.device )
# This is the key part of the pipeline where we
# try to ensure that the generated images w/ the same seed
# but different sizes actually result in similar images
__a : List[str] = (latents_shape[3] - latents_shape_reference[3]) // 2
__a : int = (latents_shape[2] - latents_shape_reference[2]) // 2
__a : int = latents_shape_reference[3] if dx >= 0 else latents_shape_reference[3] + 2 * dx
__a : Tuple = latents_shape_reference[2] if dy >= 0 else latents_shape_reference[2] + 2 * dy
__a : Optional[Any] = 0 if dx < 0 else dx
__a : Optional[Any] = 0 if dy < 0 else dy
__a : Optional[int] = max(-dx , 0 )
__a : Optional[Any] = max(-dy , 0 )
__a : Optional[int] = latents_reference[:, :, dy : dy + h, dx : dx + w]
# set timesteps
self.scheduler.set_timesteps(__UpperCamelCase )
# Some schedulers like PNDM have timesteps as arrays
# It's more optimized to move all timesteps to correct device beforehand
__a : Dict = self.scheduler.timesteps.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
__a : Any = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        # eta (η) is only used with the DDIMScheduler; it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
__a : List[Any] = """eta""" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
__a : Optional[Any] = {}
if accepts_eta:
__a : Union[str, Any] = eta
for i, t in enumerate(self.progress_bar(__UpperCamelCase ) ):
# expand the latents if we are doing classifier free guidance
__a : List[str] = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
__a : Tuple = self.scheduler.scale_model_input(__UpperCamelCase , __UpperCamelCase )
# predict the noise residual
__a : Union[str, Any] = self.unet(__UpperCamelCase , __UpperCamelCase , encoder_hidden_states=__UpperCamelCase ).sample
# perform guidance
if do_classifier_free_guidance:
__a , __a : List[str] = noise_pred.chunk(2 )
__a : Optional[int] = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# compute the previous noisy sample x_t -> x_t-1
__a : List[Any] = self.scheduler.step(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , **__UpperCamelCase ).prev_sample
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
__a : Optional[Any] = 1 / 0.1_8_2_1_5 * latents
__a : Optional[int] = self.vae.decode(__UpperCamelCase ).sample
__a : List[str] = (image / 2 + 0.5).clamp(0 , 1 )
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
__a : int = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if self.safety_checker is not None:
__a : List[str] = self.feature_extractor(self.numpy_to_pil(__UpperCamelCase ) , return_tensors="""pt""" ).to(
self.device )
__a , __a : int = self.safety_checker(
images=__UpperCamelCase , clip_input=safety_checker_input.pixel_values.to(text_embeddings.dtype ) )
else:
__a : Optional[int] = None
if output_type == "pil":
__a : str = self.numpy_to_pil(__UpperCamelCase )
if not return_dict:
return (image, has_nsfw_concept)
return StableDiffusionPipelineOutput(images=__UpperCamelCase , nsfw_content_detected=__UpperCamelCase ) | 697 | 1 |
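# Sketch of the classifier-free-guidance arithmetic both pipelines above run
# at every denoising step: the unconditional prediction is pushed toward the
# text-conditioned one by `guidance_scale`. Shapes below are arbitrary.
import torch

_noise_pred = torch.randn(2, 4, 8, 8)  # [uncond, text] stacked along the batch
_uncond, _text = _noise_pred.chunk(2)
_guidance_scale = 7.5
_guided = _uncond + _guidance_scale * (_text - _uncond)
assert _guided.shape == _uncond.shape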
'''simple docstring'''
import numpy as np
from PIL import Image
def _snake_case ( lowercase , lowercase , lowercase ) -> np.ndarray:
__a : Any = np.array(lowercase )
if arr.shape[0] != arr.shape[1]:
raise ValueError("""The input array is not a square matrix""" )
__a : Union[str, Any] = 0
__a : Dict = 0
__a : Optional[Any] = 0
__a : Tuple = 0
# compute the shape of the output matrix
__a : Optional[int] = (arr.shape[0] - size) // stride + 1
# initialize the output matrix with zeros of shape maxpool_shape
__a : int = np.zeros((maxpool_shape, maxpool_shape) )
while i < arr.shape[0]:
if i + size > arr.shape[0]:
# if the end of the matrix is reached, break
break
while j < arr.shape[1]:
# if the end of the matrix is reached, break
if j + size > arr.shape[1]:
break
# compute the maximum of the pooling matrix
__a : Optional[Any] = np.max(arr[i : i + size, j : j + size] )
# shift the pooling matrix by stride of column pixels
j += stride
mat_j += 1
# shift the pooling matrix by stride of row pixels
i += stride
mat_i += 1
# reset the column index to 0
__a : Optional[Any] = 0
__a : str = 0
return updated_arr
def _snake_case ( lowercase , lowercase , lowercase ) -> np.ndarray:
__a : int = np.array(lowercase )
if arr.shape[0] != arr.shape[1]:
raise ValueError("""The input array is not a square matrix""" )
__a : int = 0
__a : Optional[Any] = 0
__a : str = 0
__a : List[Any] = 0
# compute the shape of the output matrix
__a : int = (arr.shape[0] - size) // stride + 1
# initialize the output matrix with zeros of shape avgpool_shape
__a : Optional[int] = np.zeros((avgpool_shape, avgpool_shape) )
while i < arr.shape[0]:
# if the end of the matrix is reached, break
if i + size > arr.shape[0]:
break
while j < arr.shape[1]:
# if the end of the matrix is reached, break
if j + size > arr.shape[1]:
break
# compute the average of the pooling matrix
__a : Any = int(np.average(arr[i : i + size, j : j + size] ) )
# shift the pooling matrix by stride of column pixels
j += stride
mat_j += 1
# shift the pooling matrix by stride of row pixels
i += stride
mat_i += 1
# reset the column index to 0
__a : str = 0
__a : List[Any] = 0
return updated_arr
# Main Function
if __name__ == "__main__":
from doctest import testmod
testmod(name='avgpooling', verbose=True)
# Loading the image
__SCREAMING_SNAKE_CASE : str = Image.open('path_to_image')
# Converting the image to numpy array and maxpooling, displaying the result
# Ensure that the image is a square matrix
Image.fromarray(maxpooling(np.array(image), size=3, stride=2)).show()
# Converting the image to numpy array and averagepooling, displaying the result
# Ensure that the image is a square matrix
Image.fromarray(avgpooling(np.array(image), size=3, stride=2)).show() | 697 |
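# Self-contained check of what the max-pooling helper above computes for a
# 4x4 input with size=2, stride=2: the maximum of each 2x2 block.
import numpy as np

_inp = np.arange(16, dtype=float).reshape(4, 4)
_out = _inp.reshape(2, 2, 2, 2).max(axis=(1, 3))  # max over each 2x2 block
assert np.array_equal(_out, np.array([[5.0, 7.0], [13.0, 15.0]]))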
'''simple docstring'''
import numpy as np
from PIL import Image
def _snake_case ( lowercase , lowercase , lowercase ) -> np.ndarray:
__a : Any = np.array(lowercase )
if arr.shape[0] != arr.shape[1]:
raise ValueError("""The input array is not a square matrix""" )
__a : Union[str, Any] = 0
__a : Dict = 0
__a : Optional[Any] = 0
__a : Tuple = 0
# compute the shape of the output matrix
__a : Optional[int] = (arr.shape[0] - size) // stride + 1
# initialize the output matrix with zeros of shape maxpool_shape
__a : int = np.zeros((maxpool_shape, maxpool_shape) )
while i < arr.shape[0]:
if i + size > arr.shape[0]:
# if the end of the matrix is reached, break
break
while j < arr.shape[1]:
# if the end of the matrix is reached, break
if j + size > arr.shape[1]:
break
# compute the maximum of the pooling matrix
__a : Optional[Any] = np.max(arr[i : i + size, j : j + size] )
# shift the pooling matrix by stride of column pixels
j += stride
mat_j += 1
# shift the pooling matrix by stride of row pixels
i += stride
mat_i += 1
# reset the column index to 0
__a : Optional[Any] = 0
__a : str = 0
return updated_arr
def _snake_case ( lowercase , lowercase , lowercase ) -> np.ndarray:
__a : int = np.array(lowercase )
if arr.shape[0] != arr.shape[1]:
raise ValueError("""The input array is not a square matrix""" )
__a : int = 0
__a : Optional[Any] = 0
__a : str = 0
__a : List[Any] = 0
# compute the shape of the output matrix
__a : int = (arr.shape[0] - size) // stride + 1
# initialize the output matrix with zeros of shape avgpool_shape
__a : Optional[int] = np.zeros((avgpool_shape, avgpool_shape) )
while i < arr.shape[0]:
# if the end of the matrix is reached, break
if i + size > arr.shape[0]:
break
while j < arr.shape[1]:
# if the end of the matrix is reached, break
if j + size > arr.shape[1]:
break
# compute the average of the pooling matrix
__a : Any = int(np.average(arr[i : i + size, j : j + size] ) )
# shift the pooling matrix by stride of column pixels
j += stride
mat_j += 1
# shift the pooling matrix by stride of row pixels
i += stride
mat_i += 1
# reset the column index to 0
__a : str = 0
__a : List[Any] = 0
return updated_arr
# Main Function
if __name__ == "__main__":
from doctest import testmod
testmod(name='avgpooling', verbose=True)
# Loading the image
__SCREAMING_SNAKE_CASE : str = Image.open('path_to_image')
# Converting the image to numpy array and maxpooling, displaying the result
# Ensure that the image is a square matrix
Image.fromarray(maxpooling(np.array(image), size=3, stride=2)).show()
# Converting the image to numpy array and averagepooling, displaying the result
# Ensure that the image is a square matrix
Image.fromarray(avgpooling(np.array(image), size=3, stride=2)).show() | 697 | 1 |
'''simple docstring'''
import unittest
from transformers import (
MODEL_FOR_OBJECT_DETECTION_MAPPING,
AutoFeatureExtractor,
AutoModelForObjectDetection,
ObjectDetectionPipeline,
is_vision_available,
pipeline,
)
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_pytesseract,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class SCREAMING_SNAKE_CASE__ :
@staticmethod
def __lowerCamelCase ( *__UpperCamelCase , **__UpperCamelCase ):
'''simple docstring'''
pass
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
lowercase__ = MODEL_FOR_OBJECT_DETECTION_MAPPING
def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ):
'''simple docstring'''
__a : Optional[Any] = ObjectDetectionPipeline(model=__UpperCamelCase , image_processor=__UpperCamelCase )
return object_detector, ["./tests/fixtures/tests_samples/COCO/000000039769.png"]
def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase ):
'''simple docstring'''
__a : List[str] = object_detector("""./tests/fixtures/tests_samples/COCO/000000039769.png""" , threshold=0.0 )
self.assertGreater(len(__UpperCamelCase ) , 0 )
for detected_object in outputs:
self.assertEqual(
__UpperCamelCase , {
"""score""": ANY(__UpperCamelCase ),
"""label""": ANY(__UpperCamelCase ),
"""box""": {"""xmin""": ANY(__UpperCamelCase ), """ymin""": ANY(__UpperCamelCase ), """xmax""": ANY(__UpperCamelCase ), """ymax""": ANY(__UpperCamelCase )},
} , )
import datasets
__a : Optional[int] = datasets.load_dataset("""hf-internal-testing/fixtures_image_utils""" , """image""" , split="""test""" )
__a : Tuple = [
Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ),
"""http://images.cocodataset.org/val2017/000000039769.jpg""",
# RGBA
dataset[0]["""file"""],
# LA
dataset[1]["""file"""],
# L
dataset[2]["""file"""],
]
__a : Any = object_detector(__UpperCamelCase , threshold=0.0 )
self.assertEqual(len(__UpperCamelCase ) , len(__UpperCamelCase ) )
for outputs in batch_outputs:
self.assertGreater(len(__UpperCamelCase ) , 0 )
for detected_object in outputs:
self.assertEqual(
__UpperCamelCase , {
"""score""": ANY(__UpperCamelCase ),
"""label""": ANY(__UpperCamelCase ),
"""box""": {"""xmin""": ANY(__UpperCamelCase ), """ymin""": ANY(__UpperCamelCase ), """xmax""": ANY(__UpperCamelCase ), """ymax""": ANY(__UpperCamelCase )},
} , )
@require_tf
@unittest.skip("""Object detection not implemented in TF""" )
def __lowerCamelCase ( self ):
'''simple docstring'''
pass
@require_torch
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Optional[Any] = """hf-internal-testing/tiny-detr-mobilenetsv3"""
__a : Dict = AutoModelForObjectDetection.from_pretrained(__UpperCamelCase )
__a : Optional[Any] = AutoFeatureExtractor.from_pretrained(__UpperCamelCase )
__a : str = ObjectDetectionPipeline(model=__UpperCamelCase , feature_extractor=__UpperCamelCase )
__a : Optional[int] = object_detector("""http://images.cocodataset.org/val2017/000000039769.jpg""" , threshold=0.0 )
self.assertEqual(
nested_simplify(__UpperCamelCase , decimals=4 ) , [
{"""score""": 0.3_3_7_6, """label""": """LABEL_0""", """box""": {"""xmin""": 159, """ymin""": 120, """xmax""": 480, """ymax""": 359}},
{"""score""": 0.3_3_7_6, """label""": """LABEL_0""", """box""": {"""xmin""": 159, """ymin""": 120, """xmax""": 480, """ymax""": 359}},
] , )
__a : Union[str, Any] = object_detector(
[
"""http://images.cocodataset.org/val2017/000000039769.jpg""",
"""http://images.cocodataset.org/val2017/000000039769.jpg""",
] , threshold=0.0 , )
self.assertEqual(
nested_simplify(__UpperCamelCase , decimals=4 ) , [
[
{"""score""": 0.3_3_7_6, """label""": """LABEL_0""", """box""": {"""xmin""": 159, """ymin""": 120, """xmax""": 480, """ymax""": 359}},
{"""score""": 0.3_3_7_6, """label""": """LABEL_0""", """box""": {"""xmin""": 159, """ymin""": 120, """xmax""": 480, """ymax""": 359}},
],
[
{"""score""": 0.3_3_7_6, """label""": """LABEL_0""", """box""": {"""xmin""": 159, """ymin""": 120, """xmax""": 480, """ymax""": 359}},
{"""score""": 0.3_3_7_6, """label""": """LABEL_0""", """box""": {"""xmin""": 159, """ymin""": 120, """xmax""": 480, """ymax""": 359}},
],
] , )
@require_torch
@slow
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : str = """facebook/detr-resnet-50"""
__a : Dict = AutoModelForObjectDetection.from_pretrained(__UpperCamelCase )
__a : int = AutoFeatureExtractor.from_pretrained(__UpperCamelCase )
__a : int = ObjectDetectionPipeline(model=__UpperCamelCase , feature_extractor=__UpperCamelCase )
__a : Any = object_detector("""http://images.cocodataset.org/val2017/000000039769.jpg""" )
self.assertEqual(
nested_simplify(__UpperCamelCase , decimals=4 ) , [
{"""score""": 0.9_9_8_2, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 70, """xmax""": 175, """ymax""": 117}},
{"""score""": 0.9_9_6_0, """label""": """remote""", """box""": {"""xmin""": 333, """ymin""": 72, """xmax""": 368, """ymax""": 187}},
{"""score""": 0.9_9_5_5, """label""": """couch""", """box""": {"""xmin""": 0, """ymin""": 1, """xmax""": 639, """ymax""": 473}},
{"""score""": 0.9_9_8_8, """label""": """cat""", """box""": {"""xmin""": 13, """ymin""": 52, """xmax""": 314, """ymax""": 470}},
{"""score""": 0.9_9_8_7, """label""": """cat""", """box""": {"""xmin""": 345, """ymin""": 23, """xmax""": 640, """ymax""": 368}},
] , )
__a : Optional[Any] = object_detector(
[
"""http://images.cocodataset.org/val2017/000000039769.jpg""",
"""http://images.cocodataset.org/val2017/000000039769.jpg""",
] )
self.assertEqual(
nested_simplify(__UpperCamelCase , decimals=4 ) , [
[
{"""score""": 0.9_9_8_2, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 70, """xmax""": 175, """ymax""": 117}},
{"""score""": 0.9_9_6_0, """label""": """remote""", """box""": {"""xmin""": 333, """ymin""": 72, """xmax""": 368, """ymax""": 187}},
{"""score""": 0.9_9_5_5, """label""": """couch""", """box""": {"""xmin""": 0, """ymin""": 1, """xmax""": 639, """ymax""": 473}},
{"""score""": 0.9_9_8_8, """label""": """cat""", """box""": {"""xmin""": 13, """ymin""": 52, """xmax""": 314, """ymax""": 470}},
{"""score""": 0.9_9_8_7, """label""": """cat""", """box""": {"""xmin""": 345, """ymin""": 23, """xmax""": 640, """ymax""": 368}},
],
[
{"""score""": 0.9_9_8_2, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 70, """xmax""": 175, """ymax""": 117}},
{"""score""": 0.9_9_6_0, """label""": """remote""", """box""": {"""xmin""": 333, """ymin""": 72, """xmax""": 368, """ymax""": 187}},
{"""score""": 0.9_9_5_5, """label""": """couch""", """box""": {"""xmin""": 0, """ymin""": 1, """xmax""": 639, """ymax""": 473}},
{"""score""": 0.9_9_8_8, """label""": """cat""", """box""": {"""xmin""": 13, """ymin""": 52, """xmax""": 314, """ymax""": 470}},
{"""score""": 0.9_9_8_7, """label""": """cat""", """box""": {"""xmin""": 345, """ymin""": 23, """xmax""": 640, """ymax""": 368}},
],
] , )
@require_torch
@slow
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : int = """facebook/detr-resnet-50"""
__a : Optional[int] = pipeline("""object-detection""" , model=__UpperCamelCase )
__a : Optional[int] = object_detector("""http://images.cocodataset.org/val2017/000000039769.jpg""" )
self.assertEqual(
nested_simplify(__UpperCamelCase , decimals=4 ) , [
{"""score""": 0.9_9_8_2, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 70, """xmax""": 175, """ymax""": 117}},
{"""score""": 0.9_9_6_0, """label""": """remote""", """box""": {"""xmin""": 333, """ymin""": 72, """xmax""": 368, """ymax""": 187}},
{"""score""": 0.9_9_5_5, """label""": """couch""", """box""": {"""xmin""": 0, """ymin""": 1, """xmax""": 639, """ymax""": 473}},
{"""score""": 0.9_9_8_8, """label""": """cat""", """box""": {"""xmin""": 13, """ymin""": 52, """xmax""": 314, """ymax""": 470}},
{"""score""": 0.9_9_8_7, """label""": """cat""", """box""": {"""xmin""": 345, """ymin""": 23, """xmax""": 640, """ymax""": 368}},
] , )
__a : List[str] = object_detector(
[
"""http://images.cocodataset.org/val2017/000000039769.jpg""",
"""http://images.cocodataset.org/val2017/000000039769.jpg""",
] )
self.assertEqual(
nested_simplify(__UpperCamelCase , decimals=4 ) , [
[
{"""score""": 0.9_9_8_2, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 70, """xmax""": 175, """ymax""": 117}},
{"""score""": 0.9_9_6_0, """label""": """remote""", """box""": {"""xmin""": 333, """ymin""": 72, """xmax""": 368, """ymax""": 187}},
{"""score""": 0.9_9_5_5, """label""": """couch""", """box""": {"""xmin""": 0, """ymin""": 1, """xmax""": 639, """ymax""": 473}},
{"""score""": 0.9_9_8_8, """label""": """cat""", """box""": {"""xmin""": 13, """ymin""": 52, """xmax""": 314, """ymax""": 470}},
{"""score""": 0.9_9_8_7, """label""": """cat""", """box""": {"""xmin""": 345, """ymin""": 23, """xmax""": 640, """ymax""": 368}},
],
[
{"""score""": 0.9_9_8_2, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 70, """xmax""": 175, """ymax""": 117}},
{"""score""": 0.9_9_6_0, """label""": """remote""", """box""": {"""xmin""": 333, """ymin""": 72, """xmax""": 368, """ymax""": 187}},
{"""score""": 0.9_9_5_5, """label""": """couch""", """box""": {"""xmin""": 0, """ymin""": 1, """xmax""": 639, """ymax""": 473}},
{"""score""": 0.9_9_8_8, """label""": """cat""", """box""": {"""xmin""": 13, """ymin""": 52, """xmax""": 314, """ymax""": 470}},
{"""score""": 0.9_9_8_7, """label""": """cat""", """box""": {"""xmin""": 345, """ymin""": 23, """xmax""": 640, """ymax""": 368}},
],
] , )
@require_torch
@slow
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Union[str, Any] = 0.9_9_8_5
__a : Union[str, Any] = """facebook/detr-resnet-50"""
__a : Optional[int] = pipeline("""object-detection""" , model=__UpperCamelCase )
__a : Union[str, Any] = object_detector("""http://images.cocodataset.org/val2017/000000039769.jpg""" , threshold=__UpperCamelCase )
self.assertEqual(
nested_simplify(__UpperCamelCase , decimals=4 ) , [
{"""score""": 0.9_9_8_8, """label""": """cat""", """box""": {"""xmin""": 13, """ymin""": 52, """xmax""": 314, """ymax""": 470}},
{"""score""": 0.9_9_8_7, """label""": """cat""", """box""": {"""xmin""": 345, """ymin""": 23, """xmax""": 640, """ymax""": 368}},
] , )
@require_torch
@require_pytesseract
@slow
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : str = """Narsil/layoutlmv3-finetuned-funsd"""
__a : List[Any] = 0.9_9_9_3
__a : Dict = pipeline("""object-detection""" , model=__UpperCamelCase , threshold=__UpperCamelCase )
__a : List[str] = object_detector(
"""https://huggingface.co/spaces/impira/docquery/resolve/2359223c1837a7587402bda0f2643382a6eefeab/invoice.png""" )
self.assertEqual(
nested_simplify(__UpperCamelCase , decimals=4 ) , [
{"""score""": 0.9_9_9_3, """label""": """I-ANSWER""", """box""": {"""xmin""": 294, """ymin""": 254, """xmax""": 343, """ymax""": 264}},
{"""score""": 0.9_9_9_3, """label""": """I-ANSWER""", """box""": {"""xmin""": 294, """ymin""": 254, """xmax""": 343, """ymax""": 264}},
] , ) | 697 |
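# Sketch of the pipeline call pattern the tests above exercise; each returned
# entry follows the {"score", "label", "box"} schema asserted there. Model id
# and image URL are taken from the slow tests (weights download on first run).
from transformers import pipeline

_detector = pipeline("object-detection", model="facebook/detr-resnet-50")
_detections = _detector(
    "http://images.cocodataset.org/val2017/000000039769.jpg", threshold=0.9
)
for _detection in _detections:
    print(_detection["label"], round(_detection["score"], 4), _detection["box"])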
'''simple docstring'''
import qiskit
def _snake_case ( lowercase , lowercase ) -> qiskit.result.counts.Counts:
__a : Any = qiskit.Aer.get_backend("""aer_simulator""" )
# Create a Quantum Circuit acting on the q register
__a : str = qiskit.QuantumCircuit(lowercase , lowercase )
# Map the quantum measurement to the classical bits
circuit.measure([0] , [0] )
# Execute the circuit on the simulator
__a : Any = qiskit.execute(lowercase , lowercase , shots=1_0_0_0 )
# Return the histogram data of the results of the experiment.
return job.result().get_counts(lowercase )
if __name__ == "__main__":
print(f'''Total count for various states are: {single_qubit_measure(1, 1)}''') | 697 | 1 |
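# Note on the return type above: get_counts yields a dict-like mapping from
# measured bitstrings to shot counts. With no gates before the measurement,
# all 1000 shots land on "0"; normalizing gives the estimated probabilities.
_counts = {"0": 1000}  # illustrative stand-in for the returned Counts
_total_shots = sum(_counts.values())
_probabilities = {state: n / _total_shots for state, n in _counts.items()}
assert _probabilities["0"] == 1.0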
'''simple docstring'''
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import SPIECE_UNDERLINE, is_sentencepiece_available
from transformers.models.speech_to_text import SpeechaTextTokenizer
from transformers.models.speech_to_text.tokenization_speech_to_text import VOCAB_FILES_NAMES, save_json
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
__SCREAMING_SNAKE_CASE : Union[str, Any] = get_tests_dir('fixtures/test_sentencepiece.model')
if is_sentencepiece_available():
import sentencepiece as sp
__SCREAMING_SNAKE_CASE : Tuple = 5
__SCREAMING_SNAKE_CASE : Union[str, Any] = 10
@require_sentencepiece
@require_tokenizers
class SCREAMING_SNAKE_CASE__ ( __UpperCamelCase , unittest.TestCase ):
lowercase__ = SpeechaTextTokenizer
lowercase__ = False
lowercase__ = True
def __lowerCamelCase ( self ):
'''simple docstring'''
super().setUp()
__a : Union[str, Any] = sp.SentencePieceProcessor()
spm_model.Load(__UpperCamelCase )
__a : Tuple = ["""<s>""", """<pad>""", """</s>""", """<unk>"""]
vocab += [spm_model.IdToPiece(id_ ) for id_ in range(len(__UpperCamelCase ) )]
__a : List[str] = dict(zip(__UpperCamelCase , range(len(__UpperCamelCase ) ) ) )
__a : Optional[int] = Path(self.tmpdirname )
save_json(__UpperCamelCase , save_dir / VOCAB_FILES_NAMES["""vocab_file"""] )
if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists():
copyfile(__UpperCamelCase , save_dir / VOCAB_FILES_NAMES["""spm_file"""] )
__a : Optional[int] = SpeechaTextTokenizer.from_pretrained(self.tmpdirname )
tokenizer.save_pretrained(self.tmpdirname )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : List[Any] = """<pad>"""
__a : Union[str, Any] = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(__UpperCamelCase ) , __UpperCamelCase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(__UpperCamelCase ) , __UpperCamelCase )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Tuple = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """<s>""" )
self.assertEqual(vocab_keys[1] , """<pad>""" )
self.assertEqual(vocab_keys[-1] , """j""" )
self.assertEqual(len(__UpperCamelCase ) , 1001 )
def __lowerCamelCase ( self ):
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size , 1001 )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Any = SpeechaTextTokenizer.from_pretrained(self.tmpdirname )
__a : Union[str, Any] = tokenizer.tokenize("""This is a test""" )
self.assertListEqual(__UpperCamelCase , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(__UpperCamelCase ) , [289, 50, 14, 174, 386] , )
__a : Dict = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
__UpperCamelCase , [SPIECE_UNDERLINE + """I""", SPIECE_UNDERLINE + """was""", SPIECE_UNDERLINE + """b""", """or""", """n""", SPIECE_UNDERLINE + """in""", SPIECE_UNDERLINE + """""", """9""", """2""", """0""", """0""", """0""", """,""", SPIECE_UNDERLINE + """and""", SPIECE_UNDERLINE + """this""", SPIECE_UNDERLINE + """is""", SPIECE_UNDERLINE + """f""", """al""", """s""", """é""", """."""] , )
__a : str = tokenizer.convert_tokens_to_ids(__UpperCamelCase )
self.assertListEqual(__UpperCamelCase , [12, 25, 88, 59, 28, 23, 11, 4, 606, 351, 351, 351, 7, 16, 70, 50, 76, 84, 10, 4, 8] )
__a : str = tokenizer.convert_ids_to_tokens(__UpperCamelCase )
self.assertListEqual(
__UpperCamelCase , [SPIECE_UNDERLINE + """I""", SPIECE_UNDERLINE + """was""", SPIECE_UNDERLINE + """b""", """or""", """n""", SPIECE_UNDERLINE + """in""", SPIECE_UNDERLINE + """""", """<unk>""", """2""", """0""", """0""", """0""", """,""", SPIECE_UNDERLINE + """and""", SPIECE_UNDERLINE + """this""", SPIECE_UNDERLINE + """is""", SPIECE_UNDERLINE + """f""", """al""", """s""", """<unk>""", """."""] , )
@slow
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : List[str] = {"""input_ids""": [[3791, 797, 31, 11, 64, 797, 31, 2429, 433, 12, 1176, 12, 20, 786, 915, 142, 2413, 240, 37, 3238, 797, 31, 11, 35, 93, 915, 142, 2413, 240, 37, 5540, 567, 1276, 93, 37, 610, 40, 62, 455, 657, 1042, 123, 780, 177, 37, 309, 241, 1298, 514, 20, 292, 2737, 114, 2469, 241, 85, 64, 302, 548, 528, 423, 4, 509, 406, 423, 37, 601, 4, 777, 302, 548, 528, 423, 284, 4, 3388, 511, 459, 4, 3555, 40, 321, 302, 705, 4, 3388, 511, 583, 326, 5, 5, 5, 62, 3310, 560, 177, 2680, 217, 1508, 32, 31, 853, 418, 64, 583, 511, 1605, 62, 35, 93, 560, 177, 2680, 217, 1508, 1521, 64, 583, 511, 519, 62, 20, 1515, 764, 20, 149, 261, 5625, 7972, 20, 5540, 567, 1276, 93, 3925, 1675, 11, 15, 802, 7972, 576, 217, 1508, 11, 35, 93, 1253, 2441, 15, 289, 652, 31, 416, 321, 3842, 115, 40, 911, 8, 476, 619, 4, 380, 142, 423, 335, 240, 35, 93, 264, 8, 11, 335, 569, 420, 163, 5, 2], [260, 548, 528, 423, 20, 451, 20, 2681, 1153, 3434, 20, 5540, 37, 567, 126, 1253, 2441, 3376, 449, 210, 431, 1563, 177, 767, 5540, 11, 1203, 472, 11, 2953, 685, 285, 364, 706, 1153, 20, 6799, 20, 2869, 20, 4464, 126, 40, 2429, 20, 1040, 866, 2664, 418, 20, 318, 20, 1726, 186, 20, 265, 522, 35, 93, 2191, 4634, 20, 1040, 12, 6799, 15, 228, 2356, 142, 31, 11, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [2575, 2666, 684, 1582, 1176, 12, 627, 149, 619, 20, 4902, 563, 11, 20, 149, 261, 3420, 2356, 174, 142, 4714, 131, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=__UpperCamelCase , model_name="""facebook/s2t-small-mustc-en-de-st""" , revision="""a14f04cf0776c02f62a8cb800cf7909e15ea23ad""" , )
@require_sentencepiece
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
lowercase__ = "valhalla/s2t_mustc_multilinguial_medium"
lowercase__ = "C'est trop cool"
lowercase__ = "Esto es genial"
@classmethod
def __lowerCamelCase ( cls ):
'''simple docstring'''
__a : SpeechaTextTokenizer = SpeechaTextTokenizer.from_pretrained(cls.checkpoint_name )
return cls
def __lowerCamelCase ( self ):
'''simple docstring'''
self.assertEqual(self.tokenizer.lang_code_to_id["""pt"""] , 4 )
self.assertEqual(self.tokenizer.lang_code_to_id["""ru"""] , 6 )
self.assertEqual(self.tokenizer.lang_code_to_id["""it"""] , 9 )
self.assertEqual(self.tokenizer.lang_code_to_id["""de"""] , 11 )
def __lowerCamelCase ( self ):
'''simple docstring'''
self.assertEqual(self.tokenizer.vocab_size , 1_0000 )
def __lowerCamelCase ( self ):
'''simple docstring'''
self.assertIn(__UpperCamelCase , self.tokenizer.all_special_ids )
__a : List[str] = [ES_CODE, 4, 1601, 47, 7647, 2]
__a : Union[str, Any] = self.tokenizer.decode(__UpperCamelCase , skip_special_tokens=__UpperCamelCase )
__a : int = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=__UpperCamelCase )
self.assertEqual(__UpperCamelCase , __UpperCamelCase )
self.assertNotIn(self.tokenizer.eos_token , __UpperCamelCase )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Union[str, Any] = """fr"""
__a : str = self.tokenizer(self.french_text ).input_ids
self.assertEqual(encoded[0] , __UpperCamelCase )
self.assertEqual(encoded[-1] , self.tokenizer.eos_token_id )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Optional[int] = """fr"""
self.assertListEqual(self.tokenizer.prefix_tokens , [FR_CODE] )
__a : List[Any] = """es"""
self.assertListEqual(self.tokenizer.prefix_tokens , [ES_CODE] ) | 697 |
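# Hedged sketch of the vocab built in setUp above: the four special tokens come
# first, then every SentencePiece piece, mapped to consecutive ids. `spm_model`
# is assumed to be a loaded sentencepiece.SentencePieceProcessor (supports len()).
def build_vocab(spm_model) -> dict:
    tokens = ["<s>", "<pad>", "</s>", "<unk>"]
    tokens += [spm_model.IdToPiece(piece_id) for piece_id in range(len(spm_model))]
    return {token: idx for idx, token in enumerate(tokens)}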
'''simple docstring'''
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
WavaVecaConformerConfig,
WavaVecaConformerForCTC,
WavaVecaConformerForPreTraining,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
__SCREAMING_SNAKE_CASE : str = logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE : Any = {
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.linear_k': 'encoder.layers.*.self_attn.linear_k',
'self_attn.linear_v': 'encoder.layers.*.self_attn.linear_v',
'self_attn.linear_q': 'encoder.layers.*.self_attn.linear_q',
'self_attn.pos_bias_u': 'encoder.layers.*.self_attn.pos_bias_u',
'self_attn.pos_bias_v': 'encoder.layers.*.self_attn.pos_bias_v',
'self_attn.linear_out': 'encoder.layers.*.self_attn.linear_out',
'self_attn.linear_pos': 'encoder.layers.*.self_attn.linear_pos',
'self_attn.rotary_emb': 'encoder.embed_positions',
'self_attn_layer_norm': 'encoder.layers.*.self_attn_layer_norm',
'conv_module.pointwise_conv1': 'encoder.layers.*.conv_module.pointwise_conv1',
'conv_module.pointwise_conv2': 'encoder.layers.*.conv_module.pointwise_conv2',
'conv_module.depthwise_conv': 'encoder.layers.*.conv_module.depthwise_conv',
'conv_module.batch_norm': 'encoder.layers.*.conv_module.batch_norm',
'conv_module.layer_norm': 'encoder.layers.*.conv_module.layer_norm',
'ffn1.w_1': 'encoder.layers.*.ffn1.intermediate_dense',
'ffn1.w_2': 'encoder.layers.*.ffn1.output_dense',
'ffn1.layer_norm': 'encoder.layers.*.ffn1_layer_norm',
'ffn2.w_1': 'encoder.layers.*.ffn2.intermediate_dense',
'ffn2.w_2': 'encoder.layers.*.ffn2.output_dense',
'ffn2.layer_norm': 'encoder.layers.*.ffn2_layer_norm',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'quantizer.weight_proj': 'quantizer.weight_proj',
'quantizer.vars': 'quantizer.codevectors',
'project_q': 'project_q',
'final_proj': 'project_hid',
'w2v_encoder.proj': 'lm_head',
'mask_emb': 'masked_spec_embed',
}
__SCREAMING_SNAKE_CASE : Optional[Any] = [
'lm_head',
'quantizer.weight_proj',
'quantizer.codevectors',
'project_q',
'project_hid',
]
def _snake_case ( lowercase , lowercase , lowercase , lowercase , lowercase ) -> List[Any]:
for attribute in key.split(""".""" ):
__a : str = getattr(lowercase , lowercase )
if weight_type is not None:
__a : Dict = getattr(lowercase , lowercase ).shape
else:
__a : Dict = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
F"""Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be"""
F""" {value.shape} for {full_name}""" )
if weight_type == "weight":
__a : Any = value
elif weight_type == "weight_g":
__a : int = value
elif weight_type == "weight_v":
__a : int = value
elif weight_type == "bias":
__a : List[Any] = value
elif weight_type == "running_mean":
__a : Union[str, Any] = value
elif weight_type == "running_var":
__a : Tuple = value
elif weight_type == "num_batches_tracked":
__a : Optional[int] = value
elif weight_type == "inv_freq":
__a : List[str] = value
else:
__a : List[str] = value
logger.info(F"""{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.""" )
def _snake_case ( lowercase , lowercase , lowercase ) -> Dict:
__a : Dict = []
__a : Dict = fairseq_model.state_dict()
__a : Tuple = hf_model.wavaveca_conformer.feature_extractor
for name, value in fairseq_dict.items():
__a : int = False
if "conv_layers" in name:
load_conv_layer(
lowercase , lowercase , lowercase , lowercase , hf_model.config.feat_extract_norm == """group""" , )
__a : List[Any] = True
else:
for key, mapped_key in MAPPING.items():
__a : Optional[int] = """wav2vec2_conformer.""" + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split("""w2v_model.""" )[-1] == name.split(""".""" )[0]:
__a : str = True
if "*" in mapped_key:
__a : Optional[int] = name.split(lowercase )[0].split(""".""" )[-2]
__a : List[Any] = mapped_key.replace("""*""" , lowercase )
if "pos_bias_u" in name:
__a : Union[str, Any] = None
elif "pos_bias_v" in name:
__a : List[Any] = None
elif "weight_g" in name:
__a : List[Any] = """weight_g"""
elif "weight_v" in name:
__a : List[Any] = """weight_v"""
elif "bias" in name:
__a : Optional[int] = """bias"""
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
__a : str = """weight"""
elif "running_mean" in name:
__a : List[str] = """running_mean"""
elif "inv_freq" in name:
__a : Dict = """inv_freq"""
elif "running_var" in name:
__a : Union[str, Any] = """running_var"""
elif "num_batches_tracked" in name:
__a : int = """num_batches_tracked"""
else:
__a : Optional[int] = None
set_recursively(lowercase , lowercase , lowercase , lowercase , lowercase )
continue
if not is_used:
unused_weights.append(lowercase )
logger.warning(F"""Unused weights: {unused_weights}""" )
def _snake_case ( lowercase , lowercase , lowercase , lowercase , lowercase ) -> List[str]:
__a : Optional[Any] = full_name.split("""conv_layers.""" )[-1]
__a : Union[str, Any] = name.split(""".""" )
__a : Optional[Any] = int(items[0] )
__a : int = int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" )
__a : Dict = value
logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" )
__a : str = value
logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.""" )
__a : Dict = value
logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.""" )
__a : Union[str, Any] = value
logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
else:
unused_weights.append(lowercase )
@torch.no_grad()
def _snake_case ( lowercase , lowercase , lowercase=None , lowercase=None , lowercase=True ) -> Optional[Any]:
if config_path is not None:
__a : Any = WavaVecaConformerConfig.from_pretrained(lowercase , hidden_act="""swish""" )
else:
__a : Optional[int] = WavaVecaConformerConfig()
if "rope" in checkpoint_path:
__a : Optional[Any] = """rotary"""
if is_finetuned:
if dict_path:
__a : List[Any] = Dictionary.load(lowercase )
            # important: change the bos & pad token ids, since the CTC symbol is <pad> and
# not <s> as in fairseq
__a : int = target_dict.pad_index
__a : List[str] = target_dict.bos_index
__a : str = target_dict.eos_index
__a : Dict = len(target_dict.symbols )
__a : Any = os.path.join(lowercase , """vocab.json""" )
if not os.path.isdir(lowercase ):
logger.error("""--pytorch_dump_folder_path ({}) should be a directory""".format(lowercase ) )
return
os.makedirs(lowercase , exist_ok=lowercase )
__a : Dict = target_dict.indices
# fairseq has the <pad> and <s> switched
__a : Optional[Any] = 0
__a : List[Any] = 1
with open(lowercase , """w""" , encoding="""utf-8""" ) as vocab_handle:
json.dump(lowercase , lowercase )
__a : int = WavaVecaCTCTokenizer(
lowercase , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token="""|""" , do_lower_case=lowercase , )
__a : Optional[int] = True if config.feat_extract_norm == """layer""" else False
__a : Dict = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=1_6_0_0_0 , padding_value=0 , do_normalize=lowercase , return_attention_mask=lowercase , )
__a : str = WavaVecaProcessor(feature_extractor=lowercase , tokenizer=lowercase )
processor.save_pretrained(lowercase )
__a : List[str] = WavaVecaConformerForCTC(lowercase )
else:
__a : Optional[int] = WavaVecaConformerForPreTraining(lowercase )
if is_finetuned:
__a , __a , __a : Dict = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={"""data""": """/""".join(dict_path.split("""/""" )[:-1] )} )
else:
__a : Optional[int] = argparse.Namespace(task="""audio_pretraining""" )
__a : Tuple = fairseq.tasks.setup_task(lowercase )
__a , __a , __a : int = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] , task=lowercase )
__a : Any = model[0].eval()
recursively_load_weights(lowercase , lowercase , not is_finetuned )
hf_wavavec.save_pretrained(lowercase )
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE : Dict = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--not_finetuned', action='store_true', help='Whether the model to convert is a fine-tuned model or not'
)
__SCREAMING_SNAKE_CASE : int = parser.parse_args()
convert_wavaveca_conformer_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
) | 697 | 1 |
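# Hedged sketch of the dotted-attribute walk that set_recursively performs
# above: a key such as "encoder.layers.0.final_layer_norm" is resolved one
# getattr at a time before the checkpoint tensor is assigned. Illustration only.
def get_nested_attr(obj, dotted_key: str):
    for attribute in dotted_key.split("."):
        obj = getattr(obj, attribute)
    return obj

# e.g. get_nested_attr(model, "config.hidden_size") on any transformers model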
'''simple docstring'''
from typing import Callable, Optional
from .. import Features
from ..packaged_modules.generator.generator import Generator
from .abc import AbstractDatasetInputStream
class SCREAMING_SNAKE_CASE__ ( __UpperCamelCase ):
def __init__( self , __UpperCamelCase , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = False , __UpperCamelCase = False , __UpperCamelCase = None , __UpperCamelCase = None , **__UpperCamelCase , ):
'''simple docstring'''
super().__init__(
features=__UpperCamelCase , cache_dir=__UpperCamelCase , keep_in_memory=__UpperCamelCase , streaming=__UpperCamelCase , num_proc=__UpperCamelCase , **__UpperCamelCase , )
__a : Optional[Any] = Generator(
cache_dir=__UpperCamelCase , features=__UpperCamelCase , generator=__UpperCamelCase , gen_kwargs=__UpperCamelCase , **__UpperCamelCase , )
def __lowerCamelCase ( self ):
'''simple docstring'''
if self.streaming:
__a : Tuple = self.builder.as_streaming_dataset(split="""train""" )
# Build regular (map-style) dataset
else:
__a : str = None
__a : Dict = None
__a : Union[str, Any] = None
__a : Optional[int] = None
self.builder.download_and_prepare(
download_config=__UpperCamelCase , download_mode=__UpperCamelCase , verification_mode=__UpperCamelCase , base_path=__UpperCamelCase , num_proc=self.num_proc , )
__a : List[str] = self.builder.as_dataset(
split="""train""" , verification_mode=__UpperCamelCase , in_memory=self.keep_in_memory )
return dataset | 697 |
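# Hedged usage sketch: the public entry point for the Generator-backed reader
# above is datasets.Dataset.from_generator (available in datasets >= 2.4).
from datasets import Dataset

def squares():
    for i in range(5):
        yield {"x": i, "x_squared": i * i}

ds = Dataset.from_generator(squares)  # a two-column map-style dataset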
'''simple docstring'''
import warnings
from functools import wraps
from typing import Callable
def _snake_case ( lowercase ) -> Callable:
@wraps(lowercase )
def _inner_fn(*lowercase , **lowercase ):
warnings.warn(
(F"""'{fn.__name__}' is experimental and might be subject to breaking changes in the future.""") , lowercase , )
return fn(*lowercase , **lowercase )
return _inner_fn | 697 | 1 |
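# Hedged, self-contained restatement of the decorator above with descriptive
# names and an explicit UserWarning category, plus a usage example.
import warnings
from functools import wraps

def experimental(fn):
    @wraps(fn)
    def _inner_fn(*args, **kwargs):
        warnings.warn(
            f"'{fn.__name__}' is experimental and might be subject to breaking changes in the future.",
            UserWarning,
        )
        return fn(*args, **kwargs)
    return _inner_fn

@experimental
def new_api():
    return 42

new_api()  # emits the UserWarning, then returns 42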
'''simple docstring'''
def _snake_case ( lowercase ) -> None:
__a : Optional[Any] = generate_pascal_triangle(lowercase )
for row_idx in range(lowercase ):
# Print left spaces
for _ in range(num_rows - row_idx - 1 ):
print(end=""" """ )
# Print row values
for col_idx in range(row_idx + 1 ):
if col_idx != row_idx:
print(triangle[row_idx][col_idx] , end=""" """ )
else:
print(triangle[row_idx][col_idx] , end="""""" )
print()
def _snake_case ( lowercase ) -> list[list[int]]:
if not isinstance(lowercase , lowercase ):
raise TypeError("""The input value of 'num_rows' should be 'int'""" )
if num_rows == 0:
return []
elif num_rows < 0:
raise ValueError(
"""The input value of 'num_rows' should be greater than or equal to 0""" )
__a : list[list[int]] = []
for current_row_idx in range(lowercase ):
__a : Any = populate_current_row(lowercase , lowercase )
triangle.append(lowercase )
return triangle
def _snake_case ( lowercase , lowercase ) -> list[int]:
__a : str = [-1] * (current_row_idx + 1)
# first and last elements of current row are equal to 1
__a , __a : int = 1, 1
for current_col_idx in range(1 , lowercase ):
calculate_current_element(
lowercase , lowercase , lowercase , lowercase )
return current_row
def _snake_case ( lowercase , lowercase , lowercase , lowercase , ) -> None:
__a : Any = triangle[current_row_idx - 1][current_col_idx - 1]
__a : Tuple = triangle[current_row_idx - 1][current_col_idx]
__a : Union[str, Any] = above_to_left_elt + above_to_right_elt
def _snake_case ( lowercase ) -> list[list[int]]:
if not isinstance(lowercase , lowercase ):
raise TypeError("""The input value of 'num_rows' should be 'int'""" )
if num_rows == 0:
return []
elif num_rows < 0:
raise ValueError(
"""The input value of 'num_rows' should be greater than or equal to 0""" )
__a : list[list[int]] = [[1]]
for row_index in range(1 , lowercase ):
__a : Any = [0] + result[-1] + [0]
__a : Any = row_index + 1
# Calculate the number of distinct elements in a row
__a : str = sum(divmod(lowercase , 2 ) )
__a : List[str] = [
temp_row[i - 1] + temp_row[i] for i in range(1 , distinct_elements + 1 )
]
__a : Union[str, Any] = row_first_half[: (row_index + 1) // 2]
row_second_half.reverse()
__a : List[str] = row_first_half + row_second_half
result.append(lowercase )
return result
def _snake_case ( ) -> None:
from collections.abc import Callable
from timeit import timeit
def benchmark_a_function(lowercase , lowercase ) -> None:
__a : Dict = F"""{func.__name__}({value})"""
__a : Tuple = timeit(F"""__main__.{call}""" , setup="""import __main__""" )
# print(f"{call:38} = {func(value)} -- {timing:.4f} seconds")
print(F"""{call:38} -- {timing:.4f} seconds""" )
for value in range(1_5 ): # (1, 7, 14):
for func in (generate_pascal_triangle, generate_pascal_triangle_optimized):
benchmark_a_function(lowercase , lowercase )
print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark() | 697 |
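# Hedged sanity check, assuming both generators above are in scope under the
# names the benchmark loop uses: the two construction strategies should agree,
# the optimized one computing only half of each row and mirroring it.
for n in (0, 1, 5):
    assert generate_pascal_triangle(n) == generate_pascal_triangle_optimized(n)
print(generate_pascal_triangle(4))  # [[1], [1, 1], [1, 2, 1], [1, 3, 3, 1]]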
'''simple docstring'''
from typing import List, Optional, Union
import numpy as np
from ....audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function
from ....feature_extraction_sequence_utils import SequenceFeatureExtractor
from ....feature_extraction_utils import BatchFeature
from ....file_utils import PaddingStrategy, TensorType
from ....utils import logging
__SCREAMING_SNAKE_CASE : Tuple = logging.get_logger(__name__)
class SCREAMING_SNAKE_CASE__ ( __UpperCamelCase ):
lowercase__ = ["input_features", "attention_mask"]
def __init__( self , __UpperCamelCase=80 , __UpperCamelCase=1_6000 , __UpperCamelCase=0.0 , __UpperCamelCase=10 , __UpperCamelCase=25 , __UpperCamelCase="hamming_window" , __UpperCamelCase=3_2_7_6_8.0 , __UpperCamelCase=0.9_7 , __UpperCamelCase=1.0 , __UpperCamelCase=True , __UpperCamelCase=True , __UpperCamelCase=False , **__UpperCamelCase , ):
'''simple docstring'''
super().__init__(feature_size=__UpperCamelCase , sampling_rate=__UpperCamelCase , padding_value=__UpperCamelCase , **__UpperCamelCase )
__a : List[str] = feature_size
__a : List[str] = sampling_rate
__a : int = padding_value
__a : Any = hop_length
__a : int = win_length
__a : Tuple = frame_signal_scale
__a : Union[str, Any] = preemphasis_coeff
__a : List[str] = mel_floor
__a : Union[str, Any] = normalize_means
__a : Optional[Any] = normalize_vars
__a : Optional[Any] = win_function
__a : Union[str, Any] = return_attention_mask
__a : List[Any] = win_length * sampling_rate // 1000
__a : List[Any] = hop_length * sampling_rate // 1000
__a : Optional[Any] = optimal_fft_length(self.sample_size )
__a : Any = (self.n_fft // 2) + 1
def __lowerCamelCase ( self , __UpperCamelCase ):
'''simple docstring'''
if self.win_function == "hamming_window":
__a : str = window_function(window_length=self.sample_size , name=self.win_function , periodic=__UpperCamelCase )
else:
__a : Dict = window_function(window_length=self.sample_size , name=self.win_function )
__a : Optional[Any] = mel_filter_bank(
num_frequency_bins=self.n_freqs , num_mel_filters=self.feature_size , min_frequency=0.0 , max_frequency=self.sampling_rate / 2.0 , sampling_rate=self.sampling_rate , )
__a : Any = spectrogram(
one_waveform * self.frame_signal_scale , window=__UpperCamelCase , frame_length=self.sample_size , hop_length=self.sample_stride , fft_length=self.n_fft , center=__UpperCamelCase , preemphasis=self.preemphasis_coeff , mel_filters=__UpperCamelCase , mel_floor=self.mel_floor , log_mel="""log""" , )
return msfc_features.T
def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ):
'''simple docstring'''
if self.normalize_means:
__a : int = x[:input_length].mean(axis=0 )
__a : str = np.subtract(__UpperCamelCase , __UpperCamelCase )
if self.normalize_vars:
__a : Dict = x[:input_length].std(axis=0 )
__a : Dict = np.divide(__UpperCamelCase , __UpperCamelCase )
if input_length < x.shape[0]:
__a : Union[str, Any] = padding_value
# make sure array is in float32
__a : Any = x.astype(np.floataa )
return x
def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase = None ):
'''simple docstring'''
__a : Tuple = attention_mask.sum(-1 ) if attention_mask is not None else [x.shape[0] for x in input_features]
return [self._normalize_one(__UpperCamelCase , __UpperCamelCase , self.padding_value ) for x, n in zip(__UpperCamelCase , __UpperCamelCase )]
def __call__( self , __UpperCamelCase , __UpperCamelCase = False , __UpperCamelCase = None , __UpperCamelCase = False , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , **__UpperCamelCase , ):
'''simple docstring'''
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
f"""The model corresponding to this feature extractor: {self} was trained using a sampling rate of"""
f""" {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with"""
f""" {self.sampling_rate} and not {sampling_rate}.""" )
else:
logger.warning(
"""It is strongly recommended to pass the ``sampling_rate`` argument to this function. """
"""Failing to do so can result in silent errors that might be hard to debug.""" )
__a : Tuple = isinstance(__UpperCamelCase , np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(f"""Only mono-channel audio is supported for input to {self}""" )
__a : Tuple = is_batched_numpy or (
isinstance(__UpperCamelCase , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
__a : Tuple = [np.asarray(__UpperCamelCase , dtype=np.floataa ) for speech in raw_speech]
elif not is_batched and not isinstance(__UpperCamelCase , np.ndarray ):
__a : List[str] = np.asarray(__UpperCamelCase , dtype=np.floataa )
elif isinstance(__UpperCamelCase , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
__a : str = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
__a : Any = [raw_speech]
# extract fbank features
__a : str = [self._extract_mfsc_features(__UpperCamelCase ) for one_waveform in raw_speech]
# convert into correct format for padding
__a : Optional[Any] = BatchFeature({"""input_features""": features} )
__a : Any = self.pad(
__UpperCamelCase , padding=__UpperCamelCase , max_length=__UpperCamelCase , truncation=__UpperCamelCase , pad_to_multiple_of=__UpperCamelCase , return_attention_mask=__UpperCamelCase , **__UpperCamelCase , )
# make sure list is in array format
__a : int = padded_inputs.get("""input_features""" )
if isinstance(input_features[0] , __UpperCamelCase ):
__a : Union[str, Any] = [np.asarray(__UpperCamelCase , dtype=np.floataa ) for feature in input_features]
__a : List[str] = padded_inputs.get("""attention_mask""" )
if attention_mask is not None:
__a : Optional[int] = [np.asarray(__UpperCamelCase , dtype=np.intaa ) for array in attention_mask]
if self.normalize_means or self.normalize_vars:
__a : Optional[Any] = (
np.array(__UpperCamelCase , dtype=np.intaa )
if self._get_padding_strategies(__UpperCamelCase , max_length=__UpperCamelCase ) is not PaddingStrategy.DO_NOT_PAD
and padding
else None
)
__a : int = self.normalize(
padded_inputs["""input_features"""] , attention_mask=__UpperCamelCase )
if return_tensors is not None:
__a : List[Any] = padded_inputs.convert_to_tensors(__UpperCamelCase )
return padded_inputs | 697 | 1 |
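# Hedged worked example of the millisecond-to-sample conversion done in
# __init__ above: with sampling_rate=16000, win_length=25 ms and hop_length=10 ms
# give a 400-sample window and a 160-sample stride; the FFT length is then
# rounded up to the next power of two (matching what optimal_fft_length returns).
sampling_rate = 16_000
win_length_ms, hop_length_ms = 25, 10
sample_size = win_length_ms * sampling_rate // 1000    # 400
sample_stride = hop_length_ms * sampling_rate // 1000  # 160
n_fft = 1 << (sample_size - 1).bit_length()            # 512
n_freqs = n_fft // 2 + 1                               # 257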
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
__SCREAMING_SNAKE_CASE : Dict = {
'configuration_xlm_roberta': [
'XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP',
'XLMRobertaConfig',
'XLMRobertaOnnxConfig',
],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE : Dict = ['XLMRobertaTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE : Tuple = ['XLMRobertaTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE : str = [
'XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST',
'XLMRobertaForCausalLM',
'XLMRobertaForMaskedLM',
'XLMRobertaForMultipleChoice',
'XLMRobertaForQuestionAnswering',
'XLMRobertaForSequenceClassification',
'XLMRobertaForTokenClassification',
'XLMRobertaModel',
'XLMRobertaPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE : Optional[int] = [
'TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFXLMRobertaForCausalLM',
'TFXLMRobertaForMaskedLM',
'TFXLMRobertaForMultipleChoice',
'TFXLMRobertaForQuestionAnswering',
'TFXLMRobertaForSequenceClassification',
'TFXLMRobertaForTokenClassification',
'TFXLMRobertaModel',
'TFXLMRobertaPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE : Union[str, Any] = [
'FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST',
'FlaxXLMRobertaForMaskedLM',
'FlaxXLMRobertaForCausalLM',
'FlaxXLMRobertaForMultipleChoice',
'FlaxXLMRobertaForQuestionAnswering',
'FlaxXLMRobertaForSequenceClassification',
'FlaxXLMRobertaForTokenClassification',
'FlaxXLMRobertaModel',
'FlaxXLMRobertaPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_xlm_roberta import (
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLMRobertaConfig,
XLMRobertaOnnxConfig,
)
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlm_roberta import XLMRobertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlm_roberta_fast import XLMRobertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm_roberta import (
XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMRobertaForCausalLM,
XLMRobertaForMaskedLM,
XLMRobertaForMultipleChoice,
XLMRobertaForQuestionAnswering,
XLMRobertaForSequenceClassification,
XLMRobertaForTokenClassification,
XLMRobertaModel,
XLMRobertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlm_roberta import (
TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMRobertaForCausalLM,
TFXLMRobertaForMaskedLM,
TFXLMRobertaForMultipleChoice,
TFXLMRobertaForQuestionAnswering,
TFXLMRobertaForSequenceClassification,
TFXLMRobertaForTokenClassification,
TFXLMRobertaModel,
TFXLMRobertaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xlm_roberta import (
FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaxXLMRobertaForCausalLM,
FlaxXLMRobertaForMaskedLM,
FlaxXLMRobertaForMultipleChoice,
FlaxXLMRobertaForQuestionAnswering,
FlaxXLMRobertaForSequenceClassification,
FlaxXLMRobertaForTokenClassification,
FlaxXLMRobertaModel,
FlaxXLMRobertaPreTrainedModel,
)
else:
import sys
__SCREAMING_SNAKE_CASE : List[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) | 697 |
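# Hedged toy version of the _LazyModule pattern used above: heavy submodule
# imports are deferred until an exported attribute is first accessed. The real
# transformers implementation handles more cases; this only shows the idea.
import importlib
import types

class ToyLazyModule(types.ModuleType):
    def __init__(self, name: str, import_structure: dict):
        super().__init__(name)
        # map each exported attribute back to the submodule that defines it
        self._attr_to_submodule = {
            attr: submodule for submodule, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr: str):
        if attr not in self._attr_to_submodule:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        submodule = importlib.import_module("." + self._attr_to_submodule[attr], self.__name__)
        return getattr(submodule, attr)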
'''simple docstring'''
__SCREAMING_SNAKE_CASE : int = 9.80_665
def _snake_case ( lowercase , lowercase , lowercase = g ) -> float:
if fluid_density <= 0:
raise ValueError("""Impossible fluid density""" )
if volume < 0:
raise ValueError("""Impossible Object volume""" )
if gravity <= 0:
raise ValueError("""Impossible Gravity""" )
return fluid_density * gravity * volume
if __name__ == "__main__":
import doctest
# run doctest
doctest.testmod() | 697 | 1 |
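# Hedged worked example for the buoyant-force helper above: a fully submerged
# 0.5 m^3 object in water (density taken as 1000 kg/m^3) under standard gravity.
fluid_density = 1000.0          # kg / m^3
volume = 0.5                    # m^3
gravity = 9.80665               # m / s^2
buoyant_force = fluid_density * gravity * volume
print(buoyant_force)            # ~4903.3 N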
'''simple docstring'''
def _snake_case ( lowercase = 4_0_0_0_0_0_0 ) -> int:
__a : int = [0, 1]
__a : int = 0
while fib[i] <= n:
fib.append(fib[i] + fib[i + 1] )
if fib[i + 2] > n:
break
i += 1
__a : Dict = 0
for j in range(len(lowercase ) - 1 ):
if fib[j] % 2 == 0:
total += fib[j]
return total
if __name__ == "__main__":
print(f'''{solution() = }''') | 697 |
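# Hedged alternative to the sieve above: even Fibonacci numbers obey
# E(k) = 4 * E(k-1) + E(k-2) (with E(1)=2, E(2)=8), so the sum can be
# accumulated without materialising the full sequence.
def even_fib_sum(limit: int = 4_000_000) -> int:
    prev, curr = 2, 8
    total = 2 if limit >= 2 else 0
    while curr <= limit:
        total += curr
        prev, curr = curr, 4 * curr + prev
    return total

print(even_fib_sum())  # 4613732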
'''simple docstring'''
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DetrImageProcessor
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
def __init__( self , __UpperCamelCase , __UpperCamelCase=7 , __UpperCamelCase=3 , __UpperCamelCase=30 , __UpperCamelCase=400 , __UpperCamelCase=True , __UpperCamelCase=None , __UpperCamelCase=True , __UpperCamelCase=1 / 255 , __UpperCamelCase=True , __UpperCamelCase=[0.5, 0.5, 0.5] , __UpperCamelCase=[0.5, 0.5, 0.5] , __UpperCamelCase=True , ):
'''simple docstring'''
__a : List[Any] = size if size is not None else {"""shortest_edge""": 18, """longest_edge""": 1333}
__a : Dict = parent
__a : Union[str, Any] = batch_size
__a : Optional[int] = num_channels
__a : Dict = min_resolution
__a : List[Any] = max_resolution
__a : int = do_resize
__a : str = size
__a : Optional[Any] = do_rescale
__a : Optional[Any] = rescale_factor
__a : str = do_normalize
__a : Any = image_mean
__a : Optional[Any] = image_std
__a : Dict = do_pad
def __lowerCamelCase ( self ):
'''simple docstring'''
return {
"do_resize": self.do_resize,
"size": self.size,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_pad": self.do_pad,
}
def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase=False ):
'''simple docstring'''
if not batched:
__a : Union[str, Any] = image_inputs[0]
if isinstance(__UpperCamelCase , Image.Image ):
__a , __a : Tuple = image.size
else:
__a , __a : Tuple = image.shape[1], image.shape[2]
if w < h:
__a : Optional[int] = int(self.size["""shortest_edge"""] * h / w )
__a : Tuple = self.size["""shortest_edge"""]
elif w > h:
__a : Optional[Any] = self.size["""shortest_edge"""]
__a : Any = int(self.size["""shortest_edge"""] * w / h )
else:
__a : Any = self.size["""shortest_edge"""]
__a : Optional[int] = self.size["""shortest_edge"""]
else:
__a : Any = []
for image in image_inputs:
__a , __a : Any = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
__a : List[Any] = max(__UpperCamelCase , key=lambda __UpperCamelCase : item[0] )[0]
__a : Optional[Any] = max(__UpperCamelCase , key=lambda __UpperCamelCase : item[1] )[1]
return expected_height, expected_width
@require_torch
@require_vision
class SCREAMING_SNAKE_CASE__ ( __UpperCamelCase , unittest.TestCase ):
lowercase__ = DetrImageProcessor if is_vision_available() else None
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : str = DetrImageProcessingTester(self )
@property
def __lowerCamelCase ( self ):
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Optional[int] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__UpperCamelCase , """image_mean""" ) )
self.assertTrue(hasattr(__UpperCamelCase , """image_std""" ) )
self.assertTrue(hasattr(__UpperCamelCase , """do_normalize""" ) )
self.assertTrue(hasattr(__UpperCamelCase , """do_rescale""" ) )
self.assertTrue(hasattr(__UpperCamelCase , """rescale_factor""" ) )
self.assertTrue(hasattr(__UpperCamelCase , """do_resize""" ) )
self.assertTrue(hasattr(__UpperCamelCase , """size""" ) )
self.assertTrue(hasattr(__UpperCamelCase , """do_pad""" ) )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Optional[Any] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"""shortest_edge""": 18, """longest_edge""": 1333} )
self.assertEqual(image_processor.do_pad , __UpperCamelCase )
__a : List[Any] = self.image_processing_class.from_dict(
self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=__UpperCamelCase )
self.assertEqual(image_processor.size , {"""shortest_edge""": 42, """longest_edge""": 84} )
self.assertEqual(image_processor.do_pad , __UpperCamelCase )
def __lowerCamelCase ( self ):
'''simple docstring'''
pass
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__a : Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__UpperCamelCase )
for image in image_inputs:
self.assertIsInstance(__UpperCamelCase , Image.Image )
# Test not batched input
__a : Optional[Any] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
__a , __a : Any = self.image_processor_tester.get_expected_values(__UpperCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
__a , __a : Optional[int] = self.image_processor_tester.get_expected_values(__UpperCamelCase , batched=__UpperCamelCase )
__a : Any = image_processing(__UpperCamelCase , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Dict = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__a : List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__UpperCamelCase , numpify=__UpperCamelCase )
for image in image_inputs:
self.assertIsInstance(__UpperCamelCase , np.ndarray )
# Test not batched input
__a : Dict = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
__a , __a : Any = self.image_processor_tester.get_expected_values(__UpperCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
__a : List[str] = image_processing(__UpperCamelCase , return_tensors="""pt""" ).pixel_values
__a , __a : str = self.image_processor_tester.get_expected_values(__UpperCamelCase , batched=__UpperCamelCase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Any = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__a : Union[str, Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__UpperCamelCase , torchify=__UpperCamelCase )
for image in image_inputs:
self.assertIsInstance(__UpperCamelCase , torch.Tensor )
# Test not batched input
__a : Any = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
__a , __a : Any = self.image_processor_tester.get_expected_values(__UpperCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
__a : List[str] = image_processing(__UpperCamelCase , return_tensors="""pt""" ).pixel_values
__a , __a : Any = self.image_processor_tester.get_expected_values(__UpperCamelCase , batched=__UpperCamelCase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : List[Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
with open("""./tests/fixtures/tests_samples/COCO/coco_annotations.txt""" , """r""" ) as f:
__a : Dict = json.loads(f.read() )
__a : Optional[int] = {"""image_id""": 3_9769, """annotations""": target}
# encode them
__a : List[str] = DetrImageProcessor.from_pretrained("""facebook/detr-resnet-50""" )
__a : Tuple = image_processing(images=__UpperCamelCase , annotations=__UpperCamelCase , return_tensors="""pt""" )
# verify pixel values
__a : Union[str, Any] = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding["""pixel_values"""].shape , __UpperCamelCase )
__a : List[str] = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] )
self.assertTrue(torch.allclose(encoding["""pixel_values"""][0, 0, 0, :3] , __UpperCamelCase , atol=1E-4 ) )
# verify area
__a : List[Any] = torch.tensor([5_8_8_7.9_6_0_0, 1_1_2_5_0.2_0_6_1, 4_8_9_3_5_3.8_4_3_8, 8_3_7_1_2_2.7_5_0_0, 1_4_7_9_6_7.5_1_5_6, 1_6_5_7_3_2.3_4_3_8] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""area"""] , __UpperCamelCase ) )
# verify boxes
__a : Optional[int] = torch.Size([6, 4] )
self.assertEqual(encoding["""labels"""][0]["""boxes"""].shape , __UpperCamelCase )
__a : Any = torch.tensor([0.5_5_0_3, 0.2_7_6_5, 0.0_6_0_4, 0.2_2_1_5] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""boxes"""][0] , __UpperCamelCase , atol=1E-3 ) )
# verify image_id
__a : Union[str, Any] = torch.tensor([3_9769] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""image_id"""] , __UpperCamelCase ) )
# verify is_crowd
__a : List[Any] = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""iscrowd"""] , __UpperCamelCase ) )
# verify class_labels
__a : Any = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""class_labels"""] , __UpperCamelCase ) )
# verify orig_size
__a : Any = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""orig_size"""] , __UpperCamelCase ) )
# verify size
__a : str = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""size"""] , __UpperCamelCase ) )
@slow
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : List[Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
with open("""./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt""" , """r""" ) as f:
__a : Tuple = json.loads(f.read() )
__a : str = {"""file_name""": """000000039769.png""", """image_id""": 3_9769, """segments_info""": target}
__a : int = pathlib.Path("""./tests/fixtures/tests_samples/COCO/coco_panoptic""" )
# encode them
__a : List[str] = DetrImageProcessor.from_pretrained("""facebook/detr-resnet-50-panoptic""" )
__a : Tuple = image_processing(images=__UpperCamelCase , annotations=__UpperCamelCase , masks_path=__UpperCamelCase , return_tensors="""pt""" )
# verify pixel values
__a : List[str] = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding["""pixel_values"""].shape , __UpperCamelCase )
__a : Any = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] )
self.assertTrue(torch.allclose(encoding["""pixel_values"""][0, 0, 0, :3] , __UpperCamelCase , atol=1E-4 ) )
# verify area
__a : Optional[Any] = torch.tensor([1_4_7_9_7_9.6_8_7_5, 1_6_5_5_2_7.0_4_6_9, 4_8_4_6_3_8.5_9_3_8, 1_1_2_9_2.9_3_7_5, 5_8_7_9.6_5_6_2, 7_6_3_4.1_1_4_7] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""area"""] , __UpperCamelCase ) )
# verify boxes
__a : Optional[Any] = torch.Size([6, 4] )
self.assertEqual(encoding["""labels"""][0]["""boxes"""].shape , __UpperCamelCase )
__a : List[str] = torch.tensor([0.2_6_2_5, 0.5_4_3_7, 0.4_6_8_8, 0.8_6_2_5] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""boxes"""][0] , __UpperCamelCase , atol=1E-3 ) )
# verify image_id
__a : List[str] = torch.tensor([3_9769] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""image_id"""] , __UpperCamelCase ) )
# verify is_crowd
__a : Optional[int] = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""iscrowd"""] , __UpperCamelCase ) )
# verify class_labels
__a : Optional[int] = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""class_labels"""] , __UpperCamelCase ) )
# verify masks
__a : Union[str, Any] = 82_2873
self.assertEqual(encoding["""labels"""][0]["""masks"""].sum().item() , __UpperCamelCase )
# verify orig_size
__a : str = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""orig_size"""] , __UpperCamelCase ) )
# verify size
__a : List[Any] = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""size"""] , __UpperCamelCase ) ) | 697 | 1 |
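# Hedged worked example of the shortest-edge resize rule these tests exercise
# (ignoring the longest_edge cap for brevity): a 480x640 image with
# shortest_edge=800 becomes 800x1066, matching the tensor shapes asserted above.
def shortest_edge_resize(height: int, width: int, shortest_edge: int):
    if width < height:
        return int(shortest_edge * height / width), shortest_edge
    if width > height:
        return shortest_edge, int(shortest_edge * width / height)
    return shortest_edge, shortest_edge

print(shortest_edge_resize(480, 640, 800))  # (800, 1066)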
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
__SCREAMING_SNAKE_CASE : Any = {'configuration_encoder_decoder': ['EncoderDecoderConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE : str = ['EncoderDecoderModel']
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE : Optional[int] = ['TFEncoderDecoderModel']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE : str = ['FlaxEncoderDecoderModel']
if TYPE_CHECKING:
from .configuration_encoder_decoder import EncoderDecoderConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_encoder_decoder import EncoderDecoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_encoder_decoder import TFEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_encoder_decoder import FlaxEncoderDecoderModel
else:
import sys
__SCREAMING_SNAKE_CASE : List[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) | 697 |
'''simple docstring'''
import argparse
import logging
import os
import time
import timeit
import datasets
import numpy as np
import pycuda.autoinit # noqa: F401
import pycuda.driver as cuda
import tensorrt as trt
import torch
from absl import logging as absl_logging
from accelerate import Accelerator
from datasets import load_dataset, load_metric
from torch.utils.data import DataLoader
from utils_qa import postprocess_qa_predictions
import transformers
from transformers import AutoTokenizer, EvalPrediction, default_data_collator, set_seed
from transformers.trainer_pt_utils import nested_concat, nested_truncate
__SCREAMING_SNAKE_CASE : Optional[int] = trt.Logger(trt.Logger.WARNING)
__SCREAMING_SNAKE_CASE : Tuple = absl_logging.get_absl_logger()
absl_logger.setLevel(logging.WARNING)
__SCREAMING_SNAKE_CASE : Any = logging.getLogger(__name__)
__SCREAMING_SNAKE_CASE : int = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--onnx_model_path',
default=None,
type=str,
required=True,
help='Path to ONNX model: ',
)
parser.add_argument(
'--output_dir',
default=None,
type=str,
required=True,
help='The output directory where the model checkpoints and predictions will be written.',
)
# Other parameters
parser.add_argument(
'--tokenizer_name',
default='',
type=str,
required=True,
help='Pretrained tokenizer name or path if not the same as model_name',
)
parser.add_argument(
'--version_2_with_negative',
action='store_true',
help='If true, the SQuAD examples contain some that do not have an answer.',
)
parser.add_argument(
'--null_score_diff_threshold',
type=float,
default=0.0,
help='If null_score - best_non_null is greater than the threshold predict null.',
)
parser.add_argument(
'--max_seq_length',
default=384,
type=int,
help=(
'The maximum total input sequence length after WordPiece tokenization. Sequences '
'longer than this will be truncated, and sequences shorter than this will be padded.'
),
)
parser.add_argument(
'--doc_stride',
default=128,
type=int,
help='When splitting up a long document into chunks, how much stride to take between chunks.',
)
parser.add_argument('--per_device_eval_batch_size', default=8, type=int, help='Batch size per GPU/CPU for evaluation.')
parser.add_argument(
'--n_best_size',
default=20,
type=int,
help='The total number of n-best predictions to generate in the nbest_predictions.json output file.',
)
parser.add_argument(
'--max_answer_length',
default=30,
type=int,
help=(
'The maximum length of an answer that can be generated. This is needed because the start '
'and end predictions are not conditioned on one another.'
),
)
parser.add_argument('--seed', type=int, default=42, help='random seed for initialization')
parser.add_argument(
'--dataset_name',
type=str,
default=None,
required=True,
help='The name of the dataset to use (via the datasets library).',
)
parser.add_argument(
'--dataset_config_name',
type=str,
default=None,
help='The configuration name of the dataset to use (via the datasets library).',
)
parser.add_argument(
    '--preprocessing_num_workers', type=int, default=4, help='The number of processes to use for the preprocessing.'
)
parser.add_argument('--overwrite_cache', action='store_true', help='Overwrite the cached training and evaluation sets')
parser.add_argument(
'--fp16',
action='store_true',
help='Whether to use 16-bit (mixed) precision instead of 32-bit',
)
parser.add_argument(
'--int8',
action='store_true',
help='Whether to use INT8',
)
__SCREAMING_SNAKE_CASE : Optional[int] = parser.parse_args()
if args.tokenizer_name:
__SCREAMING_SNAKE_CASE : str = AutoTokenizer.from_pretrained(args.tokenizer_name, use_fast=True)
else:
raise ValueError(
        'You are instantiating a new tokenizer from scratch. This is not supported by this script. '
'You can do it from another script, save it, and load it from here, using --tokenizer_name.'
)
logger.info('Training/evaluation parameters %s', args)
__SCREAMING_SNAKE_CASE : List[Any] = args.per_device_eval_batch_size
__SCREAMING_SNAKE_CASE : int = (args.eval_batch_size, args.max_seq_length)
# TRT Engine properties
__SCREAMING_SNAKE_CASE : Optional[Any] = True
__SCREAMING_SNAKE_CASE : Tuple = 'temp_engine/bert-fp32.engine'
if args.fpaa:
__SCREAMING_SNAKE_CASE : Dict = 'temp_engine/bert-fp16.engine'
if args.inta:
__SCREAMING_SNAKE_CASE : Tuple = 'temp_engine/bert-int8.engine'
# import ONNX file
if not os.path.exists('temp_engine'):
os.makedirs('temp_engine')
__SCREAMING_SNAKE_CASE : Optional[Any] = 1 << (int)(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)
with trt.Builder(TRT_LOGGER) as builder, builder.create_network(EXPLICIT_BATCH) as network, trt.OnnxParser(
network, TRT_LOGGER
) as parser:
with open(args.onnx_model_path, 'rb') as model:
if not parser.parse(model.read()):
for error in range(parser.num_errors):
print(parser.get_error(error))
# Query input names and shapes from parsed TensorRT network
__SCREAMING_SNAKE_CASE : List[Any] = [network.get_input(i) for i in range(network.num_inputs)]
__SCREAMING_SNAKE_CASE : List[Any] = [_input.name for _input in network_inputs] # ex: ["actual_input1"]
with builder.create_builder_config() as config:
__SCREAMING_SNAKE_CASE : Tuple = 1 << 50
if STRICT_TYPES:
config.set_flag(trt.BuilderFlag.STRICT_TYPES)
if args.fpaa:
config.set_flag(trt.BuilderFlag.FPaa)
if args.inta:
config.set_flag(trt.BuilderFlag.INTa)
__SCREAMING_SNAKE_CASE : Dict = builder.create_optimization_profile()
config.add_optimization_profile(profile)
for i in range(len(input_names)):
profile.set_shape(input_names[i], INPUT_SHAPE, INPUT_SHAPE, INPUT_SHAPE)
__SCREAMING_SNAKE_CASE : Union[str, Any] = builder.build_engine(network, config)
# serialize_engine and store in file (can be directly loaded and deserialized):
with open(engine_name, 'wb') as f:
f.write(engine.serialize())
def _snake_case ( lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase ) -> List[Any]:
__a : Dict = np.asarray(inputs["""input_ids"""] , dtype=np.intaa )
__a : List[Any] = np.asarray(inputs["""attention_mask"""] , dtype=np.intaa )
__a : str = np.asarray(inputs["""token_type_ids"""] , dtype=np.intaa )
# Copy inputs
cuda.memcpy_htod_async(d_inputs[0] , input_ids.ravel() , lowercase )
cuda.memcpy_htod_async(d_inputs[1] , attention_mask.ravel() , lowercase )
cuda.memcpy_htod_async(d_inputs[2] , token_type_ids.ravel() , lowercase )
# start time
__a : Optional[Any] = time.time()
# Run inference
context.execute_async(
bindings=[int(lowercase ) for d_inp in d_inputs] + [int(lowercase ), int(lowercase )] , stream_handle=stream.handle )
# Transfer predictions back from GPU
cuda.memcpy_dtoh_async(lowercase , lowercase , lowercase )
cuda.memcpy_dtoh_async(lowercase , lowercase , lowercase )
# Synchronize the stream and take time
stream.synchronize()
# end time
__a : str = time.time()
__a : Any = end_time - start_time
__a : Optional[int] = (h_outputa, h_outputa)
# print(outputs)
return outputs, infer_time
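# Hedged note (not part of the original script): the d_inputs / h_output* /
# d_output* buffers consumed by model_inference above are typically allocated
# once with pycuda before the evaluation loop, roughly like:
#   d_inputs = [cuda.mem_alloc(np.zeros(INPUT_SHAPE, dtype=np.int32).nbytes) for _ in range(3)]
#   h_outputa = cuda.pagelocked_empty(tuple(INPUT_SHAPE), dtype=np.float32)
#   d_outputa = cuda.mem_alloc(h_outputa.nbytes)
#   stream = cuda.Stream()
# The async memcpys and execute_async then all enqueue onto that single stream.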
# Initialize the accelerator. We will let the accelerator handle device placement for us in this example.
__SCREAMING_SNAKE_CASE : Optional[Any] = Accelerator()
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
datefmt='%m/%d/%Y %H:%M:%S',
level=logging.INFO,
)
# Setup logging, we only want one process per machine to log things on the screen.
# accelerator.is_local_main_process is only True for one process per machine.
logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR)
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_info()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
# If passed along, set the training seed now.
if args.seed is not None:
set_seed(args.seed)
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
if args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
    raw_datasets = load_dataset(args.dataset_name, args.dataset_config_name)
else:
raise ValueError('Evaluation requires a dataset name')
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Preprocessing the datasets.
# Preprocessing is slightly different for training and evaluation.
column_names = raw_datasets['validation'].column_names
question_column_name = 'question' if 'question' in column_names else column_names[0]
context_column_name = 'context' if 'context' in column_names else column_names[1]
answer_column_name = 'answers' if 'answers' in column_names else column_names[2]
# Padding side determines if we do (question|context) or (context|question).
pad_on_right = tokenizer.padding_side == 'right'
if args.max_seq_length > tokenizer.model_max_length:
logger.warning(
f'''The max_seq_length passed ({args.max_seq_length}) is larger than the maximum length for the'''
f'''model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.'''
)
max_seq_length = min(args.max_seq_length, tokenizer.model_max_length)
def prepare_validation_features(examples):
    # Some of the questions have lots of whitespace on the left, which is not useful and will make the
    # truncation of the context fail (the tokenized question will take a lot of space). So we remove that
    # left whitespace.
    examples[question_column_name] = [q.lstrip() for q in examples[question_column_name]]
    # Tokenize our examples with truncation and maybe padding, but keep the overflows using a stride. This results
    # in one example possibly giving several features when a context is long, each of those features having a
    # context that overlaps a bit the context of the previous feature.
    tokenized_examples = tokenizer(
        examples[question_column_name if pad_on_right else context_column_name],
        examples[context_column_name if pad_on_right else question_column_name],
        truncation="only_second" if pad_on_right else "only_first",
        max_length=max_seq_length,
        stride=args.doc_stride,
        return_overflowing_tokens=True,
        return_offsets_mapping=True,
        padding="max_length",
    )
    # Since one example might give us several features if it has a long context, we need a map from a feature to
    # its corresponding example. This key gives us just that.
    sample_mapping = tokenized_examples.pop("overflow_to_sample_mapping")
    # For evaluation, we will need to convert our predictions to substrings of the context, so we keep the
    # corresponding example_id and we will store the offset mappings.
    tokenized_examples["example_id"] = []
    for i in range(len(tokenized_examples["input_ids"])):
        # Grab the sequence corresponding to that example (to know what is the context and what is the question).
        sequence_ids = tokenized_examples.sequence_ids(i)
        context_index = 1 if pad_on_right else 0
        # One example can give several spans, this is the index of the example containing this span of text.
        sample_index = sample_mapping[i]
        tokenized_examples["example_id"].append(examples["id"][sample_index])
        # Set to None the offset_mapping that are not part of the context so it's easy to determine if a token
        # position is part of the context or not.
        tokenized_examples["offset_mapping"][i] = [
            (o if sequence_ids[k] == context_index else None)
            for k, o in enumerate(tokenized_examples["offset_mapping"][i])
        ]
    return tokenized_examples
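# Illustrative effect of the stride-based overflow (hypothetical numbers): one example with a very
# long context fans out into several features, each repeating the same "example_id" and carrying an
# offset_mapping whose non-context positions are None, e.g.
#   features = prepare_validation_features({"id": ["q0"], "question": ["Who?"], "context": [long_text]})
#   # len(features["input_ids"]) > 1 and features["example_id"] == ["q0", "q0", ...]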
eval_examples = raw_datasets['validation']
# Validation Feature Creation
eval_dataset = eval_examples.map(
    prepare_validation_features,
    batched=True,
    num_proc=args.preprocessing_num_workers,
    remove_columns=column_names,
    load_from_cache_file=not args.overwrite_cache,
    desc='Running tokenizer on validation dataset',
)
data_collator = default_data_collator
eval_dataset_for_model = eval_dataset.remove_columns(['example_id', 'offset_mapping'])
eval_dataloader = DataLoader(
    eval_dataset_for_model, collate_fn=data_collator, batch_size=args.per_device_eval_batch_size
)
def post_processing_function(examples, features, predictions, stage="eval"):
    # Post-processing: we match the start logits and end logits to answers in the original context.
    predictions = postprocess_qa_predictions(
        examples=examples,
        features=features,
        predictions=predictions,
        version_2_with_negative=args.version_2_with_negative,
        n_best_size=args.n_best_size,
        max_answer_length=args.max_answer_length,
        null_score_diff_threshold=args.null_score_diff_threshold,
        output_dir=args.output_dir,
        prefix=stage,
    )
    # Format the result to the format the metric expects.
    if args.version_2_with_negative:
        formatted_predictions = [
            {"id": k, "prediction_text": v, "no_answer_probability": 0.0} for k, v in predictions.items()
        ]
    else:
        formatted_predictions = [{"id": k, "prediction_text": v} for k, v in predictions.items()]
    references = [{"id": ex["id"], "answers": ex[answer_column_name]} for ex in examples]
    return EvalPrediction(predictions=formatted_predictions, label_ids=references)
metric = load_metric('squad_v2' if args.version_2_with_negative else 'squad')
# Evaluation!
logger.info('Loading ONNX model %s for evaluation', args.onnx_model_path)
with open(engine_name, 'rb') as f, trt.Runtime(TRT_LOGGER) as runtime, runtime.deserialize_cuda_engine(
f.read()
) as engine, engine.create_execution_context() as context:
    # set up for TRT inference
for i in range(len(input_names)):
context.set_binding_shape(i, INPUT_SHAPE)
assert context.all_binding_shapes_specified
    def binding_nbytes(binding):
        return trt.volume(engine.get_binding_shape(binding)) * engine.get_binding_dtype(binding).itemsize
    # Allocate device memory for inputs and outputs.
    d_inputs = [cuda.mem_alloc(binding_nbytes(binding)) for binding in engine if engine.binding_is_input(binding)]
    # Allocate output buffers (two logit tensors: start and end)
    h_output0 = cuda.pagelocked_empty(tuple(context.get_binding_shape(3)), dtype=np.float32)
    h_output1 = cuda.pagelocked_empty(tuple(context.get_binding_shape(4)), dtype=np.float32)
    d_output0 = cuda.mem_alloc(h_output0.nbytes)
    d_output1 = cuda.mem_alloc(h_output1.nbytes)
    # Create a stream in which to copy inputs/outputs and run inference.
    stream = cuda.Stream()
# Evaluation
logger.info('***** Running Evaluation *****')
logger.info(f''' Num examples = {len(eval_dataset)}''')
logger.info(f''' Batch size = {args.per_device_eval_batch_size}''')
    total_time = 0.0
    niter = 0
    start_time = timeit.default_timer()
    all_preds = None
    for step, batch in enumerate(eval_dataloader):
        outputs, infer_time = model_infer(batch, context, d_inputs, h_output0, h_output1, d_output0, d_output1, stream)
        total_time += infer_time
        niter += 1
        start_logits, end_logits = outputs
        start_logits = torch.tensor(start_logits)
        end_logits = torch.tensor(end_logits)
        # necessary to pad predictions and labels for being gathered
        start_logits = accelerator.pad_across_processes(start_logits, dim=1, pad_index=-100)
        end_logits = accelerator.pad_across_processes(end_logits, dim=1, pad_index=-100)
        logits = (accelerator.gather(start_logits).cpu().numpy(), accelerator.gather(end_logits).cpu().numpy())
        all_preds = logits if all_preds is None else nested_concat(all_preds, logits, padding_index=-100)
    if all_preds is not None:
        all_preds = nested_truncate(all_preds, len(eval_dataset))
    evalTime = timeit.default_timer() - start_time
logger.info(' Evaluation done in total %f secs (%f sec per example)', evalTime, evalTime / len(eval_dataset))
# Inference time from TRT
logger.info('Average Inference Time = {:.3f} ms'.format(total_time * 1_000 / niter))
logger.info('Total Inference Time = {:.3f} ms'.format(total_time * 1_000))
logger.info('Total Number of Inference = %d', niter)
    prediction = post_processing_function(eval_examples, eval_dataset, all_preds)
    eval_metric = metric.compute(predictions=prediction.predictions, references=prediction.label_ids)
logger.info(f'''Evaluation metrics: {eval_metric}''') | 697 | 1 |
'''simple docstring'''
import os
import time
from dataclasses import dataclass, field
from enum import Enum
from typing import Dict, List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...models.auto.modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
from ..processors.squad import SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features
__SCREAMING_SNAKE_CASE : List[str] = logging.get_logger(__name__)
MODEL_CONFIG_CLASSES = list(MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class SquadDataTrainingArguments:
    model_type: str = field(
        default=None, metadata={"help": "Model type selected in the list: " + ", ".join(MODEL_TYPES)} )
    data_dir: str = field(
        default=None, metadata={"help": "The input data dir. Should contain the .json files for the SQuAD task."} )
    max_seq_length: int = field(
        default=128, metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        } , )
    doc_stride: int = field(
        default=128, metadata={"help": "When splitting up a long document into chunks, how much stride to take between chunks."} , )
    max_query_length: int = field(
        default=64, metadata={
            "help": (
                "The maximum number of tokens for the question. Questions longer than this will "
                "be truncated to this length."
            )
        } , )
    max_answer_length: int = field(
        default=30, metadata={
            "help": (
                "The maximum length of an answer that can be generated. This is needed because the start "
                "and end predictions are not conditioned on one another."
            )
        } , )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"} )
    version_2_with_negative: bool = field(
        default=False, metadata={"help": "If true, the SQuAD examples contain some that do not have an answer."} )
    null_score_diff_threshold: float = field(
        default=0.0, metadata={"help": "If null_score - best_non_null is greater than the threshold predict null."} )
    n_best_size: int = field(
        default=20, metadata={"help": "If null_score - best_non_null is greater than the threshold predict null."} )
    lang_id: int = field(
        default=0, metadata={
            "help": (
                "language id of input for language-specific xlm models (see"
                " tokenization_xlm.PRETRAINED_INIT_CONFIGURATION)"
            )
        } , )
    threads: int = field(default=1, metadata={"help": "multiple threads for converting example to features"} )
class Split(Enum):
    train = "train"
    dev = "dev"
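# Example (illustrative): Split["dev"] is Split.dev, which is how the dataset below accepts either
# a plain string or a Split member for its `mode` argument.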
class SquadDataset(Dataset):
    args: SquadDataTrainingArguments
    features: List[SquadFeatures]
    mode: Split
    is_language_sensitive: bool
    def __init__(
        self,
        args: SquadDataTrainingArguments,
        tokenizer: PreTrainedTokenizer,
        limit_length: Optional[int] = None,
        mode: Union[str, Split] = Split.train,
        is_language_sensitive: Optional[bool] = False,
        cache_dir: Optional[str] = None,
        dataset_format: Optional[str] = "pt",
    ):
        self.args = args
        self.is_language_sensitive = is_language_sensitive
        self.processor = SquadV2Processor() if args.version_2_with_negative else SquadV1Processor()
        if isinstance(mode, str):
            try:
                mode = Split[mode]
            except KeyError:
                raise KeyError("mode is not a valid split name")
        self.mode = mode
        # Load data features from cache or dataset file
        version_tag = "v2" if args.version_2_with_negative else "v1"
        cached_features_file = os.path.join(
            cache_dir if cache_dir is not None else args.data_dir,
            f"cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{version_tag}",
        )
        # Make sure only the first process in distributed training processes the dataset,
        # and the others will use the cache.
        lock_path = cached_features_file + ".lock"
        with FileLock(lock_path):
            if os.path.exists(cached_features_file) and not args.overwrite_cache:
                start = time.time()
                self.old_features = torch.load(cached_features_file)
                # Legacy cache files have only features, while new cache files
                # will have dataset and examples also.
                self.features = self.old_features["features"]
                self.dataset = self.old_features.get("dataset", None)
                self.examples = self.old_features.get("examples", None)
                logger.info(
                    f"Loading features from cached file {cached_features_file} [took %.3f s]", time.time() - start
                )
                if self.dataset is None or self.examples is None:
                    logger.warning(
                        f"Deleting cached file {cached_features_file} will allow dataset and examples to be cached in"
                        " future run"
                    )
            else:
                if mode == Split.dev:
                    self.examples = self.processor.get_dev_examples(args.data_dir)
                else:
                    self.examples = self.processor.get_train_examples(args.data_dir)
                self.features, self.dataset = squad_convert_examples_to_features(
                    examples=self.examples,
                    tokenizer=tokenizer,
                    max_seq_length=args.max_seq_length,
                    doc_stride=args.doc_stride,
                    max_query_length=args.max_query_length,
                    is_training=mode == Split.train,
                    threads=args.threads,
                    return_dataset=dataset_format,
                )
                start = time.time()
                torch.save(
                    {"features": self.features, "dataset": self.dataset, "examples": self.examples},
                    cached_features_file,
                )
                # ^ This seems to take a lot of time so I want to investigate why and how we can improve.
                logger.info(
                    f"Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]"
                )
def __len__( self ):
'''simple docstring'''
return len(self.features )
    def __getitem__(self, i) -> Dict[str, torch.Tensor]:
        # Convert to Tensors and build dataset
        feature = self.features[i]
        input_ids = torch.tensor(feature.input_ids, dtype=torch.long)
        attention_mask = torch.tensor(feature.attention_mask, dtype=torch.long)
        token_type_ids = torch.tensor(feature.token_type_ids, dtype=torch.long)
        cls_index = torch.tensor(feature.cls_index, dtype=torch.long)
        p_mask = torch.tensor(feature.p_mask, dtype=torch.float)
        is_impossible = torch.tensor(feature.is_impossible, dtype=torch.float)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": attention_mask,
            "token_type_ids": token_type_ids,
        }
        if self.args.model_type in ["xlm", "roberta", "distilbert", "camembert"]:
            del inputs["token_type_ids"]
        if self.args.model_type in ["xlnet", "xlm"]:
            inputs.update({"cls_index": cls_index, "p_mask": p_mask})
            if self.args.version_2_with_negative:
                inputs.update({"is_impossible": is_impossible})
            if self.is_language_sensitive:
                inputs.update({"langs": (torch.ones(input_ids.shape, dtype=torch.int64) * self.args.lang_id)})
        if self.mode == Split.train:
            start_positions = torch.tensor(feature.start_position, dtype=torch.long)
            end_positions = torch.tensor(feature.end_position, dtype=torch.long)
            inputs.update({"start_positions": start_positions, "end_positions": end_positions})
        return inputs | 697 |
'''simple docstring'''
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNet2DModel
from ...schedulers import KarrasVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class KarrasVePipeline(DiffusionPipeline):
    unet: UNet2DModel
    scheduler: KarrasVeScheduler

    def __init__(self, unet: UNet2DModel, scheduler: KarrasVeScheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)
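    # Typical usage (illustrative, not taken from this file):
    #   pipe = KarrasVePipeline(unet=UNet2DModel(...), scheduler=KarrasVeScheduler())
    #   image = pipe(batch_size=1, num_inference_steps=50).images[0]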
@torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        num_inference_steps: int = 50,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        **kwargs,
    ):
        img_size = self.unet.config.sample_size
        shape = (batch_size, 3, img_size, img_size)
        model = self.unet
        # sample x_0 ~ N(0, sigma_0^2 * I)
        sample = randn_tensor(shape, generator=generator, device=self.device) * self.scheduler.init_noise_sigma
        self.scheduler.set_timesteps(num_inference_steps)
        for t in self.progress_bar(self.scheduler.timesteps):
            # here sigma_t == t_i from the paper
            sigma = self.scheduler.schedule[t]
            sigma_prev = self.scheduler.schedule[t - 1] if t > 0 else 0
            # 1. Select temporarily increased noise level sigma_hat
            # 2. Add new noise to move from sample_i to sample_hat
            sample_hat, sigma_hat = self.scheduler.add_noise_to_input(sample, sigma, generator=generator)
            # 3. Predict the noise residual given the noise magnitude `sigma_hat`
            # The model inputs and output are adjusted by following eq. (213) in [1].
            model_output = (sigma_hat / 2) * model((sample_hat + 1) / 2, sigma_hat / 2).sample
            # 4. Evaluate dx/dt at sigma_hat
            # 5. Take Euler step from sigma to sigma_prev
            step_output = self.scheduler.step(model_output, sigma_hat, sigma_prev, sample_hat)
            if sigma_prev != 0:
                # 6. Apply 2nd order correction
                # The model inputs and output are adjusted by following eq. (213) in [1].
                model_output = (sigma_prev / 2) * model((step_output.prev_sample + 1) / 2, sigma_prev / 2).sample
                step_output = self.scheduler.step_correct(
                    model_output,
                    sigma_hat,
                    sigma_prev,
                    sample_hat,
                    step_output.prev_sample,
                    step_output["derivative"],
                )
            sample = step_output.prev_sample
        sample = (sample / 2 + 0.5).clamp(0, 1)
        image = sample.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=image) | 697 | 1 |
'''simple docstring'''
def binary_exponentiation(a, n, mod):
    if n == 0:
        return 1
    elif n % 2 == 1:
        return (binary_exponentiation(a, n - 1, mod) * a) % mod
    else:
        b = binary_exponentiation(a, n // 2, mod)
        return (b * b) % mod
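# Quick self-check (illustrative, not in the original module): the recursive fast-exponentiation
# result must agree with Python's built-in three-argument pow.
assert binary_exponentiation(3, 13, 701) == pow(3, 13, 701)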
# a prime number
p = 701
a = 1_000_000_000
b = 10
# using binary exponentiation function, O(log(p)):
print((a / b) % p == (a * binary_exponentiation(b, p - 2, p)) % p)
print((a / b) % p == (a * b ** (p - 2)) % p) | 697 |
'''simple docstring'''
def _snake_case ( lowercase ) -> bool:
if not isinstance(lowercase , lowercase ):
raise ValueError("""check_bouncy() accepts only integer arguments""" )
__a : str = str(lowercase )
__a : Any = """""".join(sorted(lowercase ) )
return sorted_str_n != str_n and sorted_str_n[::-1] != str_n
def _snake_case ( lowercase = 9_9 ) -> int:
if not 0 < percent < 1_0_0:
raise ValueError("""solution() only accepts values from 0 to 100""" )
__a : List[str] = 0
__a : Union[str, Any] = 1
while True:
if check_bouncy(lowercase ):
bouncy_num += 1
if (bouncy_num / num) * 1_0_0 >= percent:
return num
num += 1
if __name__ == "__main__":
from doctest import testmod
testmod()
print(f'''{solution(99)}''') | 697 | 1 |
'''simple docstring'''
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, PerceiverTokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
if is_torch_available():
    FRAMEWORK = 'pt'
elif is_tf_available():
    FRAMEWORK = 'tf'
else:
    FRAMEWORK = 'jax'
class PerceiverTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = PerceiverTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()
        tokenizer = PerceiverTokenizer()
        tokenizer.save_pretrained(self.tmpdirname)

    @cached_property
    def perceiver_tokenizer(self):
        return PerceiverTokenizer.from_pretrained("deepmind/language-perceiver")

    def get_tokenizer(self, **kwargs) -> PerceiverTokenizer:
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)
    def get_clean_sequence(self, tokenizer, with_prefix_space=False, max_length=20, min_length=5) -> Tuple[str, list]:
        toks = []
        for i in range(len(tokenizer)):
            try:
                tok = tokenizer.decode([i], clean_up_tokenization_spaces=False)
            except UnicodeDecodeError:
                pass
            toks.append((i, tok))
        toks = list(filter(lambda t: re.match(r"^[ a-zA-Z]+$", t[1]), toks))
        toks = list(filter(lambda t: [t[0]] == tokenizer.encode(t[1], add_special_tokens=False), toks))
        if max_length is not None and len(toks) > max_length:
            toks = toks[:max_length]
        if min_length is not None and len(toks) < min_length and len(toks) > 0:
            while len(toks) < min_length:
                toks = toks + toks
        # toks_str = [t[1] for t in toks]
        toks_ids = [t[0] for t in toks]
        # Ensure consistency
        output_txt = tokenizer.decode(toks_ids, clean_up_tokenization_spaces=False)
        if " " not in output_txt and len(toks_ids) > 1:
            output_txt = (
                tokenizer.decode([toks_ids[0]], clean_up_tokenization_spaces=False)
                + " "
                + tokenizer.decode(toks_ids[1:], clean_up_tokenization_spaces=False)
            )
        if with_prefix_space:
            output_txt = " " + output_txt
        output_ids = tokenizer.encode(output_txt, add_special_tokens=False)
        return output_txt, output_ids
    def test_multibytes_char(self):
        tokenizer = self.perceiver_tokenizer
        src_text = "Unicode €."
        encoded = tokenizer(src_text)
        encoded_ids = [4, 91, 116, 111, 105, 117, 106, 107, 38, 232, 136, 178, 52, 5]
        self.assertEqual(encoded["input_ids"], encoded_ids)
        # decoding
        decoded = tokenizer.decode(encoded_ids)
        self.assertEqual(decoded, "[CLS]Unicode €.[SEP]")
        encoded = tokenizer("e è é ê ë")
        encoded_ids = [4, 107, 38, 201, 174, 38, 201, 175, 38, 201, 176, 38, 201, 177, 5]
        self.assertEqual(encoded["input_ids"], encoded_ids)
        # decoding
        decoded = tokenizer.decode(encoded_ids)
        self.assertEqual(decoded, "[CLS]e è é ê ë[SEP]")
        # encode/decode, but with `encode` instead of `__call__`
        self.assertEqual(tokenizer.decode(tokenizer.encode("e è é ê ë")), "[CLS]e è é ê ë[SEP]")
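    # Note on the expected ids above: PerceiverTokenizer operates on raw UTF-8 bytes shifted by a
    # fixed offset of 6 reserved for special tokens ([PAD]=0 ... [CLS]=4, [SEP]=5), so for example
    # ord("U") + 6 == 91 and ord("e") + 6 == 107, matching the sequences checked in this test.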
    def test_prepare_batch_integration(self):
        tokenizer = self.perceiver_tokenizer
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        # fmt: off
        expected_src_tokens = [4, 71, 38, 114, 117, 116, 109, 38, 118, 103, 120, 103, 109, 120, 103, 118, 110, 38, 108, 117, 120, 38, 121, 123, 115, 115, 103, 120, 111, 128, 103, 122, 111, 117, 116, 52, 5, 0]
        # fmt: on
        batch = tokenizer(src_text, padding=True, return_tensors=FRAMEWORK)
        self.assertIsInstance(batch, BatchEncoding)
        if FRAMEWORK != "jax":
            result = list(batch.input_ids.numpy()[0])
        else:
            result = list(batch.input_ids.tolist()[0])
        self.assertListEqual(expected_src_tokens, result)
        self.assertEqual((2, 38), batch.input_ids.shape)
        self.assertEqual((2, 38), batch.attention_mask.shape)
    def test_empty_target_text(self):
        tokenizer = self.perceiver_tokenizer
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        batch = tokenizer(src_text, padding=True, return_tensors=FRAMEWORK)
        # check if input_ids are returned and no decoder_input_ids
        self.assertIn("input_ids", batch)
        self.assertIn("attention_mask", batch)
        self.assertNotIn("decoder_input_ids", batch)
        self.assertNotIn("decoder_attention_mask", batch)
    def test_max_length_integration(self):
        tokenizer = self.perceiver_tokenizer
        tgt_text = [
            "Summary of the text.",
            "Another summary.",
        ]
        targets = tokenizer(
            text_target=tgt_text, max_length=32, padding="max_length", truncation=True, return_tensors=FRAMEWORK
        )
        self.assertEqual(32, targets["input_ids"].shape[1])
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Union[str, Any] = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f"""{tokenizer.__class__.__name__}""" ):
self.assertNotEqual(tokenizer.model_max_length , 42 )
# Now let's start the test
__a : int = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f"""{tokenizer.__class__.__name__}""" ):
# Isolate this from the other tests because we save additional tokens/etc
__a : Union[str, Any] = tempfile.mkdtemp()
__a : Optional[int] = """ He is very happy, UNwant\u00E9d,running"""
__a : Any = tokenizer.encode(__UpperCamelCase , add_special_tokens=__UpperCamelCase )
tokenizer.save_pretrained(__UpperCamelCase )
__a : str = tokenizer.__class__.from_pretrained(__UpperCamelCase )
__a : Any = after_tokenizer.encode(__UpperCamelCase , add_special_tokens=__UpperCamelCase )
self.assertListEqual(__UpperCamelCase , __UpperCamelCase )
shutil.rmtree(__UpperCamelCase )
__a : Union[str, Any] = self.get_tokenizers(model_max_length=42 )
for tokenizer in tokenizers:
with self.subTest(f"""{tokenizer.__class__.__name__}""" ):
# Isolate this from the other tests because we save additional tokens/etc
__a : Optional[Any] = tempfile.mkdtemp()
__a : int = """ He is very happy, UNwant\u00E9d,running"""
tokenizer.add_tokens(["""bim""", """bambam"""] )
__a : Optional[Any] = tokenizer.additional_special_tokens
additional_special_tokens.append("""new_additional_special_token""" )
tokenizer.add_special_tokens({"""additional_special_tokens""": additional_special_tokens} )
__a : Any = tokenizer.encode(__UpperCamelCase , add_special_tokens=__UpperCamelCase )
tokenizer.save_pretrained(__UpperCamelCase )
__a : List[Any] = tokenizer.__class__.from_pretrained(__UpperCamelCase )
__a : str = after_tokenizer.encode(__UpperCamelCase , add_special_tokens=__UpperCamelCase )
self.assertListEqual(__UpperCamelCase , __UpperCamelCase )
self.assertIn("""new_additional_special_token""" , after_tokenizer.additional_special_tokens )
self.assertEqual(after_tokenizer.model_max_length , 42 )
__a : Tuple = tokenizer.__class__.from_pretrained(__UpperCamelCase , model_max_length=43 )
self.assertEqual(tokenizer.model_max_length , 43 )
shutil.rmtree(__UpperCamelCase )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : int = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(__UpperCamelCase )
with open(os.path.join(__UpperCamelCase , """special_tokens_map.json""" ) , encoding="""utf-8""" ) as json_file:
__a : Any = json.load(__UpperCamelCase )
with open(os.path.join(__UpperCamelCase , """tokenizer_config.json""" ) , encoding="""utf-8""" ) as json_file:
__a : Dict = json.load(__UpperCamelCase )
__a : Union[str, Any] = [f"""<extra_id_{i}>""" for i in range(125 )]
__a : List[Any] = added_tokens_extra_ids + [
"""an_additional_special_token"""
]
__a : Union[str, Any] = added_tokens_extra_ids + [
"""an_additional_special_token"""
]
with open(os.path.join(__UpperCamelCase , """special_tokens_map.json""" ) , """w""" , encoding="""utf-8""" ) as outfile:
json.dump(__UpperCamelCase , __UpperCamelCase )
with open(os.path.join(__UpperCamelCase , """tokenizer_config.json""" ) , """w""" , encoding="""utf-8""" ) as outfile:
json.dump(__UpperCamelCase , __UpperCamelCase )
# the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
# into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
# "special_tokens_map.json" files
__a : int = tokenizer_class.from_pretrained(
__UpperCamelCase , )
self.assertIn(
"""an_additional_special_token""" , tokenizer_without_change_in_init.additional_special_tokens )
self.assertEqual(
["""an_additional_special_token"""] , tokenizer_without_change_in_init.convert_ids_to_tokens(
tokenizer_without_change_in_init.convert_tokens_to_ids(["""an_additional_special_token"""] ) ) , )
# Now we test that we can change the value of additional_special_tokens in the from_pretrained
__a : Optional[Any] = added_tokens_extra_ids + [AddedToken("""a_new_additional_special_token""" , lstrip=__UpperCamelCase )]
__a : str = tokenizer_class.from_pretrained(
__UpperCamelCase , additional_special_tokens=__UpperCamelCase , )
self.assertIn("""a_new_additional_special_token""" , tokenizer.additional_special_tokens )
self.assertEqual(
["""a_new_additional_special_token"""] , tokenizer.convert_ids_to_tokens(
tokenizer.convert_tokens_to_ids(["""a_new_additional_special_token"""] ) ) , )
    def test_decode_invalid_byte_id(self):
        tokenizer = self.perceiver_tokenizer
        self.assertEqual(tokenizer.decode([178]), "�")
def __lowerCamelCase ( self ):
'''simple docstring'''
pass
def __lowerCamelCase ( self ):
'''simple docstring'''
pass
def __lowerCamelCase ( self ):
'''simple docstring'''
pass
def __lowerCamelCase ( self ):
'''simple docstring'''
pass
    def test_convert_tokens_to_string_format(self):
        tokenizers = self.get_tokenizers(fast=True, do_lower_case=True)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                tokens = ["[CLS]", "t", "h", "i", "s", " ", "i", "s", " ", "a", " ", "t", "e", "s", "t", "[SEP]"]
                string = tokenizer.convert_tokens_to_string(tokens)
                self.assertIsInstance(string, str) | 697 |
'''simple docstring'''
import argparse
import torch
from transformers import GPT2Config, GPT2Model, load_tf_weights_in_gpt2
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def convert_gpt2_checkpoint_to_pytorch(gpt2_checkpoint_path, gpt2_config_file, pytorch_dump_folder_path):
    # Construct model
    if gpt2_config_file == "":
        config = GPT2Config()
    else:
        config = GPT2Config.from_json_file(gpt2_config_file)
    model = GPT2Model(config)
    # Load weights from numpy
    load_tf_weights_in_gpt2(model, config, gpt2_checkpoint_path)
    # Save pytorch-model
    pytorch_weights_dump_path = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
    pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
    print(f"Save PyTorch model to {pytorch_weights_dump_path}")
    torch.save(model.state_dict(), pytorch_weights_dump_path)
    print(f"Save configuration file to {pytorch_config_dump_path}")
    with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
        f.write(config.to_json_string())
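# Example invocation (illustrative paths and script name only):
#   python convert_gpt2_checkpoint.py \
#       --gpt2_checkpoint_path ./gpt2_tf_ckpt \
#       --pytorch_dump_folder_path ./gpt2_pt \
#       --gpt2_config_file ./gpt2_config.json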
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--gpt2_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--gpt2_config_file',
default='',
type=str,
help=(
'An optional config json file corresponding to the pre-trained OpenAI model. \n'
'This specifies the model architecture.'
),
)
    args = parser.parse_args()
    convert_gpt2_checkpoint_to_pytorch(args.gpt2_checkpoint_path, args.gpt2_config_file, args.pytorch_dump_folder_path) | 697 | 1 |
'''simple docstring'''
def jaccard_similarity(set_a, set_b, alternative_union=False):
    if isinstance(set_a, set) and isinstance(set_b, set):
        intersection = len(set_a.intersection(set_b))
        if alternative_union:
            union = len(set_a) + len(set_b)
        else:
            union = len(set_a.union(set_b))
        return intersection / union
    if isinstance(set_a, (list, tuple)) and isinstance(set_b, (list, tuple)):
        intersection = [element for element in set_a if element in set_b]
        if alternative_union:
            union = len(set_a) + len(set_b)
            return len(intersection) / union
        else:
            union = set_a + [element for element in set_b if element not in set_a]
            return len(intersection) / len(union)
    return None
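# Worked example (matches the demo below): {'a','b','c','d','e'} and {'c','d','e','f','h','i'}
# share 3 elements, and their union has 8, so jaccard_similarity returns 3 / 8 = 0.375.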
if __name__ == "__main__":
    set_a = {'a', 'b', 'c', 'd', 'e'}
    set_b = {'c', 'd', 'e', 'f', 'h', 'i'}
print(jaccard_similarity(set_a, set_b)) | 697 |
'''simple docstring'''
import unittest
from transformers import (
MODEL_FOR_OBJECT_DETECTION_MAPPING,
AutoFeatureExtractor,
AutoModelForObjectDetection,
ObjectDetectionPipeline,
is_vision_available,
pipeline,
)
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_pytesseract,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class ObjectDetectionPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_OBJECT_DETECTION_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        object_detector = ObjectDetectionPipeline(model=model, image_processor=processor)
        return object_detector, ["./tests/fixtures/tests_samples/COCO/000000039769.png"]

    def run_pipeline_test(self, object_detector, examples):
        outputs = object_detector("./tests/fixtures/tests_samples/COCO/000000039769.png", threshold=0.0)
        self.assertGreater(len(outputs), 0)
        for detected_object in outputs:
            self.assertEqual(
                detected_object,
                {
                    "score": ANY(float),
                    "label": ANY(str),
                    "box": {"xmin": ANY(int), "ymin": ANY(int), "xmax": ANY(int), "ymax": ANY(int)},
                },
            )
        import datasets

        dataset = datasets.load_dataset("hf-internal-testing/fixtures_image_utils", "image", split="test")
        batch = [
            Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png"),
            "http://images.cocodataset.org/val2017/000000039769.jpg",
            # RGBA
            dataset[0]["file"],
            # LA
            dataset[1]["file"],
            # L
            dataset[2]["file"],
        ]
        batch_outputs = object_detector(batch, threshold=0.0)
        self.assertEqual(len(batch), len(batch_outputs))
        for outputs in batch_outputs:
            self.assertGreater(len(outputs), 0)
            for detected_object in outputs:
                self.assertEqual(
                    detected_object,
                    {
                        "score": ANY(float),
                        "label": ANY(str),
                        "box": {"xmin": ANY(int), "ymin": ANY(int), "xmax": ANY(int), "ymax": ANY(int)},
                    },
                )
@require_tf
@unittest.skip("""Object detection not implemented in TF""" )
    def test_small_model_tf(self):
pass
@require_torch
    def test_small_model_pt(self):
__a : Optional[Any] = """hf-internal-testing/tiny-detr-mobilenetsv3"""
__a : Dict = AutoModelForObjectDetection.from_pretrained(__UpperCamelCase )
__a : Optional[Any] = AutoFeatureExtractor.from_pretrained(__UpperCamelCase )
__a : str = ObjectDetectionPipeline(model=__UpperCamelCase , feature_extractor=__UpperCamelCase )
__a : Optional[int] = object_detector("""http://images.cocodataset.org/val2017/000000039769.jpg""" , threshold=0.0 )
self.assertEqual(
nested_simplify(__UpperCamelCase , decimals=4 ) , [
{"""score""": 0.3_3_7_6, """label""": """LABEL_0""", """box""": {"""xmin""": 159, """ymin""": 120, """xmax""": 480, """ymax""": 359}},
{"""score""": 0.3_3_7_6, """label""": """LABEL_0""", """box""": {"""xmin""": 159, """ymin""": 120, """xmax""": 480, """ymax""": 359}},
] , )
__a : Union[str, Any] = object_detector(
[
"""http://images.cocodataset.org/val2017/000000039769.jpg""",
"""http://images.cocodataset.org/val2017/000000039769.jpg""",
] , threshold=0.0 , )
self.assertEqual(
nested_simplify(__UpperCamelCase , decimals=4 ) , [
[
{"""score""": 0.3_3_7_6, """label""": """LABEL_0""", """box""": {"""xmin""": 159, """ymin""": 120, """xmax""": 480, """ymax""": 359}},
{"""score""": 0.3_3_7_6, """label""": """LABEL_0""", """box""": {"""xmin""": 159, """ymin""": 120, """xmax""": 480, """ymax""": 359}},
],
[
{"""score""": 0.3_3_7_6, """label""": """LABEL_0""", """box""": {"""xmin""": 159, """ymin""": 120, """xmax""": 480, """ymax""": 359}},
{"""score""": 0.3_3_7_6, """label""": """LABEL_0""", """box""": {"""xmin""": 159, """ymin""": 120, """xmax""": 480, """ymax""": 359}},
],
] , )
@require_torch
@slow
    def test_large_model_pt(self):
__a : str = """facebook/detr-resnet-50"""
__a : Dict = AutoModelForObjectDetection.from_pretrained(__UpperCamelCase )
__a : int = AutoFeatureExtractor.from_pretrained(__UpperCamelCase )
__a : int = ObjectDetectionPipeline(model=__UpperCamelCase , feature_extractor=__UpperCamelCase )
__a : Any = object_detector("""http://images.cocodataset.org/val2017/000000039769.jpg""" )
self.assertEqual(
nested_simplify(__UpperCamelCase , decimals=4 ) , [
{"""score""": 0.9_9_8_2, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 70, """xmax""": 175, """ymax""": 117}},
{"""score""": 0.9_9_6_0, """label""": """remote""", """box""": {"""xmin""": 333, """ymin""": 72, """xmax""": 368, """ymax""": 187}},
{"""score""": 0.9_9_5_5, """label""": """couch""", """box""": {"""xmin""": 0, """ymin""": 1, """xmax""": 639, """ymax""": 473}},
{"""score""": 0.9_9_8_8, """label""": """cat""", """box""": {"""xmin""": 13, """ymin""": 52, """xmax""": 314, """ymax""": 470}},
{"""score""": 0.9_9_8_7, """label""": """cat""", """box""": {"""xmin""": 345, """ymin""": 23, """xmax""": 640, """ymax""": 368}},
] , )
__a : Optional[Any] = object_detector(
[
"""http://images.cocodataset.org/val2017/000000039769.jpg""",
"""http://images.cocodataset.org/val2017/000000039769.jpg""",
] )
self.assertEqual(
nested_simplify(__UpperCamelCase , decimals=4 ) , [
[
{"""score""": 0.9_9_8_2, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 70, """xmax""": 175, """ymax""": 117}},
{"""score""": 0.9_9_6_0, """label""": """remote""", """box""": {"""xmin""": 333, """ymin""": 72, """xmax""": 368, """ymax""": 187}},
{"""score""": 0.9_9_5_5, """label""": """couch""", """box""": {"""xmin""": 0, """ymin""": 1, """xmax""": 639, """ymax""": 473}},
{"""score""": 0.9_9_8_8, """label""": """cat""", """box""": {"""xmin""": 13, """ymin""": 52, """xmax""": 314, """ymax""": 470}},
{"""score""": 0.9_9_8_7, """label""": """cat""", """box""": {"""xmin""": 345, """ymin""": 23, """xmax""": 640, """ymax""": 368}},
],
[
{"""score""": 0.9_9_8_2, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 70, """xmax""": 175, """ymax""": 117}},
{"""score""": 0.9_9_6_0, """label""": """remote""", """box""": {"""xmin""": 333, """ymin""": 72, """xmax""": 368, """ymax""": 187}},
{"""score""": 0.9_9_5_5, """label""": """couch""", """box""": {"""xmin""": 0, """ymin""": 1, """xmax""": 639, """ymax""": 473}},
{"""score""": 0.9_9_8_8, """label""": """cat""", """box""": {"""xmin""": 13, """ymin""": 52, """xmax""": 314, """ymax""": 470}},
{"""score""": 0.9_9_8_7, """label""": """cat""", """box""": {"""xmin""": 345, """ymin""": 23, """xmax""": 640, """ymax""": 368}},
],
] , )
@require_torch
@slow
    def test_integration_torch_object_detection(self):
__a : int = """facebook/detr-resnet-50"""
__a : Optional[int] = pipeline("""object-detection""" , model=__UpperCamelCase )
__a : Optional[int] = object_detector("""http://images.cocodataset.org/val2017/000000039769.jpg""" )
self.assertEqual(
nested_simplify(__UpperCamelCase , decimals=4 ) , [
{"""score""": 0.9_9_8_2, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 70, """xmax""": 175, """ymax""": 117}},
{"""score""": 0.9_9_6_0, """label""": """remote""", """box""": {"""xmin""": 333, """ymin""": 72, """xmax""": 368, """ymax""": 187}},
{"""score""": 0.9_9_5_5, """label""": """couch""", """box""": {"""xmin""": 0, """ymin""": 1, """xmax""": 639, """ymax""": 473}},
{"""score""": 0.9_9_8_8, """label""": """cat""", """box""": {"""xmin""": 13, """ymin""": 52, """xmax""": 314, """ymax""": 470}},
{"""score""": 0.9_9_8_7, """label""": """cat""", """box""": {"""xmin""": 345, """ymin""": 23, """xmax""": 640, """ymax""": 368}},
] , )
__a : List[str] = object_detector(
[
"""http://images.cocodataset.org/val2017/000000039769.jpg""",
"""http://images.cocodataset.org/val2017/000000039769.jpg""",
] )
self.assertEqual(
nested_simplify(__UpperCamelCase , decimals=4 ) , [
[
{"""score""": 0.9_9_8_2, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 70, """xmax""": 175, """ymax""": 117}},
{"""score""": 0.9_9_6_0, """label""": """remote""", """box""": {"""xmin""": 333, """ymin""": 72, """xmax""": 368, """ymax""": 187}},
{"""score""": 0.9_9_5_5, """label""": """couch""", """box""": {"""xmin""": 0, """ymin""": 1, """xmax""": 639, """ymax""": 473}},
{"""score""": 0.9_9_8_8, """label""": """cat""", """box""": {"""xmin""": 13, """ymin""": 52, """xmax""": 314, """ymax""": 470}},
{"""score""": 0.9_9_8_7, """label""": """cat""", """box""": {"""xmin""": 345, """ymin""": 23, """xmax""": 640, """ymax""": 368}},
],
[
{"""score""": 0.9_9_8_2, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 70, """xmax""": 175, """ymax""": 117}},
{"""score""": 0.9_9_6_0, """label""": """remote""", """box""": {"""xmin""": 333, """ymin""": 72, """xmax""": 368, """ymax""": 187}},
{"""score""": 0.9_9_5_5, """label""": """couch""", """box""": {"""xmin""": 0, """ymin""": 1, """xmax""": 639, """ymax""": 473}},
{"""score""": 0.9_9_8_8, """label""": """cat""", """box""": {"""xmin""": 13, """ymin""": 52, """xmax""": 314, """ymax""": 470}},
{"""score""": 0.9_9_8_7, """label""": """cat""", """box""": {"""xmin""": 345, """ymin""": 23, """xmax""": 640, """ymax""": 368}},
],
] , )
@require_torch
@slow
    def test_threshold(self):
__a : Union[str, Any] = 0.9_9_8_5
__a : Union[str, Any] = """facebook/detr-resnet-50"""
__a : Optional[int] = pipeline("""object-detection""" , model=__UpperCamelCase )
__a : Union[str, Any] = object_detector("""http://images.cocodataset.org/val2017/000000039769.jpg""" , threshold=__UpperCamelCase )
self.assertEqual(
nested_simplify(__UpperCamelCase , decimals=4 ) , [
{"""score""": 0.9_9_8_8, """label""": """cat""", """box""": {"""xmin""": 13, """ymin""": 52, """xmax""": 314, """ymax""": 470}},
{"""score""": 0.9_9_8_7, """label""": """cat""", """box""": {"""xmin""": 345, """ymin""": 23, """xmax""": 640, """ymax""": 368}},
] , )
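    # Note: `threshold` filters detections by score before they are returned, so with
    # threshold=0.9985 only the two cat boxes (scores 0.9988 and 0.9987) survive out of the five
    # predictions seen in the unthresholded large-model test above.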
@require_torch
@require_pytesseract
@slow
    def test_layoutlm(self):
__a : str = """Narsil/layoutlmv3-finetuned-funsd"""
__a : List[Any] = 0.9_9_9_3
__a : Dict = pipeline("""object-detection""" , model=__UpperCamelCase , threshold=__UpperCamelCase )
__a : List[str] = object_detector(
"""https://huggingface.co/spaces/impira/docquery/resolve/2359223c1837a7587402bda0f2643382a6eefeab/invoice.png""" )
self.assertEqual(
nested_simplify(__UpperCamelCase , decimals=4 ) , [
{"""score""": 0.9_9_9_3, """label""": """I-ANSWER""", """box""": {"""xmin""": 294, """ymin""": 254, """xmax""": 343, """ymax""": 264}},
{"""score""": 0.9_9_9_3, """label""": """I-ANSWER""", """box""": {"""xmin""": 294, """ymin""": 254, """xmax""": 343, """ymax""": 264}},
] , ) | 697 | 1 |
'''simple docstring'''
import json
import logging
import os
import sys
from pathlib import Path
import finetune_rag
from transformers.file_utils import is_apex_available
from transformers.testing_utils import (
TestCasePlus,
execute_subprocess_async,
require_ray,
require_torch_gpu,
require_torch_multi_gpu,
)
logging.basicConfig(level=logging.DEBUG)
__SCREAMING_SNAKE_CASE : Optional[int] = logging.getLogger()
__SCREAMING_SNAKE_CASE : Dict = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class RagFinetuneExampleTests(TestCasePlus):
    def _create_dummy_data(self, data_dir):
        os.makedirs(data_dir, exist_ok=True)
        contents = {"source": "What is love ?", "target": "life"}
        n_lines = {"train": 12, "val": 2, "test": 2}
        for split in ["train", "test", "val"]:
            for field in ["source", "target"]:
                content = "\n".join([contents[field]] * n_lines[split])
                with open(os.path.join(data_dir, f"{split}.{field}"), "w") as f:
                    f.write(content)
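    # Resulting layout (illustrative): data_dir/{train,val,test}.{source,target}, each file holding
    # the same line repeated n_lines[split] times (12 for train, 2 each for val and test).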
    def _run_finetune(self, gpus: int, distributed_retriever: str = "pytorch"):
        tmp_dir = self.get_auto_remove_tmp_dir()
        output_dir = os.path.join(tmp_dir, "output")
        data_dir = os.path.join(tmp_dir, "data")
        self._create_dummy_data(data_dir=data_dir)
        testargs = f"""
--data_dir {data_dir} \
--output_dir {output_dir} \
--model_name_or_path facebook/rag-sequence-base \
--model_type rag_sequence \
--do_train \
--do_predict \
--n_val -1 \
--val_check_interval 1.0 \
--train_batch_size 2 \
--eval_batch_size 1 \
--max_source_length 25 \
--max_target_length 25 \
--val_max_target_length 25 \
--test_max_target_length 25 \
--label_smoothing 0.1 \
--dropout 0.1 \
--attention_dropout 0.1 \
--weight_decay 0.001 \
--adam_epsilon 1e-08 \
--max_grad_norm 0.1 \
--lr_scheduler polynomial \
--learning_rate 3e-04 \
--num_train_epochs 1 \
--warmup_steps 4 \
--gradient_accumulation_steps 1 \
--distributed-port 8787 \
--use_dummy_dataset 1 \
--distributed_retriever {distributed_retriever} \
""".split()
if gpus > 0:
testargs.append(f"""--gpus={gpus}""" )
if is_apex_available():
testargs.append("""--fp16""" )
else:
testargs.append("""--gpus=0""" )
testargs.append("""--distributed_backend=ddp_cpu""" )
testargs.append("""--num_processes=2""" )
        cmd = [sys.executable, str(Path(finetune_rag.__file__).resolve())] + testargs
        execute_subprocess_async(cmd, env=self.get_env())
        metrics_save_path = os.path.join(output_dir, "metrics.json")
        with open(metrics_save_path) as f:
            result = json.load(f)
return result
@require_torch_gpu
    def test_finetune_gpu(self):
__a : List[str] = self._run_finetune(gpus=1 )
self.assertGreaterEqual(result["""test"""][0]["""test_avg_em"""] , 0.2 )
@require_torch_multi_gpu
    def test_finetune_multigpu(self):
__a : Tuple = self._run_finetune(gpus=2 )
self.assertGreaterEqual(result["""test"""][0]["""test_avg_em"""] , 0.2 )
@require_torch_gpu
@require_ray
    def test_finetune_gpu_ray_retriever(self):
__a : Tuple = self._run_finetune(gpus=1 , distributed_retriever="""ray""" )
self.assertGreaterEqual(result["""test"""][0]["""test_avg_em"""] , 0.2 )
@require_torch_multi_gpu
@require_ray
    def test_finetune_multigpu_ray_retriever(self):
__a : Optional[Any] = self._run_finetune(gpus=1 , distributed_retriever="""ray""" )
self.assertGreaterEqual(result["""test"""][0]["""test_avg_em"""] , 0.2 ) | 697 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'configuration_blenderbot_small': [
'BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP',
'BlenderbotSmallConfig',
'BlenderbotSmallOnnxConfig',
],
'tokenization_blenderbot_small': ['BlenderbotSmallTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_blenderbot_small_fast'] = ['BlenderbotSmallTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_blenderbot_small'] = [
'BLENDERBOT_SMALL_PRETRAINED_MODEL_ARCHIVE_LIST',
'BlenderbotSmallForCausalLM',
'BlenderbotSmallForConditionalGeneration',
'BlenderbotSmallModel',
'BlenderbotSmallPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_blenderbot_small'] = [
'TFBlenderbotSmallForConditionalGeneration',
'TFBlenderbotSmallModel',
'TFBlenderbotSmallPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_flax_blenderbot_small'] = [
'FlaxBlenderbotSmallForConditionalGeneration',
'FlaxBlenderbotSmallModel',
'FlaxBlenderbotSmallPreTrainedModel',
]
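# With this structure nothing heavy is imported eagerly: at the bottom of the file the module is
# replaced by a _LazyModule, whose __getattr__ imports the relevant submodule on first attribute
# access. Illustrative usage (assuming the usual transformers package layout):
#   from transformers.models.blenderbot_small import BlenderbotSmallConfig  # resolved lazily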
if TYPE_CHECKING:
from .configuration_blenderbot_small import (
BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlenderbotSmallConfig,
BlenderbotSmallOnnxConfig,
)
from .tokenization_blenderbot_small import BlenderbotSmallTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_blenderbot_small_fast import BlenderbotSmallTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blenderbot_small import (
BLENDERBOT_SMALL_PRETRAINED_MODEL_ARCHIVE_LIST,
BlenderbotSmallForCausalLM,
BlenderbotSmallForConditionalGeneration,
BlenderbotSmallModel,
BlenderbotSmallPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blenderbot_small import (
TFBlenderbotSmallForConditionalGeneration,
TFBlenderbotSmallModel,
TFBlenderbotSmallPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_blenderbot_small import (
FlaxBlenderbotSmallForConditionalGeneration,
FlaxBlenderbotSmallModel,
FlaxBlenderbotSmallPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) | 697 | 1 |
'''simple docstring'''
import csv
from collections import defaultdict
from dataclasses import dataclass, field
from typing import List, Optional
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.ticker import ScalarFormatter
from transformers import HfArgumentParser
def list_field(default=None, metadata=None):
    return field(default_factory=lambda: default, metadata=metadata)
@dataclass
class PlotArguments:
    csv_file: str = field(
        metadata={"help": "The csv file to plot."} , )
    plot_along_batch: bool = field(
        default=False, metadata={"help": "Whether to plot along batch size or sequence length. Defaults to sequence length."} , )
    is_time: bool = field(
        default=False, metadata={"help": "Whether the csv file has time results or memory results. Defaults to memory results."} , )
    no_log_scale: bool = field(
        default=False, metadata={"help": "Disable logarithmic scale when plotting"} , )
    is_train: bool = field(
        default=False, metadata={
            "help": "Whether the csv file has training results or inference results. Defaults to inference results."
        } , )
    figure_png_file: Optional[str] = field(
        default=None, metadata={"help": "Filename under which the plot will be saved. If unused no plot is saved."} , )
    short_model_names: Optional[List[str]] = list_field(
        default=None, metadata={"help": "List of model names that are used instead of the ones in the csv file."} )
def can_convert_to_int(string):
    try:
        int(string)
        return True
    except ValueError:
        return False


def can_convert_to_float(string):
    try:
        float(string)
        return True
    except ValueError:
        return False
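# e.g. can_convert_to_int("128") -> True, can_convert_to_float("1.5e-3") -> True, and
# can_convert_to_int("N/A") -> False, in which case the row's result is simply not stored below.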
class Plot:
    def __init__(self, args):
        self.args = args
        self.result_dict = defaultdict(lambda: {"bsz": [], "seq_len": [], "result": {}})
        with open(self.args.csv_file, newline="") as csv_file:
            reader = csv.DictReader(csv_file)
            for row in reader:
                model_name = row["model"]
                self.result_dict[model_name]["bsz"].append(int(row["batch_size"]))
                self.result_dict[model_name]["seq_len"].append(int(row["sequence_length"]))
                if can_convert_to_int(row["result"]):
                    # value is not None
                    self.result_dict[model_name]["result"][
                        (int(row["batch_size"]), int(row["sequence_length"]))
                    ] = int(row["result"])
                elif can_convert_to_float(row["result"]):
                    # value is not None
                    self.result_dict[model_name]["result"][
                        (int(row["batch_size"]), int(row["sequence_length"]))
                    ] = float(row["result"])
    def plot(self):
        fig, ax = plt.subplots()
        title_str = "Time usage" if self.args.is_time else "Memory usage"
        title_str = title_str + " for training" if self.args.is_train else title_str + " for inference"
        if not self.args.no_log_scale:
            # set logarithm scales
            ax.set_xscale("log")
            ax.set_yscale("log")
        for axis in [ax.xaxis, ax.yaxis]:
            axis.set_major_formatter(ScalarFormatter())
        for model_name_idx, model_name in enumerate(self.result_dict.keys()):
            batch_sizes = sorted(set(self.result_dict[model_name]["bsz"]))
            sequence_lengths = sorted(set(self.result_dict[model_name]["seq_len"]))
            results = self.result_dict[model_name]["result"]
            (x_axis_array, inner_loop_array) = (
                (batch_sizes, sequence_lengths) if self.args.plot_along_batch else (sequence_lengths, batch_sizes)
            )
            label_model_name = (
                model_name if self.args.short_model_names is None else self.args.short_model_names[model_name_idx]
            )
            for inner_loop_value in inner_loop_array:
                if self.args.plot_along_batch:
                    y_axis_array = np.asarray(
                        [results[(x, inner_loop_value)] for x in x_axis_array if (x, inner_loop_value) in results],
                        dtype=int,
                    )
                else:
                    y_axis_array = np.asarray(
                        [results[(inner_loop_value, x)] for x in x_axis_array if (inner_loop_value, x) in results],
                        dtype=np.float32,
                    )
                (x_axis_label, inner_loop_label) = (
                    ("batch_size", "len") if self.args.plot_along_batch else ("in #tokens", "bsz")
                )
                x_axis_array_trimmed = np.asarray(x_axis_array, int)[: len(y_axis_array)]
                plt.scatter(
                    x_axis_array_trimmed, y_axis_array, label=f"{label_model_name} - {inner_loop_label}: {inner_loop_value}"
                )
                plt.plot(x_axis_array_trimmed, y_axis_array, "--")
            title_str += f" {label_model_name} vs."
        title_str = title_str[:-4]
        y_axis_label = "Time in s" if self.args.is_time else "Memory in MB"
        # plot
        plt.title(title_str)
        plt.xlabel(x_axis_label)
        plt.ylabel(y_axis_label)
        plt.legend()
        if self.args.figure_png_file is not None:
            plt.savefig(self.args.figure_png_file)
        else:
            plt.show()
def main():
    parser = HfArgumentParser(PlotArguments)
    plot_args = parser.parse_args_into_dataclasses()[0]
    plot = Plot(args=plot_args)
    plot.plot()
if __name__ == "__main__":
main() | 697 |
'''simple docstring'''
import numpy as np
import torch
from torch.utils.data import Dataset
from utils import logger
class LmSeqsDataset(Dataset):
    def __init__(self, params, data):
        self.params = params
        self.token_ids = np.array(data)
        self.lengths = np.array([len(t) for t in data])
        self.check()
        self.remove_long_sequences()
        self.remove_empty_sequences()
        self.remove_unknown_sequences()
        self.check()
        self.print_statistics()
def __getitem__( self , __UpperCamelCase ):
'''simple docstring'''
return (self.token_ids[index], self.lengths[index])
def __len__( self ):
'''simple docstring'''
return len(self.lengths )
def __lowerCamelCase ( self ):
'''simple docstring'''
assert len(self.token_ids ) == len(self.lengths )
assert all(self.lengths[i] == len(self.token_ids[i] ) for i in range(len(self.lengths ) ) )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Tuple = self.params.max_model_input_size
__a : Union[str, Any] = self.lengths > max_len
logger.info(f"""Splitting {sum(__UpperCamelCase )} too long sequences.""" )
def divide_chunks(__UpperCamelCase , __UpperCamelCase ):
return [l[i : i + n] for i in range(0 , len(__UpperCamelCase ) , __UpperCamelCase )]
__a : int = []
__a : Union[str, Any] = []
if self.params.mlm:
__a , __a : Any = self.params.special_tok_ids["""cls_token"""], self.params.special_tok_ids["""sep_token"""]
else:
__a , __a : str = self.params.special_tok_ids["""bos_token"""], self.params.special_tok_ids["""eos_token"""]
for seq_, len_ in zip(self.token_ids , self.lengths ):
assert (seq_[0] == cls_id) and (seq_[-1] == sep_id), seq_
if len_ <= max_len:
new_tok_ids.append(seq_ )
new_lengths.append(len_ )
else:
__a : Any = []
for sub_s in divide_chunks(seq_ , max_len - 2 ):
if sub_s[0] != cls_id:
__a : int = np.insert(__UpperCamelCase , 0 , __UpperCamelCase )
if sub_s[-1] != sep_id:
__a : str = np.insert(__UpperCamelCase , len(__UpperCamelCase ) , __UpperCamelCase )
assert len(__UpperCamelCase ) <= max_len
assert (sub_s[0] == cls_id) and (sub_s[-1] == sep_id), sub_s
sub_seqs.append(__UpperCamelCase )
new_tok_ids.extend(__UpperCamelCase )
new_lengths.extend([len(__UpperCamelCase ) for l in sub_seqs] )
__a : Dict = np.array(__UpperCamelCase )
__a : Tuple = np.array(__UpperCamelCase )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : List[str] = len(self )
__a : List[str] = self.lengths > 11
__a : int = self.token_ids[indices]
__a : Union[str, Any] = self.lengths[indices]
__a : Any = len(self )
logger.info(f"""Remove {init_size - new_size} too short (<=11 tokens) sequences.""" )
def __lowerCamelCase ( self ):
'''simple docstring'''
if "unk_token" not in self.params.special_tok_ids:
return
else:
__a : List[str] = self.params.special_tok_ids["""unk_token"""]
__a : str = len(self )
__a : str = np.array([np.count_nonzero(a == unk_token_id ) for a in self.token_ids] )
__a : Optional[Any] = (unk_occs / self.lengths) < 0.5
__a : List[str] = self.token_ids[indices]
__a : Optional[int] = self.lengths[indices]
__a : Any = len(self )
logger.info(f"""Remove {init_size - new_size} sequences with a high level of unknown tokens (50%).""" )
def __lowerCamelCase ( self ):
'''simple docstring'''
if not self.params.is_master:
return
logger.info(f"""{len(self )} sequences""" )
# data_len = sum(self.lengths)
# nb_unique_tokens = len(Counter(list(chain(*self.token_ids))))
# logger.info(f'{data_len} tokens ({nb_unique_tokens} unique)')
# unk_idx = self.params.special_tok_ids['unk_token']
# nb_unknown = sum([(t==unk_idx).sum() for t in self.token_ids])
# logger.info(f'{nb_unknown} unknown tokens (covering {100*nb_unknown/data_len:.2f}% of the data)')
def __lowerCamelCase ( self , __UpperCamelCase ):
'''simple docstring'''
__a : List[str] = [t[0] for t in batch]
__a : str = [t[1] for t in batch]
assert len(__UpperCamelCase ) == len(__UpperCamelCase )
# Max for paddings
__a : Optional[int] = max(__UpperCamelCase )
# Pad token ids
if self.params.mlm:
__a : int = self.params.special_tok_ids["""pad_token"""]
else:
__a : Tuple = self.params.special_tok_ids["""unk_token"""]
__a : Any = [list(t.astype(__UpperCamelCase ) ) + [pad_idx] * (max_seq_len_ - len(__UpperCamelCase )) for t in token_ids]
assert len(tk_ ) == len(__UpperCamelCase )
assert all(len(__UpperCamelCase ) == max_seq_len_ for t in tk_ )
__a : Any = torch.tensor(tk_ ) # (bs, max_seq_len_)
__a : Optional[Any] = torch.tensor(__UpperCamelCase ) # (bs)
return tk_t, lg_t | 697 | 1 |
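
# Illustrative sketch of the chunking trick used by remove_long_sequences above: split an
# over-long token sequence into windows of max_len - 2 and re-attach the boundary ids.
# The cls_id/sep_id defaults here are hypothetical, not the real special-token ids.
def split_long_sequence(seq, max_len, cls_id=0, sep_id=1):
    chunks = []
    for start in range(0, len(seq), max_len - 2):
        sub = list(seq[start : start + max_len - 2])
        if sub[0] != cls_id:
            sub.insert(0, cls_id)  # re-attach the leading boundary token
        if sub[-1] != sep_id:
            sub.append(sep_id)     # re-attach the trailing boundary token
        assert len(sub) <= max_len
        chunks.append(sub)
    return chunks

assert split_long_sequence([0, 5, 6, 7, 8, 1], max_len=5) == [[0, 5, 6, 1], [0, 7, 8, 1]]
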
'''simple docstring'''
def _snake_case ( lowercase ) -> list:
if any(not isinstance(lowercase , lowercase ) or x < 0 for x in sequence ):
raise TypeError("""Sequence must be list of non-negative integers""" )
for _ in range(len(lowercase ) ):
for i, (rod_upper, rod_lower) in enumerate(zip(lowercase , sequence[1:] ) ):
if rod_upper > rod_lower:
sequence[i] -= rod_upper - rod_lower
sequence[i + 1] += rod_upper - rod_lower
return sequence
if __name__ == "__main__":
assert bead_sort([5, 4, 3, 2, 1]) == [1, 2, 3, 4, 5]
assert bead_sort([7, 9, 4, 3, 5]) == [3, 4, 5, 7, 9] | 697 |
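
# A quick extra sanity check (a sketch, assuming the bead_sort defined above is in scope):
# the result must agree with the built-in sorted() on random non-negative integers.
import random

_sample = [random.randrange(100) for _ in range(20)]
assert bead_sort(list(_sample)) == sorted(_sample)
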
'''simple docstring'''
from pathlib import PurePosixPath
from typing import Optional
import fsspec
from fsspec import AbstractFileSystem
from huggingface_hub.hf_api import DatasetInfo
from ..utils.file_utils import get_authentication_headers_for_url
from ..utils.hub import hf_hub_url
class SCREAMING_SNAKE_CASE__ ( __UpperCamelCase ):
lowercase__ = ""
lowercase__ = "hf-legacy" # "hf://"" is reserved for hffs
def __init__( self , __UpperCamelCase = None , __UpperCamelCase = None , **__UpperCamelCase , ):
'''simple docstring'''
super().__init__(self , **__UpperCamelCase )
__a : int = repo_info
__a : int = token
__a : Any = None
def __lowerCamelCase ( self ):
'''simple docstring'''
if self.dir_cache is None:
__a : Union[str, Any] = {}
for hf_file in self.repo_info.siblings:
# TODO(QL): add sizes
__a : List[str] = {
"""name""": hf_file.rfilename,
"""size""": None,
"""type""": """file""",
}
self.dir_cache.update(
{
str(__UpperCamelCase ): {"""name""": str(__UpperCamelCase ), """size""": None, """type""": """directory"""}
for d in list(PurePosixPath(hf_file.rfilename ).parents )[:-1]
} )
def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase = "rb" , **__UpperCamelCase , ):
'''simple docstring'''
if not isinstance(self.repo_info , __UpperCamelCase ):
raise NotImplementedError(f"""Open is only implemented for dataset repositories, but got {self.repo_info}""" )
__a : Any = hf_hub_url(self.repo_info.id , __UpperCamelCase , revision=self.repo_info.sha )
return fsspec.open(
__UpperCamelCase , mode=__UpperCamelCase , headers=get_authentication_headers_for_url(__UpperCamelCase , use_auth_token=self.token ) , client_kwargs={"""trust_env""": True} , ).open()
def __lowerCamelCase ( self , __UpperCamelCase , **__UpperCamelCase ):
'''simple docstring'''
self._get_dirs()
__a : str = self._strip_protocol(__UpperCamelCase )
if path in self.dir_cache:
return self.dir_cache[path]
else:
raise FileNotFoundError(__UpperCamelCase )
def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase=False , **__UpperCamelCase ):
'''simple docstring'''
self._get_dirs()
__a : int = PurePosixPath(path.strip("""/""" ) )
__a : List[str] = {}
for p, f in self.dir_cache.items():
__a : str = PurePosixPath(p.strip("""/""" ) )
__a : Optional[int] = p.parent
if root == path:
__a : List[str] = f
__a : str = list(paths.values() )
if detail:
return out
else:
return sorted(f["""name"""] for f in out ) | 697 | 1 |
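
# Illustrative sketch of the directory-cache trick above: directory entries are derived
# from a flat list of file paths via PurePosixPath(...).parents. The paths are hypothetical.
from pathlib import PurePosixPath

_files = ["data/train/part-0.parquet", "data/valid/part-0.parquet", "README.md"]
_cache = {}
for _rfilename in _files:
    _cache[_rfilename] = {"name": _rfilename, "size": None, "type": "file"}
    for _d in list(PurePosixPath(_rfilename).parents)[:-1]:  # [:-1] drops the root "." entry
        _cache[str(_d)] = {"name": str(_d), "size": None, "type": "directory"}

assert _cache["data/train"]["type"] == "directory"
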
'''simple docstring'''
import json
import os
from pathlib import Path
import pytest
from datasets.download.download_config import DownloadConfig
from datasets.download.download_manager import DownloadManager
from datasets.utils.file_utils import hash_url_to_filename
__SCREAMING_SNAKE_CASE : Dict = 'http://www.mocksite.com/file1.txt'
__SCREAMING_SNAKE_CASE : Optional[int] = '"text": ["foo", "foo"]'
__SCREAMING_SNAKE_CASE : Optional[Any] = '6d8ce9aa78a471c7477201efbeabd3bb01ac2e7d100a6dc024ba1608361f90a8'
class SCREAMING_SNAKE_CASE__ :
lowercase__ = 2_00
lowercase__ = {"Content-Length": "100"}
lowercase__ = {}
def __lowerCamelCase ( self , **__UpperCamelCase ):
'''simple docstring'''
return [bytes(__UpperCamelCase , """utf-8""" )]
def _snake_case ( *lowercase , **lowercase ) -> Any:
return MockResponse()
@pytest.mark.parametrize("""urls_type""" , [str, list, dict] )
def _snake_case ( lowercase , lowercase , lowercase ) -> str:
import requests
monkeypatch.setattr(lowercase , """request""" , lowercase )
__a : Tuple = URL
if issubclass(lowercase , lowercase ):
__a : str = url
elif issubclass(lowercase , lowercase ):
__a : List[str] = [url]
elif issubclass(lowercase , lowercase ):
__a : str = {"""train""": url}
__a : List[Any] = """dummy"""
__a : int = """downloads"""
__a : Optional[int] = tmp_path
__a : Optional[int] = DownloadConfig(
cache_dir=os.path.join(lowercase , lowercase ) , use_etag=lowercase , )
__a : int = DownloadManager(dataset_name=lowercase , download_config=lowercase )
__a : Optional[int] = dl_manager.download(lowercase )
__a : Optional[int] = urls
for downloaded_paths in [downloaded_paths]:
if isinstance(lowercase , lowercase ):
__a : Tuple = [downloaded_paths]
__a : str = [urls]
elif isinstance(lowercase , lowercase ):
assert "train" in downloaded_paths.keys()
__a : List[str] = downloaded_paths.values()
__a : List[str] = urls.values()
assert downloaded_paths
for downloaded_path, input_url in zip(lowercase , lowercase ):
assert downloaded_path == dl_manager.downloaded_paths[input_url]
__a : Union[str, Any] = Path(lowercase )
__a : int = downloaded_path.parts
assert parts[-1] == HASH
assert parts[-2] == cache_subdir
assert downloaded_path.exists()
__a : List[str] = downloaded_path.read_text()
assert content == CONTENT
__a : Union[str, Any] = downloaded_path.with_suffix(""".json""" )
assert metadata_downloaded_path.exists()
__a : Union[str, Any] = json.loads(metadata_downloaded_path.read_text() )
assert metadata_content == {"url": URL, "etag": None}
@pytest.mark.parametrize("""paths_type""" , [str, list, dict] )
def _snake_case ( lowercase , lowercase , lowercase ) -> Tuple:
__a : str = str(lowercase )
if issubclass(lowercase , lowercase ):
__a : str = filename
elif issubclass(lowercase , lowercase ):
__a : Any = [filename]
elif issubclass(lowercase , lowercase ):
__a : Dict = {"""train""": filename}
__a : Union[str, Any] = """dummy"""
__a : List[str] = xz_file.parent
__a : int = """extracted"""
__a : Optional[int] = DownloadConfig(
cache_dir=lowercase , use_etag=lowercase , )
__a : Tuple = DownloadManager(dataset_name=lowercase , download_config=lowercase )
__a : Tuple = dl_manager.extract(lowercase )
__a : Any = paths
for extracted_paths in [extracted_paths]:
if isinstance(lowercase , lowercase ):
__a : Dict = [extracted_paths]
__a : Any = [paths]
elif isinstance(lowercase , lowercase ):
assert "train" in extracted_paths.keys()
__a : List[Any] = extracted_paths.values()
__a : Tuple = paths.values()
assert extracted_paths
for extracted_path, input_path in zip(lowercase , lowercase ):
assert extracted_path == dl_manager.extracted_paths[input_path]
__a : List[str] = Path(lowercase )
__a : Any = extracted_path.parts
assert parts[-1] == hash_url_to_filename(lowercase , etag=lowercase )
assert parts[-2] == extracted_subdir
assert extracted_path.exists()
__a : Optional[Any] = extracted_path.read_text()
__a : Any = text_file.read_text()
assert extracted_file_content == expected_file_content
def _snake_case ( lowercase , lowercase ) -> List[Any]:
assert path.endswith(""".jsonl""" )
for num_items, line in enumerate(lowercase , start=1 ):
__a : Tuple = json.loads(line.decode("""utf-8""" ) )
assert item.keys() == {"col_1", "col_2", "col_3"}
assert num_items == 4
@pytest.mark.parametrize("""archive_jsonl""" , ["""tar_jsonl_path""", """zip_jsonl_path"""] )
def _snake_case ( lowercase , lowercase ) -> Optional[int]:
__a : str = request.getfixturevalue(lowercase )
__a : Optional[int] = DownloadManager()
for num_jsonl, (path, file) in enumerate(dl_manager.iter_archive(lowercase ) , start=1 ):
_test_jsonl(lowercase , lowercase )
assert num_jsonl == 2
@pytest.mark.parametrize("""archive_nested_jsonl""" , ["""tar_nested_jsonl_path""", """zip_nested_jsonl_path"""] )
def _snake_case ( lowercase , lowercase ) -> List[Any]:
__a : List[str] = request.getfixturevalue(lowercase )
__a : Optional[Any] = DownloadManager()
for num_tar, (path, file) in enumerate(dl_manager.iter_archive(lowercase ) , start=1 ):
for num_jsonl, (subpath, subfile) in enumerate(dl_manager.iter_archive(lowercase ) , start=1 ):
_test_jsonl(lowercase , lowercase )
assert num_tar == 1
assert num_jsonl == 2
def _snake_case ( lowercase ) -> List[str]:
__a : Any = DownloadManager()
for num_file, file in enumerate(dl_manager.iter_files(lowercase ) , start=1 ):
assert os.path.basename(lowercase ) == ("test.txt" if num_file == 1 else "train.txt")
assert num_file == 2 | 697 |
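
# Illustrative sketch of the monkeypatch pattern used in the tests above: swap
# requests.request for a stub so nothing touches the network. The test name and the
# stub's contents are hypothetical.
import requests

class StubResponse:
    status_code = 200
    headers = {"Content-Length": "4"}
    cookies = {}

    def iter_content(self, **kwargs):
        return [b"data"]

def test_download_is_stubbed(monkeypatch):
    monkeypatch.setattr(requests, "request", lambda *args, **kwargs: StubResponse())
    response = requests.request("GET", "http://www.mocksite.com/file1.txt")
    assert list(response.iter_content()) == [b"data"]
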
'''simple docstring'''
import inspect
import unittest
from transformers import DPTConfig
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MODEL_MAPPING, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTModel
from transformers.models.dpt.modeling_dpt import DPT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class SCREAMING_SNAKE_CASE__ :
def __init__( self , __UpperCamelCase , __UpperCamelCase=2 , __UpperCamelCase=32 , __UpperCamelCase=16 , __UpperCamelCase=3 , __UpperCamelCase=True , __UpperCamelCase=True , __UpperCamelCase=32 , __UpperCamelCase=4 , __UpperCamelCase=[0, 1, 2, 3] , __UpperCamelCase=4 , __UpperCamelCase=37 , __UpperCamelCase="gelu" , __UpperCamelCase=0.1 , __UpperCamelCase=0.1 , __UpperCamelCase=0.0_2 , __UpperCamelCase=3 , __UpperCamelCase=[1, 384, 24, 24] , __UpperCamelCase=True , __UpperCamelCase=None , ):
'''simple docstring'''
__a : List[str] = parent
__a : Tuple = batch_size
__a : str = image_size
__a : int = patch_size
__a : Dict = num_channels
__a : int = is_training
__a : Dict = use_labels
__a : Union[str, Any] = hidden_size
__a : Dict = num_hidden_layers
__a : Dict = backbone_out_indices
__a : Optional[int] = num_attention_heads
__a : List[str] = intermediate_size
__a : Optional[Any] = hidden_act
__a : Dict = hidden_dropout_prob
__a : Tuple = attention_probs_dropout_prob
__a : Any = initializer_range
__a : Any = num_labels
__a : Optional[Any] = backbone_featmap_shape
__a : List[Any] = scope
__a : List[str] = is_hybrid
# sequence length of DPT = num_patches + 1 (we add 1 for the [CLS] token)
__a : Union[str, Any] = (image_size // patch_size) ** 2
__a : List[str] = num_patches + 1
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Dict = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__a : Union[str, Any] = None
if self.use_labels:
__a : str = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
__a : Tuple = self.get_config()
return config, pixel_values, labels
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : List[str] = {
"""global_padding""": """same""",
"""layer_type""": """bottleneck""",
"""depths""": [3, 4, 9],
"""out_features""": ["""stage1""", """stage2""", """stage3"""],
"""embedding_dynamic_padding""": True,
"""hidden_sizes""": [96, 192, 384, 768],
"""num_groups""": 2,
}
return DPTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , backbone_out_indices=self.backbone_out_indices , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__UpperCamelCase , initializer_range=self.initializer_range , is_hybrid=self.is_hybrid , backbone_config=__UpperCamelCase , backbone_featmap_shape=self.backbone_featmap_shape , )
def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ):
'''simple docstring'''
__a : Optional[Any] = DPTModel(config=__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
__a : List[str] = model(__UpperCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ):
'''simple docstring'''
__a : List[str] = self.num_labels
__a : Union[str, Any] = DPTForDepthEstimation(__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
__a : Tuple = model(__UpperCamelCase )
self.parent.assertEqual(result.predicted_depth.shape , (self.batch_size, self.image_size, self.image_size) )
def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ):
'''simple docstring'''
__a : Dict = self.num_labels
__a : Tuple = DPTForSemanticSegmentation(__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
__a : str = model(__UpperCamelCase , labels=__UpperCamelCase )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size, self.image_size) )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Optional[int] = self.prepare_config_and_inputs()
__a , __a , __a : Tuple = config_and_inputs
__a : List[str] = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class SCREAMING_SNAKE_CASE__ ( __UpperCamelCase , __UpperCamelCase , unittest.TestCase ):
lowercase__ = (DPTModel, DPTForDepthEstimation, DPTForSemanticSegmentation) if is_torch_available() else ()
lowercase__ = (
{
"depth-estimation": DPTForDepthEstimation,
"feature-extraction": DPTModel,
"image-segmentation": DPTForSemanticSegmentation,
}
if is_torch_available()
else {}
)
lowercase__ = False
lowercase__ = False
lowercase__ = False
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Optional[int] = DPTModelTester(self )
__a : List[Any] = ConfigTester(self , config_class=__UpperCamelCase , has_text_modality=__UpperCamelCase , hidden_size=37 )
def __lowerCamelCase ( self ):
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason="""DPT does not use inputs_embeds""" )
def __lowerCamelCase ( self ):
'''simple docstring'''
pass
def __lowerCamelCase ( self ):
'''simple docstring'''
__a , __a : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__a : str = model_class(__UpperCamelCase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
__a : Any = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__UpperCamelCase , nn.Linear ) )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a , __a : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__a : Any = model_class(__UpperCamelCase )
__a : List[str] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__a : int = [*signature.parameters.keys()]
__a : List[str] = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , __UpperCamelCase )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__UpperCamelCase )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_depth_estimation(*__UpperCamelCase )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*__UpperCamelCase )
def __lowerCamelCase ( self ):
'''simple docstring'''
for model_class in self.all_model_classes:
if model_class.__name__ == "DPTForDepthEstimation":
continue
__a , __a : Dict = self.model_tester.prepare_config_and_inputs_for_common()
__a : List[Any] = True
if model_class in get_values(__UpperCamelCase ):
continue
__a : str = model_class(__UpperCamelCase )
model.to(__UpperCamelCase )
model.train()
__a : Union[str, Any] = self._prepare_for_class(__UpperCamelCase , __UpperCamelCase , return_labels=__UpperCamelCase )
__a : List[Any] = model(**__UpperCamelCase ).loss
loss.backward()
def __lowerCamelCase ( self ):
'''simple docstring'''
for model_class in self.all_model_classes:
if model_class.__name__ == "DPTForDepthEstimation":
continue
__a , __a : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
__a : Any = False
__a : Dict = True
if model_class in get_values(__UpperCamelCase ) or not model_class.supports_gradient_checkpointing:
continue
__a : Any = model_class(__UpperCamelCase )
model.to(__UpperCamelCase )
model.gradient_checkpointing_enable()
model.train()
__a : List[str] = self._prepare_for_class(__UpperCamelCase , __UpperCamelCase , return_labels=__UpperCamelCase )
__a : Dict = model(**__UpperCamelCase ).loss
loss.backward()
def __lowerCamelCase ( self ):
'''simple docstring'''
__a , __a : Any = self.model_tester.prepare_config_and_inputs_for_common()
__a : Any = _config_zero_init(__UpperCamelCase )
for model_class in self.all_model_classes:
__a : Any = model_class(config=__UpperCamelCase )
# Skip the check for the backbone
__a : Optional[Any] = []
for name, module in model.named_modules():
if module.__class__.__name__ == "DPTViTHybridEmbeddings":
__a : Optional[int] = [f"""{name}.{key}""" for key in module.state_dict().keys()]
break
for name, param in model.named_parameters():
if param.requires_grad:
if name in backbone_params:
continue
self.assertIn(
((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=f"""Parameter {name} of model {model_class} seems not properly initialized""" , )
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def __lowerCamelCase ( self ):
'''simple docstring'''
pass
@slow
def __lowerCamelCase ( self ):
'''simple docstring'''
for model_name in DPT_PRETRAINED_MODEL_ARCHIVE_LIST[1:]:
__a : int = DPTModel.from_pretrained(__UpperCamelCase )
self.assertIsNotNone(__UpperCamelCase )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a , __a : int = self.model_tester.prepare_config_and_inputs_for_common()
__a : Optional[int] = """add"""
with self.assertRaises(__UpperCamelCase ):
__a : int = DPTForDepthEstimation(__UpperCamelCase )
def _snake_case ( ) -> Any:
__a : Dict = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
@slow
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : int = DPTImageProcessor.from_pretrained("""Intel/dpt-hybrid-midas""" )
__a : int = DPTForDepthEstimation.from_pretrained("""Intel/dpt-hybrid-midas""" ).to(__UpperCamelCase )
__a : Union[str, Any] = prepare_img()
__a : Any = image_processor(images=__UpperCamelCase , return_tensors="""pt""" ).to(__UpperCamelCase )
# forward pass
with torch.no_grad():
__a : Optional[Any] = model(**__UpperCamelCase )
__a : int = outputs.predicted_depth
# verify the predicted depth
__a : Any = torch.Size((1, 384, 384) )
self.assertEqual(predicted_depth.shape , __UpperCamelCase )
__a : int = torch.tensor(
[[[5.6_4_3_7, 5.6_1_4_6, 5.6_5_1_1], [5.4_3_7_1, 5.5_6_4_9, 5.5_9_5_8], [5.5_2_1_5, 5.5_1_8_4, 5.5_2_9_3]]] ).to(__UpperCamelCase )
self.assertTrue(torch.allclose(outputs.predicted_depth[:3, :3, :3] / 100 , __UpperCamelCase , atol=1E-4 ) ) | 697 | 1 |
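
# Worked example of the sequence-length arithmetic used by the tester above: a ViT-style
# encoder sees (image_size // patch_size) ** 2 patches plus one [CLS] token.
image_size, patch_size = 32, 16
num_patches = (image_size // patch_size) ** 2  # 4 patches
seq_length = num_patches + 1                   # +1 for the [CLS] token
assert (num_patches, seq_length) == (4, 5)
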
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
__SCREAMING_SNAKE_CASE : List[Any] = {
'configuration_clip': [
'CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP',
'CLIPConfig',
'CLIPOnnxConfig',
'CLIPTextConfig',
'CLIPVisionConfig',
],
'processing_clip': ['CLIPProcessor'],
'tokenization_clip': ['CLIPTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE : Optional[Any] = ['CLIPTokenizerFast']
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE : int = ['CLIPFeatureExtractor']
__SCREAMING_SNAKE_CASE : int = ['CLIPImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE : Tuple = [
'CLIP_PRETRAINED_MODEL_ARCHIVE_LIST',
'CLIPModel',
'CLIPPreTrainedModel',
'CLIPTextModel',
'CLIPTextModelWithProjection',
'CLIPVisionModel',
'CLIPVisionModelWithProjection',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE : Tuple = [
'TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFCLIPModel',
'TFCLIPPreTrainedModel',
'TFCLIPTextModel',
'TFCLIPVisionModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE : Any = [
'FlaxCLIPModel',
'FlaxCLIPPreTrainedModel',
'FlaxCLIPTextModel',
'FlaxCLIPTextPreTrainedModel',
'FlaxCLIPVisionModel',
'FlaxCLIPVisionPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_clip import (
CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPConfig,
CLIPOnnxConfig,
CLIPTextConfig,
CLIPVisionConfig,
)
from .processing_clip import CLIPProcessor
from .tokenization_clip import CLIPTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_clip_fast import CLIPTokenizerFast
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_clip import CLIPFeatureExtractor
from .image_processing_clip import CLIPImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clip import (
CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPModel,
CLIPPreTrainedModel,
CLIPTextModel,
CLIPTextModelWithProjection,
CLIPVisionModel,
CLIPVisionModelWithProjection,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_clip import (
TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCLIPModel,
TFCLIPPreTrainedModel,
TFCLIPTextModel,
TFCLIPVisionModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_clip import (
FlaxCLIPModel,
FlaxCLIPPreTrainedModel,
FlaxCLIPTextModel,
FlaxCLIPTextPreTrainedModel,
FlaxCLIPVisionModel,
FlaxCLIPVisionPreTrainedModel,
)
else:
import sys
__SCREAMING_SNAKE_CASE : Optional[int] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) | 697 |
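
# A minimal sketch of the lazy-import idea behind _LazyModule (an assumption-level
# illustration, not the actual transformers implementation): defer the real import
# until the first attribute access.
import importlib

class LazyModule:
    def __init__(self, name):
        self._name = name
        self._module = None

    def __getattr__(self, attr):
        if self._module is None:
            self._module = importlib.import_module(self._name)  # import on first use
        return getattr(self._module, attr)

json_mod = LazyModule("json")                   # nothing imported yet
assert json_mod.dumps({"a": 1}) == '{"a": 1}'   # the import happens here
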
'''simple docstring'''
import unittest
from .lib import (
Matrix,
Vector,
axpy,
square_zero_matrix,
unit_basis_vector,
zero_vector,
)
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Dict = Vector([1, 2, 3] )
self.assertEqual(x.component(0 ) , 1 )
self.assertEqual(x.component(2 ) , 3 )
__a : Optional[int] = Vector()
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Any = Vector([0, 0, 0, 0, 0, 1] )
self.assertEqual(str(__UpperCamelCase ) , """(0,0,0,0,0,1)""" )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Tuple = Vector([1, 2, 3, 4] )
self.assertEqual(len(__UpperCamelCase ) , 4 )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Optional[Any] = Vector([1, 2] )
__a : List[str] = Vector([1, 2, 3, 4, 5] )
__a : Optional[int] = Vector([0, 0, 0, 0, 0, 0, 0, 0, 0, 0] )
__a : Dict = Vector([1, -1, 1, -1, 2, -3, 4, -5] )
self.assertAlmostEqual(x.euclidean_length() , 2.2_3_6 , 3 )
self.assertAlmostEqual(y.euclidean_length() , 7.4_1_6 , 3 )
self.assertEqual(z.euclidean_length() , 0 )
self.assertAlmostEqual(w.euclidean_length() , 7.6_1_6 , 3 )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Dict = Vector([1, 2, 3] )
__a : Union[str, Any] = Vector([1, 1, 1] )
self.assertEqual((x + y).component(0 ) , 2 )
self.assertEqual((x + y).component(1 ) , 3 )
self.assertEqual((x + y).component(2 ) , 4 )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : List[str] = Vector([1, 2, 3] )
__a : Any = Vector([1, 1, 1] )
self.assertEqual((x - y).component(0 ) , 0 )
self.assertEqual((x - y).component(1 ) , 1 )
self.assertEqual((x - y).component(2 ) , 2 )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Tuple = Vector([1, 2, 3] )
__a : Optional[Any] = Vector([2, -1, 4] ) # for test of dot product
__a : Union[str, Any] = Vector([1, -2, -1] )
self.assertEqual(str(x * 3.0 ) , """(3.0,6.0,9.0)""" )
self.assertEqual((a * b) , 0 )
def __lowerCamelCase ( self ):
'''simple docstring'''
self.assertEqual(str(zero_vector(10 ) ).count("""0""" ) , 10 )
def __lowerCamelCase ( self ):
'''simple docstring'''
self.assertEqual(str(unit_basis_vector(3 , 1 ) ) , """(0,1,0)""" )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Dict = Vector([1, 2, 3] )
__a : Optional[int] = Vector([1, 0, 1] )
self.assertEqual(str(axpy(2 , __UpperCamelCase , __UpperCamelCase ) ) , """(3,4,7)""" )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : int = Vector([1, 0, 0, 0, 0, 0] )
__a : Any = x.copy()
self.assertEqual(str(__UpperCamelCase ) , str(__UpperCamelCase ) )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Union[str, Any] = Vector([1, 0, 0] )
x.change_component(0 , 0 )
x.change_component(1 , 1 )
self.assertEqual(str(__UpperCamelCase ) , """(0,1,0)""" )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Dict = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
self.assertEqual("""|1,2,3|\n|2,4,5|\n|6,7,8|\n""" , str(__UpperCamelCase ) )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : List[Any] = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
__a : List[Any] = [[-3, -14, -10], [-5, -10, -5], [-2, -1, 0]]
for x in range(a.height() ):
for y in range(a.width() ):
self.assertEqual(minors[x][y] , a.minor(__UpperCamelCase , __UpperCamelCase ) )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : List[Any] = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
__a : Any = [[-3, 14, -10], [5, -10, 5], [-2, 1, 0]]
for x in range(a.height() ):
for y in range(a.width() ):
self.assertEqual(cofactors[x][y] , a.cofactor(__UpperCamelCase , __UpperCamelCase ) )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : List[Any] = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
self.assertEqual(-5 , a.determinant() )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Any = Matrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]] , 3 , 3 )
__a : List[Any] = Vector([1, 2, 3] )
self.assertEqual("""(14,32,50)""" , str(a * x ) )
self.assertEqual("""|2,4,6|\n|8,10,12|\n|14,16,18|\n""" , str(a * 2 ) )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : List[str] = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
a.change_component(0 , 2 , 5 )
self.assertEqual("""|1,2,5|\n|2,4,5|\n|6,7,8|\n""" , str(__UpperCamelCase ) )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Tuple = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
        self.assertAlmostEqual(7 , a.component(2 , 1 ) , delta=0.0_1 )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Dict = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
__a : Union[str, Any] = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]] , 3 , 3 )
self.assertEqual("""|2,4,10|\n|4,8,10|\n|12,14,18|\n""" , str(a + b ) )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : List[Any] = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
__a : List[str] = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]] , 3 , 3 )
self.assertEqual("""|0,0,-4|\n|0,0,0|\n|0,0,-2|\n""" , str(a - b ) )
def __lowerCamelCase ( self ):
'''simple docstring'''
self.assertEqual(
"""|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n""" , str(square_zero_matrix(5 ) ) , )
if __name__ == "__main__":
unittest.main() | 697 | 1 |
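
# Illustrative sketch of the axpy operation exercised above (compute a*x + y), written
# with plain lists so it runs without the local `lib` module.
def axpy_lists(a, x, y):
    assert len(x) == len(y)
    return [a * xi + yi for xi, yi in zip(x, y)]

assert axpy_lists(2, [1, 2, 3], [1, 0, 1]) == [3, 4, 7]  # matches the "(3,4,7)" case above
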
'''simple docstring'''
import numpy as np
import torch
from imwatermark import WatermarkEncoder
# Copied from https://github.com/Stability-AI/generative-models/blob/613af104c6b85184091d42d374fef420eddb356d/scripts/demo/streamlit_helpers.py#L66
__SCREAMING_SNAKE_CASE : List[Any] = 0B10_11_00_11_11_10_11_00_10_01_00_00_01_11_10_11_10_11_00_01_10_01_11_10
# bin(x)[2:] gives bits of x as str, use int to convert them to 0/1
__SCREAMING_SNAKE_CASE : List[str] = [int(bit) for bit in bin(WATERMARK_MESSAGE)[2:]]
class SCREAMING_SNAKE_CASE__ :
def __init__( self ):
'''simple docstring'''
__a : int = WATERMARK_BITS
__a : str = WatermarkEncoder()
self.encoder.set_watermark("""bits""" , self.watermark )
def __lowerCamelCase ( self , __UpperCamelCase ):
'''simple docstring'''
if images.shape[-1] < 256:
return images
__a : Union[str, Any] = (255 * (images / 2 + 0.5)).cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
__a : Tuple = [self.encoder.encode(__UpperCamelCase , """dwtDct""" ) for image in images]
__a : Optional[Any] = torch.from_numpy(np.array(__UpperCamelCase ) ).permute(0 , 3 , 1 , 2 )
__a : Union[str, Any] = torch.clamp(2 * (images / 255 - 0.5) , min=-1.0 , max=1.0 )
return images | 697 |
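
# Worked example of the value-range round trip used above: [-1, 1] float images are
# rescaled to [0, 255] for the watermark encoder and then clamped back to [-1, 1].
import numpy as np

_images = np.array([-1.0, 0.0, 1.0])
_encoded_range = 255 * (_images / 2 + 0.5)                    # [0.0, 127.5, 255.0]
_back = np.clip(2 * (_encoded_range / 255 - 0.5), -1.0, 1.0)  # [-1.0, 0.0, 1.0]
assert np.allclose(_back, _images)
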
'''simple docstring'''
import os
from itertools import chain
from random import randrange, shuffle
import pytest
from .sola import PokerHand
__SCREAMING_SNAKE_CASE : List[str] = (
'4S 3H 2C 7S 5H',
'9D 8H 2C 6S 7H',
'2D 6D 9D TH 7D',
'TC 8C 2S JH 6C',
'JH 8S TH AH QH',
'TS KS 5S 9S AC',
'KD 6S 9D TH AD',
'KS 8D 4D 9S 4S', # pair
'8C 4S KH JS 4D', # pair
'QH 8H KD JH 8S', # pair
'KC 4H KS 2H 8D', # pair
'KD 4S KC 3H 8S', # pair
'AH 8S AS KC JH', # pair
'3H 4C 4H 3S 2H', # 2 pairs
'5S 5D 2C KH KH', # 2 pairs
'3C KH 5D 5S KH', # 2 pairs
'AS 3C KH AD KH', # 2 pairs
'7C 7S 3S 7H 5S', # 3 of a kind
'7C 7S KH 2H 7H', # 3 of a kind
'AC KH QH AH AS', # 3 of a kind
'2H 4D 3C AS 5S', # straight (low ace)
'3C 5C 4C 2C 6H', # straight
'6S 8S 7S 5H 9H', # straight
'JS QS 9H TS KH', # straight
'QC KH TS JS AH', # straight (high ace)
'8C 9C 5C 3C TC', # flush
'3S 8S 9S 5S KS', # flush
'4C 5C 9C 8C KC', # flush
'JH 8H AH KH QH', # flush
'3D 2H 3H 2C 2D', # full house
'2H 2C 3S 3H 3D', # full house
'KH KC 3S 3H 3D', # full house
'JC 6H JS JD JH', # 4 of a kind
'JC 7H JS JD JH', # 4 of a kind
'JC KH JS JD JH', # 4 of a kind
'2S AS 4S 5S 3S', # straight flush (low ace)
'2D 6D 3D 4D 5D', # straight flush
'5C 6C 3C 7C 4C', # straight flush
'JH 9H TH KH QH', # straight flush
'JH AH TH KH QH', # royal flush (high ace straight flush)
)
__SCREAMING_SNAKE_CASE : Optional[Any] = (
('2H 3H 4H 5H 6H', 'KS AS TS QS JS', 'Loss'),
('2H 3H 4H 5H 6H', 'AS AD AC AH JD', 'Win'),
('AS AH 2H AD AC', 'JS JD JC JH 3D', 'Win'),
('2S AH 2H AS AC', 'JS JD JC JH AD', 'Loss'),
('2S AH 2H AS AC', '2H 3H 5H 6H 7H', 'Win'),
('AS 3S 4S 8S 2S', '2H 3H 5H 6H 7H', 'Win'),
('2H 3H 5H 6H 7H', '2S 3H 4H 5S 6C', 'Win'),
('2S 3H 4H 5S 6C', '3D 4C 5H 6H 2S', 'Tie'),
('2S 3H 4H 5S 6C', 'AH AC 5H 6H AS', 'Win'),
('2S 2H 4H 5S 4C', 'AH AC 5H 6H AS', 'Loss'),
('2S 2H 4H 5S 4C', 'AH AC 5H 6H 7S', 'Win'),
('6S AD 7H 4S AS', 'AH AC 5H 6H 7S', 'Loss'),
('2S AH 4H 5S KC', 'AH AC 5H 6H 7S', 'Loss'),
('2S 3H 6H 7S 9C', '7H 3C TH 6H 9S', 'Loss'),
('4S 5H 6H TS AC', '3S 5H 6H TS AC', 'Win'),
('2S AH 4H 5S 6C', 'AD 4C 5H 6H 2C', 'Tie'),
('AS AH 3H AD AC', 'AS AH 2H AD AC', 'Win'),
('AH AC 5H 5C QS', 'AH AC 5H 5C KS', 'Loss'),
('AH AC 5H 5C QS', 'KH KC 5H 5C QS', 'Win'),
('7C 7S KH 2H 7H', '3C 3S AH 2H 3H', 'Win'),
('3C 3S AH 2H 3H', '7C 7S KH 2H 7H', 'Loss'),
('6H 5H 4H 3H 2H', '5H 4H 3H 2H AH', 'Win'),
('5H 4H 3H 2H AH', '5H 4H 3H 2H AH', 'Tie'),
('5H 4H 3H 2H AH', '6H 5H 4H 3H 2H', 'Loss'),
('AH AD KS KC AC', 'AH KD KH AC KC', 'Win'),
('2H 4D 3C AS 5S', '2H 4D 3C 6S 5S', 'Loss'),
('2H 3S 3C 3H 2S', '3S 3C 2S 2H 2D', 'Win'),
('4D 6D 5D 2D JH', '3S 8S 3H TC KH', 'Loss'),
('4S 6C 8S 3S 7S', 'AD KS 2D 7D 7C', 'Loss'),
('6S 4C 7H 8C 3H', '5H JC AH 9D 9C', 'Loss'),
('9D 9H JH TC QH', '3C 2S JS 5C 7H', 'Win'),
('2H TC 8S AD 9S', '4H TS 7H 2C 5C', 'Win'),
('9D 3S 2C 7S 7C', 'JC TD 3C TC 9H', 'Loss'),
)
__SCREAMING_SNAKE_CASE : Tuple = (
('2H 3H 4H 5H 6H', True),
('AS AH 2H AD AC', False),
('2H 3H 5H 6H 7H', True),
('KS AS TS QS JS', True),
('8H 9H QS JS TH', False),
('AS 3S 4S 8S 2S', True),
)
__SCREAMING_SNAKE_CASE : Dict = (
('2H 3H 4H 5H 6H', True),
('AS AH 2H AD AC', False),
('2H 3H 5H 6H 7H', False),
('KS AS TS QS JS', True),
('8H 9H QS JS TH', True),
)
__SCREAMING_SNAKE_CASE : Optional[int] = (
('2H 4D 3C AS 5S', True, [5, 4, 3, 2, 14]),
('2H 5D 3C AS 5S', False, [14, 5, 5, 3, 2]),
('JH QD KC AS TS', False, [14, 13, 12, 11, 10]),
('9D 3S 2C 7S 7C', False, [9, 7, 7, 3, 2]),
)
__SCREAMING_SNAKE_CASE : int = (
('JH AH TH KH QH', 0),
('JH 9H TH KH QH', 0),
('JC KH JS JD JH', 7),
('KH KC 3S 3H 3D', 6),
('8C 9C 5C 3C TC', 0),
('JS QS 9H TS KH', 0),
('7C 7S KH 2H 7H', 3),
('3C KH 5D 5S KH', 2),
('QH 8H KD JH 8S', 1),
('2D 6D 9D TH 7D', 0),
)
__SCREAMING_SNAKE_CASE : int = (
('JH AH TH KH QH', 23),
('JH 9H TH KH QH', 22),
('JC KH JS JD JH', 21),
('KH KC 3S 3H 3D', 20),
('8C 9C 5C 3C TC', 19),
('JS QS 9H TS KH', 18),
('7C 7S KH 2H 7H', 17),
('3C KH 5D 5S KH', 16),
('QH 8H KD JH 8S', 15),
('2D 6D 9D TH 7D', 14),
)
def _snake_case ( ) -> List[str]:
__a , __a : List[Any] = randrange(len(lowercase ) ), randrange(len(lowercase ) )
__a : int = ["""Loss""", """Tie""", """Win"""][(play >= oppo) + (play > oppo)]
__a , __a : int = SORTED_HANDS[play], SORTED_HANDS[oppo]
return hand, other, expected
def _snake_case ( lowercase = 1_0_0 ) -> Any:
return (generate_random_hand() for _ in range(lowercase ))
@pytest.mark.parametrize("""hand, expected""" , lowercase )
def _snake_case ( lowercase , lowercase ) -> int:
assert PokerHand(lowercase )._is_flush() == expected
@pytest.mark.parametrize("""hand, expected""" , lowercase )
def _snake_case ( lowercase , lowercase ) -> Any:
assert PokerHand(lowercase )._is_straight() == expected
@pytest.mark.parametrize("""hand, expected, card_values""" , lowercase )
def _snake_case ( lowercase , lowercase , lowercase ) -> List[str]:
__a : Union[str, Any] = PokerHand(lowercase )
assert player._is_five_high_straight() == expected
assert player._card_values == card_values
@pytest.mark.parametrize("""hand, expected""" , lowercase )
def _snake_case ( lowercase , lowercase ) -> Optional[int]:
assert PokerHand(lowercase )._is_same_kind() == expected
@pytest.mark.parametrize("""hand, expected""" , lowercase )
def _snake_case ( lowercase , lowercase ) -> Union[str, Any]:
assert PokerHand(lowercase )._hand_type == expected
@pytest.mark.parametrize("""hand, other, expected""" , lowercase )
def _snake_case ( lowercase , lowercase , lowercase ) -> Optional[int]:
assert PokerHand(lowercase ).compare_with(PokerHand(lowercase ) ) == expected
@pytest.mark.parametrize("""hand, other, expected""" , generate_random_hands() )
def _snake_case ( lowercase , lowercase , lowercase ) -> int:
assert PokerHand(lowercase ).compare_with(PokerHand(lowercase ) ) == expected
def _snake_case ( ) -> Union[str, Any]:
__a : Tuple = [PokerHand(lowercase ) for hand in SORTED_HANDS]
__a : Optional[int] = poker_hands.copy()
shuffle(lowercase )
__a : List[str] = chain(sorted(lowercase ) )
for index, hand in enumerate(lowercase ):
assert hand == poker_hands[index]
def _snake_case ( ) -> List[str]:
# Test that five high straights are compared correctly.
__a : Optional[int] = [PokerHand("""2D AC 3H 4H 5S""" ), PokerHand("""2S 3H 4H 5S 6C""" )]
pokerhands.sort(reverse=lowercase )
assert pokerhands[0].__str__() == "2S 3H 4H 5S 6C"
def _snake_case ( ) -> List[str]:
# Multiple calls to five_high_straight function should still return True
# and shouldn't mutate the list in every call other than the first.
__a : Dict = PokerHand("""2C 4S AS 3D 5C""" )
__a : Dict = True
__a : Optional[int] = [5, 4, 3, 2, 1_4]
for _ in range(1_0 ):
assert pokerhand._is_five_high_straight() == expected
assert pokerhand._card_values == expected_card_values
def _snake_case ( ) -> Dict:
# Problem number 54 from Project Euler
# Testing from poker_hands.txt file
__a : Tuple = 0
__a : int = os.path.abspath(os.path.dirname(lowercase ) )
__a : Union[str, Any] = os.path.join(lowercase , """poker_hands.txt""" )
with open(lowercase ) as file_hand:
for line in file_hand:
__a : Union[str, Any] = line[:1_4].strip()
__a : Optional[Any] = line[1_5:].strip()
__a , __a : List[str] = PokerHand(lowercase ), PokerHand(lowercase )
__a : str = player.compare_with(lowercase )
if output == "Win":
answer += 1
assert answer == 3_7_6 | 697 | 1 |
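
# Minimal sketch of the comparison idea behind PokerHand.compare_with: rank each hand as a
# tuple (hand_type, tie-break card values) and let Python's tuple ordering decide. The rank
# tuples below are hypothetical, not PokerHand's internal representation.
def _outcome(mine, theirs):
    return ["Loss", "Tie", "Win"][(mine >= theirs) + (mine > theirs)]

_pair_of_kings = (1, [13, 13, 8, 4, 2])
_pair_of_queens = (1, [12, 12, 9, 5, 3])
assert _outcome(_pair_of_kings, _pair_of_queens) == "Win"
assert _outcome(_pair_of_queens, _pair_of_queens) == "Tie"
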
'''simple docstring'''
from pathlib import PurePosixPath
from typing import Optional
import fsspec
from fsspec import AbstractFileSystem
from huggingface_hub.hf_api import DatasetInfo
from ..utils.file_utils import get_authentication_headers_for_url
from ..utils.hub import hf_hub_url
class SCREAMING_SNAKE_CASE__ ( __UpperCamelCase ):
lowercase__ = ""
lowercase__ = "hf-legacy" # "hf://"" is reserved for hffs
def __init__( self , __UpperCamelCase = None , __UpperCamelCase = None , **__UpperCamelCase , ):
'''simple docstring'''
super().__init__(self , **__UpperCamelCase )
__a : int = repo_info
__a : int = token
__a : Any = None
def __lowerCamelCase ( self ):
'''simple docstring'''
if self.dir_cache is None:
__a : Union[str, Any] = {}
for hf_file in self.repo_info.siblings:
# TODO(QL): add sizes
__a : List[str] = {
"""name""": hf_file.rfilename,
"""size""": None,
"""type""": """file""",
}
self.dir_cache.update(
{
str(__UpperCamelCase ): {"""name""": str(__UpperCamelCase ), """size""": None, """type""": """directory"""}
for d in list(PurePosixPath(hf_file.rfilename ).parents )[:-1]
} )
def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase = "rb" , **__UpperCamelCase , ):
'''simple docstring'''
if not isinstance(self.repo_info , __UpperCamelCase ):
raise NotImplementedError(f"""Open is only implemented for dataset repositories, but got {self.repo_info}""" )
__a : Any = hf_hub_url(self.repo_info.id , __UpperCamelCase , revision=self.repo_info.sha )
return fsspec.open(
__UpperCamelCase , mode=__UpperCamelCase , headers=get_authentication_headers_for_url(__UpperCamelCase , use_auth_token=self.token ) , client_kwargs={"""trust_env""": True} , ).open()
def __lowerCamelCase ( self , __UpperCamelCase , **__UpperCamelCase ):
'''simple docstring'''
self._get_dirs()
__a : str = self._strip_protocol(__UpperCamelCase )
if path in self.dir_cache:
return self.dir_cache[path]
else:
raise FileNotFoundError(__UpperCamelCase )
def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase=False , **__UpperCamelCase ):
'''simple docstring'''
self._get_dirs()
__a : int = PurePosixPath(path.strip("""/""" ) )
__a : List[str] = {}
for p, f in self.dir_cache.items():
__a : str = PurePosixPath(p.strip("""/""" ) )
__a : Optional[int] = p.parent
if root == path:
__a : List[str] = f
__a : str = list(paths.values() )
if detail:
return out
else:
return sorted(f["""name"""] for f in out ) | 697 |
'''simple docstring'''
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__SCREAMING_SNAKE_CASE : Optional[Any] = {'configuration_focalnet': ['FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP', 'FocalNetConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE : List[Any] = [
'FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST',
'FocalNetForImageClassification',
'FocalNetForMaskedImageModeling',
'FocalNetBackbone',
'FocalNetModel',
'FocalNetPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_focalnet import (
FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
FocalNetPreTrainedModel,
)
else:
import sys
__SCREAMING_SNAKE_CASE : Optional[int] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) | 697 | 1 |
'''simple docstring'''
import argparse
import json
import pickle
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import MaskFormerConfig, MaskFormerForInstanceSegmentation, MaskFormerImageProcessor, SwinConfig
from transformers.utils import logging
logging.set_verbosity_info()
__SCREAMING_SNAKE_CASE : int = logging.get_logger(__name__)
def _snake_case ( lowercase ) -> Optional[int]:
__a : Optional[int] = SwinConfig.from_pretrained(
"""microsoft/swin-tiny-patch4-window7-224""" , out_features=["""stage1""", """stage2""", """stage3""", """stage4"""] )
__a : List[Any] = MaskFormerConfig(backbone_config=lowercase )
__a : Any = """huggingface/label-files"""
if "ade20k-full" in model_name:
# this should be ok
__a : Optional[int] = 8_4_7
__a : Optional[int] = """maskformer-ade20k-full-id2label.json"""
elif "ade" in model_name:
# this should be ok
__a : int = 1_5_0
__a : Optional[Any] = """ade20k-id2label.json"""
elif "coco-stuff" in model_name:
# this should be ok
__a : Optional[int] = 1_7_1
__a : Union[str, Any] = """maskformer-coco-stuff-id2label.json"""
elif "coco" in model_name:
# TODO
__a : str = 1_3_3
__a : Optional[int] = """coco-panoptic-id2label.json"""
elif "cityscapes" in model_name:
# this should be ok
__a : Optional[Any] = 1_9
__a : Optional[Any] = """cityscapes-id2label.json"""
elif "vistas" in model_name:
# this should be ok
__a : Optional[Any] = 6_5
__a : Union[str, Any] = """mapillary-vistas-id2label.json"""
__a : int = json.load(open(hf_hub_download(lowercase , lowercase , repo_type="""dataset""" ) , """r""" ) )
    __a : Any = {int(k ): v for k, v in idalabel.items()}
return config
def _snake_case ( lowercase ) -> List[Any]:
__a : Tuple = []
# stem
# fmt: off
rename_keys.append(("""backbone.patch_embed.proj.weight""", """model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.weight""") )
rename_keys.append(("""backbone.patch_embed.proj.bias""", """model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.bias""") )
rename_keys.append(("""backbone.patch_embed.norm.weight""", """model.pixel_level_module.encoder.model.embeddings.norm.weight""") )
rename_keys.append(("""backbone.patch_embed.norm.bias""", """model.pixel_level_module.encoder.model.embeddings.norm.bias""") )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.norm1.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.norm1.bias""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.attn.relative_position_bias_table""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.attn.relative_position_index""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.attn.proj.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.attn.proj.bias""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.norm2.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.norm2.bias""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.mlp.fc1.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.mlp.fc1.bias""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.mlp.fc2.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.weight""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.mlp.fc2.bias""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.bias""") )
if i < 3:
rename_keys.append((F"""backbone.layers.{i}.downsample.reduction.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.reduction.weight""") )
rename_keys.append((F"""backbone.layers.{i}.downsample.norm.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.weight""") )
rename_keys.append((F"""backbone.layers.{i}.downsample.norm.bias""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.bias""") )
rename_keys.append((F"""backbone.norm{i}.weight""", F"""model.pixel_level_module.encoder.hidden_states_norms.{i}.weight""") )
rename_keys.append((F"""backbone.norm{i}.bias""", F"""model.pixel_level_module.encoder.hidden_states_norms.{i}.bias""") )
# FPN
rename_keys.append(("""sem_seg_head.layer_4.weight""", """model.pixel_level_module.decoder.fpn.stem.0.weight""") )
rename_keys.append(("""sem_seg_head.layer_4.norm.weight""", """model.pixel_level_module.decoder.fpn.stem.1.weight""") )
rename_keys.append(("""sem_seg_head.layer_4.norm.bias""", """model.pixel_level_module.decoder.fpn.stem.1.bias""") )
for source_index, target_index in zip(range(3 , 0 , -1 ) , range(0 , 3 ) ):
rename_keys.append((F"""sem_seg_head.adapter_{source_index}.weight""", F"""model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.0.weight""") )
rename_keys.append((F"""sem_seg_head.adapter_{source_index}.norm.weight""", F"""model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.weight""") )
rename_keys.append((F"""sem_seg_head.adapter_{source_index}.norm.bias""", F"""model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.bias""") )
rename_keys.append((F"""sem_seg_head.layer_{source_index}.weight""", F"""model.pixel_level_module.decoder.fpn.layers.{target_index}.block.0.weight""") )
rename_keys.append((F"""sem_seg_head.layer_{source_index}.norm.weight""", F"""model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.weight""") )
rename_keys.append((F"""sem_seg_head.layer_{source_index}.norm.bias""", F"""model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.bias""") )
rename_keys.append(("""sem_seg_head.mask_features.weight""", """model.pixel_level_module.decoder.mask_projection.weight""") )
rename_keys.append(("""sem_seg_head.mask_features.bias""", """model.pixel_level_module.decoder.mask_projection.bias""") )
# Transformer decoder
for idx in range(config.decoder_config.decoder_layers ):
# self-attention out projection
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.weight""", F"""model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.bias""", F"""model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.bias""") )
# cross-attention out projection
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.weight""", F"""model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.bias""", F"""model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.bias""") )
# MLP 1
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.weight""", F"""model.transformer_module.decoder.layers.{idx}.fc1.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.bias""", F"""model.transformer_module.decoder.layers.{idx}.fc1.bias""") )
# MLP 2
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.weight""", F"""model.transformer_module.decoder.layers.{idx}.fc2.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.bias""", F"""model.transformer_module.decoder.layers.{idx}.fc2.bias""") )
# layernorm 1 (self-attention layernorm)
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.weight""", F"""model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.bias""", F"""model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.bias""") )
# layernorm 2 (cross-attention layernorm)
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.weight""", F"""model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.bias""", F"""model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.bias""") )
# layernorm 3 (final layernorm)
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.weight""", F"""model.transformer_module.decoder.layers.{idx}.final_layer_norm.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.bias""", F"""model.transformer_module.decoder.layers.{idx}.final_layer_norm.bias""") )
rename_keys.append(("""sem_seg_head.predictor.transformer.decoder.norm.weight""", """model.transformer_module.decoder.layernorm.weight""") )
rename_keys.append(("""sem_seg_head.predictor.transformer.decoder.norm.bias""", """model.transformer_module.decoder.layernorm.bias""") )
# heads on top
rename_keys.append(("""sem_seg_head.predictor.query_embed.weight""", """model.transformer_module.queries_embedder.weight""") )
rename_keys.append(("""sem_seg_head.predictor.input_proj.weight""", """model.transformer_module.input_projection.weight""") )
rename_keys.append(("""sem_seg_head.predictor.input_proj.bias""", """model.transformer_module.input_projection.bias""") )
rename_keys.append(("""sem_seg_head.predictor.class_embed.weight""", """class_predictor.weight""") )
rename_keys.append(("""sem_seg_head.predictor.class_embed.bias""", """class_predictor.bias""") )
for i in range(3 ):
rename_keys.append((F"""sem_seg_head.predictor.mask_embed.layers.{i}.weight""", F"""mask_embedder.{i}.0.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.mask_embed.layers.{i}.bias""", F"""mask_embedder.{i}.0.bias""") )
# fmt: on
return rename_keys
def _snake_case ( lowercase , lowercase , lowercase ) -> Union[str, Any]:
__a : Optional[Any] = dct.pop(lowercase )
__a : Tuple = val
def _snake_case ( lowercase , lowercase ) -> Tuple:
__a : Dict = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )]
for i in range(len(backbone_config.depths ) ):
__a : int = num_features[i]
for j in range(backbone_config.depths[i] ):
# fmt: off
# read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
__a : List[str] = state_dict.pop(F"""backbone.layers.{i}.blocks.{j}.attn.qkv.weight""" )
__a : int = state_dict.pop(F"""backbone.layers.{i}.blocks.{j}.attn.qkv.bias""" )
# next, add query, keys and values (in that order) to the state dict
__a : Optional[int] = in_proj_weight[:dim, :]
__a : List[str] = in_proj_bias[: dim]
__a : Union[str, Any] = in_proj_weight[
dim : dim * 2, :
]
__a : Tuple = in_proj_bias[
dim : dim * 2
]
__a : List[str] = in_proj_weight[
-dim :, :
]
__a : Dict = in_proj_bias[-dim :]
# fmt: on
def _snake_case ( lowercase , lowercase ) -> Dict:
# fmt: off
__a : str = config.decoder_config.hidden_size
for idx in range(config.decoder_config.decoder_layers ):
# read in weights + bias of self-attention input projection layer (in the original implementation, this is a single matrix + bias)
__a : Optional[int] = state_dict.pop(F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_weight""" )
__a : Optional[Any] = state_dict.pop(F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_bias""" )
# next, add query, keys and values (in that order) to the state dict
__a : List[str] = in_proj_weight[: hidden_size, :]
__a : Optional[Any] = in_proj_bias[: hidden_size]
__a : Tuple = in_proj_weight[hidden_size : hidden_size * 2, :]
__a : Dict = in_proj_bias[hidden_size : hidden_size * 2]
__a : List[str] = in_proj_weight[-hidden_size :, :]
__a : List[Any] = in_proj_bias[-hidden_size :]
# read in weights + bias of cross-attention input projection layer (in the original implementation, this is a single matrix + bias)
__a : Tuple = state_dict.pop(F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_weight""" )
__a : Union[str, Any] = state_dict.pop(F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_bias""" )
# next, add query, keys and values (in that order) to the state dict
__a : Dict = in_proj_weight[: hidden_size, :]
__a : List[Any] = in_proj_bias[: hidden_size]
__a : Dict = in_proj_weight[hidden_size : hidden_size * 2, :]
__a : str = in_proj_bias[hidden_size : hidden_size * 2]
__a : Tuple = in_proj_weight[-hidden_size :, :]
__a : Any = in_proj_bias[-hidden_size :]
# fmt: on
def _snake_case ( ) -> torch.Tensor:
__a : Dict = """http://images.cocodataset.org/val2017/000000039769.jpg"""
__a : str = Image.open(requests.get(lowercase , stream=lowercase ).raw )
return im
@torch.no_grad()
def _snake_case ( lowercase , lowercase , lowercase , lowercase = False ) -> str:
__a : Optional[Any] = get_maskformer_config(lowercase )
# load original state_dict
with open(lowercase , """rb""" ) as f:
__a : Dict = pickle.load(lowercase )
__a : Optional[Any] = data["""model"""]
# for name, param in state_dict.items():
# print(name, param.shape)
# rename keys
__a : List[str] = create_rename_keys(lowercase )
for src, dest in rename_keys:
rename_key(lowercase , lowercase , lowercase )
read_in_swin_q_k_v(lowercase , config.backbone_config )
read_in_decoder_q_k_v(lowercase , lowercase )
# update to torch tensors
for key, value in state_dict.items():
__a : Optional[int] = torch.from_numpy(lowercase )
# load 🤗 model
__a : List[str] = MaskFormerForInstanceSegmentation(lowercase )
model.eval()
for name, param in model.named_parameters():
print(lowercase , param.shape )
__a , __a : Union[str, Any] = model.load_state_dict(lowercase , strict=lowercase )
assert missing_keys == [
"model.pixel_level_module.encoder.model.layernorm.weight",
"model.pixel_level_module.encoder.model.layernorm.bias",
]
assert len(lowercase ) == 0, F"""Unexpected keys: {unexpected_keys}"""
# verify results
__a : List[str] = prepare_img()
if "vistas" in model_name:
__a : Optional[int] = 6_5
elif "cityscapes" in model_name:
__a : Dict = 6_5_5_3_5
else:
__a : Union[str, Any] = 2_5_5
__a : Dict = True if """ade""" in model_name else False
__a : Dict = MaskFormerImageProcessor(ignore_index=lowercase , reduce_labels=lowercase )
__a : List[str] = image_processor(lowercase , return_tensors="""pt""" )
__a : List[Any] = model(**lowercase )
print("""Logits:""" , outputs.class_queries_logits[0, :3, :3] )
if model_name == "maskformer-swin-tiny-ade":
__a : Dict = torch.tensor(
[[3.6_3_5_3, -4.4_7_7_0, -2.6_0_6_5], [0.5_0_8_1, -4.2_3_9_4, -3.5_3_4_3], [2.1_9_0_9, -5.0_3_5_3, -1.9_3_2_3]] )
assert torch.allclose(outputs.class_queries_logits[0, :3, :3] , lowercase , atol=1E-4 )
print("""Looks ok!""" )
if pytorch_dump_folder_path is not None:
print(F"""Saving model and image processor to {pytorch_dump_folder_path}""" )
Path(lowercase ).mkdir(exist_ok=lowercase )
model.save_pretrained(lowercase )
image_processor.save_pretrained(lowercase )
if push_to_hub:
print("""Pushing model and image processor to the hub...""" )
model.push_to_hub(F"""nielsr/{model_name}""" )
image_processor.push_to_hub(F"""nielsr/{model_name}""" )
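# Example invocation of this conversion script (hypothetical local paths):
#   python convert_maskformer_checkpoint.py \
#       --model_name maskformer-swin-tiny-ade \
#       --checkpoint_path /path/to/MaskFormer-Swin-tiny-ADE20k/model.pkl \
#       --pytorch_dump_folder_path /path/to/output \
#       --push_to_hub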
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE : str = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='maskformer-swin-tiny-ade',
type=str,
help='Name of the MaskFormer model you\'d like to convert',
)
parser.add_argument(
'--checkpoint_path',
default='/Users/nielsrogge/Documents/MaskFormer_checkpoints/MaskFormer-Swin-tiny-ADE20k/model.pkl',
type=str,
help='Path to the original state dict (.pth file).',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
__SCREAMING_SNAKE_CASE : Union[str, Any] = parser.parse_args()
convert_maskformer_checkpoint(
args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
) | 697 |
'''simple docstring'''
from __future__ import annotations
import bisect
def _snake_case ( lowercase , lowercase , lowercase = 0 , lowercase = -1 ) -> int:
if hi < 0:
__a : Union[str, Any] = len(lowercase )
while lo < hi:
__a : List[str] = lo + (hi - lo) // 2
if sorted_collection[mid] < item:
__a : int = mid + 1
else:
__a : int = mid
return lo
def _snake_case ( lowercase , lowercase , lowercase = 0 , lowercase = -1 ) -> int:
if hi < 0:
__a : Any = len(lowercase )
while lo < hi:
__a : Any = lo + (hi - lo) // 2
if sorted_collection[mid] <= item:
__a : List[str] = mid + 1
else:
__a : Any = mid
return lo
def _snake_case ( lowercase , lowercase , lowercase = 0 , lowercase = -1 ) -> None:
sorted_collection.insert(bisect_left(lowercase , lowercase , lowercase , lowercase ) , lowercase )
def _snake_case ( lowercase , lowercase , lowercase = 0 , lowercase = -1 ) -> None:
sorted_collection.insert(bisect_right(lowercase , lowercase , lowercase , lowercase ) , lowercase )
def _snake_case ( lowercase , lowercase ) -> int | None:
__a : Dict = 0
__a : Any = len(lowercase ) - 1
while left <= right:
__a : str = left + (right - left) // 2
__a : List[Any] = sorted_collection[midpoint]
if current_item == item:
return midpoint
elif item < current_item:
__a : Optional[Any] = midpoint - 1
else:
__a : Optional[int] = midpoint + 1
return None
def _snake_case ( lowercase , lowercase ) -> int | None:
__a : Optional[int] = bisect.bisect_left(lowercase , lowercase )
if index != len(lowercase ) and sorted_collection[index] == item:
return index
return None
def _snake_case ( lowercase , lowercase , lowercase , lowercase ) -> int | None:
if right < left:
return None
__a : Any = left + (right - left) // 2
if sorted_collection[midpoint] == item:
return midpoint
elif sorted_collection[midpoint] > item:
return binary_search_by_recursion(lowercase , lowercase , lowercase , midpoint - 1 )
else:
return binary_search_by_recursion(lowercase , lowercase , midpoint + 1 , lowercase )
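# Doctest-style sanity checks (assumed inputs) for the four search variants
# above; a bare `>>>` line with no output means the call returns None:
# >>> binary_search([0, 5, 7, 10, 15], 6)
# >>> binary_search([0, 5, 7, 10, 15], 15)
# 4
# >>> bisect_left([0, 5, 7, 7, 10], 7)
# 2
# >>> bisect_right([0, 5, 7, 7, 10], 7)
# 4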
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE : List[Any] = input('Enter numbers separated by comma:\n').strip()
__SCREAMING_SNAKE_CASE : Optional[Any] = sorted(int(item) for item in user_input.split(','))
__SCREAMING_SNAKE_CASE : List[str] = int(input('Enter a single number to be found in the list:\n'))
__SCREAMING_SNAKE_CASE : Optional[int] = binary_search(collection, target)
if result is None:
print(f'''{target} was not found in {collection}.''')
else:
print(f'''{target} was found at position {result} in {collection}.''') | 697 | 1 |
'''simple docstring'''
import os
import unittest
from transformers import LxmertTokenizer, LxmertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class SCREAMING_SNAKE_CASE__ ( __UpperCamelCase , unittest.TestCase ):
lowercase__ = LxmertTokenizer
lowercase__ = LxmertTokenizerFast
lowercase__ = True
lowercase__ = True
def __lowerCamelCase ( self ):
'''simple docstring'''
super().setUp()
__a : int = [
"""[UNK]""",
"""[CLS]""",
"""[SEP]""",
"""want""",
"""##want""",
"""##ed""",
"""wa""",
"""un""",
"""runn""",
"""##ing""",
""",""",
"""low""",
"""lowest""",
]
__a : Optional[int] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
def __lowerCamelCase ( self , __UpperCamelCase ):
'''simple docstring'''
__a : List[Any] = """UNwant\u00E9d,running"""
__a : List[str] = """unwanted, running"""
return input_text, output_text
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Union[str, Any] = self.tokenizer_class(self.vocab_file )
__a : Optional[Any] = tokenizer.tokenize("""UNwant\u00E9d,running""" )
self.assertListEqual(__UpperCamelCase , ["""un""", """##want""", """##ed""", """,""", """runn""", """##ing"""] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(__UpperCamelCase ) , [7, 4, 5, 10, 8, 9] )
def __lowerCamelCase ( self ):
'''simple docstring'''
if not self.test_rust_tokenizer:
return
__a : str = self.get_tokenizer()
__a : Union[str, Any] = self.get_rust_tokenizer()
__a : Union[str, Any] = """I was born in 92000, and this is falsé."""
__a : List[Any] = tokenizer.tokenize(__UpperCamelCase )
__a : List[str] = rust_tokenizer.tokenize(__UpperCamelCase )
self.assertListEqual(__UpperCamelCase , __UpperCamelCase )
__a : List[Any] = tokenizer.encode(__UpperCamelCase , add_special_tokens=__UpperCamelCase )
__a : str = rust_tokenizer.encode(__UpperCamelCase , add_special_tokens=__UpperCamelCase )
self.assertListEqual(__UpperCamelCase , __UpperCamelCase )
__a : Optional[Any] = self.get_rust_tokenizer()
__a : Tuple = tokenizer.encode(__UpperCamelCase )
__a : Dict = rust_tokenizer.encode(__UpperCamelCase )
self.assertListEqual(__UpperCamelCase , __UpperCamelCase ) | 697 |
'''simple docstring'''
from itertools import product
def _snake_case ( lowercase , lowercase ) -> list[int]:
__a : Optional[int] = sides_number
__a : Union[str, Any] = max_face_number * dice_number
__a : Optional[Any] = [0] * (max_total + 1)
__a : Dict = 1
__a : str = range(lowercase , max_face_number + 1 )
for dice_numbers in product(lowercase , repeat=lowercase ):
__a : int = sum(lowercase )
totals_frequencies[total] += 1
return totals_frequencies
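# Quick sanity example (not part of the original solution): two 4-sided dice
# yield totals 2..8 with frequencies 1, 2, 3, 4, 3, 2, 1:
# >>> total_frequency_distribution(sides_number=4, dice_number=2)[2:]
# [1, 2, 3, 4, 3, 2, 1]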
def _snake_case ( ) -> float:
__a : Tuple = total_frequency_distribution(
sides_number=4 , dice_number=9 )
__a : Union[str, Any] = total_frequency_distribution(
sides_number=6 , dice_number=6 )
__a : str = 0
__a : Dict = 9
__a : str = 4 * 9
__a : Any = 6
for peter_total in range(lowercase , max_peter_total + 1 ):
peter_wins_count += peter_totals_frequencies[peter_total] * sum(
colin_totals_frequencies[min_colin_total:peter_total] )
__a : str = (4**9) * (6**6)
__a : List[Any] = peter_wins_count / total_games_number
__a : List[Any] = round(lowercase , ndigits=7 )
return rounded_peter_win_probability
if __name__ == "__main__":
print(f'''{solution() = }''') | 697 | 1 |
'''simple docstring'''
def _snake_case ( lowercase , lowercase ) -> str:
if not isinstance(lowercase , lowercase ):
raise ValueError("""iterations must be defined as integers""" )
if not isinstance(lowercase , lowercase ) or not number >= 1:
raise ValueError(
"""starting number must be
and integer and be more than 0""" )
if not iterations >= 1:
raise ValueError("""Iterations must be done more than 0 times to play FizzBuzz""" )
__a : List[str] = """"""
while number <= iterations:
if number % 3 == 0:
out += "Fizz"
if number % 5 == 0:
out += "Buzz"
if 0 not in (number % 3, number % 5):
out += str(lowercase )
# print(out)
number += 1
out += " "
return out
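# Example of the return format (note the trailing space added per iteration):
# >>> fizz_buzz(1, 7)
# '1 2 Fizz 4 Buzz Fizz 7 '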
if __name__ == "__main__":
import doctest
doctest.testmod() | 697 |
'''simple docstring'''
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import DiffusionPipeline
from diffusers.models import AutoencoderKL, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import logging
__SCREAMING_SNAKE_CASE : Union[str, Any] = logging.get_logger(__name__) # pylint: disable=invalid-name
class SCREAMING_SNAKE_CASE__ ( __UpperCamelCase ):
def __init__( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , ):
'''simple docstring'''
super().__init__()
self.register_modules(
vae=__UpperCamelCase , text_encoder=__UpperCamelCase , tokenizer=__UpperCamelCase , unet=__UpperCamelCase , scheduler=__UpperCamelCase , safety_checker=__UpperCamelCase , feature_extractor=__UpperCamelCase , )
def __lowerCamelCase ( self , __UpperCamelCase = "auto" ):
'''simple docstring'''
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
__a : Union[str, Any] = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(__UpperCamelCase )
def __lowerCamelCase ( self ):
'''simple docstring'''
self.enable_attention_slicing(__UpperCamelCase )
@torch.no_grad()
def __call__( self , __UpperCamelCase , __UpperCamelCase = 512 , __UpperCamelCase = 512 , __UpperCamelCase = 50 , __UpperCamelCase = 7.5 , __UpperCamelCase = None , __UpperCamelCase = 1 , __UpperCamelCase = 0.0 , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = "pil" , __UpperCamelCase = True , __UpperCamelCase = None , __UpperCamelCase = 1 , __UpperCamelCase = None , **__UpperCamelCase , ):
'''simple docstring'''
if isinstance(__UpperCamelCase , __UpperCamelCase ):
__a : Union[str, Any] = 1
elif isinstance(__UpperCamelCase , __UpperCamelCase ):
__a : Tuple = len(__UpperCamelCase )
else:
raise ValueError(f"""`prompt` has to be of type `str` or `list` but is {type(__UpperCamelCase )}""" )
if height % 8 != 0 or width % 8 != 0:
raise ValueError(f"""`height` and `width` have to be divisible by 8 but are {height} and {width}.""" )
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(__UpperCamelCase , __UpperCamelCase ) or callback_steps <= 0)
):
raise ValueError(
f"""`callback_steps` has to be a positive integer but is {callback_steps} of type"""
f""" {type(__UpperCamelCase )}.""" )
# get prompt text embeddings
__a : Tuple = self.tokenizer(
__UpperCamelCase , padding="""max_length""" , max_length=self.tokenizer.model_max_length , return_tensors="""pt""" , )
__a : Union[str, Any] = text_inputs.input_ids
if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
__a : str = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
logger.warning(
"""The following part of your input was truncated because CLIP can only handle sequences up to"""
f""" {self.tokenizer.model_max_length} tokens: {removed_text}""" )
__a : Optional[int] = text_input_ids[:, : self.tokenizer.model_max_length]
if text_embeddings is None:
__a : int = self.text_encoder(text_input_ids.to(self.device ) )[0]
# duplicate text embeddings for each generation per prompt, using mps friendly method
__a , __a , __a : Union[str, Any] = text_embeddings.shape
__a : Optional[Any] = text_embeddings.repeat(1 , __UpperCamelCase , 1 )
__a : Union[str, Any] = text_embeddings.view(bs_embed * num_images_per_prompt , __UpperCamelCase , -1 )
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
__a : Any = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
__a : List[str]
if negative_prompt is None:
__a : Optional[Any] = [""""""]
elif type(__UpperCamelCase ) is not type(__UpperCamelCase ):
raise TypeError(
f"""`negative_prompt` should be the same type to `prompt`, but got {type(__UpperCamelCase )} !="""
f""" {type(__UpperCamelCase )}.""" )
elif isinstance(__UpperCamelCase , __UpperCamelCase ):
__a : Any = [negative_prompt]
elif batch_size != len(__UpperCamelCase ):
raise ValueError(
f"""`negative_prompt`: {negative_prompt} has batch size {len(__UpperCamelCase )}, but `prompt`:"""
f""" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"""
""" the batch size of `prompt`.""" )
else:
__a : Tuple = negative_prompt
__a : Any = text_input_ids.shape[-1]
__a : List[str] = self.tokenizer(
__UpperCamelCase , padding="""max_length""" , max_length=__UpperCamelCase , truncation=__UpperCamelCase , return_tensors="""pt""" , )
__a : str = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
__a : List[str] = uncond_embeddings.shape[1]
__a : List[Any] = uncond_embeddings.repeat(__UpperCamelCase , __UpperCamelCase , 1 )
__a : Tuple = uncond_embeddings.view(batch_size * num_images_per_prompt , __UpperCamelCase , -1 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
__a : List[Any] = torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
__a : Tuple = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
__a : List[Any] = (batch_size * num_images_per_prompt, self.unet.config.in_channels, 64, 64)
__a : int = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not exist on mps
__a : Any = torch.randn(
__UpperCamelCase , generator=__UpperCamelCase , device="""cpu""" , dtype=__UpperCamelCase ).to(self.device )
__a : Optional[Any] = torch.randn(__UpperCamelCase , generator=__UpperCamelCase , device="""cpu""" , dtype=__UpperCamelCase ).to(
self.device )
else:
__a : Optional[int] = torch.randn(
__UpperCamelCase , generator=__UpperCamelCase , device=self.device , dtype=__UpperCamelCase )
__a : str = torch.randn(__UpperCamelCase , generator=__UpperCamelCase , device=self.device , dtype=__UpperCamelCase )
else:
if latents_reference.shape != latents_shape:
raise ValueError(f"""Unexpected latents shape, got {latents.shape}, expected {latents_shape}""" )
__a : Optional[Any] = latents_reference.to(self.device )
__a : str = latents.to(self.device )
# This is the key part of the pipeline where we
# try to ensure that the generated images w/ the same seed
# but different sizes actually result in similar images
__a : List[str] = (latents_shape[3] - latents_shape_reference[3]) // 2
__a : int = (latents_shape[2] - latents_shape_reference[2]) // 2
__a : int = latents_shape_reference[3] if dx >= 0 else latents_shape_reference[3] + 2 * dx
__a : Tuple = latents_shape_reference[2] if dy >= 0 else latents_shape_reference[2] + 2 * dy
__a : Optional[Any] = 0 if dx < 0 else dx
__a : Optional[Any] = 0 if dy < 0 else dy
__a : Optional[int] = max(-dx , 0 )
__a : Optional[Any] = max(-dy , 0 )
# import pdb
# pdb.set_trace()
__a : Optional[int] = latents_reference[:, :, dy : dy + h, dx : dx + w]
# set timesteps
self.scheduler.set_timesteps(__UpperCamelCase )
# Some schedulers like PNDM have timesteps as arrays
# It's more optimized to move all timesteps to the correct device beforehand
__a : Dict = self.scheduler.timesteps.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
__a : Any = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
__a : List[Any] = """eta""" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
__a : Optional[Any] = {}
if accepts_eta:
__a : Union[str, Any] = eta
for i, t in enumerate(self.progress_bar(__UpperCamelCase ) ):
# expand the latents if we are doing classifier free guidance
__a : List[str] = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
__a : Tuple = self.scheduler.scale_model_input(__UpperCamelCase , __UpperCamelCase )
# predict the noise residual
__a : Union[str, Any] = self.unet(__UpperCamelCase , __UpperCamelCase , encoder_hidden_states=__UpperCamelCase ).sample
# perform guidance
if do_classifier_free_guidance:
__a , __a : List[str] = noise_pred.chunk(2 )
__a : Optional[int] = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
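# (Reading of the line above: classifier-free guidance combines the two
# predictions as eps = eps_uncond + s * (eps_text - eps_uncond), so s > 1
# pushes the denoising step towards the text-conditioned direction.)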
# compute the previous noisy sample x_t -> x_t-1
__a : List[Any] = self.scheduler.step(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , **__UpperCamelCase ).prev_sample
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
__a : Optional[Any] = 1 / 0.1_8_2_1_5 * latents
__a : Optional[int] = self.vae.decode(__UpperCamelCase ).sample
__a : List[str] = (image / 2 + 0.5).clamp(0 , 1 )
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
__a : int = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if self.safety_checker is not None:
__a : List[str] = self.feature_extractor(self.numpy_to_pil(__UpperCamelCase ) , return_tensors="""pt""" ).to(
self.device )
__a , __a : int = self.safety_checker(
images=__UpperCamelCase , clip_input=safety_checker_input.pixel_values.to(text_embeddings.dtype ) )
else:
__a : Optional[int] = None
if output_type == "pil":
__a : str = self.numpy_to_pil(__UpperCamelCase )
if not return_dict:
return (image, has_nsfw_concept)
return StableDiffusionPipelineOutput(images=__UpperCamelCase , nsfw_content_detected=__UpperCamelCase ) | 697 | 1 |
'''simple docstring'''
import json
import logging
import math
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from datasets import Dataset, load_dataset
import transformers
from transformers import (
CONFIG_MAPPING,
MODEL_FOR_MASKED_LM_MAPPING,
AutoConfig,
AutoModelForMaskedLM,
AutoTokenizer,
DataCollatorForWholeWordMask,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint, is_main_process
__SCREAMING_SNAKE_CASE : List[Any] = logging.getLogger(__name__)
__SCREAMING_SNAKE_CASE : str = list(MODEL_FOR_MASKED_LM_MAPPING.keys())
__SCREAMING_SNAKE_CASE : Tuple = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class SCREAMING_SNAKE_CASE__ :
lowercase__ = field(
default=__UpperCamelCase , metadata={
"help": (
"The model checkpoint for weights initialization.Don't set if you want to train a model from scratch."
)
} , )
lowercase__ = field(
default=__UpperCamelCase , metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(__UpperCamelCase )} , )
lowercase__ = field(
default=__UpperCamelCase , metadata={
"help": (
"Override some existing default config settings when a model is trained from scratch. Example: "
"n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index"
)
} , )
lowercase__ = field(
default=__UpperCamelCase , metadata={"help": "Pretrained config name or path if not the same as model_name"} )
lowercase__ = field(
default=__UpperCamelCase , metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} )
lowercase__ = field(
default=__UpperCamelCase , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , )
lowercase__ = field(
default=__UpperCamelCase , metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."} , )
lowercase__ = field(
default="main" , metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."} , )
lowercase__ = field(
default=__UpperCamelCase , metadata={
"help": (
"Will use the token generated when running `huggingface-cli login` (necessary to use this script "
"with private models)."
)
} , )
def __lowerCamelCase ( self ):
'''simple docstring'''
if self.config_overrides is not None and (self.config_name is not None or self.model_name_or_path is not None):
raise ValueError(
"""--config_overrides can't be used in combination with --config_name or --model_name_or_path""" )
@dataclass
class SCREAMING_SNAKE_CASE__ :
lowercase__ = field(
default=__UpperCamelCase , metadata={"help": "The name of the dataset to use (via the datasets library)."} )
lowercase__ = field(
default=__UpperCamelCase , metadata={"help": "The configuration name of the dataset to use (via the datasets library)."} )
lowercase__ = field(default=__UpperCamelCase , metadata={"help": "The input training data file (a text file)."} )
lowercase__ = field(
default=__UpperCamelCase , metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."} , )
lowercase__ = field(
default=__UpperCamelCase , metadata={"help": "An optional input train ref data file for whole word masking in Chinese."} , )
lowercase__ = field(
default=__UpperCamelCase , metadata={"help": "An optional input validation ref data file for whole word masking in Chinese."} , )
lowercase__ = field(
default=__UpperCamelCase , metadata={"help": "Overwrite the cached training and evaluation sets"} )
lowercase__ = field(
default=5 , metadata={
"help": "The percentage of the train set used as validation set in case there's no validation split"
} , )
lowercase__ = field(
default=__UpperCamelCase , metadata={
"help": (
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated. Default to the max input length of the model."
)
} , )
lowercase__ = field(
default=__UpperCamelCase , metadata={"help": "The number of processes to use for the preprocessing."} , )
lowercase__ = field(
default=0.1_5 , metadata={"help": "Ratio of tokens to mask for masked language modeling loss"} )
lowercase__ = field(
default=__UpperCamelCase , metadata={
"help": (
"Whether to pad all samples to `max_seq_length`. "
"If False, will pad the samples dynamically when batching to the maximum length in the batch."
)
} , )
def __lowerCamelCase ( self ):
'''simple docstring'''
if self.train_file is not None:
__a : Optional[Any] = self.train_file.split(""".""" )[-1]
assert extension in ["csv", "json", "txt"], "`train_file` should be a csv, a json or a txt file."
if self.validation_file is not None:
__a : List[Any] = self.validation_file.split(""".""" )[-1]
assert extension in ["csv", "json", "txt"], "`validation_file` should be a csv, a json or a txt file."
def _snake_case ( lowercase , lowercase ) -> Any:
with open(lowercase , """r""" , encoding="""utf-8""" ) as f:
__a : int = [json.loads(lowercase ) for line in f.read().splitlines() if (len(lowercase ) > 0 and not line.isspace())]
assert len(lowercase ) == len(lowercase )
__a : Optional[Any] = {c: dataset[c] for c in dataset.column_names}
__a : Optional[int] = refs
return Dataset.from_dict(lowercase )
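# Assumed ref-file format (consistent with the per-line json.loads above):
# one JSON value per line, one line per example; typically a list of
# sub-token indices such as "[2, 3]" marking positions that continue a
# Chinese word for whole-word masking.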
def _snake_case ( ) -> Any:
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
__a : Tuple = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith(""".json""" ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
__a , __a , __a : Union[str, Any] = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
__a , __a , __a : Union[str, Any] = parser.parse_args_into_dataclasses()
# Detecting last checkpoint.
__a : List[Any] = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
__a : int = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F"""Output directory ({training_args.output_dir}) already exists and is not empty. """
"""Use --overwrite_output_dir to overcome.""" )
elif last_checkpoint is not None:
logger.info(
F"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
"""the `--output_dir` or add `--overwrite_output_dir` to train from scratch.""" )
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , handlers=[logging.StreamHandler(sys.stdout )] , )
logger.setLevel(logging.INFO if is_main_process(training_args.local_rank ) else logging.WARN )
# Log on each process the small summary:
logger.warning(
F"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"""
+ F"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info("""Training/evaluation parameters %s""" , lowercase )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
#
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
if data_args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
__a : List[Any] = load_dataset(data_args.dataset_name , data_args.dataset_config_name )
if "validation" not in datasets.keys():
__a : int = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=F"""train[:{data_args.validation_split_percentage}%]""" , )
__a : Tuple = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=F"""train[{data_args.validation_split_percentage}%:]""" , )
else:
__a : Optional[Any] = {}
if data_args.train_file is not None:
__a : Any = data_args.train_file
if data_args.validation_file is not None:
__a : int = data_args.validation_file
__a : Optional[int] = data_args.train_file.split(""".""" )[-1]
if extension == "txt":
__a : Optional[Any] = """text"""
__a : Tuple = load_dataset(lowercase , data_files=lowercase )
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
__a : str = {
"""cache_dir""": model_args.cache_dir,
"""revision""": model_args.model_revision,
"""use_auth_token""": True if model_args.use_auth_token else None,
}
if model_args.config_name:
__a : Dict = AutoConfig.from_pretrained(model_args.config_name , **lowercase )
elif model_args.model_name_or_path:
__a : str = AutoConfig.from_pretrained(model_args.model_name_or_path , **lowercase )
else:
__a : List[str] = CONFIG_MAPPING[model_args.model_type]()
logger.warning("""You are instantiating a new config instance from scratch.""" )
if model_args.config_overrides is not None:
logger.info(F"""Overriding config: {model_args.config_overrides}""" )
config.update_from_string(model_args.config_overrides )
logger.info(F"""New config: {config}""" )
__a : Union[str, Any] = {
"""cache_dir""": model_args.cache_dir,
"""use_fast""": model_args.use_fast_tokenizer,
"""revision""": model_args.model_revision,
"""use_auth_token""": True if model_args.use_auth_token else None,
}
if model_args.tokenizer_name:
__a : List[Any] = AutoTokenizer.from_pretrained(model_args.tokenizer_name , **lowercase )
elif model_args.model_name_or_path:
__a : List[Any] = AutoTokenizer.from_pretrained(model_args.model_name_or_path , **lowercase )
else:
raise ValueError(
"""You are instantiating a new tokenizer from scratch. This is not supported by this script."""
"""You can do it from another script, save it, and load it from here, using --tokenizer_name.""" )
if model_args.model_name_or_path:
__a : Any = AutoModelForMaskedLM.from_pretrained(
model_args.model_name_or_path , from_tf=bool(""".ckpt""" in model_args.model_name_or_path ) , config=lowercase , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
else:
logger.info("""Training new model from scratch""" )
__a : Union[str, Any] = AutoModelForMaskedLM.from_config(lowercase )
model.resize_token_embeddings(len(lowercase ) )
# Preprocessing the datasets.
# First we tokenize all the texts.
if training_args.do_train:
__a : List[Any] = datasets["""train"""].column_names
else:
__a : Optional[int] = datasets["""validation"""].column_names
__a : str = """text""" if """text""" in column_names else column_names[0]
__a : List[Any] = """max_length""" if data_args.pad_to_max_length else False
def tokenize_function(lowercase ):
# Remove empty lines
__a : Dict = [line for line in examples["""text"""] if len(lowercase ) > 0 and not line.isspace()]
return tokenizer(examples["""text"""] , padding=lowercase , truncation=lowercase , max_length=data_args.max_seq_length )
__a : Optional[Any] = datasets.map(
lowercase , batched=lowercase , num_proc=data_args.preprocessing_num_workers , remove_columns=[text_column_name] , load_from_cache_file=not data_args.overwrite_cache , )
# Add the Chinese references if provided
if data_args.train_ref_file is not None:
__a : str = add_chinese_references(tokenized_datasets["""train"""] , data_args.train_ref_file )
if data_args.validation_ref_file is not None:
__a : Optional[Any] = add_chinese_references(
tokenized_datasets["""validation"""] , data_args.validation_ref_file )
# If we have ref files, we need to stop the trainer from removing them
__a : Optional[Any] = data_args.train_ref_file or data_args.validation_ref_file
if has_ref:
__a : int = False
# Data collator
# This one will take care of randomly masking the tokens.
__a : Tuple = DataCollatorForWholeWordMask(tokenizer=lowercase , mlm_probability=data_args.mlm_probability )
# Initialize our Trainer
__a : List[str] = Trainer(
model=lowercase , args=lowercase , train_dataset=tokenized_datasets["""train"""] if training_args.do_train else None , eval_dataset=tokenized_datasets["""validation"""] if training_args.do_eval else None , tokenizer=lowercase , data_collator=lowercase , )
# Training
if training_args.do_train:
if last_checkpoint is not None:
__a : Optional[int] = last_checkpoint
elif model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path ):
__a : Dict = model_args.model_name_or_path
else:
__a : Optional[int] = None
__a : Any = trainer.train(resume_from_checkpoint=lowercase )
trainer.save_model() # Saves the tokenizer too for easy upload
__a : Any = os.path.join(training_args.output_dir , """train_results.txt""" )
if trainer.is_world_process_zero():
with open(lowercase , """w""" ) as writer:
logger.info("""***** Train results *****""" )
for key, value in sorted(train_result.metrics.items() ):
logger.info(F""" {key} = {value}""" )
writer.write(F"""{key} = {value}\n""" )
# Need to save the state, since Trainer.save_model saves only the tokenizer with the model
trainer.state.save_to_json(os.path.join(training_args.output_dir , """trainer_state.json""" ) )
# Evaluation
__a : Union[str, Any] = {}
if training_args.do_eval:
logger.info("""*** Evaluate ***""" )
__a : Optional[Any] = trainer.evaluate()
__a : Union[str, Any] = math.exp(eval_output["""eval_loss"""] )
__a : List[Any] = perplexity
__a : List[Any] = os.path.join(training_args.output_dir , """eval_results_mlm_wwm.txt""" )
if trainer.is_world_process_zero():
with open(lowercase , """w""" ) as writer:
logger.info("""***** Eval results *****""" )
for key, value in sorted(results.items() ):
logger.info(F""" {key} = {value}""" )
writer.write(F"""{key} = {value}\n""" )
return results
def _snake_case ( lowercase ) -> Dict:
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main() | 697 |
'''simple docstring'''
import numpy as np
from PIL import Image
def _snake_case ( lowercase , lowercase , lowercase ) -> np.ndarray:
__a : Any = np.array(lowercase )
if arr.shape[0] != arr.shape[1]:
raise ValueError("""The input array is not a square matrix""" )
__a : Union[str, Any] = 0
__a : Dict = 0
__a : Optional[Any] = 0
__a : Tuple = 0
# compute the shape of the output matrix
__a : Optional[int] = (arr.shape[0] - size) // stride + 1
# initialize the output matrix with zeros of shape maxpool_shape
__a : int = np.zeros((maxpool_shape, maxpool_shape) )
while i < arr.shape[0]:
if i + size > arr.shape[0]:
# if the end of the matrix is reached, break
break
while j < arr.shape[1]:
# if the end of the matrix is reached, break
if j + size > arr.shape[1]:
break
# compute the maximum of the pooling matrix
__a : Optional[Any] = np.max(arr[i : i + size, j : j + size] )
# shift the pooling matrix by stride of column pixels
j += stride
mat_j += 1
# shift the pooling matrix by stride of row pixels
i += stride
mat_i += 1
# reset the column index to 0
__a : Optional[Any] = 0
__a : str = 0
return updated_arr
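# Worked example (illustrative input) for the max pooling above, with a 4x4
# matrix, size=2 and stride=2:
# >>> maxpooling([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12], [13, 14, 15, 16]], size=2, stride=2)
# array([[ 6.,  8.],
#        [14., 16.]])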
def _snake_case ( lowercase , lowercase , lowercase ) -> np.ndarray:
__a : int = np.array(lowercase )
if arr.shape[0] != arr.shape[1]:
raise ValueError("""The input array is not a square matrix""" )
__a : int = 0
__a : Optional[Any] = 0
__a : str = 0
__a : List[Any] = 0
# compute the shape of the output matrix
__a : int = (arr.shape[0] - size) // stride + 1
# initialize the output matrix with zeros of shape avgpool_shape
__a : Optional[int] = np.zeros((avgpool_shape, avgpool_shape) )
while i < arr.shape[0]:
# if the end of the matrix is reached, break
if i + size > arr.shape[0]:
break
while j < arr.shape[1]:
# if the end of the matrix is reached, break
if j + size > arr.shape[1]:
break
# compute the average of the pooling matrix
__a : Any = int(np.average(arr[i : i + size, j : j + size] ) )
# shift the pooling matrix by stride of column pixels
j += stride
mat_j += 1
# shift the pooling matrix by stride of row pixels
i += stride
mat_i += 1
# reset the column index to 0
__a : str = 0
__a : List[Any] = 0
return updated_arr
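# Same illustrative input for the average pooling; note that each window
# average is truncated to int before being stored (3.5 -> 3, 5.5 -> 5, ...):
# >>> avgpooling([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12], [13, 14, 15, 16]], size=2, stride=2)
# array([[ 3.,  5.],
#        [11., 13.]])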
# Main Function
if __name__ == "__main__":
from doctest import testmod
testmod(name='avgpooling', verbose=True)
# Loading the image
__SCREAMING_SNAKE_CASE : str = Image.open('path_to_image')
# Converting the image to numpy array and maxpooling, displaying the result
# Ensure that the image is a square matrix
Image.fromarray(maxpooling(np.array(image), size=3, stride=2)).show()
# Converting the image to numpy array and averagepooling, displaying the result
# Ensure that the image is a square matrix
Image.fromarray(avgpooling(np.array(image), size=3, stride=2)).show() | 697 | 1 |
'''simple docstring'''
import re
from flax.core.frozen_dict import freeze
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.experimental import PartitionSpec as P
# Sentinels
__SCREAMING_SNAKE_CASE : Optional[int] = object()
# For specifying empty leaf dict `{}`
__SCREAMING_SNAKE_CASE : int = object()
def _snake_case ( lowercase , lowercase ) -> Tuple:
__a : Any = tuple((re.compile(x + """$""" ) for x in qs) )
for i in range(len(lowercase ) - len(lowercase ) + 1 ):
__a : Dict = [x.match(lowercase ) for x, y in zip(lowercase , ks[i:] )]
if matches and all(lowercase ):
return True
return False
def _snake_case ( lowercase ) -> int:
def replace(lowercase , lowercase ):
for rule, replacement in rules:
if _match(lowercase , lowercase ):
return replacement
return val
return replace
def _snake_case ( ) -> Optional[int]:
return [
# embeddings
(("transformer", "wpe", "embedding"), P("""mp""" , lowercase )),
(("transformer", "wte", "embedding"), P("""mp""" , lowercase )),
# attention
(("attention", "(q_proj|k_proj|v_proj)", "kernel"), P(lowercase , """mp""" )),
(("attention", "out_proj", "kernel"), P("""mp""" , lowercase )),
(("attention", "out_proj", "bias"), None),
# mlp
(("mlp", "c_fc", "kernel"), P(lowercase , """mp""" )),
(("mlp", "c_fc", "bias"), P("""mp""" )),
(("mlp", "c_proj", "kernel"), P("""mp""" , lowercase )),
(("mlp", "c_proj", "bias"), None),
# layer norms
((r"ln_\d+", "bias"), None),
((r"\d+", r"ln_\d+", "scale"), None),
(("ln_f", "bias"), None),
(("ln_f", "scale"), None),
]
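# Illustration (hypothetical parameter key) of how these rules are applied:
# a flattened key such as ("transformer", "h", "0", "attention", "q_proj", "kernel")
# contains the ("attention", "(q_proj|k_proj|v_proj)", "kernel") pattern as a
# contiguous sub-sequence, so that kernel is sharded along its output axis on
# the "mp" mesh dimension by the matching rule above.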
def _snake_case ( lowercase ) -> Optional[int]:
__a : List[str] = _get_partition_rules()
__a : Tuple = _replacement_rules(lowercase )
__a : Any = {k: _unmatched for k in flatten_dict(lowercase )}
__a : Optional[Any] = {k: replace(lowercase , lowercase ) for k, v in initd.items()}
assert _unmatched not in result.values(), "Incomplete partition spec."
return freeze(unflatten_dict(lowercase ) ) | 697 |
'''simple docstring'''
import qiskit
def _snake_case ( lowercase , lowercase ) -> qiskit.result.counts.Counts:
__a : Any = qiskit.Aer.get_backend("""aer_simulator""" )
# Create a Quantum Circuit acting on the q register
__a : str = qiskit.QuantumCircuit(lowercase , lowercase )
# Map the quantum measurement to the classical bits
circuit.measure([0] , [0] )
# Execute the circuit on the simulator
__a : Any = qiskit.execute(lowercase , lowercase , shots=1_0_0_0 )
# Return the histogram data of the results of the experiment.
return job.result().get_counts(lowercase )
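# Since no gates are applied before the measurement, the qubit stays in |0>
# and the returned counts are deterministic: {'0': 1000} for the 1000 shots.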
if __name__ == "__main__":
print(f'''Total count for various states are: {single_qubit_measure(1, 1)}''') | 697 | 1 |
'''simple docstring'''
import argparse
import json
import os
import tensorstore as ts
import torch
from flax import serialization
from flax.traverse_util import flatten_dict, unflatten_dict
from tensorflow.io import gfile
from transformers.modeling_utils import dtype_byte_size
from transformers.models.switch_transformers.convert_switch_transformers_original_flax_checkpoint_to_pytorch import (
rename_keys,
)
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
from transformers.utils.hub import convert_file_size_to_int
def _snake_case ( lowercase , lowercase ) -> List[Any]:
if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 3:
# expert layer
__a : Dict = flax_key_tuple[:-1] + ("""weight""",)
__a : Optional[int] = torch.permute(lowercase , (0, 2, 1) )
elif flax_key_tuple[-1] == "kernel" and ".".join(lowercase ):
# linear layer
__a : List[str] = flax_key_tuple[:-1] + ("""weight""",)
__a : List[Any] = flax_tensor.T
elif flax_key_tuple[-1] in ["scale", "embedding"]:
__a : List[str] = flax_key_tuple[:-1] + ("""weight""",)
return flax_key_tuple, flax_tensor
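# Illustrative rename (hypothetical key): an expert kernel stored as a rank-3
# flax array of shape (num_experts, d_in, d_out) has its last key component
# rewritten to "weight" and is permuted to (num_experts, d_out, d_in), the
# layout expected by torch.nn.Linear.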
def _snake_case ( lowercase , lowercase , lowercase ) -> Any:
if "metadata" in layer:
__a : Optional[Any] = layer.split("""metadata""" )
__a : int = """""".join(split_layer[0] )[:-1]
__a : Union[str, Any] = [tuple(("""metadata""" + split_layer[1]).split("""/""" ) )]
elif "kvstore" in layer:
__a : Dict = layer.split("""kvstore""" )
__a : Union[str, Any] = """""".join(split_layer[0] )[:-1]
__a : List[str] = [tuple(("""kvstore""" + split_layer[1]).split("""/""" ) )]
else:
__a : int = layer.split("""/""" )
__a : Union[str, Any] = """/""".join(split_layer[:-1] )
__a : str = (split_layer[-1],)
if "kvstore/path" in layer:
__a : Optional[int] = F"""{switch_checkpoint_path}/{checkpoint_info[layer]}"""
elif "kvstore/driver" in layer:
__a : str = """file"""
else:
__a : str = checkpoint_info[layer]
return curr_real_layer_name, split_layer, content
def _snake_case ( lowercase , lowercase ) -> Optional[Any]:
__a : int = rename_keys(lowercase )
__a : List[Any] = {}
for k, v in current_block.items():
__a : Any = v
__a : List[str] = new_current_block
torch.save(lowercase , lowercase )
def _snake_case ( lowercase , lowercase , lowercase , lowercase , lowercase = WEIGHTS_NAME ) -> str:
__a : Optional[Any] = convert_file_size_to_int(lowercase )
__a : Tuple = []
__a : str = {}
__a : List[Any] = 0
__a : Optional[int] = 0
os.makedirs(lowercase , exist_ok=lowercase )
with gfile.GFile(switch_checkpoint_path + """/checkpoint""" , """rb""" ) as fp:
__a : List[Any] = serialization.msgpack_restore(fp.read() )["""optimizer"""]["""target"""]
__a : int = flatten_dict(lowercase , sep="""/""" )
__a : List[Any] = {}
for layer in checkpoint_info.keys():
__a , __a , __a : Tuple = get_key_and_tensorstore_dict(
lowercase , lowercase , lowercase )
if curr_real_layer_name in all_layers:
__a : Any = content
else:
__a : Optional[Any] = {split_layer[-1]: content}
for key in all_layers.keys():
# open tensorstore file
__a : Any = ts.open(unflatten_dict(all_layers[key] ) ).result().read().result()
__a : List[Any] = torch.tensor(lowercase )
__a : List[Any] = raw_weights.numel() * dtype_byte_size(raw_weights.dtype )
# use the renaming pattern from the small conversion scripts
__a , __a : List[Any] = rename_base_flax_keys(tuple(key.split("""/""" ) ) , lowercase )
__a : str = """/""".join(lowercase )
# If this weight would tip the current shard over the maximal size, we split.
if current_block_size + weight_size > max_shard_size:
__a : List[Any] = os.path.join(
lowercase , weights_name.replace(""".bin""" , F"""-{len(lowercase )+1:05d}-of-???.bin""" ) )
rename_and_save_block(lowercase , lowercase )
sharded_state_dicts.append(current_block.keys() )
del current_block
__a : int = {}
__a : int = 0
__a : str = raw_weights.to(getattr(lowercase , lowercase ) )
current_block_size += weight_size
total_size += weight_size
# Add the last block
__a : List[Any] = os.path.join(lowercase , weights_name.replace(""".bin""" , F"""-{len(lowercase )+1:05d}-of-???.bin""" ) )
rename_and_save_block(lowercase , lowercase )
sharded_state_dicts.append(current_block.keys() )
# If we only have one shard, we return it
if len(lowercase ) == 1:
return {weights_name: sharded_state_dicts[0]}, None
# Otherwise, let's build the index
__a : str = {}
__a : Any = {}
for idx, shard in enumerate(lowercase ):
__a : str = weights_name.replace(
""".bin""" , F"""-{idx+1:05d}-of-{len(lowercase ):05d}.bin""" ) # len(sharded_state_dicts):05d}
__a : Dict = os.path.join(lowercase , weights_name.replace(""".bin""" , F"""-{idx+1:05d}-of-???.bin""" ) )
os.rename(lowercase , os.path.join(lowercase , lowercase ) )
__a : Tuple = shard
for key in shard:
__a : Optional[Any] = shard_file
# Add the metadata
__a : int = {"""total_size""": total_size}
__a : int = {"""metadata""": metadata, """weight_map""": weight_map}
with open(os.path.join(lowercase , lowercase ) , """w""" , encoding="""utf-8""" ) as f:
__a : Union[str, Any] = json.dumps(lowercase , indent=2 , sort_keys=lowercase ) + """\n"""
f.write(lowercase )
return metadata, index
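# Rough sizing intuition (assumed dtype): a bfloat16 tensor with 1_000_000
# elements adds 1_000_000 * dtype_byte_size(torch.bfloat16) == 2_000_000 bytes
# to the running shard size, so the default '10GB' max_shard_size holds about
# 5_000 such tensors before a new shard is started.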
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--switch_t5x_checkpoint_path',
default='/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128/checkpoint_634600',
type=str,
required=False,
help='Path to a directory containing a folder per layer. Follows the original Google format.',
)
parser.add_argument('--max_shard_size', default='10GB', required=False, help='Max shard size')
parser.add_argument('--dtype', default='bfloat16', type=str, required=False, help='dtype of the saved model')
parser.add_argument(
'--pytorch_dump_folder_path',
default='/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128-converted',
type=str,
required=False,
help='Path to the output pytorch model.',
)
__SCREAMING_SNAKE_CASE : Union[str, Any] = parser.parse_args()
shard_on_the_fly(
args.switch_tax_checkpoint_path,
args.pytorch_dump_folder_path,
args.max_shard_size,
args.dtype,
)
def _snake_case ( ) -> Optional[int]:
from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration, TaTokenizer
__a : Dict = SwitchTransformersConfig.from_pretrained("""google/switch-base-8""" )
config.save_pretrained("""/home/arthur_huggingface_co/transformers/switch_converted""" )
__a : Dict = SwitchTransformersForConditionalGeneration.from_pretrained(
"""/home/arthur_huggingface_co/transformers/switch_converted""" , device_map="""auto""" )
__a : Optional[int] = TaTokenizer.from_pretrained("""t5-small""" )
__a : Any = """A <extra_id_0> walks into a bar a orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>."""
__a : Any = tokenizer(lowercase , return_tensors="""pt""" ).input_ids
__a : str = model.generate(lowercase , decoder_start_token_id=0 )
print(tokenizer.decode(out[0] ) ) | 697 |
'''simple docstring'''
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
WavaVecaConformerConfig,
WavaVecaConformerForCTC,
WavaVecaConformerForPreTraining,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
__SCREAMING_SNAKE_CASE : str = logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE : Any = {
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.linear_k': 'encoder.layers.*.self_attn.linear_k',
'self_attn.linear_v': 'encoder.layers.*.self_attn.linear_v',
'self_attn.linear_q': 'encoder.layers.*.self_attn.linear_q',
'self_attn.pos_bias_u': 'encoder.layers.*.self_attn.pos_bias_u',
'self_attn.pos_bias_v': 'encoder.layers.*.self_attn.pos_bias_v',
'self_attn.linear_out': 'encoder.layers.*.self_attn.linear_out',
'self_attn.linear_pos': 'encoder.layers.*.self_attn.linear_pos',
'self_attn.rotary_emb': 'encoder.embed_positions',
'self_attn_layer_norm': 'encoder.layers.*.self_attn_layer_norm',
'conv_module.pointwise_conv1': 'encoder.layers.*.conv_module.pointwise_conv1',
'conv_module.pointwise_conv2': 'encoder.layers.*.conv_module.pointwise_conv2',
'conv_module.depthwise_conv': 'encoder.layers.*.conv_module.depthwise_conv',
'conv_module.batch_norm': 'encoder.layers.*.conv_module.batch_norm',
'conv_module.layer_norm': 'encoder.layers.*.conv_module.layer_norm',
'ffn1.w_1': 'encoder.layers.*.ffn1.intermediate_dense',
'ffn1.w_2': 'encoder.layers.*.ffn1.output_dense',
'ffn1.layer_norm': 'encoder.layers.*.ffn1_layer_norm',
'ffn2.w_1': 'encoder.layers.*.ffn2.intermediate_dense',
'ffn2.w_2': 'encoder.layers.*.ffn2.output_dense',
'ffn2.layer_norm': 'encoder.layers.*.ffn2_layer_norm',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'quantizer.weight_proj': 'quantizer.weight_proj',
'quantizer.vars': 'quantizer.codevectors',
'project_q': 'project_q',
'final_proj': 'project_hid',
'w2v_encoder.proj': 'lm_head',
'mask_emb': 'masked_spec_embed',
}
__SCREAMING_SNAKE_CASE : Optional[Any] = [
'lm_head',
'quantizer.weight_proj',
'quantizer.codevectors',
'project_q',
'project_hid',
]
def _snake_case ( lowercase , lowercase , lowercase , lowercase , lowercase ) -> List[Any]:
for attribute in key.split(""".""" ):
__a : str = getattr(lowercase , lowercase )
if weight_type is not None:
__a : Dict = getattr(lowercase , lowercase ).shape
else:
__a : Dict = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
F"""Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be"""
F""" {value.shape} for {full_name}""" )
if weight_type == "weight":
__a : Any = value
elif weight_type == "weight_g":
__a : int = value
elif weight_type == "weight_v":
__a : int = value
elif weight_type == "bias":
__a : List[Any] = value
elif weight_type == "running_mean":
__a : Union[str, Any] = value
elif weight_type == "running_var":
__a : Tuple = value
elif weight_type == "num_batches_tracked":
__a : Optional[int] = value
elif weight_type == "inv_freq":
__a : List[str] = value
else:
__a : List[str] = value
logger.info(F"""{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.""" )
def _snake_case ( lowercase , lowercase , lowercase ) -> Dict:
__a : Dict = []
__a : Dict = fairseq_model.state_dict()
__a : Tuple = hf_model.wavaveca_conformer.feature_extractor
for name, value in fairseq_dict.items():
__a : int = False
if "conv_layers" in name:
load_conv_layer(
lowercase , lowercase , lowercase , lowercase , hf_model.config.feat_extract_norm == """group""" , )
__a : List[Any] = True
else:
for key, mapped_key in MAPPING.items():
__a : Optional[int] = """wav2vec2_conformer.""" + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split("""w2v_model.""" )[-1] == name.split(""".""" )[0]:
__a : str = True
if "*" in mapped_key:
__a : Optional[int] = name.split(lowercase )[0].split(""".""" )[-2]
__a : List[Any] = mapped_key.replace("""*""" , lowercase )
if "pos_bias_u" in name:
__a : Union[str, Any] = None
elif "pos_bias_v" in name:
__a : List[Any] = None
elif "weight_g" in name:
__a : List[Any] = """weight_g"""
elif "weight_v" in name:
__a : List[Any] = """weight_v"""
elif "bias" in name:
__a : Optional[int] = """bias"""
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
__a : str = """weight"""
elif "running_mean" in name:
__a : List[str] = """running_mean"""
elif "inv_freq" in name:
__a : Dict = """inv_freq"""
elif "running_var" in name:
__a : Union[str, Any] = """running_var"""
elif "num_batches_tracked" in name:
__a : int = """num_batches_tracked"""
else:
__a : Optional[int] = None
set_recursively(lowercase , lowercase , lowercase , lowercase , lowercase )
continue
if not is_used:
unused_weights.append(lowercase )
logger.warning(F"""Unused weights: {unused_weights}""" )
def _snake_case ( lowercase , lowercase , lowercase , lowercase , lowercase ) -> List[str]:
__a : Optional[Any] = full_name.split("""conv_layers.""" )[-1]
__a : Union[str, Any] = name.split(""".""" )
__a : Optional[Any] = int(items[0] )
__a : int = int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" )
__a : Dict = value
logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" )
__a : str = value
logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.""" )
__a : Dict = value
logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.""" )
__a : Union[str, Any] = value
logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
else:
unused_weights.append(lowercase )
@torch.no_grad()
def _snake_case ( lowercase , lowercase , lowercase=None , lowercase=None , lowercase=True ) -> Optional[Any]:
if config_path is not None:
__a : Any = WavaVecaConformerConfig.from_pretrained(lowercase , hidden_act="""swish""" )
else:
__a : Optional[int] = WavaVecaConformerConfig()
if "rope" in checkpoint_path:
__a : Optional[Any] = """rotary"""
if is_finetuned:
if dict_path:
__a : List[Any] = Dictionary.load(lowercase )
# important change bos & pad token id since CTC symbol is <pad> and
# not <s> as in fairseq
__a : int = target_dict.pad_index
__a : List[str] = target_dict.bos_index
__a : str = target_dict.eos_index
__a : Dict = len(target_dict.symbols )
__a : Any = os.path.join(lowercase , """vocab.json""" )
if not os.path.isdir(lowercase ):
logger.error("""--pytorch_dump_folder_path ({}) should be a directory""".format(lowercase ) )
return
os.makedirs(lowercase , exist_ok=lowercase )
__a : Dict = target_dict.indices
# fairseq has the <pad> and <s> switched
__a : Optional[Any] = 0
__a : List[Any] = 1
with open(lowercase , """w""" , encoding="""utf-8""" ) as vocab_handle:
json.dump(lowercase , lowercase )
__a : int = WavaVecaCTCTokenizer(
lowercase , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token="""|""" , do_lower_case=lowercase , )
__a : Optional[int] = True if config.feat_extract_norm == """layer""" else False
__a : Dict = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=1_6_0_0_0 , padding_value=0 , do_normalize=lowercase , return_attention_mask=lowercase , )
__a : str = WavaVecaProcessor(feature_extractor=lowercase , tokenizer=lowercase )
processor.save_pretrained(lowercase )
__a : List[str] = WavaVecaConformerForCTC(lowercase )
else:
__a : Optional[int] = WavaVecaConformerForPreTraining(lowercase )
if is_finetuned:
__a , __a , __a : Dict = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={"""data""": """/""".join(dict_path.split("""/""" )[:-1] )} )
else:
__a : Optional[int] = argparse.Namespace(task="""audio_pretraining""" )
__a : Tuple = fairseq.tasks.setup_task(lowercase )
__a , __a , __a : int = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] , task=lowercase )
__a : Any = model[0].eval()
recursively_load_weights(lowercase , lowercase , not is_finetuned )
hf_wavavec.save_pretrained(lowercase )
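# Example shell invocation of this script (file and path names are hypothetical):
#
#   python convert_wav2vec2_conformer_checkpoint.py \
#       --checkpoint_path /path/to/fairseq_checkpoint.pt \
#       --pytorch_dump_folder_path ./converted-model \
#       --not_finetuned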
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE : Dict = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--not_finetuned', action='store_true', help='Whether the model to convert is a fine-tuned model or not'
)
__SCREAMING_SNAKE_CASE : int = parser.parse_args()
convert_wavaveca_conformer_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
) | 697 | 1 |
'''simple docstring'''
from typing import List, Optional, Union
import numpy as np
from ....audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function
from ....feature_extraction_sequence_utils import SequenceFeatureExtractor
from ....feature_extraction_utils import BatchFeature
from ....file_utils import PaddingStrategy, TensorType
from ....utils import logging
__SCREAMING_SNAKE_CASE : Tuple = logging.get_logger(__name__)
class SCREAMING_SNAKE_CASE__ ( __UpperCamelCase ):
lowercase__ = ["input_features", "attention_mask"]
def __init__( self , __UpperCamelCase=80 , __UpperCamelCase=1_6000 , __UpperCamelCase=0.0 , __UpperCamelCase=10 , __UpperCamelCase=25 , __UpperCamelCase="hamming_window" , __UpperCamelCase=3_2_7_6_8.0 , __UpperCamelCase=0.9_7 , __UpperCamelCase=1.0 , __UpperCamelCase=True , __UpperCamelCase=True , __UpperCamelCase=False , **__UpperCamelCase , ):
'''simple docstring'''
super().__init__(feature_size=__UpperCamelCase , sampling_rate=__UpperCamelCase , padding_value=__UpperCamelCase , **__UpperCamelCase )
__a : List[str] = feature_size
__a : List[str] = sampling_rate
__a : int = padding_value
__a : Any = hop_length
__a : int = win_length
__a : Tuple = frame_signal_scale
__a : Union[str, Any] = preemphasis_coeff
__a : List[str] = mel_floor
__a : Union[str, Any] = normalize_means
__a : Optional[Any] = normalize_vars
__a : Optional[Any] = win_function
__a : Union[str, Any] = return_attention_mask
__a : List[Any] = win_length * sampling_rate // 1000
__a : List[Any] = hop_length * sampling_rate // 1000
__a : Optional[Any] = optimal_fft_length(self.sample_size )
__a : Any = (self.n_fft // 2) + 1
def __lowerCamelCase ( self , __UpperCamelCase ):
'''simple docstring'''
if self.win_function == "hamming_window":
__a : str = window_function(window_length=self.sample_size , name=self.win_function , periodic=__UpperCamelCase )
else:
__a : Dict = window_function(window_length=self.sample_size , name=self.win_function )
__a : Optional[Any] = mel_filter_bank(
num_frequency_bins=self.n_freqs , num_mel_filters=self.feature_size , min_frequency=0.0 , max_frequency=self.sampling_rate / 2.0 , sampling_rate=self.sampling_rate , )
__a : Any = spectrogram(
one_waveform * self.frame_signal_scale , window=__UpperCamelCase , frame_length=self.sample_size , hop_length=self.sample_stride , fft_length=self.n_fft , center=__UpperCamelCase , preemphasis=self.preemphasis_coeff , mel_filters=__UpperCamelCase , mel_floor=self.mel_floor , log_mel="""log""" , )
return msfc_features.T
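    # The method above is a classic MFSC front end: Hamming-window framing, pre-emphasis, an
    # FFT power spectrogram, and a log-scale mel filter bank. With the defaults (25 ms windows,
    # 10 ms hops at 16 kHz) a one-second clip yields roughly 100 frames of `feature_size`
    # log-mel coefficients each.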
def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ):
'''simple docstring'''
if self.normalize_means:
__a : int = x[:input_length].mean(axis=0 )
__a : str = np.subtract(__UpperCamelCase , __UpperCamelCase )
if self.normalize_vars:
__a : Dict = x[:input_length].std(axis=0 )
__a : Dict = np.divide(__UpperCamelCase , __UpperCamelCase )
if input_length < x.shape[0]:
__a : Union[str, Any] = padding_value
# make sure array is in float32
__a : Any = x.astype(np.floataa )
return x
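    # This is standard per-utterance cepstral mean (and optionally variance) normalization.
    # Equivalent NumPy one-liner for an unpadded 2-D feature matrix `feats` (illustrative):
    #
    #   feats = (feats - feats.mean(axis=0)) / feats.std(axis=0)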
def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase = None ):
'''simple docstring'''
__a : Tuple = attention_mask.sum(-1 ) if attention_mask is not None else [x.shape[0] for x in input_features]
return [self._normalize_one(__UpperCamelCase , __UpperCamelCase , self.padding_value ) for x, n in zip(__UpperCamelCase , __UpperCamelCase )]
def __call__( self , __UpperCamelCase , __UpperCamelCase = False , __UpperCamelCase = None , __UpperCamelCase = False , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , **__UpperCamelCase , ):
'''simple docstring'''
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
f"""The model corresponding to this feature extractor: {self} was trained using a sampling rate of"""
f""" {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with"""
f""" {self.sampling_rate} and not {sampling_rate}.""" )
else:
logger.warning(
"""It is strongly recommended to pass the ``sampling_rate`` argument to this function. """
"""Failing to do so can result in silent errors that might be hard to debug.""" )
__a : Tuple = isinstance(__UpperCamelCase , np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(f"""Only mono-channel audio is supported for input to {self}""" )
__a : Tuple = is_batched_numpy or (
isinstance(__UpperCamelCase , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
__a : Tuple = [np.asarray(__UpperCamelCase , dtype=np.floataa ) for speech in raw_speech]
elif not is_batched and not isinstance(__UpperCamelCase , np.ndarray ):
__a : List[str] = np.asarray(__UpperCamelCase , dtype=np.floataa )
elif isinstance(__UpperCamelCase , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
__a : str = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
__a : Any = [raw_speech]
# extract fbank features
__a : str = [self._extract_mfsc_features(__UpperCamelCase ) for one_waveform in raw_speech]
# convert into correct format for padding
__a : Optional[Any] = BatchFeature({"""input_features""": features} )
__a : Any = self.pad(
__UpperCamelCase , padding=__UpperCamelCase , max_length=__UpperCamelCase , truncation=__UpperCamelCase , pad_to_multiple_of=__UpperCamelCase , return_attention_mask=__UpperCamelCase , **__UpperCamelCase , )
# make sure list is in array format
__a : int = padded_inputs.get("""input_features""" )
if isinstance(input_features[0] , __UpperCamelCase ):
__a : Union[str, Any] = [np.asarray(__UpperCamelCase , dtype=np.floataa ) for feature in input_features]
__a : List[str] = padded_inputs.get("""attention_mask""" )
if attention_mask is not None:
__a : Optional[int] = [np.asarray(__UpperCamelCase , dtype=np.intaa ) for array in attention_mask]
if self.normalize_means or self.normalize_vars:
__a : Optional[Any] = (
np.array(__UpperCamelCase , dtype=np.intaa )
if self._get_padding_strategies(__UpperCamelCase , max_length=__UpperCamelCase ) is not PaddingStrategy.DO_NOT_PAD
and padding
else None
)
__a : int = self.normalize(
padded_inputs["""input_features"""] , attention_mask=__UpperCamelCase )
if return_tensors is not None:
__a : List[Any] = padded_inputs.convert_to_tensors(__UpperCamelCase )
return padded_inputs | 697 |
'''simple docstring'''
import warnings
from functools import wraps
from typing import Callable
def _snake_case ( lowercase ) -> Callable:
@wraps(lowercase )
def _inner_fn(*lowercase , **lowercase ):
warnings.warn(
(F"""'{fn.__name__}' is experimental and might be subject to breaking changes in the future.""") , lowercase , )
return fn(*lowercase , **lowercase )
return _inner_fn | 697 | 1 |
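# A minimal usage sketch of the decorator defined above (the decorated function is made up):
#
#   @_snake_case
#   def beta_feature():
#       return 42
#
#   beta_feature()  # returns 42 and emits a UserWarning flagging the API as experimental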
'''simple docstring'''
from __future__ import annotations
import unittest
import numpy as np
from transformers import OPTConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import GPTaTokenizer, TFOPTForCausalLM, TFOPTModel
def _snake_case ( lowercase , lowercase , lowercase=None , lowercase=None ) -> List[str]:
if attention_mask is None:
__a : List[str] = tf.cast(tf.math.not_equal(lowercase , config.pad_token_id ) , tf.inta )
return {"input_ids": input_ids, "attention_mask": attention_mask}
@require_tf
class SCREAMING_SNAKE_CASE__ :
lowercase__ = OPTConfig
lowercase__ = {}
lowercase__ = "gelu"
def __init__( self , __UpperCamelCase , __UpperCamelCase=13 , __UpperCamelCase=7 , __UpperCamelCase=True , __UpperCamelCase=False , __UpperCamelCase=99 , __UpperCamelCase=16 , __UpperCamelCase=2 , __UpperCamelCase=4 , __UpperCamelCase=4 , __UpperCamelCase="gelu" , __UpperCamelCase=0.1 , __UpperCamelCase=0.1 , __UpperCamelCase=20 , __UpperCamelCase=2 , __UpperCamelCase=1 , __UpperCamelCase=0 , __UpperCamelCase=16 , __UpperCamelCase=16 , ):
'''simple docstring'''
__a : List[str] = parent
__a : int = batch_size
__a : Optional[int] = seq_length
__a : List[Any] = is_training
__a : List[str] = use_labels
__a : Any = vocab_size
__a : Union[str, Any] = hidden_size
__a : Union[str, Any] = num_hidden_layers
__a : int = num_attention_heads
__a : Optional[Any] = intermediate_size
__a : Tuple = hidden_act
__a : List[str] = hidden_dropout_prob
__a : Optional[int] = attention_probs_dropout_prob
__a : Union[str, Any] = max_position_embeddings
__a : str = eos_token_id
__a : List[Any] = pad_token_id
__a : List[Any] = bos_token_id
__a : str = embed_dim
__a : Dict = word_embed_proj_dim
__a : List[Any] = False
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Tuple = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
__a : Optional[Any] = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
__a : Optional[int] = tf.concat([input_ids, eos_tensor] , axis=1 )
__a : Optional[int] = self.config_cls(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , embed_dim=self.embed_dim , word_embed_proj_dim=self.word_embed_proj_dim , is_encoder_decoder=__UpperCamelCase , **self.config_updates , )
__a : Optional[int] = prepare_opt_inputs_dict(__UpperCamelCase , __UpperCamelCase )
return config, inputs_dict
def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase ):
'''simple docstring'''
__a : Any = TFOPTModel(config=__UpperCamelCase )
__a : Tuple = inputs_dict["""input_ids"""]
__a : str = input_ids[:1, :]
__a : List[Any] = inputs_dict["""attention_mask"""][:1, :]
__a : Tuple = 1
# first forward pass
__a : Dict = model(__UpperCamelCase , attention_mask=__UpperCamelCase , use_cache=__UpperCamelCase )
__a , __a : List[str] = outputs.to_tuple()
        # create hypothetical next token and extend to next_input_ids
__a : Optional[int] = ids_tensor((self.batch_size, 3) , config.vocab_size )
__a : List[Any] = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta )
        # append to next input_ids and attention_mask
__a : List[str] = tf.concat([input_ids, next_tokens] , axis=-1 )
__a : Optional[int] = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
__a : str = model(__UpperCamelCase , attention_mask=__UpperCamelCase )[0]
__a : List[Any] = model(__UpperCamelCase , attention_mask=__UpperCamelCase , past_key_values=__UpperCamelCase )[0]
self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
# select random slice
__a : Optional[Any] = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
__a : Any = output_from_no_past[:, -3:, random_slice_idx]
__a : Optional[Any] = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(__UpperCamelCase , __UpperCamelCase , rtol=1E-3 )
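        # The assertion above pins down the KV-cache contract: decoding the extended sequence in
        # one shot and decoding it incrementally via `past_key_values` must agree, up to float
        # tolerance, on the logits of the newly appended positions.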
@require_tf
class SCREAMING_SNAKE_CASE__ ( __UpperCamelCase , __UpperCamelCase , unittest.TestCase ):
lowercase__ = (TFOPTModel, TFOPTForCausalLM) if is_tf_available() else ()
lowercase__ = (TFOPTForCausalLM,) if is_tf_available() else ()
lowercase__ = (
{"feature-extraction": TFOPTModel, "text-generation": TFOPTForCausalLM} if is_tf_available() else {}
)
lowercase__ = False
lowercase__ = False
lowercase__ = False
lowercase__ = 10
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Dict = TFOPTModelTester(self )
__a : List[str] = ConfigTester(self , config_class=__UpperCamelCase )
def __lowerCamelCase ( self ):
'''simple docstring'''
self.config_tester.run_common_tests()
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : int = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*__UpperCamelCase )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a , __a : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
def _get_word_embedding_weight(__UpperCamelCase , __UpperCamelCase ):
if hasattr(__UpperCamelCase , """weight""" ):
return embedding_layer.weight
else:
                # Here we build the word embedding weights if they don't exist yet,
                # then retry fetching the attribute once built.
model.build()
if hasattr(__UpperCamelCase , """weight""" ):
return embedding_layer.weight
else:
return None
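        # Keras builds weights lazily, so the helper above may need one `model.build()` call
        # before the embedding variable exists and can be fetched.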
for model_class in self.all_model_classes:
for size in [config.vocab_size - 10, config.vocab_size + 10]:
# build the embeddings
__a : Any = model_class(config=__UpperCamelCase )
__a : Dict = _get_word_embedding_weight(__UpperCamelCase , model.get_input_embeddings() )
__a : Optional[int] = _get_word_embedding_weight(__UpperCamelCase , model.get_output_embeddings() )
# reshape the embeddings
model.resize_token_embeddings(__UpperCamelCase )
__a : Union[str, Any] = _get_word_embedding_weight(__UpperCamelCase , model.get_input_embeddings() )
__a : Tuple = _get_word_embedding_weight(__UpperCamelCase , model.get_output_embeddings() )
# check that the resized embeddings size matches the desired size.
__a : str = size if size is not None else config.vocab_size
self.assertEqual(new_input_embeddings.shape[0] , __UpperCamelCase )
# check that weights remain the same after resizing
__a : Any = True
for pa, pa in zip(old_input_embeddings.value() , new_input_embeddings.value() ):
if tf.math.reduce_sum(tf.math.abs(pa - pa ) ) > 0:
__a : List[str] = False
self.assertTrue(__UpperCamelCase )
if old_output_embeddings is not None and new_output_embeddings is not None:
self.assertEqual(new_output_embeddings.shape[0] , __UpperCamelCase )
__a : Dict = True
for pa, pa in zip(old_output_embeddings.value() , new_output_embeddings.value() ):
if tf.math.reduce_sum(tf.math.abs(pa - pa ) ) > 0:
__a : Any = False
self.assertTrue(__UpperCamelCase )
def _snake_case ( lowercase ) -> Optional[int]:
return tf.constant(lowercase , dtype=tf.intaa )
@require_tf
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
lowercase__ = 99
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : List[str] = tf.ones((4, 1) , dtype=tf.intaa ) * 2
__a : Tuple = tf.concat([ids_tensor((4, 6) , self.vocab_size - 3 ) + 3, eos_column_vector] , axis=1 )
__a : str = input_ids.shape[0]
__a : Optional[Any] = OPTConfig(
vocab_size=self.vocab_size , hidden_size=24 , num_hidden_layers=2 , num_attention_heads=2 , ffn_dim=32 , max_position_embeddings=48 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , )
return config, input_ids, batch_size
@require_sentencepiece
@require_tf
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
@slow
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Any = TFOPTModel.from_pretrained("""facebook/opt-350m""" )
__a : Optional[int] = _long_tensor([[0, 3_1414, 232, 328, 740, 1140, 1_2695, 69, 4_6078, 1588, 2]] )
__a : Union[str, Any] = tf.not_equal(__UpperCamelCase , model.config.pad_token_id )
with tf.GradientTape():
__a : str = model(input_ids=__UpperCamelCase , attention_mask=__UpperCamelCase ).last_hidden_state
__a : List[str] = (1, 11, 512)
self.assertEqual(output.shape , __UpperCamelCase )
__a : Tuple = tf.constant(
[[-0.2_8_7_3, -1.9_2_1_8, -0.3_0_3_3], [-1.2_7_1_0, -0.1_3_3_8, -0.1_9_0_2], [0.4_0_9_5, 0.1_2_1_4, -1.3_1_2_1]] )
self.assertTrue(np.allclose(output[:, :3, :3] , __UpperCamelCase , atol=4E-3 ) )
__a : Any = tf.function(__UpperCamelCase , jit_compile=__UpperCamelCase )
__a : Tuple = xla_generate(__UpperCamelCase , __UpperCamelCase )[0]
self.assertTrue(np.allclose(output[:, :3, :3] , __UpperCamelCase , atol=4E-2 ) )
@require_tf
@slow
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
def __lowerCamelCase ( self ):
'''simple docstring'''
super().setUp()
__a : str = """facebook/opt-350m"""
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : List[Any] = TFOPTForCausalLM.from_pretrained(self.path_model )
__a : Union[str, Any] = GPTaTokenizer.from_pretrained(self.path_model )
__a : Dict = [
"""Today is a beautiful day and I want to""",
"""In the city of""",
"""Paris is the capital of France and""",
"""Computers and mobile phones have taken""",
]
# verify that prompt without BOS token is identical to Metaseq -> add_special_tokens=False
__a : Tuple = tokenizer(__UpperCamelCase , return_tensors="""tf""" , padding=__UpperCamelCase , add_special_tokens=__UpperCamelCase )
__a : str = tf.math.reduce_mean(model(inputs.input_ids , attention_mask=inputs.attention_mask )[0] , axis=-1 )
__a : Optional[int] = tf.constant(
[
[1.3_8_5_1, -1_3.8_9_2_3, -1_0.5_2_2_9, -1_0.7_5_3_3, -0.2_3_0_9, -1_0.2_3_8_4, -0.5_3_6_5, -9.0_9_4_7, -5.1_6_7_0],
[-4.7_0_7_3, -1_0.6_2_7_6, -3.9_4_1_5, -2_1.5_2_4_2, -0.2_8_2_2, -0.2_8_2_2, -0.2_8_2_2, -0.2_8_2_2, -0.2_8_2_2],
[0.6_2_4_7, -3.4_2_2_9, -8.9_1_7_9, -1.4_2_9_7, -1_4.1_6_5_0, 1.4_1_4_6, -9.0_2_1_8, -0.2_7_0_3, -0.2_7_0_3],
[6.4_7_8_3, -1.9_9_1_3, -1_0.7_9_2_6, -2.3_3_3_6, 1.5_0_9_2, -0.9_9_7_4, -6.8_2_1_3, 1.3_4_7_7, 1.3_4_7_7],
] )
self.assertTrue(np.allclose(__UpperCamelCase , __UpperCamelCase , atol=1E-4 ) )
__a : List[str] = tf.function(__UpperCamelCase , jit_compile=__UpperCamelCase )
__a : str = tf.math.reduce_mean(xla_generate(inputs.input_ids , attention_mask=inputs.attention_mask )[0] , axis=-1 )
self.assertTrue(np.allclose(__UpperCamelCase , __UpperCamelCase , atol=1E-4 ) )
@require_tf
@slow
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
@property
def __lowerCamelCase ( self ):
'''simple docstring'''
return [
"Today is a beautiful day and I want",
"In the city of",
"Paris is the capital of France and",
"Computers and mobile phones have taken",
]
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : List[Any] = """facebook/opt-125m"""
__a : Tuple = [
"""Today is a beautiful day and I want to""",
"""In the city of New York, the city""",
"""Paris is the capital of France and the capital""",
"""Computers and mobile phones have taken over the""",
]
__a : Tuple = []
__a : int = GPTaTokenizer.from_pretrained(__UpperCamelCase )
__a : Optional[int] = TFOPTForCausalLM.from_pretrained(__UpperCamelCase )
for prompt in self.prompts:
__a : List[Any] = tokenizer(__UpperCamelCase , return_tensors="""tf""" ).input_ids
__a : Optional[Any] = model.generate(__UpperCamelCase , max_length=10 )
__a : Dict = tokenizer.batch_decode(__UpperCamelCase , skip_special_tokens=__UpperCamelCase )
predicted_outputs += generated_string
self.assertListEqual(__UpperCamelCase , __UpperCamelCase )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : List[str] = """facebook/opt-350m"""
__a : str = GPTaTokenizer.from_pretrained(__UpperCamelCase )
__a : Any = TFOPTForCausalLM.from_pretrained(__UpperCamelCase )
__a : Tuple = """left"""
# use different length sentences to test batching
__a : Optional[Any] = [
"""Hello, my dog is a little""",
"""Today, I""",
]
__a : Dict = tokenizer(__UpperCamelCase , return_tensors="""tf""" , padding=__UpperCamelCase )
__a : List[Any] = inputs["""input_ids"""]
__a : Optional[int] = model.generate(input_ids=__UpperCamelCase , attention_mask=inputs["""attention_mask"""] )
__a : List[Any] = tokenizer(sentences[0] , return_tensors="""tf""" ).input_ids
__a : List[Any] = model.generate(input_ids=__UpperCamelCase )
__a : Optional[Any] = inputs_non_padded.shape[-1] - tf.math.reduce_sum(
tf.cast(inputs["""attention_mask"""][-1] , tf.intaa ) )
__a : Union[str, Any] = tokenizer(sentences[1] , return_tensors="""tf""" ).input_ids
__a : Tuple = model.generate(input_ids=__UpperCamelCase , max_length=model.config.max_length - num_paddings )
__a : Any = tokenizer.batch_decode(__UpperCamelCase , skip_special_tokens=__UpperCamelCase )
__a : Tuple = tokenizer.decode(output_non_padded[0] , skip_special_tokens=__UpperCamelCase )
__a : Any = tokenizer.decode(output_padded[0] , skip_special_tokens=__UpperCamelCase )
__a : int = [
"""Hello, my dog is a little bit of a dork.\nI'm a little bit""",
"""Today, I was in the middle of a conversation with a friend about the""",
]
self.assertListEqual(__UpperCamelCase , __UpperCamelCase )
self.assertListEqual(__UpperCamelCase , [non_padded_sentence, padded_sentence] )
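        # Left padding is essential for batched generation with decoder-only models: with right
        # padding, generation would start from pad positions and batched vs. unbatched outputs
        # would diverge, which is exactly what the equality checks above rule out.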
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Any = """facebook/opt-350m"""
__a : str = [
"""Today is a beautiful day and I want to""",
"""In the city of San Francisco, the city""",
"""Paris is the capital of France and the capital""",
"""Computers and mobile phones have taken over the""",
]
__a : int = []
__a : int = GPTaTokenizer.from_pretrained(__UpperCamelCase )
__a : Tuple = TFOPTForCausalLM.from_pretrained(__UpperCamelCase )
for prompt in self.prompts:
__a : List[Any] = tokenizer(__UpperCamelCase , return_tensors="""tf""" ).input_ids
__a : str = model.generate(__UpperCamelCase , max_length=10 )
__a : Tuple = tokenizer.batch_decode(__UpperCamelCase , skip_special_tokens=__UpperCamelCase )
predicted_outputs += generated_string
self.assertListEqual(__UpperCamelCase , __UpperCamelCase ) | 697 |
'''simple docstring'''
from typing import List, Optional, Union
import numpy as np
from ....audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function
from ....feature_extraction_sequence_utils import SequenceFeatureExtractor
from ....feature_extraction_utils import BatchFeature
from ....file_utils import PaddingStrategy, TensorType
from ....utils import logging
__SCREAMING_SNAKE_CASE : Tuple = logging.get_logger(__name__)
class SCREAMING_SNAKE_CASE__ ( __UpperCamelCase ):
lowercase__ = ["input_features", "attention_mask"]
def __init__( self , __UpperCamelCase=80 , __UpperCamelCase=1_6000 , __UpperCamelCase=0.0 , __UpperCamelCase=10 , __UpperCamelCase=25 , __UpperCamelCase="hamming_window" , __UpperCamelCase=3_2_7_6_8.0 , __UpperCamelCase=0.9_7 , __UpperCamelCase=1.0 , __UpperCamelCase=True , __UpperCamelCase=True , __UpperCamelCase=False , **__UpperCamelCase , ):
'''simple docstring'''
super().__init__(feature_size=__UpperCamelCase , sampling_rate=__UpperCamelCase , padding_value=__UpperCamelCase , **__UpperCamelCase )
__a : List[str] = feature_size
__a : List[str] = sampling_rate
__a : int = padding_value
__a : Any = hop_length
__a : int = win_length
__a : Tuple = frame_signal_scale
__a : Union[str, Any] = preemphasis_coeff
__a : List[str] = mel_floor
__a : Union[str, Any] = normalize_means
__a : Optional[Any] = normalize_vars
__a : Optional[Any] = win_function
__a : Union[str, Any] = return_attention_mask
__a : List[Any] = win_length * sampling_rate // 1000
__a : List[Any] = hop_length * sampling_rate // 1000
__a : Optional[Any] = optimal_fft_length(self.sample_size )
__a : Any = (self.n_fft // 2) + 1
def __lowerCamelCase ( self , __UpperCamelCase ):
'''simple docstring'''
if self.win_function == "hamming_window":
__a : str = window_function(window_length=self.sample_size , name=self.win_function , periodic=__UpperCamelCase )
else:
__a : Dict = window_function(window_length=self.sample_size , name=self.win_function )
__a : Optional[Any] = mel_filter_bank(
num_frequency_bins=self.n_freqs , num_mel_filters=self.feature_size , min_frequency=0.0 , max_frequency=self.sampling_rate / 2.0 , sampling_rate=self.sampling_rate , )
__a : Any = spectrogram(
one_waveform * self.frame_signal_scale , window=__UpperCamelCase , frame_length=self.sample_size , hop_length=self.sample_stride , fft_length=self.n_fft , center=__UpperCamelCase , preemphasis=self.preemphasis_coeff , mel_filters=__UpperCamelCase , mel_floor=self.mel_floor , log_mel="""log""" , )
return msfc_features.T
def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ):
'''simple docstring'''
if self.normalize_means:
__a : int = x[:input_length].mean(axis=0 )
__a : str = np.subtract(__UpperCamelCase , __UpperCamelCase )
if self.normalize_vars:
__a : Dict = x[:input_length].std(axis=0 )
__a : Dict = np.divide(__UpperCamelCase , __UpperCamelCase )
if input_length < x.shape[0]:
__a : Union[str, Any] = padding_value
# make sure array is in float32
__a : Any = x.astype(np.floataa )
return x
def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase = None ):
'''simple docstring'''
__a : Tuple = attention_mask.sum(-1 ) if attention_mask is not None else [x.shape[0] for x in input_features]
return [self._normalize_one(__UpperCamelCase , __UpperCamelCase , self.padding_value ) for x, n in zip(__UpperCamelCase , __UpperCamelCase )]
def __call__( self , __UpperCamelCase , __UpperCamelCase = False , __UpperCamelCase = None , __UpperCamelCase = False , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , **__UpperCamelCase , ):
'''simple docstring'''
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
f"""The model corresponding to this feature extractor: {self} was trained using a sampling rate of"""
f""" {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with"""
f""" {self.sampling_rate} and not {sampling_rate}.""" )
else:
logger.warning(
"""It is strongly recommended to pass the ``sampling_rate`` argument to this function. """
"""Failing to do so can result in silent errors that might be hard to debug.""" )
__a : Tuple = isinstance(__UpperCamelCase , np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(f"""Only mono-channel audio is supported for input to {self}""" )
__a : Tuple = is_batched_numpy or (
isinstance(__UpperCamelCase , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
__a : Tuple = [np.asarray(__UpperCamelCase , dtype=np.floataa ) for speech in raw_speech]
elif not is_batched and not isinstance(__UpperCamelCase , np.ndarray ):
__a : List[str] = np.asarray(__UpperCamelCase , dtype=np.floataa )
elif isinstance(__UpperCamelCase , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
__a : str = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
__a : Any = [raw_speech]
# extract fbank features
__a : str = [self._extract_mfsc_features(__UpperCamelCase ) for one_waveform in raw_speech]
# convert into correct format for padding
__a : Optional[Any] = BatchFeature({"""input_features""": features} )
__a : Any = self.pad(
__UpperCamelCase , padding=__UpperCamelCase , max_length=__UpperCamelCase , truncation=__UpperCamelCase , pad_to_multiple_of=__UpperCamelCase , return_attention_mask=__UpperCamelCase , **__UpperCamelCase , )
# make sure list is in array format
__a : int = padded_inputs.get("""input_features""" )
if isinstance(input_features[0] , __UpperCamelCase ):
__a : Union[str, Any] = [np.asarray(__UpperCamelCase , dtype=np.floataa ) for feature in input_features]
__a : List[str] = padded_inputs.get("""attention_mask""" )
if attention_mask is not None:
__a : Optional[int] = [np.asarray(__UpperCamelCase , dtype=np.intaa ) for array in attention_mask]
if self.normalize_means or self.normalize_vars:
__a : Optional[Any] = (
np.array(__UpperCamelCase , dtype=np.intaa )
if self._get_padding_strategies(__UpperCamelCase , max_length=__UpperCamelCase ) is not PaddingStrategy.DO_NOT_PAD
and padding
else None
)
__a : int = self.normalize(
padded_inputs["""input_features"""] , attention_mask=__UpperCamelCase )
if return_tensors is not None:
__a : List[Any] = padded_inputs.convert_to_tensors(__UpperCamelCase )
return padded_inputs | 697 | 1 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_segformer import SegformerImageProcessor
__SCREAMING_SNAKE_CASE : Optional[Any] = logging.get_logger(__name__)
class SCREAMING_SNAKE_CASE__ ( __UpperCamelCase ):
def __init__( self , *__UpperCamelCase , **__UpperCamelCase ):
'''simple docstring'''
warnings.warn(
"""The class SegformerFeatureExtractor is deprecated and will be removed in version 5 of Transformers."""
""" Please use SegformerImageProcessor instead.""" , __UpperCamelCase , )
super().__init__(*__UpperCamelCase , **__UpperCamelCase ) | 697 |
'''simple docstring'''
__SCREAMING_SNAKE_CASE : int = 9.80_665
def _snake_case ( lowercase , lowercase , lowercase = g ) -> float:
if fluid_density <= 0:
raise ValueError("""Impossible fluid density""" )
if volume < 0:
raise ValueError("""Impossible Object volume""" )
if gravity <= 0:
raise ValueError("""Impossible Gravity""" )
return fluid_density * gravity * volume
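# Worked example (the water density is an illustrative assumption): a fully submerged 0.5 m^3
# object in water (~997 kg/m^3) experiences about 997 * 9.80665 * 0.5 ~= 4888.6 N of buoyant force.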
if __name__ == "__main__":
import doctest
# run doctest
doctest.testmod() | 697 | 1 |
'''simple docstring'''
from __future__ import annotations
import matplotlib.pyplot as plt # type: ignore
import numpy
# initial triangle of Koch snowflake
__SCREAMING_SNAKE_CASE : Tuple = numpy.array([0, 0])
__SCREAMING_SNAKE_CASE : Any = numpy.array([0.5, 0.8_660_254])
__SCREAMING_SNAKE_CASE : Any = numpy.array([1, 0])
__SCREAMING_SNAKE_CASE : Optional[int] = [VECTOR_1, VECTOR_2, VECTOR_3, VECTOR_1]
def _snake_case ( lowercase , lowercase ) -> list[numpy.ndarray]:
__a : List[str] = initial_vectors
for _ in range(lowercase ):
__a : Optional[int] = iteration_step(lowercase )
return vectors
def _snake_case ( lowercase ) -> list[numpy.ndarray]:
__a : Dict = []
for i, start_vector in enumerate(vectors[:-1] ):
__a : List[Any] = vectors[i + 1]
        new_vectors.append(start_vector )
__a : Optional[int] = end_vector - start_vector
new_vectors.append(start_vector + difference_vector / 3 )
new_vectors.append(
start_vector + difference_vector / 3 + rotate(difference_vector / 3 , 6_0 ) )
new_vectors.append(start_vector + difference_vector * 2 / 3 )
new_vectors.append(vectors[-1] )
return new_vectors
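# Each pass replaces every edge with four shorter ones (split into thirds, raise an outward
# 60-degree bump on the middle third), so n iterations turn the initial 3 edges into 3 * 4**n.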
def _snake_case ( lowercase , lowercase ) -> numpy.ndarray:
__a : Optional[Any] = numpy.radians(lowercase )
__a , __a : Tuple = numpy.cos(lowercase ), numpy.sin(lowercase )
__a : Dict = numpy.array(((c, -s), (s, c)) )
return numpy.dot(lowercase , lowercase )
def _snake_case ( lowercase ) -> None:
__a : List[Any] = plt.gca()
axes.set_aspect("""equal""" )
# matplotlib.pyplot.plot takes a list of all x-coordinates and a list of all
# y-coordinates as inputs, which are constructed from the vector-list using
# zip()
__a , __a : Any = zip(*lowercase )
plt.plot(lowercase , lowercase )
plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod()
__SCREAMING_SNAKE_CASE : Tuple = iterate(INITIAL_VECTORS, 5)
plot(processed_vectors) | 697 |
'''simple docstring'''
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DetrImageProcessor
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
def __init__( self , __UpperCamelCase , __UpperCamelCase=7 , __UpperCamelCase=3 , __UpperCamelCase=30 , __UpperCamelCase=400 , __UpperCamelCase=True , __UpperCamelCase=None , __UpperCamelCase=True , __UpperCamelCase=1 / 255 , __UpperCamelCase=True , __UpperCamelCase=[0.5, 0.5, 0.5] , __UpperCamelCase=[0.5, 0.5, 0.5] , __UpperCamelCase=True , ):
'''simple docstring'''
__a : List[Any] = size if size is not None else {"""shortest_edge""": 18, """longest_edge""": 1333}
__a : Dict = parent
__a : Union[str, Any] = batch_size
__a : Optional[int] = num_channels
__a : Dict = min_resolution
__a : List[Any] = max_resolution
__a : int = do_resize
__a : str = size
__a : Optional[Any] = do_rescale
__a : Optional[Any] = rescale_factor
__a : str = do_normalize
__a : Any = image_mean
__a : Optional[Any] = image_std
__a : Dict = do_pad
def __lowerCamelCase ( self ):
'''simple docstring'''
return {
"do_resize": self.do_resize,
"size": self.size,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_pad": self.do_pad,
}
def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase=False ):
'''simple docstring'''
if not batched:
__a : Union[str, Any] = image_inputs[0]
if isinstance(__UpperCamelCase , Image.Image ):
__a , __a : Tuple = image.size
else:
__a , __a : Tuple = image.shape[1], image.shape[2]
if w < h:
__a : Optional[int] = int(self.size["""shortest_edge"""] * h / w )
__a : Tuple = self.size["""shortest_edge"""]
elif w > h:
__a : Optional[Any] = self.size["""shortest_edge"""]
__a : Any = int(self.size["""shortest_edge"""] * w / h )
else:
__a : Any = self.size["""shortest_edge"""]
__a : Optional[int] = self.size["""shortest_edge"""]
else:
__a : Any = []
for image in image_inputs:
__a , __a : Any = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
__a : List[Any] = max(__UpperCamelCase , key=lambda __UpperCamelCase : item[0] )[0]
__a : Optional[Any] = max(__UpperCamelCase , key=lambda __UpperCamelCase : item[1] )[1]
return expected_height, expected_width
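# Hedged reference implementation of the single-image rule encoded above (the helper name is
# ours, not part of the library):
def _shortest_edge_resize(h, w, shortest_edge=18):
    # Scale the shorter side to `shortest_edge`; the longer side follows proportionally.
    if w < h:
        return int(shortest_edge * h / w), shortest_edge
    if w > h:
        return shortest_edge, int(shortest_edge * w / h)
    return shortest_edge, shortest_edge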
@require_torch
@require_vision
class SCREAMING_SNAKE_CASE__ ( __UpperCamelCase , unittest.TestCase ):
lowercase__ = DetrImageProcessor if is_vision_available() else None
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : str = DetrImageProcessingTester(self )
@property
def __lowerCamelCase ( self ):
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Optional[int] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__UpperCamelCase , """image_mean""" ) )
self.assertTrue(hasattr(__UpperCamelCase , """image_std""" ) )
self.assertTrue(hasattr(__UpperCamelCase , """do_normalize""" ) )
self.assertTrue(hasattr(__UpperCamelCase , """do_rescale""" ) )
self.assertTrue(hasattr(__UpperCamelCase , """rescale_factor""" ) )
self.assertTrue(hasattr(__UpperCamelCase , """do_resize""" ) )
self.assertTrue(hasattr(__UpperCamelCase , """size""" ) )
self.assertTrue(hasattr(__UpperCamelCase , """do_pad""" ) )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Optional[Any] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"""shortest_edge""": 18, """longest_edge""": 1333} )
self.assertEqual(image_processor.do_pad , __UpperCamelCase )
__a : List[Any] = self.image_processing_class.from_dict(
self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=__UpperCamelCase )
self.assertEqual(image_processor.size , {"""shortest_edge""": 42, """longest_edge""": 84} )
self.assertEqual(image_processor.do_pad , __UpperCamelCase )
def __lowerCamelCase ( self ):
'''simple docstring'''
pass
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__a : Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__UpperCamelCase )
for image in image_inputs:
self.assertIsInstance(__UpperCamelCase , Image.Image )
# Test not batched input
__a : Optional[Any] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
__a , __a : Any = self.image_processor_tester.get_expected_values(__UpperCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
__a , __a : Optional[int] = self.image_processor_tester.get_expected_values(__UpperCamelCase , batched=__UpperCamelCase )
__a : Any = image_processing(__UpperCamelCase , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Dict = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__a : List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__UpperCamelCase , numpify=__UpperCamelCase )
for image in image_inputs:
self.assertIsInstance(__UpperCamelCase , np.ndarray )
# Test not batched input
__a : Dict = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
__a , __a : Any = self.image_processor_tester.get_expected_values(__UpperCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
__a : List[str] = image_processing(__UpperCamelCase , return_tensors="""pt""" ).pixel_values
__a , __a : str = self.image_processor_tester.get_expected_values(__UpperCamelCase , batched=__UpperCamelCase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Any = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__a : Union[str, Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__UpperCamelCase , torchify=__UpperCamelCase )
for image in image_inputs:
self.assertIsInstance(__UpperCamelCase , torch.Tensor )
# Test not batched input
__a : Any = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
__a , __a : Any = self.image_processor_tester.get_expected_values(__UpperCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
__a : List[str] = image_processing(__UpperCamelCase , return_tensors="""pt""" ).pixel_values
__a , __a : Any = self.image_processor_tester.get_expected_values(__UpperCamelCase , batched=__UpperCamelCase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : List[Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
with open("""./tests/fixtures/tests_samples/COCO/coco_annotations.txt""" , """r""" ) as f:
__a : Dict = json.loads(f.read() )
__a : Optional[int] = {"""image_id""": 3_9769, """annotations""": target}
# encode them
__a : List[str] = DetrImageProcessor.from_pretrained("""facebook/detr-resnet-50""" )
__a : Tuple = image_processing(images=__UpperCamelCase , annotations=__UpperCamelCase , return_tensors="""pt""" )
# verify pixel values
__a : Union[str, Any] = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding["""pixel_values"""].shape , __UpperCamelCase )
__a : List[str] = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] )
self.assertTrue(torch.allclose(encoding["""pixel_values"""][0, 0, 0, :3] , __UpperCamelCase , atol=1E-4 ) )
# verify area
__a : List[Any] = torch.tensor([5_8_8_7.9_6_0_0, 1_1_2_5_0.2_0_6_1, 4_8_9_3_5_3.8_4_3_8, 8_3_7_1_2_2.7_5_0_0, 1_4_7_9_6_7.5_1_5_6, 1_6_5_7_3_2.3_4_3_8] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""area"""] , __UpperCamelCase ) )
# verify boxes
__a : Optional[int] = torch.Size([6, 4] )
self.assertEqual(encoding["""labels"""][0]["""boxes"""].shape , __UpperCamelCase )
__a : Any = torch.tensor([0.5_5_0_3, 0.2_7_6_5, 0.0_6_0_4, 0.2_2_1_5] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""boxes"""][0] , __UpperCamelCase , atol=1E-3 ) )
# verify image_id
__a : Union[str, Any] = torch.tensor([3_9769] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""image_id"""] , __UpperCamelCase ) )
# verify is_crowd
__a : List[Any] = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""iscrowd"""] , __UpperCamelCase ) )
# verify class_labels
__a : Any = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""class_labels"""] , __UpperCamelCase ) )
# verify orig_size
__a : Any = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""orig_size"""] , __UpperCamelCase ) )
# verify size
__a : str = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""size"""] , __UpperCamelCase ) )
@slow
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : List[Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
with open("""./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt""" , """r""" ) as f:
__a : Tuple = json.loads(f.read() )
__a : str = {"""file_name""": """000000039769.png""", """image_id""": 3_9769, """segments_info""": target}
__a : int = pathlib.Path("""./tests/fixtures/tests_samples/COCO/coco_panoptic""" )
# encode them
__a : List[str] = DetrImageProcessor.from_pretrained("""facebook/detr-resnet-50-panoptic""" )
__a : Tuple = image_processing(images=__UpperCamelCase , annotations=__UpperCamelCase , masks_path=__UpperCamelCase , return_tensors="""pt""" )
# verify pixel values
__a : List[str] = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding["""pixel_values"""].shape , __UpperCamelCase )
__a : Any = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] )
self.assertTrue(torch.allclose(encoding["""pixel_values"""][0, 0, 0, :3] , __UpperCamelCase , atol=1E-4 ) )
# verify area
__a : Optional[Any] = torch.tensor([1_4_7_9_7_9.6_8_7_5, 1_6_5_5_2_7.0_4_6_9, 4_8_4_6_3_8.5_9_3_8, 1_1_2_9_2.9_3_7_5, 5_8_7_9.6_5_6_2, 7_6_3_4.1_1_4_7] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""area"""] , __UpperCamelCase ) )
# verify boxes
__a : Optional[Any] = torch.Size([6, 4] )
self.assertEqual(encoding["""labels"""][0]["""boxes"""].shape , __UpperCamelCase )
__a : List[str] = torch.tensor([0.2_6_2_5, 0.5_4_3_7, 0.4_6_8_8, 0.8_6_2_5] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""boxes"""][0] , __UpperCamelCase , atol=1E-3 ) )
# verify image_id
__a : List[str] = torch.tensor([3_9769] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""image_id"""] , __UpperCamelCase ) )
# verify is_crowd
__a : Optional[int] = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""iscrowd"""] , __UpperCamelCase ) )
# verify class_labels
__a : Optional[int] = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""class_labels"""] , __UpperCamelCase ) )
# verify masks
__a : Union[str, Any] = 82_2873
self.assertEqual(encoding["""labels"""][0]["""masks"""].sum().item() , __UpperCamelCase )
# verify orig_size
__a : str = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""orig_size"""] , __UpperCamelCase ) )
# verify size
__a : List[Any] = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""size"""] , __UpperCamelCase ) ) | 697 | 1 |
'''simple docstring'''
import torch
import torch.nn as nn
from transformers.modeling_utils import ModuleUtilsMixin
from transformers.models.ta.modeling_ta import TaBlock, TaConfig, TaLayerNorm
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class SCREAMING_SNAKE_CASE__ ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ):
@register_to_config
def __init__( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = False , ):
'''simple docstring'''
super().__init__()
__a : Any = nn.Embedding(__UpperCamelCase , __UpperCamelCase )
__a : Tuple = nn.Embedding(__UpperCamelCase , __UpperCamelCase )
__a : str = False
__a : str = nn.Dropout(p=__UpperCamelCase )
__a : Dict = TaConfig(
vocab_size=__UpperCamelCase , d_model=__UpperCamelCase , num_heads=__UpperCamelCase , d_kv=__UpperCamelCase , d_ff=__UpperCamelCase , dropout_rate=__UpperCamelCase , feed_forward_proj=__UpperCamelCase , is_decoder=__UpperCamelCase , is_encoder_decoder=__UpperCamelCase , )
__a : Optional[Any] = nn.ModuleList()
for lyr_num in range(__UpperCamelCase ):
__a : Dict = TaBlock(__UpperCamelCase )
self.encoders.append(__UpperCamelCase )
__a : Optional[Any] = TaLayerNorm(__UpperCamelCase )
__a : Optional[Any] = nn.Dropout(p=__UpperCamelCase )
def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase ):
'''simple docstring'''
__a : int = self.token_embedder(__UpperCamelCase )
__a : List[Any] = encoder_input_tokens.shape[1]
__a : Optional[int] = torch.arange(__UpperCamelCase , device=encoder_input_tokens.device )
x += self.position_encoding(__UpperCamelCase )
__a : Union[str, Any] = self.dropout_pre(__UpperCamelCase )
        # invert the attention mask
__a : List[str] = encoder_input_tokens.size()
__a : List[str] = self.get_extended_attention_mask(__UpperCamelCase , __UpperCamelCase )
for lyr in self.encoders:
__a : List[Any] = lyr(__UpperCamelCase , __UpperCamelCase )[0]
__a : Optional[Any] = self.layer_norm(__UpperCamelCase )
return self.dropout_post(__UpperCamelCase ), encoder_inputs_mask | 697 |
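# Note on the encoder above: instead of T5's relative position biases it adds a learned absolute
# position-embedding table to the token embeddings before the first block, so inputs longer than
# the table's maximum length cannot be encoded.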
'''simple docstring'''
import argparse
import logging
import os
import time
import timeit
import datasets
import numpy as np
import pycuda.autoinit # noqa: F401
import pycuda.driver as cuda
import tensorrt as trt
import torch
from absl import logging as absl_logging
from accelerate import Accelerator
from datasets import load_dataset, load_metric
from torch.utils.data import DataLoader
from utils_qa import postprocess_qa_predictions
import transformers
from transformers import AutoTokenizer, EvalPrediction, default_data_collator, set_seed
from transformers.trainer_pt_utils import nested_concat, nested_truncate
__SCREAMING_SNAKE_CASE : Optional[int] = trt.Logger(trt.Logger.WARNING)
__SCREAMING_SNAKE_CASE : Tuple = absl_logging.get_absl_logger()
absl_logger.setLevel(logging.WARNING)
__SCREAMING_SNAKE_CASE : Any = logging.getLogger(__name__)
__SCREAMING_SNAKE_CASE : int = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--onnx_model_path',
default=None,
type=str,
required=True,
help='Path to ONNX model: ',
)
parser.add_argument(
'--output_dir',
default=None,
type=str,
required=True,
help='The output directory where the model checkpoints and predictions will be written.',
)
# Other parameters
parser.add_argument(
'--tokenizer_name',
default='',
type=str,
required=True,
help='Pretrained tokenizer name or path if not the same as model_name',
)
parser.add_argument(
'--version_2_with_negative',
action='store_true',
help='If true, the SQuAD examples contain some that do not have an answer.',
)
parser.add_argument(
'--null_score_diff_threshold',
type=float,
default=0.0,
help='If null_score - best_non_null is greater than the threshold predict null.',
)
parser.add_argument(
'--max_seq_length',
default=384,
type=int,
help=(
'The maximum total input sequence length after WordPiece tokenization. Sequences '
'longer than this will be truncated, and sequences shorter than this will be padded.'
),
)
parser.add_argument(
'--doc_stride',
default=128,
type=int,
help='When splitting up a long document into chunks, how much stride to take between chunks.',
)
parser.add_argument('--per_device_eval_batch_size', default=8, type=int, help='Batch size per GPU/CPU for evaluation.')
parser.add_argument(
'--n_best_size',
default=20,
type=int,
help='The total number of n-best predictions to generate in the nbest_predictions.json output file.',
)
parser.add_argument(
'--max_answer_length',
default=30,
type=int,
help=(
'The maximum length of an answer that can be generated. This is needed because the start '
'and end predictions are not conditioned on one another.'
),
)
parser.add_argument('--seed', type=int, default=42, help='random seed for initialization')
parser.add_argument(
'--dataset_name',
type=str,
default=None,
required=True,
help='The name of the dataset to use (via the datasets library).',
)
parser.add_argument(
'--dataset_config_name',
type=str,
default=None,
help='The configuration name of the dataset to use (via the datasets library).',
)
parser.add_argument(
    '--preprocessing_num_workers', type=int, default=4, help='The number of processes to use for the preprocessing.'
)
parser.add_argument('--overwrite_cache', action='store_true', help='Overwrite the cached training and evaluation sets')
parser.add_argument(
'--fp16',
action='store_true',
help='Whether to use 16-bit (mixed) precision instead of 32-bit',
)
parser.add_argument(
'--int8',
action='store_true',
help='Whether to use INT8',
)
__SCREAMING_SNAKE_CASE : Optional[int] = parser.parse_args()
if args.tokenizer_name:
__SCREAMING_SNAKE_CASE : str = AutoTokenizer.from_pretrained(args.tokenizer_name, use_fast=True)
else:
raise ValueError(
'You are instantiating a new tokenizer from scratch. This is not supported by this script.'
'You can do it from another script, save it, and load it from here, using --tokenizer_name.'
)
logger.info('Training/evaluation parameters %s', args)
__SCREAMING_SNAKE_CASE : List[Any] = args.per_device_eval_batch_size
__SCREAMING_SNAKE_CASE : int = (args.eval_batch_size, args.max_seq_length)
# TRT Engine properties
__SCREAMING_SNAKE_CASE : Optional[Any] = True
__SCREAMING_SNAKE_CASE : Tuple = 'temp_engine/bert-fp32.engine'
if args.fpaa:
__SCREAMING_SNAKE_CASE : Dict = 'temp_engine/bert-fp16.engine'
if args.inta:
__SCREAMING_SNAKE_CASE : Tuple = 'temp_engine/bert-int8.engine'
# import ONNX file
if not os.path.exists('temp_engine'):
os.makedirs('temp_engine')
__SCREAMING_SNAKE_CASE : Optional[Any] = 1 << (int)(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)
with trt.Builder(TRT_LOGGER) as builder, builder.create_network(EXPLICIT_BATCH) as network, trt.OnnxParser(
network, TRT_LOGGER
) as parser:
with open(args.onnx_model_path, 'rb') as model:
if not parser.parse(model.read()):
for error in range(parser.num_errors):
print(parser.get_error(error))
# Query input names and shapes from parsed TensorRT network
__SCREAMING_SNAKE_CASE : List[Any] = [network.get_input(i) for i in range(network.num_inputs)]
__SCREAMING_SNAKE_CASE : List[Any] = [_input.name for _input in network_inputs] # ex: ["actual_input1"]
with builder.create_builder_config() as config:
__SCREAMING_SNAKE_CASE : Tuple = 1 << 50
if STRICT_TYPES:
config.set_flag(trt.BuilderFlag.STRICT_TYPES)
if args.fpaa:
config.set_flag(trt.BuilderFlag.FPaa)
if args.inta:
config.set_flag(trt.BuilderFlag.INTa)
__SCREAMING_SNAKE_CASE : Dict = builder.create_optimization_profile()
config.add_optimization_profile(profile)
for i in range(len(input_names)):
profile.set_shape(input_names[i], INPUT_SHAPE, INPUT_SHAPE, INPUT_SHAPE)
__SCREAMING_SNAKE_CASE : Union[str, Any] = builder.build_engine(network, config)
# serialize_engine and store in file (can be directly loaded and deserialized):
with open(engine_name, 'wb') as f:
f.write(engine.serialize())
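# The serialized engine can later be reloaded without a rebuild (a sketch, not executed here):
#
#   with open(engine_name, 'rb') as f, trt.Runtime(TRT_LOGGER) as runtime:
#       engine = runtime.deserialize_cuda_engine(f.read())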
def _snake_case ( lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase ) -> List[Any]:
__a : Dict = np.asarray(inputs["""input_ids"""] , dtype=np.intaa )
__a : List[Any] = np.asarray(inputs["""attention_mask"""] , dtype=np.intaa )
__a : str = np.asarray(inputs["""token_type_ids"""] , dtype=np.intaa )
# Copy inputs
cuda.memcpy_htod_async(d_inputs[0] , input_ids.ravel() , lowercase )
cuda.memcpy_htod_async(d_inputs[1] , attention_mask.ravel() , lowercase )
cuda.memcpy_htod_async(d_inputs[2] , token_type_ids.ravel() , lowercase )
# start time
__a : Optional[Any] = time.time()
# Run inference
context.execute_async(
        bindings=[int(d_inp ) for d_inp in d_inputs] + [int(lowercase ), int(lowercase )] , stream_handle=stream.handle )
# Transfer predictions back from GPU
cuda.memcpy_dtoh_async(lowercase , lowercase , lowercase )
cuda.memcpy_dtoh_async(lowercase , lowercase , lowercase )
# Synchronize the stream and take time
stream.synchronize()
# end time
__a : str = time.time()
__a : Any = end_time - start_time
__a : Optional[int] = (h_outputa, h_outputa)
# print(outputs)
return outputs, infer_time
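# The function above follows the standard TensorRT async pattern: host-to-device copies for the
# inputs, `execute_async` on a CUDA stream, device-to-host copies for the outputs, and a single
# `stream.synchronize()` so that the measured span brackets exactly one inference.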
# Initialize the accelerator. We will let the accelerator handle device placement for us in this example.
__SCREAMING_SNAKE_CASE : Optional[Any] = Accelerator()
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
datefmt='%m/%d/%Y %H:%M:%S',
level=logging.INFO,
)
# Setup logging, we only want one process per machine to log things on the screen.
# accelerator.is_local_main_process is only True for one process per machine.
logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR)
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_info()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
# If passed along, set the training seed now.
if args.seed is not None:
set_seed(args.seed)
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
if args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
__SCREAMING_SNAKE_CASE : List[str] = load_dataset(args.dataset_name, args.dataset_config_name)
else:
raise ValueError('Evaluation requires a dataset name')
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Preprocessing the datasets.
# Preprocessing is slightly different for training and evaluation.
__SCREAMING_SNAKE_CASE : int = raw_datasets['validation'].column_names
__SCREAMING_SNAKE_CASE : Tuple = 'question' if 'question' in column_names else column_names[0]
__SCREAMING_SNAKE_CASE : List[Any] = 'context' if 'context' in column_names else column_names[1]
__SCREAMING_SNAKE_CASE : Tuple = 'answers' if 'answers' in column_names else column_names[2]
# Padding side determines if we do (question|context) or (context|question).
__SCREAMING_SNAKE_CASE : Tuple = tokenizer.padding_side == 'right'
if args.max_seq_length > tokenizer.model_max_length:
logger.warning(
f'''The max_seq_length passed ({args.max_seq_length}) is larger than the maximum length for the'''
f'''model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.'''
)
__SCREAMING_SNAKE_CASE : Dict = min(args.max_seq_length, tokenizer.model_max_length)
def _snake_case ( lowercase ) -> Tuple:
# Some of the questions have lots of whitespace on the left, which is not useful and will make the
    # truncation of the context fail (the tokenized question will take a lot of space). So we remove that
# left whitespace
__a : Optional[Any] = [q.lstrip() for q in examples[question_column_name]]
# Tokenize our examples with truncation and maybe padding, but keep the overflows using a stride. This results
    # in one example possibly giving several features when a context is long, each of those features having a
    # context that overlaps a bit with the context of the previous feature.
__a : Optional[int] = tokenizer(
examples[question_column_name if pad_on_right else context_column_name] , examples[context_column_name if pad_on_right else question_column_name] , truncation="""only_second""" if pad_on_right else """only_first""" , max_length=lowercase , stride=args.doc_stride , return_overflowing_tokens=lowercase , return_offsets_mapping=lowercase , padding="""max_length""" , )
# Since one example might give us several features if it has a long context, we need a map from a feature to
# its corresponding example. This key gives us just that.
__a : Optional[Any] = tokenized_examples.pop("""overflow_to_sample_mapping""" )
# For evaluation, we will need to convert our predictions to substrings of the context, so we keep the
# corresponding example_id and we will store the offset mappings.
__a : Optional[Any] = []
for i in range(len(tokenized_examples["""input_ids"""] ) ):
# Grab the sequence corresponding to that example (to know what is the context and what is the question).
__a : Dict = tokenized_examples.sequence_ids(lowercase )
__a : Optional[Any] = 1 if pad_on_right else 0
# One example can give several spans, this is the index of the example containing this span of text.
__a : Union[str, Any] = sample_mapping[i]
tokenized_examples["example_id"].append(examples["""id"""][sample_index] )
# Set to None the offset_mapping that are not part of the context so it's easy to determine if a token
# position is part of the context or not.
__a : int = [
(o if sequence_ids[k] == context_index else None)
for k, o in enumerate(tokenized_examples["""offset_mapping"""][i] )
]
return tokenized_examples
__SCREAMING_SNAKE_CASE : int = raw_datasets['validation']
# Validation Feature Creation
__SCREAMING_SNAKE_CASE : Union[str, Any] = eval_examples.map(
prepare_validation_features,
batched=True,
num_proc=args.preprocessing_num_workers,
remove_columns=column_names,
load_from_cache_file=not args.overwrite_cache,
desc='Running tokenizer on validation dataset',
)
__SCREAMING_SNAKE_CASE : List[Any] = default_data_collator
__SCREAMING_SNAKE_CASE : Union[str, Any] = eval_dataset.remove_columns(['example_id', 'offset_mapping'])
__SCREAMING_SNAKE_CASE : List[str] = DataLoader(
eval_dataset_for_model, collate_fn=data_collator, batch_size=args.per_device_eval_batch_size
)
def _snake_case ( lowercase , lowercase , lowercase , lowercase="eval" ) -> Any:
# Post-processing: we match the start logits and end logits to answers in the original context.
__a : List[str] = postprocess_qa_predictions(
examples=lowercase , features=lowercase , predictions=lowercase , version_2_with_negative=args.version_2_with_negative , n_best_size=args.n_best_size , max_answer_length=args.max_answer_length , null_score_diff_threshold=args.null_score_diff_threshold , output_dir=args.output_dir , prefix=lowercase , )
# Format the result to the format the metric expects.
if args.version_2_with_negative:
__a : List[str] = [
{"""id""": k, """prediction_text""": v, """no_answer_probability""": 0.0} for k, v in predictions.items()
]
else:
__a : List[str] = [{"""id""": k, """prediction_text""": v} for k, v in predictions.items()]
__a : Optional[Any] = [{"""id""": ex["""id"""], """answers""": ex[answer_column_name]} for ex in examples]
return EvalPrediction(predictions=lowercase , label_ids=lowercase )
__SCREAMING_SNAKE_CASE : List[Any] = load_metric('squad_v2' if args.version_2_with_negative else 'squad')
# Evaluation!
logger.info('Loading ONNX model %s for evaluation', args.onnx_model_path)
with open(engine_name, 'rb') as f, trt.Runtime(TRT_LOGGER) as runtime, runtime.deserialize_cuda_engine(
f.read()
) as engine, engine.create_execution_context() as context:
    # set up for TRT inference
for i in range(len(input_names)):
context.set_binding_shape(i, INPUT_SHAPE)
assert context.all_binding_shapes_specified
def _snake_case ( lowercase ) -> Optional[int]:
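        # Size in bytes of one binding: number of elements (volume of the shape) times the element size.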
return trt.volume(engine.get_binding_shape(lowercase ) ) * engine.get_binding_dtype(lowercase ).itemsize
# Allocate device memory for inputs and outputs.
__SCREAMING_SNAKE_CASE : List[str] = [cuda.mem_alloc(binding_nbytes(binding)) for binding in engine if engine.binding_is_input(binding)]
# Allocate output buffer
__SCREAMING_SNAKE_CASE : str = cuda.pagelocked_empty(tuple(context.get_binding_shape(3)), dtype=np.floataa)
__SCREAMING_SNAKE_CASE : Union[str, Any] = cuda.pagelocked_empty(tuple(context.get_binding_shape(4)), dtype=np.floataa)
__SCREAMING_SNAKE_CASE : str = cuda.mem_alloc(h_outputa.nbytes)
__SCREAMING_SNAKE_CASE : Tuple = cuda.mem_alloc(h_outputa.nbytes)
# Create a stream in which to copy inputs/outputs and run inference.
__SCREAMING_SNAKE_CASE : Tuple = cuda.Stream()
# Evaluation
logger.info('***** Running Evaluation *****')
logger.info(f''' Num examples = {len(eval_dataset)}''')
logger.info(f''' Batch size = {args.per_device_eval_batch_size}''')
__SCREAMING_SNAKE_CASE : Union[str, Any] = 0.0
__SCREAMING_SNAKE_CASE : str = 0
__SCREAMING_SNAKE_CASE : str = timeit.default_timer()
__SCREAMING_SNAKE_CASE : Dict = None
for step, batch in enumerate(eval_dataloader):
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : Union[str, Any] = model_infer(batch, context, d_inputs, h_outputa, h_outputa, d_outputa, d_outputa, stream)
total_time += infer_time
niter += 1
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : Optional[Any] = outputs
__SCREAMING_SNAKE_CASE : Union[str, Any] = torch.tensor(start_logits)
__SCREAMING_SNAKE_CASE : Tuple = torch.tensor(end_logits)
# necessary to pad predictions and labels for being gathered
__SCREAMING_SNAKE_CASE : Optional[int] = accelerator.pad_across_processes(start_logits, dim=1, pad_index=-100)
__SCREAMING_SNAKE_CASE : Dict = accelerator.pad_across_processes(end_logits, dim=1, pad_index=-100)
__SCREAMING_SNAKE_CASE : List[str] = (accelerator.gather(start_logits).cpu().numpy(), accelerator.gather(end_logits).cpu().numpy())
__SCREAMING_SNAKE_CASE : List[str] = logits if all_preds is None else nested_concat(all_preds, logits, padding_index=-100)
if all_preds is not None:
__SCREAMING_SNAKE_CASE : Tuple = nested_truncate(all_preds, len(eval_dataset))
__SCREAMING_SNAKE_CASE : str = timeit.default_timer() - start_time
logger.info(' Evaluation done in total %f secs (%f sec per example)', evalTime, evalTime / len(eval_dataset))
# Inference time from TRT
logger.info('Average Inference Time = {:.3f} ms'.format(total_time * 1_000 / niter))
logger.info('Total Inference Time = {:.3f} ms'.format(total_time * 1_000))
logger.info('Total Number of Inference = %d', niter)
__SCREAMING_SNAKE_CASE : Optional[int] = post_processing_function(eval_examples, eval_dataset, all_preds)
__SCREAMING_SNAKE_CASE : List[Any] = metric.compute(predictions=prediction.predictions, references=prediction.label_ids)
logger.info(f'''Evaluation metrics: {eval_metric}''') | 697 | 1 |
'''simple docstring'''
import math
import random
from typing import Any
from .hill_climbing import SearchProblem
def _snake_case ( lowercase , lowercase = True , lowercase = math.inf , lowercase = -math.inf , lowercase = math.inf , lowercase = -math.inf , lowercase = False , lowercase = 1_0_0 , lowercase = 0.0_1 , lowercase = 1 , ) -> Any:
__a : Union[str, Any] = False
__a : str = search_prob
__a : int = start_temperate
__a : List[Any] = []
__a : Union[str, Any] = 0
__a : int = None
while not search_end:
__a : List[Any] = current_state.score()
if best_state is None or current_score > best_state.score():
__a : Optional[int] = current_state
scores.append(SCREAMING_SNAKE_CASE_ )
iterations += 1
__a : Any = None
__a : int = current_state.get_neighbors()
while (
next_state is None and neighbors
): # till we do not find a neighbor that we can move to
__a : str = random.randint(0 , len(SCREAMING_SNAKE_CASE_ ) - 1 ) # picking a random neighbor
__a : List[str] = neighbors.pop(SCREAMING_SNAKE_CASE_ )
__a : List[Any] = picked_neighbor.score() - current_score
if (
picked_neighbor.x > max_x
or picked_neighbor.x < min_x
or picked_neighbor.y > max_y
or picked_neighbor.y < min_y
):
continue # neighbor outside our bounds
if not find_max:
__a : Any = change * -1 # in case we are finding minimum
if change > 0: # improves the solution
__a : Any = picked_neighbor
else:
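                # Metropolis acceptance: take a worse neighbor with probability
                # e^(change / T), so exploration shrinks as the temperature cools.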
__a : List[str] = (math.e) ** (
change / current_temp
) # probability generation function
if random.random() < probability: # random number within probability
__a : Dict = picked_neighbor
__a : Optional[int] = current_temp - (current_temp * rate_of_decrease)
if current_temp < threshold_temp or next_state is None:
# temperature below threshold, or could not find a suitable neighbor
__a : Any = True
else:
__a : Tuple = next_state
if visualization:
from matplotlib import pyplot as plt
plt.plot(range(SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ )
plt.xlabel("""Iterations""" )
plt.ylabel("""Function values""" )
plt.show()
return best_state
if __name__ == "__main__":
def _snake_case ( lowercase , lowercase ) -> str:
return (x**2) + (y**2)
# starting the problem with initial coordinates (12, 47)
__SCREAMING_SNAKE_CASE : int = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_fa)
__SCREAMING_SNAKE_CASE : List[str] = simulated_annealing(
prob, find_max=False, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
)
print(
'The minimum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 '
        f'''and 50 > y > - 5 found via simulated annealing: {local_min.score()}'''
)
# starting the problem with initial coordinates (12, 47)
__SCREAMING_SNAKE_CASE : int = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_fa)
__SCREAMING_SNAKE_CASE : int = simulated_annealing(
prob, find_max=True, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
)
print(
'The maximum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 '
        f'''and 50 > y > - 5 found via simulated annealing: {local_min.score()}'''
)
def _snake_case ( lowercase , lowercase ) -> Optional[Any]:
return (3 * x**2) - (6 * y)
__SCREAMING_SNAKE_CASE : List[Any] = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_fa)
__SCREAMING_SNAKE_CASE : int = simulated_annealing(prob, find_max=False, visualization=True)
print(
        'The minimum score for f(x, y) = 3*x^2 - 6*y found via simulated annealing: '
f'''{local_min.score()}'''
)
__SCREAMING_SNAKE_CASE : Union[str, Any] = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_fa)
__SCREAMING_SNAKE_CASE : Dict = simulated_annealing(prob, find_max=True, visualization=True)
print(
        'The maximum score for f(x, y) = 3*x^2 - 6*y found via simulated annealing: '
f'''{local_min.score()}'''
) | 700 |
'''simple docstring'''
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNetaDModel
from ...schedulers import KarrasVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class SCREAMING_SNAKE_CASE__ ( __UpperCamelCase ):
lowercase__ = 42
lowercase__ = 42
def __init__( self , __UpperCamelCase , __UpperCamelCase ):
'''simple docstring'''
super().__init__()
self.register_modules(unet=__UpperCamelCase , scheduler=__UpperCamelCase )
@torch.no_grad()
def __call__( self , __UpperCamelCase = 1 , __UpperCamelCase = 50 , __UpperCamelCase = None , __UpperCamelCase = "pil" , __UpperCamelCase = True , **__UpperCamelCase , ):
'''simple docstring'''
__a : int = self.unet.config.sample_size
__a : Optional[int] = (batch_size, 3, img_size, img_size)
__a : Union[str, Any] = self.unet
# sample x_0 ~ N(0, sigma_0^2 * I)
__a : Dict = randn_tensor(__UpperCamelCase , generator=__UpperCamelCase , device=self.device ) * self.scheduler.init_noise_sigma
self.scheduler.set_timesteps(__UpperCamelCase )
for t in self.progress_bar(self.scheduler.timesteps ):
# here sigma_t == t_i from the paper
__a : Dict = self.scheduler.schedule[t]
__a : Any = self.scheduler.schedule[t - 1] if t > 0 else 0
# 1. Select temporarily increased noise level sigma_hat
# 2. Add new noise to move from sample_i to sample_hat
__a , __a : Tuple = self.scheduler.add_noise_to_input(__UpperCamelCase , __UpperCamelCase , generator=__UpperCamelCase )
# 3. Predict the noise residual given the noise magnitude `sigma_hat`
# The model inputs and output are adjusted by following eq. (213) in [1].
__a : List[Any] = (sigma_hat / 2) * model((sample_hat + 1) / 2 , sigma_hat / 2 ).sample
# 4. Evaluate dx/dt at sigma_hat
# 5. Take Euler step from sigma to sigma_prev
__a : str = self.scheduler.step(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
if sigma_prev != 0:
# 6. Apply 2nd order correction
# The model inputs and output are adjusted by following eq. (213) in [1].
__a : Union[str, Any] = (sigma_prev / 2) * model((step_output.prev_sample + 1) / 2 , sigma_prev / 2 ).sample
__a : Tuple = self.scheduler.step_correct(
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , step_output.prev_sample , step_output["""derivative"""] , )
__a : Tuple = step_output.prev_sample
__a : Optional[Any] = (sample / 2 + 0.5).clamp(0 , 1 )
__a : Dict = sample.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
__a : List[Any] = self.numpy_to_pil(__UpperCamelCase )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=__UpperCamelCase ) | 697 | 0 |
'''simple docstring'''
# flake8: noqa
# Lint as: python3
__SCREAMING_SNAKE_CASE : Union[str, Any] = [
'VerificationMode',
'Version',
'disable_progress_bar',
'enable_progress_bar',
'is_progress_bar_enabled',
'experimental',
]
from .info_utils import VerificationMode
from .logging import disable_progress_bar, enable_progress_bar, is_progress_bar_enabled
from .version import Version
from .experimental import experimental | 701 |
'''simple docstring'''
def _snake_case ( lowercase ) -> bool:
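    # A number is "bouncy" when its digits are neither entirely non-decreasing nor entirely non-increasing.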
if not isinstance(lowercase , lowercase ):
raise ValueError("""check_bouncy() accepts only integer arguments""" )
__a : str = str(lowercase )
__a : Any = """""".join(sorted(lowercase ) )
return sorted_str_n != str_n and sorted_str_n[::-1] != str_n
def _snake_case ( lowercase = 9_9 ) -> int:
if not 0 < percent < 1_0_0:
raise ValueError("""solution() only accepts values from 0 to 100""" )
__a : List[str] = 0
__a : Union[str, Any] = 1
while True:
if check_bouncy(lowercase ):
bouncy_num += 1
if (bouncy_num / num) * 1_0_0 >= percent:
return num
num += 1
if __name__ == "__main__":
from doctest import testmod
testmod()
print(f'''{solution(99)}''') | 697 | 0 |
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
__SCREAMING_SNAKE_CASE : int = logging.get_logger(__name__)
def _snake_case ( lowercase , lowercase ) -> Optional[int]:
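    # Pairwise squared distances via the expansion ||a - b||^2 = ||a||^2 - 2*a.b + ||b||^2,
    # which avoids materializing the full tensor of pairwise differences.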
__a : Union[str, Any] = b.T
__a : Union[str, Any] = np.sum(np.square(_SCREAMING_SNAKE_CASE ) , axis=1 )
__a : List[Any] = np.sum(np.square(_SCREAMING_SNAKE_CASE ) , axis=0 )
__a : str = np.matmul(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__a : Optional[int] = aa[:, None] - 2 * ab + ba[None, :]
return d
def _snake_case ( lowercase , lowercase ) -> str:
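    # Map every RGB pixel to the index of its nearest color cluster (color quantization).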
__a : Optional[Any] = x.reshape(-1 , 3 )
__a : Any = squared_euclidean_distance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
return np.argmin(_SCREAMING_SNAKE_CASE , axis=1 )
class SCREAMING_SNAKE_CASE__ ( snake_case__ ):
lowercase__ = ["""pixel_values"""]
def __init__( self , __UpperCamelCase = None , __UpperCamelCase = True , __UpperCamelCase = None , __UpperCamelCase = PILImageResampling.BILINEAR , __UpperCamelCase = True , __UpperCamelCase = True , **__UpperCamelCase , ):
'''simple docstring'''
super().__init__(**UpperCAmelCase_ )
__a : List[str] = size if size is not None else {"""height""": 256, """width""": 256}
__a : str = get_size_dict(UpperCAmelCase_ )
__a : Dict = np.array(UpperCAmelCase_ ) if clusters is not None else None
__a : int = do_resize
__a : Optional[Any] = size
__a : List[str] = resample
__a : Dict = do_normalize
__a : List[str] = do_color_quantize
def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = PILImageResampling.BILINEAR , __UpperCamelCase = None , **__UpperCamelCase , ):
'''simple docstring'''
__a : List[Any] = get_size_dict(UpperCAmelCase_ )
if "height" not in size or "width" not in size:
raise ValueError(f"""Size dictionary must contain both height and width keys. Got {size.keys()}""" )
return resize(
UpperCAmelCase_ , size=(size["""height"""], size["""width"""]) , resample=UpperCAmelCase_ , data_format=UpperCAmelCase_ , **UpperCAmelCase_ )
def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase = None , ):
'''simple docstring'''
__a : Dict = rescale(image=UpperCAmelCase_ , scale=1 / 1_2_7.5 , data_format=UpperCAmelCase_ )
__a : Optional[int] = image - 1
return image
def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = ChannelDimension.FIRST , **__UpperCamelCase , ):
'''simple docstring'''
__a : int = do_resize if do_resize is not None else self.do_resize
__a : Tuple = size if size is not None else self.size
__a : Dict = get_size_dict(UpperCAmelCase_ )
__a : List[str] = resample if resample is not None else self.resample
__a : Optional[int] = do_normalize if do_normalize is not None else self.do_normalize
__a : List[str] = do_color_quantize if do_color_quantize is not None else self.do_color_quantize
__a : str = clusters if clusters is not None else self.clusters
__a : int = np.array(UpperCAmelCase_ )
__a : str = make_list_of_images(UpperCAmelCase_ )
if not valid_images(UpperCAmelCase_ ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
if do_resize and size is None or resample is None:
raise ValueError("""Size and resample must be specified if do_resize is True.""" )
if do_color_quantize and clusters is None:
raise ValueError("""Clusters must be specified if do_color_quantize is True.""" )
# All transformations expect numpy arrays.
__a : Optional[int] = [to_numpy_array(UpperCAmelCase_ ) for image in images]
if do_resize:
__a : Union[str, Any] = [self.resize(image=UpperCAmelCase_ , size=UpperCAmelCase_ , resample=UpperCAmelCase_ ) for image in images]
if do_normalize:
__a : Tuple = [self.normalize(image=UpperCAmelCase_ ) for image in images]
if do_color_quantize:
__a : Tuple = [to_channel_dimension_format(UpperCAmelCase_ , ChannelDimension.LAST ) for image in images]
# color quantize from (batch_size, height, width, 3) to (batch_size, height, width)
__a : Any = np.array(UpperCAmelCase_ )
__a : List[Any] = color_quantize(UpperCAmelCase_ , UpperCAmelCase_ ).reshape(images.shape[:-1] )
# flatten to (batch_size, height*width)
__a : Tuple = images.shape[0]
__a : List[Any] = images.reshape(UpperCAmelCase_ , -1 )
# We need to convert back to a list of images to keep consistent behaviour across processors.
__a : Tuple = list(UpperCAmelCase_ )
else:
__a : str = [to_channel_dimension_format(UpperCAmelCase_ , UpperCAmelCase_ ) for image in images]
__a : Optional[int] = {"""input_ids""": images}
return BatchFeature(data=UpperCAmelCase_ , tensor_type=UpperCAmelCase_ ) | 702 |
'''simple docstring'''
import argparse
import torch
from transformers import GPTaConfig, GPTaModel, load_tf_weights_in_gpta
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def _snake_case ( lowercase , lowercase , lowercase ) -> Any:
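    # Build a GPT-2 config (default, or from the given JSON file), load the TF checkpoint
    # weights into a fresh PyTorch model, then write out the state dict and the config.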
# Construct model
if gpta_config_file == "":
__a : Dict = GPTaConfig()
else:
__a : Optional[Any] = GPTaConfig.from_json_file(lowercase )
__a : Union[str, Any] = GPTaModel(lowercase )
# Load weights from numpy
load_tf_weights_in_gpta(lowercase , lowercase , lowercase )
# Save pytorch-model
__a : Optional[int] = pytorch_dump_folder_path + """/""" + WEIGHTS_NAME
__a : Dict = pytorch_dump_folder_path + """/""" + CONFIG_NAME
print(F"""Save PyTorch model to {pytorch_weights_dump_path}""" )
torch.save(model.state_dict() , lowercase )
print(F"""Save configuration file to {pytorch_config_dump_path}""" )
with open(lowercase , """w""" , encoding="""utf-8""" ) as f:
f.write(config.to_json_string() )
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE : Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--gpt2_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--gpt2_config_file',
default='',
type=str,
help=(
'An optional config json file corresponding to the pre-trained OpenAI model. \n'
'This specifies the model architecture.'
),
)
__SCREAMING_SNAKE_CASE : Dict = parser.parse_args()
convert_gpta_checkpoint_to_pytorch(args.gpta_checkpoint_path, args.gpta_config_file, args.pytorch_dump_folder_path) | 697 | 0 |
'''simple docstring'''
from typing import Any, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from torch.utils.data import DistributedSampler, RandomSampler
from transformers import PreTrainedModel, Trainer, logging
from transformers.integrations import is_fairscale_available
from transformers.models.fsmt.configuration_fsmt import FSMTConfig
from transformers.optimization import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from transformers.trainer_pt_utils import get_tpu_sampler
from transformers.training_args import ParallelMode
from transformers.utils import is_torch_tpu_available
if is_fairscale_available():
from fairscale.optim import OSS
__SCREAMING_SNAKE_CASE : Optional[int] = logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE : str = {
"linear": get_linear_schedule_with_warmup,
"cosine": get_cosine_schedule_with_warmup,
"cosine_w_restarts": get_cosine_with_hard_restarts_schedule_with_warmup,
"polynomial": get_polynomial_decay_schedule_with_warmup,
"constant": get_constant_schedule,
"constant_w_warmup": get_constant_schedule_with_warmup,
}
class SCREAMING_SNAKE_CASE__ ( _SCREAMING_SNAKE_CASE ):
def __init__( self , __UpperCamelCase=None , __UpperCamelCase=None , *__UpperCamelCase , **__UpperCamelCase ):
'''simple docstring'''
super().__init__(*A_ , **A_ )
if config is None:
assert isinstance(self.model , A_ ), (
"If no `config` is passed the model to be trained has to be of type `PreTrainedModel`, but is"
f""" {self.model.__class__}"""
)
__a : Dict = self.model.config
else:
__a : List[str] = config
__a : Dict = data_args
__a : Tuple = self.config.tgt_vocab_size if isinstance(self.config , A_ ) else self.config.vocab_size
if self.args.label_smoothing != 0 or (self.data_args is not None and self.data_args.ignore_pad_token_for_loss):
assert self.config.pad_token_id is not None, (
"Make sure that `config.pad_token_id` is correcly defined when ignoring `pad_token` for loss"
" calculation or doing label smoothing."
)
if self.config.pad_token_id is None and self.config.eos_token_id is not None:
logger.warning(
f"""The `config.pad_token_id` is `None`. Using `config.eos_token_id` = {self.config.eos_token_id} for"""
""" padding..""" )
if self.args.label_smoothing == 0:
__a : Union[str, Any] = torch.nn.CrossEntropyLoss(ignore_index=self.config.pad_token_id )
else:
# dynamically import label_smoothed_nll_loss
from utils import label_smoothed_nll_loss
__a : Optional[int] = label_smoothed_nll_loss
def __lowerCamelCase ( self , __UpperCamelCase ):
'''simple docstring'''
if self.optimizer is None:
__a : List[Any] = ["""bias""", """LayerNorm.weight"""]
__a : Optional[Any] = [
{
"""params""": [p for n, p in self.model.named_parameters() if not any(nd in n for nd in no_decay )],
"""weight_decay""": self.args.weight_decay,
},
{
"""params""": [p for n, p in self.model.named_parameters() if any(nd in n for nd in no_decay )],
"""weight_decay""": 0.0,
},
]
__a : Optional[Any] = Adafactor if self.args.adafactor else AdamW
if self.args.adafactor:
__a : Any = Adafactor
__a : int = {"""scale_parameter""": False, """relative_step""": False}
else:
__a : Optional[Any] = AdamW
__a : List[str] = {
"""betas""": (self.args.adam_betaa, self.args.adam_betaa),
"""eps""": self.args.adam_epsilon,
}
__a : List[str] = self.args.learning_rate
if self.sharded_ddp:
__a : Any = OSS(
params=A_ , optim=A_ , **A_ , )
else:
__a : List[str] = optimizer_cls(A_ , **A_ )
if self.lr_scheduler is None:
__a : Tuple = self._get_lr_scheduler(A_ )
else: # ignoring --lr_scheduler
logger.warning("""scheduler is passed to `Seq2SeqTrainer`, `--lr_scheduler` arg is ignored.""" )
def __lowerCamelCase ( self , __UpperCamelCase ):
'''simple docstring'''
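        # Look up the schedule factory by name; constant schedules take no step counts,
        # warmup-only schedules take just the number of warmup steps.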
__a : Tuple = arg_to_scheduler[self.args.lr_scheduler]
if self.args.lr_scheduler == "constant":
__a : Union[str, Any] = schedule_func(self.optimizer )
elif self.args.lr_scheduler == "constant_w_warmup":
__a : List[Any] = schedule_func(self.optimizer , num_warmup_steps=self.args.warmup_steps )
else:
__a : int = schedule_func(
self.optimizer , num_warmup_steps=self.args.warmup_steps , num_training_steps=A_ )
return scheduler
def __lowerCamelCase ( self ):
'''simple docstring'''
if isinstance(self.train_dataset , torch.utils.data.IterableDataset ):
return None
elif is_torch_tpu_available():
return get_tpu_sampler(self.train_dataset )
else:
if self.args.sortish_sampler:
self.train_dataset.make_sortish_sampler(
self.args.per_device_train_batch_size , distributed=(self.args.parallel_mode == ParallelMode.DISTRIBUTED) , )
return (
RandomSampler(self.train_dataset )
if self.args.local_rank == -1
else DistributedSampler(self.train_dataset )
)
def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ):
'''simple docstring'''
if self.args.label_smoothing == 0:
if self.data_args is not None and self.data_args.ignore_pad_token_for_loss:
# force training to ignore pad token
__a : Tuple = model(**A_ , use_cache=A_ )[0]
__a : List[Any] = self.loss_fn(logits.view(-1 , logits.shape[-1] ) , labels.view(-1 ) )
else:
            # compute the usual loss via the model
__a , __a : Dict = model(**A_ , labels=A_ , use_cache=A_ )[:2]
else:
# compute label smoothed loss
__a : Optional[int] = model(**A_ , use_cache=A_ )[0]
__a : str = torch.nn.functional.log_softmax(A_ , dim=-1 )
__a , __a : str = self.loss_fn(A_ , A_ , self.args.label_smoothing , ignore_index=self.config.pad_token_id )
return loss, logits
def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase ):
'''simple docstring'''
__a : Tuple = inputs.pop("""labels""" )
__a , __a : List[str] = self._compute_loss(A_ , A_ , A_ )
return loss
def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = None , ):
'''simple docstring'''
__a : Union[str, Any] = self._prepare_inputs(A_ )
__a : str = {
"""max_length""": self.data_args.val_max_target_length
if self.data_args is not None
else self.config.max_length,
"""num_beams""": self.data_args.eval_beams if self.data_args is not None else self.config.num_beams,
}
if self.args.predict_with_generate and not self.args.prediction_loss_only:
__a : Any = self.model.generate(
inputs["""input_ids"""] , attention_mask=inputs["""attention_mask"""] , **A_ , )
# in case the batch is shorter than max length, the output should be padded
if generated_tokens.shape[-1] < gen_kwargs["max_length"]:
__a : Tuple = self._pad_tensors_to_max_len(A_ , gen_kwargs["""max_length"""] )
__a : Any = inputs.pop("""labels""" )
with torch.no_grad():
# compute loss on predict data
__a , __a : Optional[int] = self._compute_loss(A_ , A_ , A_ )
__a : Optional[int] = loss.mean().detach()
if self.args.prediction_loss_only:
return (loss, None, None)
__a : Any = generated_tokens if self.args.predict_with_generate else logits
if labels.shape[-1] < gen_kwargs["max_length"]:
__a : str = self._pad_tensors_to_max_len(A_ , gen_kwargs["""max_length"""] )
return (loss, logits, labels)
def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase ):
'''simple docstring'''
__a : Any = self.config.pad_token_id if self.config.pad_token_id is not None else self.config.eos_token_id
if pad_token_id is None:
raise ValueError(
"""Make sure that either `config.pad_token_id` or `config.eos_token_id` is defined if tensor has to be"""
f""" padded to `max_length`={max_length}""" )
__a : Optional[int] = pad_token_id * torch.ones(
(tensor.shape[0], max_length) , dtype=tensor.dtype , device=tensor.device )
__a : str = tensor
return padded_tensor | 703 |
'''simple docstring'''
import unittest
from transformers import (
MODEL_FOR_OBJECT_DETECTION_MAPPING,
AutoFeatureExtractor,
AutoModelForObjectDetection,
ObjectDetectionPipeline,
is_vision_available,
pipeline,
)
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_pytesseract,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class SCREAMING_SNAKE_CASE__ :
@staticmethod
def __lowerCamelCase ( *__UpperCamelCase , **__UpperCamelCase ):
'''simple docstring'''
pass
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
lowercase__ = MODEL_FOR_OBJECT_DETECTION_MAPPING
def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ):
'''simple docstring'''
__a : Optional[Any] = ObjectDetectionPipeline(model=__UpperCamelCase , image_processor=__UpperCamelCase )
return object_detector, ["./tests/fixtures/tests_samples/COCO/000000039769.png"]
def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase ):
'''simple docstring'''
__a : List[str] = object_detector("""./tests/fixtures/tests_samples/COCO/000000039769.png""" , threshold=0.0 )
self.assertGreater(len(__UpperCamelCase ) , 0 )
for detected_object in outputs:
self.assertEqual(
__UpperCamelCase , {
"""score""": ANY(__UpperCamelCase ),
"""label""": ANY(__UpperCamelCase ),
"""box""": {"""xmin""": ANY(__UpperCamelCase ), """ymin""": ANY(__UpperCamelCase ), """xmax""": ANY(__UpperCamelCase ), """ymax""": ANY(__UpperCamelCase )},
} , )
import datasets
__a : Optional[int] = datasets.load_dataset("""hf-internal-testing/fixtures_image_utils""" , """image""" , split="""test""" )
__a : Tuple = [
Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ),
"""http://images.cocodataset.org/val2017/000000039769.jpg""",
# RGBA
dataset[0]["""file"""],
# LA
dataset[1]["""file"""],
# L
dataset[2]["""file"""],
]
__a : Any = object_detector(__UpperCamelCase , threshold=0.0 )
self.assertEqual(len(__UpperCamelCase ) , len(__UpperCamelCase ) )
for outputs in batch_outputs:
self.assertGreater(len(__UpperCamelCase ) , 0 )
for detected_object in outputs:
self.assertEqual(
__UpperCamelCase , {
"""score""": ANY(__UpperCamelCase ),
"""label""": ANY(__UpperCamelCase ),
"""box""": {"""xmin""": ANY(__UpperCamelCase ), """ymin""": ANY(__UpperCamelCase ), """xmax""": ANY(__UpperCamelCase ), """ymax""": ANY(__UpperCamelCase )},
} , )
@require_tf
@unittest.skip("""Object detection not implemented in TF""" )
def __lowerCamelCase ( self ):
'''simple docstring'''
pass
@require_torch
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Optional[Any] = """hf-internal-testing/tiny-detr-mobilenetsv3"""
__a : Dict = AutoModelForObjectDetection.from_pretrained(__UpperCamelCase )
__a : Optional[Any] = AutoFeatureExtractor.from_pretrained(__UpperCamelCase )
__a : str = ObjectDetectionPipeline(model=__UpperCamelCase , feature_extractor=__UpperCamelCase )
__a : Optional[int] = object_detector("""http://images.cocodataset.org/val2017/000000039769.jpg""" , threshold=0.0 )
self.assertEqual(
nested_simplify(__UpperCamelCase , decimals=4 ) , [
{"""score""": 0.3_3_7_6, """label""": """LABEL_0""", """box""": {"""xmin""": 159, """ymin""": 120, """xmax""": 480, """ymax""": 359}},
{"""score""": 0.3_3_7_6, """label""": """LABEL_0""", """box""": {"""xmin""": 159, """ymin""": 120, """xmax""": 480, """ymax""": 359}},
] , )
__a : Union[str, Any] = object_detector(
[
"""http://images.cocodataset.org/val2017/000000039769.jpg""",
"""http://images.cocodataset.org/val2017/000000039769.jpg""",
] , threshold=0.0 , )
self.assertEqual(
nested_simplify(__UpperCamelCase , decimals=4 ) , [
[
{"""score""": 0.3_3_7_6, """label""": """LABEL_0""", """box""": {"""xmin""": 159, """ymin""": 120, """xmax""": 480, """ymax""": 359}},
{"""score""": 0.3_3_7_6, """label""": """LABEL_0""", """box""": {"""xmin""": 159, """ymin""": 120, """xmax""": 480, """ymax""": 359}},
],
[
{"""score""": 0.3_3_7_6, """label""": """LABEL_0""", """box""": {"""xmin""": 159, """ymin""": 120, """xmax""": 480, """ymax""": 359}},
{"""score""": 0.3_3_7_6, """label""": """LABEL_0""", """box""": {"""xmin""": 159, """ymin""": 120, """xmax""": 480, """ymax""": 359}},
],
] , )
@require_torch
@slow
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : str = """facebook/detr-resnet-50"""
__a : Dict = AutoModelForObjectDetection.from_pretrained(__UpperCamelCase )
__a : int = AutoFeatureExtractor.from_pretrained(__UpperCamelCase )
__a : int = ObjectDetectionPipeline(model=__UpperCamelCase , feature_extractor=__UpperCamelCase )
__a : Any = object_detector("""http://images.cocodataset.org/val2017/000000039769.jpg""" )
self.assertEqual(
nested_simplify(__UpperCamelCase , decimals=4 ) , [
{"""score""": 0.9_9_8_2, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 70, """xmax""": 175, """ymax""": 117}},
{"""score""": 0.9_9_6_0, """label""": """remote""", """box""": {"""xmin""": 333, """ymin""": 72, """xmax""": 368, """ymax""": 187}},
{"""score""": 0.9_9_5_5, """label""": """couch""", """box""": {"""xmin""": 0, """ymin""": 1, """xmax""": 639, """ymax""": 473}},
{"""score""": 0.9_9_8_8, """label""": """cat""", """box""": {"""xmin""": 13, """ymin""": 52, """xmax""": 314, """ymax""": 470}},
{"""score""": 0.9_9_8_7, """label""": """cat""", """box""": {"""xmin""": 345, """ymin""": 23, """xmax""": 640, """ymax""": 368}},
] , )
__a : Optional[Any] = object_detector(
[
"""http://images.cocodataset.org/val2017/000000039769.jpg""",
"""http://images.cocodataset.org/val2017/000000039769.jpg""",
] )
self.assertEqual(
nested_simplify(__UpperCamelCase , decimals=4 ) , [
[
{"""score""": 0.9_9_8_2, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 70, """xmax""": 175, """ymax""": 117}},
{"""score""": 0.9_9_6_0, """label""": """remote""", """box""": {"""xmin""": 333, """ymin""": 72, """xmax""": 368, """ymax""": 187}},
{"""score""": 0.9_9_5_5, """label""": """couch""", """box""": {"""xmin""": 0, """ymin""": 1, """xmax""": 639, """ymax""": 473}},
{"""score""": 0.9_9_8_8, """label""": """cat""", """box""": {"""xmin""": 13, """ymin""": 52, """xmax""": 314, """ymax""": 470}},
{"""score""": 0.9_9_8_7, """label""": """cat""", """box""": {"""xmin""": 345, """ymin""": 23, """xmax""": 640, """ymax""": 368}},
],
[
{"""score""": 0.9_9_8_2, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 70, """xmax""": 175, """ymax""": 117}},
{"""score""": 0.9_9_6_0, """label""": """remote""", """box""": {"""xmin""": 333, """ymin""": 72, """xmax""": 368, """ymax""": 187}},
{"""score""": 0.9_9_5_5, """label""": """couch""", """box""": {"""xmin""": 0, """ymin""": 1, """xmax""": 639, """ymax""": 473}},
{"""score""": 0.9_9_8_8, """label""": """cat""", """box""": {"""xmin""": 13, """ymin""": 52, """xmax""": 314, """ymax""": 470}},
{"""score""": 0.9_9_8_7, """label""": """cat""", """box""": {"""xmin""": 345, """ymin""": 23, """xmax""": 640, """ymax""": 368}},
],
] , )
@require_torch
@slow
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : int = """facebook/detr-resnet-50"""
__a : Optional[int] = pipeline("""object-detection""" , model=__UpperCamelCase )
__a : Optional[int] = object_detector("""http://images.cocodataset.org/val2017/000000039769.jpg""" )
self.assertEqual(
nested_simplify(__UpperCamelCase , decimals=4 ) , [
{"""score""": 0.9_9_8_2, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 70, """xmax""": 175, """ymax""": 117}},
{"""score""": 0.9_9_6_0, """label""": """remote""", """box""": {"""xmin""": 333, """ymin""": 72, """xmax""": 368, """ymax""": 187}},
{"""score""": 0.9_9_5_5, """label""": """couch""", """box""": {"""xmin""": 0, """ymin""": 1, """xmax""": 639, """ymax""": 473}},
{"""score""": 0.9_9_8_8, """label""": """cat""", """box""": {"""xmin""": 13, """ymin""": 52, """xmax""": 314, """ymax""": 470}},
{"""score""": 0.9_9_8_7, """label""": """cat""", """box""": {"""xmin""": 345, """ymin""": 23, """xmax""": 640, """ymax""": 368}},
] , )
__a : List[str] = object_detector(
[
"""http://images.cocodataset.org/val2017/000000039769.jpg""",
"""http://images.cocodataset.org/val2017/000000039769.jpg""",
] )
self.assertEqual(
nested_simplify(__UpperCamelCase , decimals=4 ) , [
[
{"""score""": 0.9_9_8_2, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 70, """xmax""": 175, """ymax""": 117}},
{"""score""": 0.9_9_6_0, """label""": """remote""", """box""": {"""xmin""": 333, """ymin""": 72, """xmax""": 368, """ymax""": 187}},
{"""score""": 0.9_9_5_5, """label""": """couch""", """box""": {"""xmin""": 0, """ymin""": 1, """xmax""": 639, """ymax""": 473}},
{"""score""": 0.9_9_8_8, """label""": """cat""", """box""": {"""xmin""": 13, """ymin""": 52, """xmax""": 314, """ymax""": 470}},
{"""score""": 0.9_9_8_7, """label""": """cat""", """box""": {"""xmin""": 345, """ymin""": 23, """xmax""": 640, """ymax""": 368}},
],
[
{"""score""": 0.9_9_8_2, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 70, """xmax""": 175, """ymax""": 117}},
{"""score""": 0.9_9_6_0, """label""": """remote""", """box""": {"""xmin""": 333, """ymin""": 72, """xmax""": 368, """ymax""": 187}},
{"""score""": 0.9_9_5_5, """label""": """couch""", """box""": {"""xmin""": 0, """ymin""": 1, """xmax""": 639, """ymax""": 473}},
{"""score""": 0.9_9_8_8, """label""": """cat""", """box""": {"""xmin""": 13, """ymin""": 52, """xmax""": 314, """ymax""": 470}},
{"""score""": 0.9_9_8_7, """label""": """cat""", """box""": {"""xmin""": 345, """ymin""": 23, """xmax""": 640, """ymax""": 368}},
],
] , )
@require_torch
@slow
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Union[str, Any] = 0.9_9_8_5
__a : Union[str, Any] = """facebook/detr-resnet-50"""
__a : Optional[int] = pipeline("""object-detection""" , model=__UpperCamelCase )
__a : Union[str, Any] = object_detector("""http://images.cocodataset.org/val2017/000000039769.jpg""" , threshold=__UpperCamelCase )
self.assertEqual(
nested_simplify(__UpperCamelCase , decimals=4 ) , [
{"""score""": 0.9_9_8_8, """label""": """cat""", """box""": {"""xmin""": 13, """ymin""": 52, """xmax""": 314, """ymax""": 470}},
{"""score""": 0.9_9_8_7, """label""": """cat""", """box""": {"""xmin""": 345, """ymin""": 23, """xmax""": 640, """ymax""": 368}},
] , )
@require_torch
@require_pytesseract
@slow
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : str = """Narsil/layoutlmv3-finetuned-funsd"""
__a : List[Any] = 0.9_9_9_3
__a : Dict = pipeline("""object-detection""" , model=__UpperCamelCase , threshold=__UpperCamelCase )
__a : List[str] = object_detector(
"""https://huggingface.co/spaces/impira/docquery/resolve/2359223c1837a7587402bda0f2643382a6eefeab/invoice.png""" )
self.assertEqual(
nested_simplify(__UpperCamelCase , decimals=4 ) , [
{"""score""": 0.9_9_9_3, """label""": """I-ANSWER""", """box""": {"""xmin""": 294, """ymin""": 254, """xmax""": 343, """ymax""": 264}},
{"""score""": 0.9_9_9_3, """label""": """I-ANSWER""", """box""": {"""xmin""": 294, """ymin""": 254, """xmax""": 343, """ymax""": 264}},
] , ) | 697 | 0 |
'''simple docstring'''
from __future__ import annotations
import math
from collections import Counter
from string import ascii_lowercase
def _snake_case ( lowercase ) -> None:
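    # Shannon entropy H = -sum(p * log2 p), computed first over single characters,
    # then over two-character sequences, and finally the difference between the two.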
__a : List[Any] = analyze_text(_lowercase )
__a : Any = list(""" """ + ascii_lowercase )
# what is our total sum of probabilities.
__a : Union[str, Any] = sum(single_char_strings.values() )
    # single-character strings
__a : Union[str, Any] = 0
    # for each letter of the alphabet, look it up in the dict and accumulate its entropy contribution
for ch in my_alphas:
if ch in single_char_strings:
__a : Any = single_char_strings[ch]
__a : int = my_str / all_sum
my_fir_sum += prob * math.loga(_lowercase ) # entropy formula.
# print entropy
print(F"""{round(-1 * my_fir_sum ):.1f}""" )
    # two-character strings
__a : str = sum(two_char_strings.values() )
__a : str = 0
    # for each two-letter sequence, accumulate its entropy contribution.
for cha in my_alphas:
for cha in my_alphas:
__a : Optional[Any] = cha + cha
if sequence in two_char_strings:
__a : int = two_char_strings[sequence]
__a : Optional[int] = int(_lowercase ) / all_sum
my_sec_sum += prob * math.loga(_lowercase )
# print second entropy
print(F"""{round(-1 * my_sec_sum ):.1f}""" )
# print the difference between them
print(F"""{round((-1 * my_sec_sum) - (-1 * my_fir_sum) ):.1f}""" )
def _snake_case ( lowercase ) -> tuple[dict, dict]:
__a : Optional[Any] = Counter() # type: ignore
__a : List[Any] = Counter() # type: ignore
single_char_strings[text[-1]] += 1
    # first case: treat the text as if it starts with a space.
two_char_strings[" " + text[0]] += 1
for i in range(0 , len(_lowercase ) - 1 ):
single_char_strings[text[i]] += 1
two_char_strings[text[i : i + 2]] += 1
return single_char_strings, two_char_strings
def _snake_case ( ) -> List[Any]:
import doctest
doctest.testmod()
# text = (
# "Had repulsive dashwoods suspicion sincerity but advantage now him. Remark "
# "easily garret nor nay. Civil those mrs enjoy shy fat merry. You greatest "
# "jointure saw horrible. He private he on be imagine suppose. Fertile "
# "beloved evident through no service elderly is. Blind there if every no so "
# "at. Own neglected you preferred way sincerity delivered his attempted. To "
# "of message cottage windows do besides against uncivil. Delightful "
# "unreserved impossible few estimating men favourable see entreaties. She "
# "propriety immediate was improving. He or entrance humoured likewise "
# "moderate. Much nor game son say feel. Fat make met can must form into "
# "gate. Me we offending prevailed discovery. "
# )
# calculate_prob(text)
if __name__ == "__main__":
main() | 704 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
__SCREAMING_SNAKE_CASE : List[str] = {
'configuration_blenderbot_small': [
'BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP',
'BlenderbotSmallConfig',
'BlenderbotSmallOnnxConfig',
],
'tokenization_blenderbot_small': ['BlenderbotSmallTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE : Union[str, Any] = ['BlenderbotSmallTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE : List[str] = [
'BLENDERBOT_SMALL_PRETRAINED_MODEL_ARCHIVE_LIST',
'BlenderbotSmallForCausalLM',
'BlenderbotSmallForConditionalGeneration',
'BlenderbotSmallModel',
'BlenderbotSmallPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE : Optional[int] = [
'TFBlenderbotSmallForConditionalGeneration',
'TFBlenderbotSmallModel',
'TFBlenderbotSmallPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE : Optional[Any] = [
'FlaxBlenderbotSmallForConditionalGeneration',
'FlaxBlenderbotSmallModel',
'FlaxBlenderbotSmallPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_blenderbot_small import (
BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlenderbotSmallConfig,
BlenderbotSmallOnnxConfig,
)
from .tokenization_blenderbot_small import BlenderbotSmallTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_blenderbot_small_fast import BlenderbotSmallTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blenderbot_small import (
BLENDERBOT_SMALL_PRETRAINED_MODEL_ARCHIVE_LIST,
BlenderbotSmallForCausalLM,
BlenderbotSmallForConditionalGeneration,
BlenderbotSmallModel,
BlenderbotSmallPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blenderbot_small import (
TFBlenderbotSmallForConditionalGeneration,
TFBlenderbotSmallModel,
TFBlenderbotSmallPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_blenderbot_small import (
FlaxBlenderbotSmallForConditionalGeneration,
FlaxBlenderbotSmallModel,
FlaxBlenderbotSmallPreTrainedModel,
)
else:
import sys
__SCREAMING_SNAKE_CASE : List[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) | 697 | 0 |
from __future__ import annotations
from collections import deque
from collections.abc import Iterator
from dataclasses import dataclass
@dataclass
class SCREAMING_SNAKE_CASE__ :
lowercase__ = 42
lowercase__ = 42
class SCREAMING_SNAKE_CASE__ :
def __init__( self , __UpperCamelCase ):
'''simple docstring'''
__a : list[list[Edge]] = [[] for _ in range(__lowerCamelCase )]
__a : int = size
def __getitem__( self , __UpperCamelCase ):
'''simple docstring'''
return iter(self._graph[vertex] )
@property
def __lowerCamelCase ( self ):
'''simple docstring'''
return self._size
def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ):
'''simple docstring'''
if weight not in (0, 1):
raise ValueError("""Edge weight must be either 0 or 1.""" )
if to_vertex < 0 or to_vertex >= self.size:
raise ValueError("""Vertex indexes must be in [0; size).""" )
self._graph[from_vertex].append(Edge(__lowerCamelCase , __lowerCamelCase ) )
def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase ):
'''simple docstring'''
__a : int = deque([start_vertex] )
__a : list[int | None] = [None] * self.size
__a : Union[str, Any] = 0
while queue:
__a : str = queue.popleft()
__a : str = distances[current_vertex]
if current_distance is None:
continue
for edge in self[current_vertex]:
__a : int = current_distance + edge.weight
__a : int = distances[edge.destination_vertex]
if (
isinstance(__lowerCamelCase , __lowerCamelCase )
and new_distance >= dest_vertex_distance
):
continue
__a : List[str] = new_distance
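                # 0-1 BFS: zero-weight edges go to the front of the deque and
                # unit-weight edges to the back, keeping the deque ordered by distance.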
if edge.weight == 0:
queue.appendleft(edge.destination_vertex )
else:
queue.append(edge.destination_vertex )
if distances[finish_vertex] is None:
raise ValueError("""No path from start_vertex to finish_vertex.""" )
return distances[finish_vertex]
if __name__ == "__main__":
import doctest
doctest.testmod() | 705 |
'''simple docstring'''
import numpy as np
import torch
from torch.utils.data import Dataset
from utils import logger
class SCREAMING_SNAKE_CASE__ ( __UpperCamelCase ):
def __init__( self , __UpperCamelCase , __UpperCamelCase ):
'''simple docstring'''
__a : Any = params
__a : Optional[Any] = np.array(__UpperCamelCase )
__a : Union[str, Any] = np.array([len(__UpperCamelCase ) for t in data] )
self.check()
self.remove_long_sequences()
self.remove_empty_sequences()
self.remove_unknown_sequences()
self.check()
self.print_statistics()
def __getitem__( self , __UpperCamelCase ):
'''simple docstring'''
return (self.token_ids[index], self.lengths[index])
def __len__( self ):
'''simple docstring'''
return len(self.lengths )
def __lowerCamelCase ( self ):
'''simple docstring'''
assert len(self.token_ids ) == len(self.lengths )
assert all(self.lengths[i] == len(self.token_ids[i] ) for i in range(len(self.lengths ) ) )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Tuple = self.params.max_model_input_size
__a : Union[str, Any] = self.lengths > max_len
logger.info(f"""Splitting {sum(__UpperCamelCase )} too long sequences.""" )
def divide_chunks(__UpperCamelCase , __UpperCamelCase ):
return [l[i : i + n] for i in range(0 , len(__UpperCamelCase ) , __UpperCamelCase )]
__a : int = []
__a : Union[str, Any] = []
if self.params.mlm:
__a , __a : Any = self.params.special_tok_ids["""cls_token"""], self.params.special_tok_ids["""sep_token"""]
else:
__a , __a : str = self.params.special_tok_ids["""bos_token"""], self.params.special_tok_ids["""eos_token"""]
for seq_, len_ in zip(self.token_ids , self.lengths ):
assert (seq_[0] == cls_id) and (seq_[-1] == sep_id), seq_
if len_ <= max_len:
new_tok_ids.append(seq_ )
new_lengths.append(len_ )
else:
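                # Split the over-long sequence into chunks, re-adding the special tokens so
                # every chunk still starts with CLS/BOS and ends with SEP/EOS.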
__a : Any = []
for sub_s in divide_chunks(seq_ , max_len - 2 ):
if sub_s[0] != cls_id:
__a : int = np.insert(__UpperCamelCase , 0 , __UpperCamelCase )
if sub_s[-1] != sep_id:
__a : str = np.insert(__UpperCamelCase , len(__UpperCamelCase ) , __UpperCamelCase )
assert len(__UpperCamelCase ) <= max_len
assert (sub_s[0] == cls_id) and (sub_s[-1] == sep_id), sub_s
sub_seqs.append(__UpperCamelCase )
new_tok_ids.extend(__UpperCamelCase )
new_lengths.extend([len(__UpperCamelCase ) for l in sub_seqs] )
__a : Dict = np.array(__UpperCamelCase )
__a : Tuple = np.array(__UpperCamelCase )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : List[str] = len(self )
__a : List[str] = self.lengths > 11
__a : int = self.token_ids[indices]
__a : Union[str, Any] = self.lengths[indices]
__a : Any = len(self )
logger.info(f"""Remove {init_size - new_size} too short (<=11 tokens) sequences.""" )
def __lowerCamelCase ( self ):
'''simple docstring'''
if "unk_token" not in self.params.special_tok_ids:
return
else:
__a : List[str] = self.params.special_tok_ids["""unk_token"""]
__a : str = len(self )
__a : str = np.array([np.count_nonzero(a == unk_token_id ) for a in self.token_ids] )
__a : Optional[Any] = (unk_occs / self.lengths) < 0.5
__a : List[str] = self.token_ids[indices]
__a : Optional[int] = self.lengths[indices]
__a : Any = len(self )
logger.info(f"""Remove {init_size - new_size} sequences with a high level of unknown tokens (50%).""" )
def __lowerCamelCase ( self ):
'''simple docstring'''
if not self.params.is_master:
return
logger.info(f"""{len(self )} sequences""" )
# data_len = sum(self.lengths)
# nb_unique_tokens = len(Counter(list(chain(*self.token_ids))))
# logger.info(f'{data_len} tokens ({nb_unique_tokens} unique)')
# unk_idx = self.params.special_tok_ids['unk_token']
# nb_unknown = sum([(t==unk_idx).sum() for t in self.token_ids])
# logger.info(f'{nb_unknown} unknown tokens (covering {100*nb_unknown/data_len:.2f}% of the data)')
def __lowerCamelCase ( self , __UpperCamelCase ):
'''simple docstring'''
__a : List[str] = [t[0] for t in batch]
__a : str = [t[1] for t in batch]
assert len(__UpperCamelCase ) == len(__UpperCamelCase )
# Max for paddings
__a : Optional[int] = max(__UpperCamelCase )
# Pad token ids
if self.params.mlm:
__a : int = self.params.special_tok_ids["""pad_token"""]
else:
__a : Tuple = self.params.special_tok_ids["""unk_token"""]
__a : Any = [list(t.astype(__UpperCamelCase ) ) + [pad_idx] * (max_seq_len_ - len(__UpperCamelCase )) for t in token_ids]
assert len(tk_ ) == len(__UpperCamelCase )
assert all(len(__UpperCamelCase ) == max_seq_len_ for t in tk_ )
__a : Any = torch.tensor(tk_ ) # (bs, max_seq_len_)
__a : Optional[Any] = torch.tensor(__UpperCamelCase ) # (bs)
return tk_t, lg_t | 697 | 0 |
'''simple docstring'''
import re
from pathlib import Path
from unittest import TestCase
import pytest
@pytest.mark.integration
class SCREAMING_SNAKE_CASE__ ( __lowerCamelCase ):
def __lowerCamelCase ( self , __UpperCamelCase ):
'''simple docstring'''
with open(UpperCAmelCase_ , encoding="""utf-8""" ) as input_file:
__a : List[Any] = re.compile(r"""(?!.*\b(?:encoding|rb|w|wb|w+|wb+|ab|ab+)\b)(?<=\s)(open)\((.*)\)""" )
__a : Dict = input_file.read()
__a : Dict = regexp.search(UpperCAmelCase_ )
return match
def __lowerCamelCase ( self , __UpperCamelCase ):
'''simple docstring'''
with open(UpperCAmelCase_ , encoding="""utf-8""" ) as input_file:
__a : Tuple = re.compile(r"""#[^\r\n]*print\(|\"[^\r\n]*print\(|\"\"\".*?print\(.*?\"\"\"|(print\()""" , re.DOTALL )
__a : List[str] = input_file.read()
# use `re.finditer` to handle the case where the ignored groups would be matched first by `re.search`
__a : Union[str, Any] = regexp.finditer(UpperCAmelCase_ )
__a : Tuple = [match for match in matches if match is not None and match.group(1 ) is not None]
return matches[0] if matches else None
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Dict = Path("""./datasets""" )
__a : List[str] = list(dataset_paths.absolute().glob("""**/*.py""" ) )
for dataset in dataset_files:
if self._no_encoding_on_file_open(str(UpperCAmelCase_ ) ):
raise AssertionError(f"""open(...) must use utf-8 encoding in {dataset}""" )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Optional[Any] = Path("""./datasets""" )
__a : str = list(dataset_paths.absolute().glob("""**/*.py""" ) )
for dataset in dataset_files:
if self._no_print_statements(str(UpperCAmelCase_ ) ):
raise AssertionError(f"""print statement found in {dataset}. Use datasets.logger/logging instead.""" ) | 706 |
'''simple docstring'''
from pathlib import PurePosixPath
from typing import Optional
import fsspec
from fsspec import AbstractFileSystem
from huggingface_hub.hf_api import DatasetInfo
from ..utils.file_utils import get_authentication_headers_for_url
from ..utils.hub import hf_hub_url
class SCREAMING_SNAKE_CASE__ ( __UpperCamelCase ):
lowercase__ = ""
lowercase__ = "hf-legacy" # "hf://"" is reserved for hffs
def __init__( self , __UpperCamelCase = None , __UpperCamelCase = None , **__UpperCamelCase , ):
'''simple docstring'''
super().__init__(self , **__UpperCamelCase )
__a : int = repo_info
__a : int = token
__a : Any = None
def __lowerCamelCase ( self ):
'''simple docstring'''
if self.dir_cache is None:
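            # Build a flat path -> info cache: one "file" entry per repo sibling plus
            # synthesized "directory" entries for each parent path.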
__a : Union[str, Any] = {}
for hf_file in self.repo_info.siblings:
# TODO(QL): add sizes
__a : List[str] = {
"""name""": hf_file.rfilename,
"""size""": None,
"""type""": """file""",
}
self.dir_cache.update(
{
str(__UpperCamelCase ): {"""name""": str(__UpperCamelCase ), """size""": None, """type""": """directory"""}
for d in list(PurePosixPath(hf_file.rfilename ).parents )[:-1]
} )
def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase = "rb" , **__UpperCamelCase , ):
'''simple docstring'''
if not isinstance(self.repo_info , __UpperCamelCase ):
raise NotImplementedError(f"""Open is only implemented for dataset repositories, but got {self.repo_info}""" )
__a : Any = hf_hub_url(self.repo_info.id , __UpperCamelCase , revision=self.repo_info.sha )
return fsspec.open(
__UpperCamelCase , mode=__UpperCamelCase , headers=get_authentication_headers_for_url(__UpperCamelCase , use_auth_token=self.token ) , client_kwargs={"""trust_env""": True} , ).open()
def __lowerCamelCase ( self , __UpperCamelCase , **__UpperCamelCase ):
'''simple docstring'''
self._get_dirs()
__a : str = self._strip_protocol(__UpperCamelCase )
if path in self.dir_cache:
return self.dir_cache[path]
else:
raise FileNotFoundError(__UpperCamelCase )
    def __lowerCamelCase ( self , path , detail=False , **kwargs ):
        '''simple docstring'''
        self._get_dirs()
        path = PurePosixPath(path.strip("""/""" ) )
        paths = {}
        for p, f in self.dir_cache.items():
            p = PurePosixPath(p.strip("""/""" ) )
            root = p.parent
            if root == path:
                paths[str(p )] = f
        out = list(paths.values() )
        if detail:
            return out
        else:
            return sorted(f["""name"""] for f in out ) | 697 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__SCREAMING_SNAKE_CASE : Dict = logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE : Optional[Any] = {
"facebook/s2t-small-librispeech-asr": (
"https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/config.json"
),
# See all Speech2Text models at https://huggingface.co/models?filter=speech_to_text
}
class SCREAMING_SNAKE_CASE__ ( PretrainedConfig ):
    model_type = 'speech_to_text'
    keys_to_ignore_at_inference = ['past_key_values']
    attribute_map = {'num_attention_heads': 'encoder_attention_heads', 'hidden_size': 'd_model'}
    def __init__( self , vocab_size=1_0000 , encoder_layers=12 , encoder_ffn_dim=2048 , encoder_attention_heads=4 , decoder_layers=6 , decoder_ffn_dim=2048 , decoder_attention_heads=4 , encoder_layerdrop=0.0 , decoder_layerdrop=0.0 , use_cache=True , is_encoder_decoder=True , activation_function="relu" , d_model=256 , dropout=0.1 , attention_dropout=0.0 , activation_dropout=0.0 , init_std=0.0_2 , decoder_start_token_id=2 , scale_embedding=True , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , max_source_positions=6000 , max_target_positions=1024 , num_conv_layers=2 , conv_kernel_sizes=(5, 5) , conv_channels=1024 , input_feat_per_channel=80 , input_channels=1 , **kwargs , ):
        '''simple docstring'''
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding # scale factor will be sqrt(d_model) if True
        self.max_source_positions = max_source_positions
        self.max_target_positions = max_target_positions
        self.num_conv_layers = num_conv_layers
        self.conv_kernel_sizes = list(conv_kernel_sizes )
        self.conv_channels = conv_channels
        self.input_feat_per_channel = input_feat_per_channel
        self.input_channels = input_channels
        if len(self.conv_kernel_sizes ) != self.num_conv_layers:
            raise ValueError(
                """Configuration for convolutional module is incorrect. """
                """It is required that `len(config.conv_kernel_sizes)` == `config.num_conv_layers` """
                f"""but is `len(config.conv_kernel_sizes) = {len(self.conv_kernel_sizes )}`, """
                f"""`config.num_conv_layers = {self.num_conv_layers}`.""" )
        super().__init__(
            pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , is_encoder_decoder=is_encoder_decoder , decoder_start_token_id=decoder_start_token_id , **kwargs , ) | 707 |
'''simple docstring'''
import inspect
import unittest
from transformers import DPTConfig
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MODEL_MAPPING, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTModel
from transformers.models.dpt.modeling_dpt import DPT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class DPTModelTester :
    def __init__( self , parent , batch_size=2 , image_size=32 , patch_size=16 , num_channels=3 , is_training=True , use_labels=True , hidden_size=32 , num_hidden_layers=4 , backbone_out_indices=[0, 1, 2, 3] , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , initializer_range=0.0_2 , num_labels=3 , backbone_featmap_shape=[1, 384, 24, 24] , is_hybrid=True , scope=None , ):
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.backbone_out_indices = backbone_out_indices
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.backbone_featmap_shape = backbone_featmap_shape
        self.scope = scope
        self.is_hybrid = is_hybrid
        # sequence length of DPT = num_patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1
    def prepare_config_and_inputs( self ):
'''simple docstring'''
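        # random pixel_values stand in for a batch of images; labels are per-pixel class ids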
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
        config = self.get_config()
return config, pixel_values, labels
    def get_config( self ):
'''simple docstring'''
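        # the hybrid DPT variant wraps a small BiT-style convolutional backbone, configured here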
        backbone_config = {
"""global_padding""": """same""",
"""layer_type""": """bottleneck""",
"""depths""": [3, 4, 9],
"""out_features""": ["""stage1""", """stage2""", """stage3"""],
"""embedding_dynamic_padding""": True,
"""hidden_sizes""": [96, 192, 384, 768],
"""num_groups""": 2,
}
return DPTConfig(
            image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , backbone_out_indices=self.backbone_out_indices , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=False , initializer_range=self.initializer_range , is_hybrid=self.is_hybrid , backbone_config=backbone_config , backbone_featmap_shape=self.backbone_featmap_shape , )
    def create_and_check_model( self , config , pixel_values , labels ):
        '''simple docstring'''
        model = DPTModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_for_depth_estimation( self , config , pixel_values , labels ):
        '''simple docstring'''
        config.num_labels = self.num_labels
        model = DPTForDepthEstimation(config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        self.parent.assertEqual(result.predicted_depth.shape , (self.batch_size, self.image_size, self.image_size) )
    def create_and_check_for_semantic_segmentation( self , config , pixel_values , labels ):
        '''simple docstring'''
        config.num_labels = self.num_labels
        model = DPTForSemanticSegmentation(config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values , labels=labels )
        self.parent.assertEqual(
            result.logits.shape , (self.batch_size, self.num_labels, self.image_size, self.image_size) )
    def prepare_config_and_inputs_for_common( self ):
        '''simple docstring'''
        __a : Optional[int] = self.prepare_config_and_inputs()
        config , pixel_values , labels = __a
        inputs_dict = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class SCREAMING_SNAKE_CASE__ ( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (DPTModel, DPTForDepthEstimation, DPTForSemanticSegmentation) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "depth-estimation": DPTForDepthEstimation,
            "feature-extraction": DPTModel,
            "image-segmentation": DPTForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    def setUp( self ):
        '''simple docstring'''
        self.model_tester = DPTModelTester(self )
        self.config_tester = ConfigTester(self , config_class=DPTConfig , has_text_modality=False , hidden_size=37 )
def __lowerCamelCase ( self ):
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason="""DPT does not use inputs_embeds""" )
def __lowerCamelCase ( self ):
'''simple docstring'''
pass
def __lowerCamelCase ( self ):
'''simple docstring'''
        config , _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x , nn.Linear ) )
def __lowerCamelCase ( self ):
'''simple docstring'''
        config , _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["""pixel_values"""]
            self.assertListEqual(arg_names[:1] , expected_arg_names )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : List[Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*__a )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Optional[int] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_depth_estimation(*__a )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Optional[int] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*__a )
def __lowerCamelCase ( self ):
'''simple docstring'''
        for model_class in self.all_model_classes:
            if model_class.__name__ == "DPTForDepthEstimation":
                continue
            config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
            config.return_dict = True
            if model_class in get_values(MODEL_MAPPING ):
                continue
            model = model_class(config )
            model.to(torch_device )
            model.train()
            inputs = self._prepare_for_class(inputs_dict , model_class , return_labels=True )
            loss = model(**inputs ).loss
            loss.backward()
def __lowerCamelCase ( self ):
'''simple docstring'''
        for model_class in self.all_model_classes:
            if model_class.__name__ == "DPTForDepthEstimation":
                continue
            config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
            config.use_cache = False
            config.return_dict = True
            if model_class in get_values(MODEL_MAPPING ) or not model_class.supports_gradient_checkpointing:
                continue
            model = model_class(config )
            model.to(torch_device )
            model.gradient_checkpointing_enable()
            model.train()
            inputs = self._prepare_for_class(inputs_dict , model_class , return_labels=True )
            loss = model(**inputs ).loss
            loss.backward()
def __lowerCamelCase ( self ):
'''simple docstring'''
        config , _ = self.model_tester.prepare_config_and_inputs_for_common()
        configs_no_init = _config_zero_init(config )
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init )
            # Skip the check for the backbone
            backbone_params = []
            for name, module in model.named_modules():
                if module.__class__.__name__ == "DPTViTHybridEmbeddings":
                    backbone_params = [f"""{name}.{key}""" for key in module.state_dict().keys()]
                    break
for name, param in model.named_parameters():
if param.requires_grad:
if name in backbone_params:
continue
self.assertIn(
((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=f"""Parameter {name} of model {model_class} seems not properly initialized""" , )
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def __lowerCamelCase ( self ):
'''simple docstring'''
pass
@slow
def __lowerCamelCase ( self ):
'''simple docstring'''
for model_name in DPT_PRETRAINED_MODEL_ARCHIVE_LIST[1:]:
            model = DPTModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
def __lowerCamelCase ( self ):
'''simple docstring'''
        config , _ = self.model_tester.prepare_config_and_inputs_for_common()
        config.readout_type = """add"""
        with self.assertRaises(ValueError ):
            DPTForDepthEstimation(config )
def prepare_img():
    image = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
    return image
@require_torch
@require_vision
@slow
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
def __lowerCamelCase ( self ):
'''simple docstring'''
        image_processor = DPTImageProcessor.from_pretrained("""Intel/dpt-hybrid-midas""" )
        model = DPTForDepthEstimation.from_pretrained("""Intel/dpt-hybrid-midas""" ).to(torch_device )
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors="""pt""" ).to(torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )
        predicted_depth = outputs.predicted_depth
        # verify the predicted depth
        expected_shape = torch.Size((1, 384, 384) )
        self.assertEqual(predicted_depth.shape , expected_shape )
        expected_slice = torch.tensor(
            [[[5.6_4_3_7, 5.6_1_4_6, 5.6_5_1_1], [5.4_3_7_1, 5.5_6_4_9, 5.5_9_5_8], [5.5_2_1_5, 5.5_1_8_4, 5.5_2_9_3]]] ).to(torch_device )
        self.assertTrue(torch.allclose(outputs.predicted_depth[:3, :3, :3] / 100 , expected_slice , atol=1E-4 ) ) | 697 | 0 |
'''simple docstring'''
import re
from flax.core.frozen_dict import freeze
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.experimental import PartitionSpec as P
# Sentinels
_unmatched = object()
# For specifying empty leaf dict `{}`
empty_dict = object()
def _match( qs , ks ):
    __a : str = tuple(re.compile(x + """$""" ) for x in qs )
    for i in range(len(ks ) - len(qs ) + 1 ):
        matches = [x.match(y ) for x, y in zip(__a , ks[i:] )]
        if matches and all(matches ):
            return True
    return False
def _replacement_rules( rules ):
    def replace( key , val ):
        for rule, replacement in rules:
            if _match(rule , key ):
                return replacement
        return val
    return replace
def _get_partition_rules():
return [
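        # each entry maps a parameter-path pattern to a PartitionSpec: None replicates that axis,
        # "mp" shards it across the model-parallel mesh axis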
# embeddings
(("transformer", "wpe", "embedding"), P("""mp""" , __A )),
(("transformer", "wte", "embedding"), P("""mp""" , __A )),
# atention
(("attention", "(q_proj|k_proj|v_proj)", "kernel"), P(__A , """mp""" )),
(("attention", "out_proj", "kernel"), P("""mp""" , __A )),
(("attention", "out_proj", "bias"), None),
# mlp
(("mlp", "c_fc", "kernel"), P(__A , """mp""" )),
(("mlp", "c_fc", "bias"), P("""mp""" )),
(("mlp", "c_proj", "kernel"), P("""mp""" , __A )),
(("mlp", "c_proj", "bias"), None),
# layer norms
((r"ln_\d+", "bias"), None),
((r"\d+", r"ln_\d+", "scale"), None),
(("ln_f", "bias"), None),
(("ln_f", "scale"), None),
]
def _snake_case ( in_dict ):
    rules = _get_partition_rules()
    replace = _replacement_rules(rules )
    initd = {k: _unmatched for k in flatten_dict(in_dict )}
    result = {k: replace(k , v ) for k, v in initd.items()}
    assert _unmatched not in result.values(), "Incomplete partition spec."
    return freeze(unflatten_dict(result ) ) | 708 |
'''simple docstring'''
import unittest
from .lib import (
Matrix,
Vector,
axpy,
square_zero_matrix,
unit_basis_vector,
zero_vector,
)
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
def __lowerCamelCase ( self ):
'''simple docstring'''
        x = Vector([1, 2, 3] )
        self.assertEqual(x.component(0 ) , 1 )
        self.assertEqual(x.component(2 ) , 3 )
__a : Optional[int] = Vector()
def __lowerCamelCase ( self ):
'''simple docstring'''
        x = Vector([0, 0, 0, 0, 0, 1] )
        self.assertEqual(str(x ) , """(0,0,0,0,0,1)""" )
def __lowerCamelCase ( self ):
'''simple docstring'''
        x = Vector([1, 2, 3, 4] )
        self.assertEqual(len(x ) , 4 )
def __lowerCamelCase ( self ):
'''simple docstring'''
        x = Vector([1, 2] )
        y = Vector([1, 2, 3, 4, 5] )
        z = Vector([0, 0, 0, 0, 0, 0, 0, 0, 0, 0] )
        w = Vector([1, -1, 1, -1, 2, -3, 4, -5] )
self.assertAlmostEqual(x.euclidean_length() , 2.2_3_6 , 3 )
self.assertAlmostEqual(y.euclidean_length() , 7.4_1_6 , 3 )
self.assertEqual(z.euclidean_length() , 0 )
self.assertAlmostEqual(w.euclidean_length() , 7.6_1_6 , 3 )
def __lowerCamelCase ( self ):
'''simple docstring'''
        x = Vector([1, 2, 3] )
        y = Vector([1, 1, 1] )
self.assertEqual((x + y).component(0 ) , 2 )
self.assertEqual((x + y).component(1 ) , 3 )
self.assertEqual((x + y).component(2 ) , 4 )
def __lowerCamelCase ( self ):
'''simple docstring'''
        x = Vector([1, 2, 3] )
        y = Vector([1, 1, 1] )
self.assertEqual((x - y).component(0 ) , 0 )
self.assertEqual((x - y).component(1 ) , 1 )
self.assertEqual((x - y).component(2 ) , 2 )
def __lowerCamelCase ( self ):
'''simple docstring'''
        x = Vector([1, 2, 3] )
        a = Vector([2, -1, 4] ) # for test of dot product
        b = Vector([1, -2, -1] )
self.assertEqual(str(x * 3.0 ) , """(3.0,6.0,9.0)""" )
self.assertEqual((a * b) , 0 )
def __lowerCamelCase ( self ):
'''simple docstring'''
self.assertEqual(str(zero_vector(10 ) ).count("""0""" ) , 10 )
def __lowerCamelCase ( self ):
'''simple docstring'''
self.assertEqual(str(unit_basis_vector(3 , 1 ) ) , """(0,1,0)""" )
def __lowerCamelCase ( self ):
'''simple docstring'''
        x = Vector([1, 2, 3] )
        y = Vector([1, 0, 1] )
        self.assertEqual(str(axpy(2 , x , y ) ) , """(3,4,7)""" )
def __lowerCamelCase ( self ):
'''simple docstring'''
        x = Vector([1, 0, 0, 0, 0, 0] )
        y = x.copy()
        self.assertEqual(str(x ) , str(y ) )
def __lowerCamelCase ( self ):
'''simple docstring'''
        x = Vector([1, 0, 0] )
        x.change_component(0 , 0 )
        x.change_component(1 , 1 )
        self.assertEqual(str(x ) , """(0,1,0)""" )
def __lowerCamelCase ( self ):
'''simple docstring'''
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
        self.assertEqual("""|1,2,3|\n|2,4,5|\n|6,7,8|\n""" , str(a ) )
def __lowerCamelCase ( self ):
'''simple docstring'''
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
        minors = [[-3, -14, -10], [-5, -10, -5], [-2, -1, 0]]
        for x in range(a.height() ):
            for y in range(a.width() ):
                self.assertEqual(minors[x][y] , a.minor(x , y ) )
def __lowerCamelCase ( self ):
'''simple docstring'''
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
        cofactors = [[-3, 14, -10], [5, -10, 5], [-2, 1, 0]]
        for x in range(a.height() ):
            for y in range(a.width() ):
                self.assertEqual(cofactors[x][y] , a.cofactor(x , y ) )
def __lowerCamelCase ( self ):
'''simple docstring'''
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
self.assertEqual(-5 , a.determinant() )
def __lowerCamelCase ( self ):
'''simple docstring'''
        a = Matrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]] , 3 , 3 )
        x = Vector([1, 2, 3] )
self.assertEqual("""(14,32,50)""" , str(a * x ) )
self.assertEqual("""|2,4,6|\n|8,10,12|\n|14,16,18|\n""" , str(a * 2 ) )
def __lowerCamelCase ( self ):
'''simple docstring'''
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
        a.change_component(0 , 2 , 5 )
        self.assertEqual("""|1,2,5|\n|2,4,5|\n|6,7,8|\n""" , str(a ) )
def __lowerCamelCase ( self ):
'''simple docstring'''
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
self.assertEqual(7 , a.component(2 , 1 ) , 0.0_1 )
def __lowerCamelCase ( self ):
'''simple docstring'''
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
        b = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]] , 3 , 3 )
self.assertEqual("""|2,4,10|\n|4,8,10|\n|12,14,18|\n""" , str(a + b ) )
def __lowerCamelCase ( self ):
'''simple docstring'''
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
        b = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]] , 3 , 3 )
self.assertEqual("""|0,0,-4|\n|0,0,0|\n|0,0,-2|\n""" , str(a - b ) )
def __lowerCamelCase ( self ):
'''simple docstring'''
self.assertEqual(
"""|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n""" , str(square_zero_matrix(5 ) ) , )
if __name__ == "__main__":
unittest.main() | 697 | 0 |
'''simple docstring'''
def get_data( source_data ):
    data_lists = []
    for data in source_data:
        for i, el in enumerate(data ):
            if len(data_lists ) < i + 1:
                data_lists.append([] )
            data_lists[i].append(float(el ) )
    return data_lists
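# each column is min-max normalized; a weight of 0 inverts the score (lower raw values score
# higher), a weight of 1 keeps it as-is (higher raw values score higher)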
def calculate_each_score( data_lists , weights ):
    score_lists = []
    for dlist, weight in zip(data_lists , weights ):
        mind = min(dlist )
        maxd = max(dlist )
        score = []
        # for weight 0 score is 1 - actual score
        if weight == 0:
            for item in dlist:
                try:
                    score.append(1 - ((item - mind) / (maxd - mind)) )
                except ZeroDivisionError:
                    score.append(1 )
        elif weight == 1:
            for item in dlist:
                try:
                    score.append((item - mind) / (maxd - mind) )
                except ZeroDivisionError:
                    score.append(0 )
        # weight not 0 or 1
        else:
            msg = f"""Invalid weight of {weight:f} provided"""
            raise ValueError(msg )
        score_lists.append(score )
    return score_lists
def generate_final_scores( score_lists ):
    final_scores = [0 for i in range(len(score_lists[0] ) )]
    for slist in score_lists:
        for j, ele in enumerate(slist ):
            final_scores[j] = final_scores[j] + ele
    return final_scores
def _snake_case ( source_data , weights ):
    data_lists = get_data(source_data )
    score_lists = calculate_each_score(data_lists , weights )
    final_scores = generate_final_scores(score_lists )
    # append scores to source data
    for i, ele in enumerate(final_scores ):
        source_data[i].append(ele )
    return source_data | 709 |
'''simple docstring'''
import os
from itertools import chain
from random import randrange, shuffle
import pytest
from .sol1 import PokerHand
SORTED_HANDS = (
'4S 3H 2C 7S 5H',
'9D 8H 2C 6S 7H',
'2D 6D 9D TH 7D',
'TC 8C 2S JH 6C',
'JH 8S TH AH QH',
'TS KS 5S 9S AC',
'KD 6S 9D TH AD',
'KS 8D 4D 9S 4S', # pair
'8C 4S KH JS 4D', # pair
'QH 8H KD JH 8S', # pair
'KC 4H KS 2H 8D', # pair
'KD 4S KC 3H 8S', # pair
'AH 8S AS KC JH', # pair
'3H 4C 4H 3S 2H', # 2 pairs
'5S 5D 2C KH KH', # 2 pairs
'3C KH 5D 5S KH', # 2 pairs
'AS 3C KH AD KH', # 2 pairs
'7C 7S 3S 7H 5S', # 3 of a kind
'7C 7S KH 2H 7H', # 3 of a kind
'AC KH QH AH AS', # 3 of a kind
'2H 4D 3C AS 5S', # straight (low ace)
'3C 5C 4C 2C 6H', # straight
'6S 8S 7S 5H 9H', # straight
'JS QS 9H TS KH', # straight
'QC KH TS JS AH', # straight (high ace)
'8C 9C 5C 3C TC', # flush
'3S 8S 9S 5S KS', # flush
'4C 5C 9C 8C KC', # flush
'JH 8H AH KH QH', # flush
'3D 2H 3H 2C 2D', # full house
'2H 2C 3S 3H 3D', # full house
'KH KC 3S 3H 3D', # full house
'JC 6H JS JD JH', # 4 of a kind
'JC 7H JS JD JH', # 4 of a kind
'JC KH JS JD JH', # 4 of a kind
'2S AS 4S 5S 3S', # straight flush (low ace)
'2D 6D 3D 4D 5D', # straight flush
'5C 6C 3C 7C 4C', # straight flush
'JH 9H TH KH QH', # straight flush
'JH AH TH KH QH', # royal flush (high ace straight flush)
)
TEST_COMPARE = (
('2H 3H 4H 5H 6H', 'KS AS TS QS JS', 'Loss'),
('2H 3H 4H 5H 6H', 'AS AD AC AH JD', 'Win'),
('AS AH 2H AD AC', 'JS JD JC JH 3D', 'Win'),
('2S AH 2H AS AC', 'JS JD JC JH AD', 'Loss'),
('2S AH 2H AS AC', '2H 3H 5H 6H 7H', 'Win'),
('AS 3S 4S 8S 2S', '2H 3H 5H 6H 7H', 'Win'),
('2H 3H 5H 6H 7H', '2S 3H 4H 5S 6C', 'Win'),
('2S 3H 4H 5S 6C', '3D 4C 5H 6H 2S', 'Tie'),
('2S 3H 4H 5S 6C', 'AH AC 5H 6H AS', 'Win'),
('2S 2H 4H 5S 4C', 'AH AC 5H 6H AS', 'Loss'),
('2S 2H 4H 5S 4C', 'AH AC 5H 6H 7S', 'Win'),
('6S AD 7H 4S AS', 'AH AC 5H 6H 7S', 'Loss'),
('2S AH 4H 5S KC', 'AH AC 5H 6H 7S', 'Loss'),
('2S 3H 6H 7S 9C', '7H 3C TH 6H 9S', 'Loss'),
('4S 5H 6H TS AC', '3S 5H 6H TS AC', 'Win'),
('2S AH 4H 5S 6C', 'AD 4C 5H 6H 2C', 'Tie'),
('AS AH 3H AD AC', 'AS AH 2H AD AC', 'Win'),
('AH AC 5H 5C QS', 'AH AC 5H 5C KS', 'Loss'),
('AH AC 5H 5C QS', 'KH KC 5H 5C QS', 'Win'),
('7C 7S KH 2H 7H', '3C 3S AH 2H 3H', 'Win'),
('3C 3S AH 2H 3H', '7C 7S KH 2H 7H', 'Loss'),
('6H 5H 4H 3H 2H', '5H 4H 3H 2H AH', 'Win'),
('5H 4H 3H 2H AH', '5H 4H 3H 2H AH', 'Tie'),
('5H 4H 3H 2H AH', '6H 5H 4H 3H 2H', 'Loss'),
('AH AD KS KC AC', 'AH KD KH AC KC', 'Win'),
('2H 4D 3C AS 5S', '2H 4D 3C 6S 5S', 'Loss'),
('2H 3S 3C 3H 2S', '3S 3C 2S 2H 2D', 'Win'),
('4D 6D 5D 2D JH', '3S 8S 3H TC KH', 'Loss'),
('4S 6C 8S 3S 7S', 'AD KS 2D 7D 7C', 'Loss'),
('6S 4C 7H 8C 3H', '5H JC AH 9D 9C', 'Loss'),
('9D 9H JH TC QH', '3C 2S JS 5C 7H', 'Win'),
('2H TC 8S AD 9S', '4H TS 7H 2C 5C', 'Win'),
('9D 3S 2C 7S 7C', 'JC TD 3C TC 9H', 'Loss'),
)
TEST_FLUSH = (
('2H 3H 4H 5H 6H', True),
('AS AH 2H AD AC', False),
('2H 3H 5H 6H 7H', True),
('KS AS TS QS JS', True),
('8H 9H QS JS TH', False),
('AS 3S 4S 8S 2S', True),
)
TEST_STRAIGHT = (
('2H 3H 4H 5H 6H', True),
('AS AH 2H AD AC', False),
('2H 3H 5H 6H 7H', False),
('KS AS TS QS JS', True),
('8H 9H QS JS TH', True),
)
TEST_FIVE_HIGH_STRAIGHT = (
('2H 4D 3C AS 5S', True, [5, 4, 3, 2, 14]),
('2H 5D 3C AS 5S', False, [14, 5, 5, 3, 2]),
('JH QD KC AS TS', False, [14, 13, 12, 11, 10]),
('9D 3S 2C 7S 7C', False, [9, 7, 7, 3, 2]),
)
TEST_KIND = (
('JH AH TH KH QH', 0),
('JH 9H TH KH QH', 0),
('JC KH JS JD JH', 7),
('KH KC 3S 3H 3D', 6),
('8C 9C 5C 3C TC', 0),
('JS QS 9H TS KH', 0),
('7C 7S KH 2H 7H', 3),
('3C KH 5D 5S KH', 2),
('QH 8H KD JH 8S', 1),
('2D 6D 9D TH 7D', 0),
)
TEST_TYPES = (
('JH AH TH KH QH', 23),
('JH 9H TH KH QH', 22),
('JC KH JS JD JH', 21),
('KH KC 3S 3H 3D', 20),
('8C 9C 5C 3C TC', 19),
('JS QS 9H TS KH', 18),
('7C 7S KH 2H 7H', 17),
('3C KH 5D 5S KH', 16),
('QH 8H KD JH 8S', 15),
('2D 6D 9D TH 7D', 14),
)
def generate_random_hand():
    play, oppo = randrange(len(SORTED_HANDS ) ), randrange(len(SORTED_HANDS ) )
    expected = ["""Loss""", """Tie""", """Win"""][(play >= oppo) + (play > oppo)]
    hand, other = SORTED_HANDS[play], SORTED_HANDS[oppo]
    return hand, other, expected
def generate_random_hands( number_of_hands = 1_0_0 ):
    return (generate_random_hand() for _ in range(number_of_hands ))
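# each parametrized test below exercises one private PokerHand helper against the fixture tables above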
@pytest.mark.parametrize("""hand, expected""" , TEST_FLUSH )
def _snake_case ( hand , expected ):
    assert PokerHand(hand )._is_flush() == expected
@pytest.mark.parametrize("""hand, expected""" , TEST_STRAIGHT )
def _snake_case ( hand , expected ):
    assert PokerHand(hand )._is_straight() == expected
@pytest.mark.parametrize("""hand, expected, card_values""" , TEST_FIVE_HIGH_STRAIGHT )
def _snake_case ( hand , expected , card_values ):
    player = PokerHand(hand )
    assert player._is_five_high_straight() == expected
    assert player._card_values == card_values
@pytest.mark.parametrize("""hand, expected""" , TEST_KIND )
def _snake_case ( hand , expected ):
    assert PokerHand(hand )._is_same_kind() == expected
@pytest.mark.parametrize("""hand, expected""" , TEST_TYPES )
def _snake_case ( hand , expected ):
    assert PokerHand(hand )._hand_type == expected
@pytest.mark.parametrize("""hand, other, expected""" , TEST_COMPARE )
def _snake_case ( hand , other , expected ):
    assert PokerHand(hand ).compare_with(PokerHand(other ) ) == expected
@pytest.mark.parametrize("""hand, other, expected""" , generate_random_hands() )
def _snake_case ( hand , other , expected ):
    assert PokerHand(hand ).compare_with(PokerHand(other ) ) == expected
def _snake_case ( ):
    poker_hands = [PokerHand(hand ) for hand in SORTED_HANDS]
    list_copy = poker_hands.copy()
    shuffle(list_copy )
    user_sorted = chain(sorted(list_copy ) )
    for index, hand in enumerate(user_sorted ):
        assert hand == poker_hands[index]
def _snake_case ( ):
    # Test that five high straights are compared correctly.
    pokerhands = [PokerHand("""2D AC 3H 4H 5S""" ), PokerHand("""2S 3H 4H 5S 6C""" )]
    pokerhands.sort(reverse=True )
    assert pokerhands[0].__str__() == "2S 3H 4H 5S 6C"
def _snake_case ( ):
    # Multiple calls to five_high_straight function should still return True
    # and shouldn't mutate the list in every call other than the first.
    pokerhand = PokerHand("""2C 4S AS 3D 5C""" )
    expected = True
    expected_card_values = [5, 4, 3, 2, 1_4]
    for _ in range(1_0 ):
        assert pokerhand._is_five_high_straight() == expected
        assert pokerhand._card_values == expected_card_values
def _snake_case ( ):
    # Problem number 54 from Project Euler
    # Testing from poker_hands.txt file
    answer = 0
    script_dir = os.path.abspath(os.path.dirname(__file__ ) )
    poker_hands = os.path.join(script_dir , """poker_hands.txt""" )
    with open(poker_hands ) as file_hand:
        for line in file_hand:
            player_hand = line[:1_4].strip()
            opponent_hand = line[1_5:].strip()
            player, opponent = PokerHand(player_hand ), PokerHand(opponent_hand )
            output = player.compare_with(opponent )
            if output == "Win":
                answer += 1
    assert answer == 3_7_6 | 697 | 0 |
'''simple docstring'''
from __future__ import annotations
from collections import deque
from collections.abc import Sequence
from dataclasses import dataclass
from typing import Any
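# a simple binary-tree node; every traversal below takes the tree's root and returns node values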
@dataclass
class Node :
    data: int
    left: Node | None = None
    right: Node | None = None
def make_tree() -> Node | None:
    tree = Node(1 )
    tree.left = Node(2 )
    tree.right = Node(3 )
    tree.left.left = Node(4 )
    tree.left.right = Node(5 )
    return tree
def preorder( root ) -> list[int]:
    return [root.data, *preorder(root.left ), *preorder(root.right )] if root else []
def postorder( root ) -> list[int]:
    return postorder(root.left ) + postorder(root.right ) + [root.data] if root else []
def inorder( root ) -> list[int]:
    return [*inorder(root.left ), root.data, *inorder(root.right )] if root else []
def height( root ) -> int:
    return (max(height(root.left ) , height(root.right ) ) + 1) if root else 0
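# breadth-first traversal: a FIFO queue yields node values level by level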
def level_order( root ) -> Sequence[Node | None]:
    output: list[Any] = []
    if root is None:
        return output
    process_queue = deque([root] )
    while process_queue:
        node = process_queue.popleft()
        output.append(node.data )
        if node.left:
            process_queue.append(node.left )
        if node.right:
            process_queue.append(node.right )
    return output
def get_nodes_from_left_to_right( root , level ) -> Sequence[Node | None]:
    output: list[Any] = []
    def populate_output( root , level ) -> None:
        if not root:
            return
        if level == 1:
            output.append(root.data )
        elif level > 1:
            populate_output(root.left , level - 1 )
            populate_output(root.right , level - 1 )
    populate_output(root , level )
    return output
def get_nodes_from_right_to_left( root , level ) -> Sequence[Node | None]:
    output: list[Any] = []
    def populate_output( root , level ) -> None:
        if root is None:
            return
        if level == 1:
            output.append(root.data )
        elif level > 1:
            populate_output(root.right , level - 1 )
            populate_output(root.left , level - 1 )
    populate_output(root , level )
    return output
def zigzag( root ) -> Sequence[Node | None] | list[Any]:
    if root is None:
        return []
    output: list[Sequence[Node | None]] = []
    flag = 0
    height_tree = height(root )
    for h in range(1 , height_tree + 1 ):
        if not flag:
            output.append(get_nodes_from_left_to_right(root , h ) )
            flag = 1
        else:
            output.append(get_nodes_from_right_to_left(root , h ) )
            flag = 0
    return output
def main() -> None: # Main function for testing.
    root = make_tree()
    print(F"""In-order Traversal: {inorder(root )}""" )
    print(F"""Pre-order Traversal: {preorder(root )}""" )
    print(F"""Post-order Traversal: {postorder(root )}""" , """\n""" )
    print(F"""Height of Tree: {height(root )}""" , """\n""" )
    print("""Complete Level Order Traversal: """ )
    print(level_order(root ) , """\n""" )
    print("""Level-wise order Traversal: """ )
    for level in range(1 , height(root ) + 1 ):
        print(F"""Level {level}:""" , get_nodes_from_left_to_right(root , level=level ) )
    print("""\nZigZag order Traversal: """ )
    print(zigzag(root ) )
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    main()
| 710 |
'''simple docstring'''
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__SCREAMING_SNAKE_CASE : Optional[Any] = {'configuration_focalnet': ['FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP', 'FocalNetConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE : List[Any] = [
'FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST',
'FocalNetForImageClassification',
'FocalNetForMaskedImageModeling',
'FocalNetBackbone',
'FocalNetModel',
'FocalNetPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_focalnet import (
FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
FocalNetPreTrainedModel,
)
else:
import sys
__SCREAMING_SNAKE_CASE : Optional[int] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) | 697 | 0 |
'''simple docstring'''
import math
def check_partition_perfect( positive_integer ):
    exponent = math.log2(math.sqrt(4 * positive_integer + 1 ) / 2 + 1 / 2 )
    return exponent == int(exponent )
def solution( max_proportion = 1 / 1_2_3_4_5 ):
    total_partitions = 0
    perfect_partitions = 0
    integer = 3
    while True:
        partition_candidate = (integer**2 - 1) / 4
        # if candidate is an integer, then there is a partition for k
        if partition_candidate == int(partition_candidate ):
            partition_candidate = int(partition_candidate )
            total_partitions += 1
            if check_partition_perfect(partition_candidate ):
                perfect_partitions += 1
        if perfect_partitions > 0:
            if perfect_partitions / total_partitions < max_proportion:
                return int(partition_candidate )
        integer += 1
if __name__ == "__main__":
print(f'''{solution() = }''') | 711 |
'''simple docstring'''
from __future__ import annotations
import bisect
def bisect_left( sorted_collection , item , lo = 0 , hi = -1 ) -> int:
    if hi < 0:
        hi = len(sorted_collection )
    while lo < hi:
        mid = lo + (hi - lo) // 2
        if sorted_collection[mid] < item:
            lo = mid + 1
        else:
            hi = mid
    return lo
def bisect_right( sorted_collection , item , lo = 0 , hi = -1 ) -> int:
    if hi < 0:
        hi = len(sorted_collection )
    while lo < hi:
        mid = lo + (hi - lo) // 2
        if sorted_collection[mid] <= item:
            lo = mid + 1
        else:
            hi = mid
    return lo
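# the insort helpers insert an item while keeping the collection sorted, mirroring `bisect.insort_*`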
def _snake_case ( sorted_collection , item , lo = 0 , hi = -1 ) -> None:
    sorted_collection.insert(bisect_left(sorted_collection , item , lo , hi ) , item )
def _snake_case ( sorted_collection , item , lo = 0 , hi = -1 ) -> None:
    sorted_collection.insert(bisect_right(sorted_collection , item , lo , hi ) , item )
def binary_search( sorted_collection , item ) -> int | None:
    left = 0
    right = len(sorted_collection ) - 1
    while left <= right:
        midpoint = left + (right - left) // 2
        current_item = sorted_collection[midpoint]
        if current_item == item:
            return midpoint
        elif item < current_item:
            right = midpoint - 1
        else:
            left = midpoint + 1
    return None
def _snake_case ( sorted_collection , item ) -> int | None:
    index = bisect.bisect_left(sorted_collection , item )
    if index != len(sorted_collection ) and sorted_collection[index] == item:
        return index
    return None
def binary_search_by_recursion( sorted_collection , item , left , right ) -> int | None:
    if right < left:
        return None
    midpoint = left + (right - left) // 2
    if sorted_collection[midpoint] == item:
        return midpoint
    elif sorted_collection[midpoint] > item:
        return binary_search_by_recursion(sorted_collection , item , left , midpoint - 1 )
    else:
        return binary_search_by_recursion(sorted_collection , item , midpoint + 1 , right )
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE : List[Any] = input('Enter numbers separated by comma:\n').strip()
__SCREAMING_SNAKE_CASE : Optional[Any] = sorted(int(item) for item in user_input.split(','))
__SCREAMING_SNAKE_CASE : List[str] = int(input('Enter a single number to be found in the list:\n'))
__SCREAMING_SNAKE_CASE : Optional[int] = binary_search(collection, target)
if result is None:
print(f'''{target} was not found in {collection}.''')
else:
print(f'''{target} was found at position {result} in {collection}.''') | 697 | 0 |
import argparse
from ...utils.dataclasses import (
ComputeEnvironment,
DistributedType,
DynamoBackend,
PrecisionType,
SageMakerDistributedType,
)
from ..menu import BulletMenu
__SCREAMING_SNAKE_CASE : Optional[Any] = [
"EAGER",
"AOT_EAGER",
"INDUCTOR",
"NVFUSER",
"AOT_NVFUSER",
"AOT_CUDAGRAPHS",
"OFI",
"FX2TRT",
"ONNXRT",
"IPEX",
]
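# interactive prompt helpers: ask until the answer parses, then coerce it with one of the converters below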
def _snake_case ( input_text , convert_value=None , default=None , error_message=None ):
    ask_again = True
    while ask_again:
        result = input(input_text )
        try:
            if default is not None and len(result ) == 0:
                return default
            return convert_value(result ) if convert_value is not None else result
        except Exception:
            if error_message is not None:
                print(error_message )
def _snake_case ( input_text , options=[] , convert_value=None , default=0 ):
    menu = BulletMenu(input_text , options )
    result = menu.run(default_choice=default )
    return convert_value(result ) if convert_value is not None else result
def _snake_case ( value ):
    value = int(value )
    return ComputeEnvironment(["""LOCAL_MACHINE""", """AMAZON_SAGEMAKER"""][value] )
def _snake_case ( value ):
    value = int(value )
    return DistributedType(["""NO""", """MULTI_CPU""", """MULTI_XPU""", """MULTI_GPU""", """MULTI_NPU""", """TPU"""][value] )
def _snake_case ( value ):
    value = int(value )
    return DynamoBackend(DYNAMO_BACKENDS[value] ).value
def _snake_case ( value ):
    value = int(value )
    return PrecisionType(["""no""", """fp16""", """bf16""", """fp8"""][value] )
def _snake_case ( value ):
    value = int(value )
    return SageMakerDistributedType(["""NO""", """DATA_PARALLEL""", """MODEL_PARALLEL"""][value] )
def _snake_case ( value ):
    return {"yes": True, "no": False}[value.lower()]
class SCREAMING_SNAKE_CASE__ ( argparse.RawDescriptionHelpFormatter ):
    def _format_usage( self , usage , actions , groups , prefix ):
        '''simple docstring'''
        usage = super()._format_usage(usage , actions , groups , prefix )
        usage = usage.replace("""<command> [<args>] """ , """""" )
        return usage | 712 |
'''simple docstring'''
from itertools import product
def total_frequency_distribution( sides_number , dice_number ) -> list[int]:
    max_face_number = sides_number
    max_total = max_face_number * dice_number
    totals_frequencies = [0] * (max_total + 1)
    min_face_number = 1
    face_numbers = range(min_face_number , max_face_number + 1 )
    for dice_numbers in product(face_numbers , repeat=dice_number ):
        total = sum(dice_numbers )
        totals_frequencies[total] += 1
    return totals_frequencies
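# Project Euler 205: Peter rolls nine 4-sided dice, Colin rolls six 6-sided dice;
# Peter wins when his total is strictly greater than Colin's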
def solution() -> float:
    peter_totals_frequencies = total_frequency_distribution(
        sides_number=4 , dice_number=9 )
    colin_totals_frequencies = total_frequency_distribution(
        sides_number=6 , dice_number=6 )
    peter_wins_count = 0
    min_peter_total = 9
    max_peter_total = 4 * 9
    min_colin_total = 6
    for peter_total in range(min_peter_total , max_peter_total + 1 ):
        peter_wins_count += peter_totals_frequencies[peter_total] * sum(
            colin_totals_frequencies[min_colin_total:peter_total] )
    total_games_number = (4**9) * (6**6)
    peter_win_probability = peter_wins_count / total_games_number
    rounded_peter_win_probability = round(peter_win_probability , ndigits=7 )
    return rounded_peter_win_probability
if __name__ == "__main__":
print(f'''{solution() = }''') | 697 | 0 |