"""simple docstring"""
import json
import os
import unittest
from transformers import DebertaTokenizer, DebertaTokenizerFast
from transformers.models.deberta.tokenization_deberta import VOCAB_FILES_NAMES
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class a ( lowerCamelCase__, unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase = DebertaTokenizer
UpperCAmelCase = True
UpperCAmelCase = DebertaTokenizerFast
def UpperCamelCase ( self: List[str] ):
"""simple docstring"""
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
A__ = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""\u0120""",
"""\u0120l""",
"""\u0120n""",
"""\u0120lo""",
"""\u0120low""",
"""er""",
"""\u0120lowest""",
"""\u0120newer""",
"""\u0120wider""",
"""[UNK]""",
]
A__ = dict(zip(lowercase__ , range(len(lowercase__ ) ) ) )
A__ = ["""#version: 0.2""", """\u0120 l""", """\u0120l o""", """\u0120lo w""", """e r""", """"""]
A__ = {"""unk_token""": """[UNK]"""}
A__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
A__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(lowercase__ ) + """\n""" )
with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write("""\n""".join(lowercase__ ) )
def UpperCamelCase ( self: int , **UpperCamelCase: Any ):
"""simple docstring"""
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **lowercase__ )
def UpperCamelCase ( self: Union[str, Any] , UpperCamelCase: int ):
"""simple docstring"""
A__ = """lower newer"""
A__ = """lower newer"""
return input_text, output_text
def UpperCamelCase ( self: List[str] ):
"""simple docstring"""
A__ = self.get_tokenizer()
A__ = """lower newer"""
A__ = ["""l""", """o""", """w""", """er""", """\u0120""", """n""", """e""", """w""", """er"""]
A__ = tokenizer.tokenize(lowercase__ )
self.assertListEqual(lowercase__ , lowercase__ )
A__ = tokens + [tokenizer.unk_token]
A__ = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowercase__ ) , lowercase__ )
def UpperCamelCase ( self: str ):
"""simple docstring"""
A__ = self.get_tokenizer()
A__ = tokenizer("""Hello""" , """World""" )
A__ = [0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1]
self.assertListEqual(tokd["""token_type_ids"""] , lowercase__ )
@slow
def UpperCamelCase ( self: Any ):
"""simple docstring"""
A__ = self.tokenizer_class.from_pretrained("""microsoft/deberta-base""" )
A__ = tokenizer.encode("""sequence builders""" , add_special_tokens=lowercase__ )
A__ = tokenizer.encode("""multi-sequence build""" , add_special_tokens=lowercase__ )
A__ = tokenizer.encode(
"""sequence builders""" , add_special_tokens=lowercase__ , add_prefix_space=lowercase__ )
A__ = tokenizer.encode(
"""sequence builders""" , """multi-sequence build""" , add_special_tokens=lowercase__ , add_prefix_space=lowercase__ )
A__ = tokenizer.build_inputs_with_special_tokens(lowercase__ )
A__ = tokenizer.build_inputs_with_special_tokens(lowercase__ , lowercase__ )
assert encoded_sentence == encoded_text_from_decode
assert encoded_pair == encoded_pair_from_decode
@slow
def UpperCamelCase ( self: Optional[Any] ):
"""simple docstring"""
A__ = [self.tokenizer_class]
if self.test_rust_tokenizer:
tokenizer_classes.append(self.rust_tokenizer_class )
for tokenizer_class in tokenizer_classes:
A__ = tokenizer_class.from_pretrained("""microsoft/deberta-base""" )
A__ = [
"""ALBERT: A Lite BERT for Self-supervised Learning of Language Representations""",
"""ALBERT incorporates two parameter reduction techniques""",
"""The first one is a factorized embedding parameterization. By decomposing the large vocabulary"""
""" embedding matrix into two small matrices, we separate the size of the hidden layers from the size of"""
""" vocabulary embedding.""",
]
A__ = tokenizer(lowercase__ , padding=lowercase__ )
A__ = [tokenizer.decode(lowercase__ , skip_special_tokens=lowercase__ ) for seq in encoding["""input_ids"""]]
# fmt: off
A__ = {
"""input_ids""": [
[1, 21_18, 1_11_26, 5_65, 35, 83, 2_51_91, 1_63, 1_88_54, 13, 1_21_56, 12, 1_61_01, 2_53_76, 1_38_07, 9, 2_22_05, 2_78_93, 16_35, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 21_18, 1_11_26, 5_65, 2_45_36, 80, 4_37_97, 48_78, 73_73, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1_33, 78, 65, 16, 10, 37_24, 15_38, 3_31_83, 1_13_03, 4_37_97, 19_38, 4, 8_70, 2_41_65, 2_91_05, 5, 7_39, 3_26_44, 3_31_83, 1_13_03, 3_61_73, 88, 80, 6_50, 78_21, 4_59_40, 6, 52, 25_59, 5, 18_36, 9, 5, 73_97, 1_31_71, 31, 5, 18_36, 9, 3_26_44, 3_31_83, 1_13_03, 4, 2]
],
"""token_type_ids""": [
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
],
"""attention_mask""": [
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
]
}
# fmt: on
A__ = [
"""ALBERT: A Lite BERT for Self-supervised Learning of Language Representations""",
"""ALBERT incorporates two parameter reduction techniques""",
"""The first one is a factorized embedding parameterization. By decomposing the large vocabulary"""
""" embedding matrix into two small matrices, we separate the size of the hidden layers from the size of"""
""" vocabulary embedding.""",
]
self.assertDictEqual(encoding.data , lowercase__ )
for expected, decoded in zip(lowercase__ , lowercase__ ):
self.assertEqual(lowercase__ , lowercase__ )
"""End-to-end tests for the PyTorch `no_trainer` example scripts, launched through `accelerate`."""
import argparse
import json
import logging
import os
import shutil
import sys
import tempfile
import unittest
from unittest import mock

import torch

from accelerate.utils import write_basic_config
from transformers.testing_utils import TestCasePlus, get_gpu_count, run_command, slow, torch_device
from transformers.utils import is_apex_available


logging.basicConfig(level=logging.DEBUG)

logger = logging.getLogger()


def get_setup_file():
    parser = argparse.ArgumentParser()
    parser.add_argument("-f")
    args = parser.parse_args()
    return args.f


def get_results(output_dir):
    results = {}
    path = os.path.join(output_dir, "all_results.json")
    if os.path.exists(path):
        with open(path, "r") as f:
            results = json.load(f)
    else:
        raise ValueError(f"can't find {path}")
    return results


def is_cuda_and_apex_available():
    is_using_cuda = torch.cuda.is_available() and torch_device == "cuda"
    return is_using_cuda and is_apex_available()


stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)


class ExamplesTestsNoTrainer(TestCasePlus):
    @classmethod
    def setUpClass(cls):
        # Write an Accelerate config; it will pick up CPU, GPU, and multi-GPU setups automatically
        cls.tmpdir = tempfile.mkdtemp()
        cls.configPath = os.path.join(cls.tmpdir, "default_config.yml")
        write_basic_config(save_location=cls.configPath)
        cls._launch_args = ["accelerate", "launch", "--config_file", cls.configPath]

    @classmethod
    def tearDownClass(cls):
        shutil.rmtree(cls.tmpdir)

    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_run_glue_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            {self.examples_dir}/pytorch/text-classification/run_glue_no_trainer.py
            --model_name_or_path distilbert-base-uncased
            --output_dir {tmp_dir}
            --train_file ./tests/fixtures/tests_samples/MRPC/train.csv
            --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=1
            --learning_rate=1e-4
            --seed=42
            --checkpointing_steps epoch
            --with_tracking
        """.split()

        if is_cuda_and_apex_available():
            testargs.append("--fp16")

        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        self.assertGreaterEqual(result["eval_accuracy"], 0.75)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "glue_no_trainer")))

    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_run_clm_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            {self.examples_dir}/pytorch/language-modeling/run_clm_no_trainer.py
            --model_name_or_path distilgpt2
            --train_file ./tests/fixtures/sample_text.txt
            --validation_file ./tests/fixtures/sample_text.txt
            --block_size 128
            --per_device_train_batch_size 5
            --per_device_eval_batch_size 5
            --num_train_epochs 2
            --output_dir {tmp_dir}
            --checkpointing_steps epoch
            --with_tracking
        """.split()

        if torch.cuda.device_count() > 1:
            # Skipping because there are not enough batches to train the model + would need a drop_last to work.
            return

        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        self.assertLess(result["perplexity"], 100)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "clm_no_trainer")))

    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_run_mlm_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            {self.examples_dir}/pytorch/language-modeling/run_mlm_no_trainer.py
            --model_name_or_path distilroberta-base
            --train_file ./tests/fixtures/sample_text.txt
            --validation_file ./tests/fixtures/sample_text.txt
            --output_dir {tmp_dir}
            --num_train_epochs=1
            --checkpointing_steps epoch
            --with_tracking
        """.split()

        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        self.assertLess(result["perplexity"], 42)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "mlm_no_trainer")))

    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_run_ner_no_trainer(self):
        # with so little data distributed training needs more epochs to get the score on par with 0/1 gpu
        epochs = 7 if get_gpu_count() > 1 else 2

        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            {self.examples_dir}/pytorch/token-classification/run_ner_no_trainer.py
            --model_name_or_path bert-base-uncased
            --train_file tests/fixtures/tests_samples/conll/sample.json
            --validation_file tests/fixtures/tests_samples/conll/sample.json
            --output_dir {tmp_dir}
            --learning_rate=2e-4
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=2
            --num_train_epochs={epochs}
            --seed 7
            --checkpointing_steps epoch
            --with_tracking
        """.split()

        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        self.assertGreaterEqual(result["eval_accuracy"], 0.75)
        self.assertLess(result["train_loss"], 0.5)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "ner_no_trainer")))

    @unittest.skip(reason="Fix me @muellerzr")
    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_run_squad_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            {self.examples_dir}/pytorch/question-answering/run_qa_no_trainer.py
            --model_name_or_path bert-base-uncased
            --version_2_with_negative
            --train_file tests/fixtures/tests_samples/SQUAD/sample.json
            --validation_file tests/fixtures/tests_samples/SQUAD/sample.json
            --output_dir {tmp_dir}
            --seed=42
            --max_train_steps=10
            --num_warmup_steps=2
            --learning_rate=2e-4
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=1
            --checkpointing_steps epoch
            --with_tracking
        """.split()

        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        # Because we use --version_2_with_negative the testing script uses SQuAD v2 metrics.
        self.assertGreaterEqual(result["eval_f1"], 28)
        self.assertGreaterEqual(result["eval_exact"], 28)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "qa_no_trainer")))

    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_run_swag_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            {self.examples_dir}/pytorch/multiple-choice/run_swag_no_trainer.py
            --model_name_or_path bert-base-uncased
            --train_file tests/fixtures/tests_samples/swag/sample.json
            --validation_file tests/fixtures/tests_samples/swag/sample.json
            --output_dir {tmp_dir}
            --max_train_steps=20
            --num_warmup_steps=2
            --learning_rate=2e-4
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=1
            --with_tracking
        """.split()

        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        self.assertGreaterEqual(result["eval_accuracy"], 0.8)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "swag_no_trainer")))

    @slow
    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_run_summarization_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            {self.examples_dir}/pytorch/summarization/run_summarization_no_trainer.py
            --model_name_or_path t5-small
            --train_file tests/fixtures/tests_samples/xsum/sample.json
            --validation_file tests/fixtures/tests_samples/xsum/sample.json
            --output_dir {tmp_dir}
            --max_train_steps=50
            --num_warmup_steps=8
            --learning_rate=2e-4
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=1
            --checkpointing_steps epoch
            --with_tracking
        """.split()

        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        self.assertGreaterEqual(result["eval_rouge1"], 10)
        self.assertGreaterEqual(result["eval_rouge2"], 2)
        self.assertGreaterEqual(result["eval_rougeL"], 7)
        self.assertGreaterEqual(result["eval_rougeLsum"], 7)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "summarization_no_trainer")))

    @slow
    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_run_translation_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            {self.examples_dir}/pytorch/translation/run_translation_no_trainer.py
            --model_name_or_path sshleifer/student_marian_en_ro_6_1
            --source_lang en
            --target_lang ro
            --train_file tests/fixtures/tests_samples/wmt16/sample.json
            --validation_file tests/fixtures/tests_samples/wmt16/sample.json
            --output_dir {tmp_dir}
            --max_train_steps=50
            --num_warmup_steps=8
            --num_beams=6
            --learning_rate=3e-3
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=1
            --source_lang en_XX
            --target_lang ro_RO
            --checkpointing_steps epoch
            --with_tracking
        """.split()

        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        self.assertGreaterEqual(result["eval_bleu"], 30)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "translation_no_trainer")))

    @slow
    def test_run_semantic_segmentation_no_trainer(self):
        stream_handler = logging.StreamHandler(sys.stdout)
        logger.addHandler(stream_handler)

        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            {self.examples_dir}/pytorch/semantic-segmentation/run_semantic_segmentation_no_trainer.py
            --dataset_name huggingface/semantic-segmentation-test-sample
            --output_dir {tmp_dir}
            --max_train_steps=10
            --num_warmup_steps=2
            --learning_rate=2e-4
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=1
            --checkpointing_steps epoch
        """.split()

        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        self.assertGreaterEqual(result["eval_overall_accuracy"], 0.10)

    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_run_image_classification_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            {self.examples_dir}/pytorch/image-classification/run_image_classification_no_trainer.py
            --model_name_or_path google/vit-base-patch16-224-in21k
            --dataset_name hf-internal-testing/cats_vs_dogs_sample
            --learning_rate 1e-4
            --per_device_train_batch_size 2
            --per_device_eval_batch_size 1
            --max_train_steps 2
            --train_val_split 0.1
            --seed 42
            --output_dir {tmp_dir}
            --with_tracking
            --checkpointing_steps 1
        """.split()

        if is_cuda_and_apex_available():
            testargs.append("--fp16")

        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        # The base model scores a 25%
        self.assertGreaterEqual(result["eval_accuracy"], 0.6)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "step_1")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "image_classification_no_trainer")))
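
# For reference, each test above only builds an `accelerate launch` command line.
# A hand-run equivalent of the GLUE test (arguments taken from the test itself;
# the config path and output directory shown here are illustrative) would be:
#
#   accelerate launch --config_file default_config.yml \
#       examples/pytorch/text-classification/run_glue_no_trainer.py \
#       --model_name_or_path distilbert-base-uncased \
#       --output_dir /tmp/glue_no_trainer_test \
#       --train_file ./tests/fixtures/tests_samples/MRPC/train.csv \
#       --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv \
#       --per_device_train_batch_size=2 --per_device_eval_batch_size=1 \
#       --learning_rate=1e-4 --seed=42 --checkpointing_steps epoch --with_tracking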
"""simple docstring"""
import copy
from typing import Dict, List, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
lowerCamelCase__ = {
'''facebook/mask2former-swin-small-coco-instance''': (
'''https://huggingface.co/facebook/mask2former-swin-small-coco-instance/blob/main/config.json'''
)
# See all Mask2Former models at https://huggingface.co/models?filter=mask2former
}
lowerCamelCase__ = logging.get_logger(__name__)
class A__ ( A__):
A_ : Optional[int] = 'mask2former'
A_ : List[str] = ['swin']
A_ : Optional[int] = {'hidden_size': 'hidden_dim'}
def __init__( self , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = 2_56 , _SCREAMING_SNAKE_CASE = 2_56 , _SCREAMING_SNAKE_CASE = 2_56 , _SCREAMING_SNAKE_CASE = 10_24 , _SCREAMING_SNAKE_CASE = "relu" , _SCREAMING_SNAKE_CASE = 6 , _SCREAMING_SNAKE_CASE = 10 , _SCREAMING_SNAKE_CASE = 8 , _SCREAMING_SNAKE_CASE = 0.0 , _SCREAMING_SNAKE_CASE = 20_48 , _SCREAMING_SNAKE_CASE = False , _SCREAMING_SNAKE_CASE = False , _SCREAMING_SNAKE_CASE = 4 , _SCREAMING_SNAKE_CASE = 2_55 , _SCREAMING_SNAKE_CASE = 1_00 , _SCREAMING_SNAKE_CASE = 0.1 , _SCREAMING_SNAKE_CASE = 2.0 , _SCREAMING_SNAKE_CASE = 5.0 , _SCREAMING_SNAKE_CASE = 5.0 , _SCREAMING_SNAKE_CASE = 1_25_44 , _SCREAMING_SNAKE_CASE = 3.0 , _SCREAMING_SNAKE_CASE = 0.75 , _SCREAMING_SNAKE_CASE = 0.02 , _SCREAMING_SNAKE_CASE = 1.0 , _SCREAMING_SNAKE_CASE = True , _SCREAMING_SNAKE_CASE = [4, 8, 16, 32] , _SCREAMING_SNAKE_CASE = None , **_SCREAMING_SNAKE_CASE , ):
if backbone_config is None:
logger.info('`backbone_config` is `None`. Initializing the config with the default `Swin` backbone.' )
__lowerCAmelCase : Dict = CONFIG_MAPPING['swin'](
image_size=2_24 , in_channels=3 , patch_size=4 , embed_dim=96 , depths=[2, 2, 18, 2] , num_heads=[3, 6, 12, 24] , window_size=7 , drop_path_rate=0.3 , use_absolute_embeddings=lowerCamelCase__ , out_features=['stage1', 'stage2', 'stage3', 'stage4'] , )
if isinstance(lowerCamelCase__ , lowerCamelCase__ ):
__lowerCAmelCase : Dict = backbone_config.pop('model_type' )
__lowerCAmelCase : int = CONFIG_MAPPING[backbone_model_type]
__lowerCAmelCase : Dict = config_class.from_dict(lowerCamelCase__ )
# verify that the backbone is supported
if backbone_config.model_type not in self.backbones_supported:
logger.warning_once(
f"Backbone {backbone_config.model_type} is not a supported model and may not be compatible with Mask2Former. "
f"Supported model types: {','.join(self.backbones_supported )}" )
__lowerCAmelCase : List[Any] = backbone_config
__lowerCAmelCase : Dict = feature_size
__lowerCAmelCase : int = mask_feature_size
__lowerCAmelCase : str = hidden_dim
__lowerCAmelCase : Tuple = encoder_feedforward_dim
__lowerCAmelCase : str = activation_function
__lowerCAmelCase : Tuple = encoder_layers
__lowerCAmelCase : Optional[int] = decoder_layers
__lowerCAmelCase : Union[str, Any] = num_attention_heads
__lowerCAmelCase : int = dropout
__lowerCAmelCase : List[Any] = dim_feedforward
__lowerCAmelCase : int = pre_norm
__lowerCAmelCase : int = enforce_input_projection
__lowerCAmelCase : Dict = common_stride
__lowerCAmelCase : Optional[Any] = ignore_value
__lowerCAmelCase : Optional[Any] = num_queries
__lowerCAmelCase : Union[str, Any] = no_object_weight
__lowerCAmelCase : Tuple = class_weight
__lowerCAmelCase : Optional[int] = mask_weight
__lowerCAmelCase : Tuple = dice_weight
__lowerCAmelCase : Dict = train_num_points
__lowerCAmelCase : Union[str, Any] = oversample_ratio
__lowerCAmelCase : Dict = importance_sample_ratio
__lowerCAmelCase : Optional[Any] = init_std
__lowerCAmelCase : Any = init_xavier_std
__lowerCAmelCase : List[str] = use_auxiliary_loss
__lowerCAmelCase : Dict = feature_strides
__lowerCAmelCase : List[Any] = output_auxiliary_logits
__lowerCAmelCase : List[Any] = decoder_layers
super().__init__(**lowerCamelCase__ )
@classmethod
def __lowerCamelCase ( cls , _SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ):
return cls(
backbone_config=lowerCamelCase__ , **lowerCamelCase__ , )
def __lowerCamelCase ( self ):
__lowerCAmelCase : str = copy.deepcopy(self.__dict__ )
__lowerCAmelCase : Optional[Any] = self.backbone_config.to_dict()
__lowerCAmelCase : Tuple = self.__class__.model_type
return output
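
# Minimal usage sketch (not part of the original module; assumes the class is
# re-exported from the top-level `transformers` package, as model configs usually are):
#
# from transformers import Mask2FormerConfig
#
# config = Mask2FormerConfig()                 # builds the default Swin backbone
# assert config.backbone_config.model_type == "swin"
# assert config.to_dict()["model_type"] == "mask2former"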
"""simple docstring"""
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Value
from .base import TaskTemplate
@dataclass(frozen=_lowerCamelCase)
class A__ ( _lowerCamelCase):
# `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization
A_ : str = field(default='text-classification' , metadata={'include_in_asdict_even_if_is_default': True})
A_ : ClassVar[Features] = Features({'text': Value('string')})
A_ : ClassVar[Features] = Features({'labels': ClassLabel})
A_ : str = "text"
A_ : str = "labels"
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE ):
if self.label_column not in features:
raise ValueError(f"Column {self.label_column} is not present in features." )
if not isinstance(features[self.label_column] , _SCREAMING_SNAKE_CASE ):
raise ValueError(f"Column {self.label_column} is not a ClassLabel." )
__lowerCAmelCase : Any = copy.deepcopy(self )
__lowerCAmelCase : Dict = self.label_schema.copy()
__lowerCAmelCase : List[Any] = features[self.label_column]
__lowerCAmelCase : Dict = label_schema
return task_template
@property
def __lowerCamelCase ( self ):
return {
self.text_column: "text",
self.label_column: "labels",
} | 182 | 0 |
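
# Usage sketch (label names here are hypothetical; this template lives in the
# `datasets` library, whose public Features/ClassLabel/Value types are used):
#
# from datasets import ClassLabel, Features, Value
#
# template = TextClassification(text_column="text", label_column="labels")
# features = Features({"text": Value("string"), "labels": ClassLabel(names=["neg", "pos"])})
# aligned = template.align_with_features(features)
# assert aligned.label_schema["labels"].names == ["neg", "pos"]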
"""Tests for the CLIP processor, which pairs a CLIP image processor with a CLIP tokenizer."""
import json
import os
import shutil
import tempfile
import unittest

import numpy as np
import pytest

from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available


if is_vision_available():
    from PIL import Image

    from transformers import CLIPImageProcessor, CLIPProcessor


@require_vision
class CLIPProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        # fmt: off
        vocab = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l o", "lo w</w>", "e r</w>", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

        image_processor_map = {
            "do_resize": True,
            "size": 20,
            "do_center_crop": True,
            "crop_size": 18,
            "do_normalize": True,
            "image_mean": [0.48145466, 0.4578275, 0.40821073],
            "image_std": [0.26862954, 0.26130258, 0.27577711],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return CLIPTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return CLIPImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Prepare a list of PIL images from random uint8 NumPy arrays of shape (3, 30, 400)."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_default(self):
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()

        processor_slow = CLIPProcessor(tokenizer=tokenizer_slow, image_processor=image_processor)
        processor_slow.save_pretrained(self.tmpdirname)
        processor_slow = CLIPProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        processor_fast = CLIPProcessor(tokenizer=tokenizer_fast, image_processor=image_processor)
        processor_fast.save_pretrained(self.tmpdirname)
        processor_fast = CLIPProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer, CLIPTokenizer)
        self.assertIsInstance(processor_fast.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor, CLIPImageProcessor)
        self.assertIsInstance(processor_fast.image_processor, CLIPImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        processor = CLIPProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = CLIPProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, CLIPImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_image_proc = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_image_proc.keys():
            self.assertAlmostEqual(input_image_proc[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"

        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask", "pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), processor.model_input_names)
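
# Outside the test harness the processor handles both modalities in one call
# (a sketch; the checkpoint is a public CLIP checkpoint, and `image` stands for
# any PIL image you provide):
#
# from transformers import CLIPProcessor
#
# processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
# inputs = processor(text=["a photo of a cat"], images=image, return_tensors="pt")
# # inputs now holds input_ids, attention_mask, and pixel_values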
"""MaskFormer Swin backbone configuration."""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices


logger = logging.get_logger(__name__)


class MaskFormerSwinConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "maskformer-swin"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        depths=[2, 2, 6, 2],
        num_heads=[3, 6, 12, 24],
        window_size=7,
        mlp_ratio=4.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
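
# Sanity-check sketch: with the defaults above, `hidden_size` works out to
# 96 * 2 ** (4 - 1) = 768, and five stage names are generated.
#
# config = MaskFormerSwinConfig()
# assert config.hidden_size == 768
# assert config.stage_names == ["stem", "stage1", "stage2", "stage3", "stage4"]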
"""simple docstring"""
from random import randint, random
def lowercase_ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = False , __UpperCAmelCase = False , __UpperCAmelCase = 5 , ) -> list:
lowerCAmelCase__ : List[Any] = [[-1] * number_of_cells] # Create a highway without any car
lowerCAmelCase__ : Any = 0
lowerCAmelCase__ : Optional[Any] = max(__UpperCAmelCase , 0 )
while i < number_of_cells:
lowerCAmelCase__ : Any = (
randint(0 , __UpperCAmelCase ) if random_speed else initial_speed
) # Place the cars
i += (
randint(1 , max_speed * 2 ) if random_frequency else frequency
) # Arbitrary number, may need tuning
return highway
def lowercase_ ( __UpperCAmelCase , __UpperCAmelCase ) -> int:
lowerCAmelCase__ : int = 0
lowerCAmelCase__ : str = highway_now[car_index + 1 :]
for cell in range(len(__UpperCAmelCase ) ): # May need a better name for this
if cells[cell] != -1: # If the cell is not empty then
return distance # we have the distance we wanted
distance += 1
# Here if the car is near the end of the highway
return distance + get_distance(__UpperCAmelCase , -1 )
def lowercase_ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> list:
lowerCAmelCase__ : int = len(__UpperCAmelCase )
# Beforce calculations, the highway is empty
lowerCAmelCase__ : List[Any] = [-1] * number_of_cells
for car_index in range(__UpperCAmelCase ):
if highway_now[car_index] != -1:
# Add 1 to the current speed of the car and cap the speed
lowerCAmelCase__ : Optional[int] = min(highway_now[car_index] + 1 , __UpperCAmelCase )
# Number of empty cell before the next car
lowerCAmelCase__ : Any = get_distance(__UpperCAmelCase , __UpperCAmelCase ) - 1
# We can't have the car causing an accident
lowerCAmelCase__ : Dict = min(next_highway[car_index] , __UpperCAmelCase )
if random() < probability:
# Randomly, a driver will slow down
lowerCAmelCase__ : Any = max(next_highway[car_index] - 1 , 0 )
return next_highway
def lowercase_ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> list:
lowerCAmelCase__ : Optional[Any] = len(highway[0] )
for i in range(__UpperCAmelCase ):
lowerCAmelCase__ : Optional[int] = update(highway[i] , __UpperCAmelCase , __UpperCAmelCase )
lowerCAmelCase__ : int = [-1] * number_of_cells
for car_index in range(__UpperCAmelCase ):
lowerCAmelCase__ : str = next_speeds_calculated[car_index]
if speed != -1:
# Change the position based on the speed (with % to create the loop)
lowerCAmelCase__ : List[str] = (car_index + speed) % number_of_cells
# Commit the change of position
lowerCAmelCase__ : str = speed
highway.append(__UpperCAmelCase )
return highway
if __name__ == "__main__":
import doctest
doctest.testmod()
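
# Quick demo with illustrative parameter values (not part of the original module):
#
# highway = construct_highway(number_of_cells=21, frequency=3, initial_speed=2)
# generations = simulate(highway, number_of_update=5, probability=0.1, max_speed=5)
# for row in generations:
#     print("".join("." if cell == -1 else str(cell) for cell in row))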
"""simple docstring"""
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class _lowerCamelCase ( a_ ):
_lowerCamelCase :Dict = ["image_processor", "tokenizer"]
_lowerCamelCase :Dict = "BlipImageProcessor"
_lowerCamelCase :Any = ("BertTokenizer", "BertTokenizerFast")
def __init__( self : Dict , UpperCamelCase : Optional[int] , UpperCamelCase : Union[str, Any] ) -> str:
"""simple docstring"""
lowerCAmelCase__ : Optional[Any] = False
super().__init__(UpperCamelCase , UpperCamelCase )
lowerCAmelCase__ : List[Any] = self.image_processor
def __call__( self : int , UpperCamelCase : ImageInput = None , UpperCamelCase : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , UpperCamelCase : bool = True , UpperCamelCase : Union[bool, str, PaddingStrategy] = False , UpperCamelCase : Union[bool, str, TruncationStrategy] = None , UpperCamelCase : Optional[int] = None , UpperCamelCase : int = 0 , UpperCamelCase : Optional[int] = None , UpperCamelCase : Optional[bool] = None , UpperCamelCase : bool = False , UpperCamelCase : bool = False , UpperCamelCase : bool = False , UpperCamelCase : bool = False , UpperCamelCase : bool = False , UpperCamelCase : bool = True , UpperCamelCase : Optional[Union[str, TensorType]] = None , **UpperCamelCase : Optional[int] , ) -> BatchEncoding:
"""simple docstring"""
if images is None and text is None:
raise ValueError("""You have to specify either images or text.""" )
# Get only text
if images is None:
lowerCAmelCase__ : Any = self.tokenizer
lowerCAmelCase__ : Optional[int] = self.tokenizer(
text=UpperCamelCase , add_special_tokens=UpperCamelCase , padding=UpperCamelCase , truncation=UpperCamelCase , max_length=UpperCamelCase , stride=UpperCamelCase , pad_to_multiple_of=UpperCamelCase , return_attention_mask=UpperCamelCase , return_overflowing_tokens=UpperCamelCase , return_special_tokens_mask=UpperCamelCase , return_offsets_mapping=UpperCamelCase , return_token_type_ids=UpperCamelCase , return_length=UpperCamelCase , verbose=UpperCamelCase , return_tensors=UpperCamelCase , **UpperCamelCase , )
return text_encoding
# add pixel_values
lowerCAmelCase__ : Tuple = self.image_processor(UpperCamelCase , return_tensors=UpperCamelCase )
if text is not None:
lowerCAmelCase__ : Optional[int] = self.tokenizer(
text=UpperCamelCase , add_special_tokens=UpperCamelCase , padding=UpperCamelCase , truncation=UpperCamelCase , max_length=UpperCamelCase , stride=UpperCamelCase , pad_to_multiple_of=UpperCamelCase , return_attention_mask=UpperCamelCase , return_overflowing_tokens=UpperCamelCase , return_special_tokens_mask=UpperCamelCase , return_offsets_mapping=UpperCamelCase , return_token_type_ids=UpperCamelCase , return_length=UpperCamelCase , verbose=UpperCamelCase , return_tensors=UpperCamelCase , **UpperCamelCase , )
else:
lowerCAmelCase__ : Tuple = None
if text_encoding is not None:
encoding_image_processor.update(UpperCamelCase )
return encoding_image_processor
def _lowerCAmelCase ( self : int , *UpperCamelCase : Union[str, Any] , **UpperCamelCase : List[str] ) -> Dict:
"""simple docstring"""
return self.tokenizer.batch_decode(*UpperCamelCase , **UpperCamelCase )
def _lowerCAmelCase ( self : List[Any] , *UpperCamelCase : Tuple , **UpperCamelCase : List[str] ) -> List[Any]:
"""simple docstring"""
return self.tokenizer.decode(*UpperCamelCase , **UpperCamelCase )
@property
def _lowerCAmelCase ( self : List[Any] ) -> Tuple:
"""simple docstring"""
lowerCAmelCase__ : Optional[int] = self.tokenizer.model_input_names
lowerCAmelCase__ : Union[str, Any] = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
from __future__ import annotations

graph = {
    "A": ["B", "C", "E"],
    "B": ["A", "D", "E"],
    "C": ["A", "F", "G"],
    "D": ["B"],
    "E": ["A", "B", "D"],
    "F": ["C"],
    "G": ["C"],
}


class Graph:
    def __init__(self, graph: dict[str, list[str]], source_vertex: str) -> None:
        """Graph is stored as a dictionary of adjacency lists."""
        self.graph = graph
        # mapping node to its parent in resulting breadth first tree
        self.parent: dict[str, str | None] = {}
        self.source_vertex = source_vertex

    def breath_first_search(self) -> None:
        """Run a breadth-first search from the source vertex, filling the parent map."""
        visited = {self.source_vertex}
        self.parent[self.source_vertex] = None
        queue = [self.source_vertex]  # first in first out queue

        while queue:
            vertex = queue.pop(0)
            for adjacent_vertex in self.graph[vertex]:
                if adjacent_vertex not in visited:
                    visited.add(adjacent_vertex)
                    self.parent[adjacent_vertex] = vertex
                    queue.append(adjacent_vertex)

    def shortest_path(self, target_vertex: str) -> str:
        """Return the shortest path from the source to the target as a '->'-joined string."""
        if target_vertex == self.source_vertex:
            return self.source_vertex

        target_vertex_parent = self.parent.get(target_vertex)
        if target_vertex_parent is None:
            msg = f"No path from vertex: {self.source_vertex} to vertex: {target_vertex}"
            raise ValueError(msg)

        return self.shortest_path(target_vertex_parent) + f"->{target_vertex}"


if __name__ == "__main__":
    g = Graph(graph, "G")
    g.breath_first_search()
    print(g.shortest_path("D"))
    print(g.shortest_path("G"))
    print(g.shortest_path("Foo"))
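
# Expected behaviour of the demo above, derived from the adjacency lists:
# shortest_path("D") prints "G->C->A->B->D" and shortest_path("G") prints "G",
# but shortest_path("Foo") raises ValueError because "Foo" was never visited.
# Guard the last call if the script should survive it:
#
# try:
#     print(g.shortest_path("Foo"))
# except ValueError as error:
#     print(error)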
"""Crawl a page of a website and collect e-mail addresses that match its domain."""
from __future__ import annotations

__author__ = "Muhammad Umer Farooq"
__license__ = "MIT"
__version__ = "1.0.0"
__maintainer__ = "Muhammad Umer Farooq"
__email__ = "[email protected]"
__status__ = "Alpha"

import re
from html.parser import HTMLParser
from urllib import parse

import requests


class Parser(HTMLParser):
    def __init__(self, domain: str) -> None:
        super().__init__()
        self.urls: list[str] = []
        self.domain = domain

    def handle_starttag(self, tag: str, attrs: list[tuple[str, str | None]]) -> None:
        # Only parse the 'anchor' tag.
        if tag == "a":
            # Check the list of defined attributes.
            for name, value in attrs:
                # If href is defined, and not empty nor # print it.
                if name == "href" and value != "#" and value != "":
                    # If not already in urls.
                    if value not in self.urls:
                        url = parse.urljoin(self.domain, value)
                        self.urls.append(url)


# Get main domain name (example.com)
def get_domain_name(url: str) -> str:
    return ".".join(get_sub_domain_name(url).split(".")[-2:])


# Get sub domain name (sub.example.com)
def get_sub_domain_name(url: str) -> str:
    return parse.urlparse(url).netloc


def emails_from_url(url: str = "https://github.com") -> list[str]:
    # Get the base domain from the url
    domain = get_domain_name(url)

    # Initialize the parser
    parser = Parser(domain)

    try:
        # Open URL
        r = requests.get(url)

        # pass the raw HTML to the parser to get links
        parser.feed(r.text)

        # Get links and loop through
        valid_emails = set()
        for link in parser.urls:
            # open URL.
            try:
                read = requests.get(link)
                # Get the valid email.
                emails = re.findall("[a-zA-Z0-9]+@" + domain, read.text)
                # If not in list then append it.
                for email in emails:
                    valid_emails.add(email)
            except ValueError:
                pass
    except ValueError:
        raise SystemExit(1)

    # Finally return a sorted list of email addresses with no duplicates.
    return sorted(valid_emails)


if __name__ == "__main__":
    emails = emails_from_url("https://github.com")
    print(f"{len(emails)} emails found:")
    print("\n".join(sorted(emails)))
"""Tokenization classes for GPTNeoXJapanese."""
import collections
import json
import os
import re
from typing import TYPE_CHECKING, List, Optional, Tuple

import numpy as np

from ...tokenization_utils_fast import PreTrainedTokenizer
from ...utils import logging


if TYPE_CHECKING:
    from transformers.pipelines.conversational import Conversation


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "emoji_file": "emoji.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "abeja/gpt-neox-japanese-2.7b": "https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/vocab.txt",
    },
    "emoji_file": {
        "abeja/gpt-neox-japanese-2.7b": "https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/emoji.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "abeja/gpt-neox-japanese-2.7b": 2048,
}


def load_vocab_and_emoji(vocab_file, emoji_file):
    """Loads a vocabulary file and an emoji file into dictionaries."""
    with open(emoji_file, "r", encoding="utf-8") as f:
        emoji = json.loads(f.read())

    vocab = collections.OrderedDict()
    raw_vocab = collections.OrderedDict()
    ids_to_tokens = collections.OrderedDict()
    with open(vocab_file, "r", encoding="utf-8") as f:
        token = f.readlines()
    token = [[t.rstrip("\n")] if (t == "," or "," not in t) else t.rstrip("\n").split(",") for t in token]
    for idx, b in enumerate(token):
        ids_to_tokens[idx] = b
        raw_vocab[",".join(b)] = idx
        for wd in b:
            vocab[wd] = idx

    return vocab, raw_vocab, ids_to_tokens, emoji


class GPTNeoXJapaneseTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        emoji_file,
        unk_token="<|endoftext|>",
        pad_token="<|endoftext|>",
        bos_token="<|startoftext|>",
        eos_token="<|endoftext|>",
        do_clean_text=False,
        **kwargs,
    ):
        super().__init__(
            unk_token=unk_token,
            pad_token=pad_token,
            bos_token=bos_token,
            eos_token=eos_token,
            do_clean_text=do_clean_text,
            **kwargs,
        )
        if not os.path.isfile(vocab_file):
            raise ValueError(
                f"Can't find a vocabulary file at path '{vocab_file}'. To load the vocabulary from a Google pretrained"
                " model use `tokenizer = GPTNeoXJapaneseokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`"
            )
        if not os.path.isfile(emoji_file):
            raise ValueError(
                f"Can't find a emoji file at path '{emoji_file}'. To load the emoji information from a Google"
                " pretrained model use `tokenizer = GPTNeoXJapaneseokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`"
            )
        self.do_clean_text = do_clean_text
        self.vocab, self.raw_vocab, self.ids_to_tokens, self.emoji = load_vocab_and_emoji(vocab_file, emoji_file)
        self.subword_tokenizer = SubWordJapaneseTokenizer(
            vocab=self.vocab, ids_to_tokens=self.ids_to_tokens, emoji=self.emoji
        )

    @property
    def vocab_size(self):
        # self.vocab contains support for character fluctuation unique to Japanese, and has a large number of vocab
        return len(self.raw_vocab)

    def get_vocab(self):
        return dict(self.raw_vocab, **self.added_tokens_encoder)

    def _tokenize(self, text):
        return self.subword_tokenizer.tokenize(text, clean=self.do_clean_text)

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        return self.vocab.get(token, self.vocab.get(self.unk_token))

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        return self.subword_tokenizer.convert_id_to_token(index)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens into a single string."""
        out_string = "".join(tokens).strip()
        return out_string

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text, add_special_tokens=False) + [self.eos_token_id])

        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        index = 0
        if os.path.isdir(save_directory):
            vocab_file = os.path.join(
                save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
            )
            emoji_file = os.path.join(
                save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["emoji_file"]
            )
        else:
            vocab_file = (
                (filename_prefix + "-" if filename_prefix else "") + save_directory + VOCAB_FILES_NAMES["vocab_file"]
            )
            emoji_file = (
                (filename_prefix + "-" if filename_prefix else "") + save_directory + VOCAB_FILES_NAMES["emoji_file"]
            )
        with open(vocab_file, "w", encoding="utf-8") as writer:
            for token_index, token in self.ids_to_tokens.items():
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."
                        " Please check that the vocabulary is not corrupted!"
                    )
                    index = token_index
                writer.write(",".join(token) + "\n")
                index += 1
        with open(emoji_file, "w", encoding="utf-8") as writer:
            json.dump(self.emoji, writer)
        return vocab_file, emoji_file


class SubWordJapaneseTokenizer(object):
    def __init__(self, vocab, ids_to_tokens, emoji):
        self.vocab = vocab  # same as swe
        self.ids_to_tokens = ids_to_tokens  # same as bpe
        self.emoji = emoji
        self.maxlen = np.max([len(w) for w in self.vocab.keys()])
        self.content_repatter1 = re.compile(r'(https?|ftp)(:\/\/[-_\.!~*\'()a-zA-Z0-9;\/?:\@&=\+$,%#]+)')
        self.content_repatter2 = re.compile(r'[A-Za-z0-9\._+]*@[\-_0-9A-Za-z]+(\.[A-Za-z]+)*')
        self.content_repatter3 = re.compile(r'[\(]{0,1}[0-9]{2,4}[\)\-\(]{0,1}[0-9]{2,4}[\)\-]{0,1}[0-9]{3,4}')
        self.content_repatter4 = re.compile(
            r'([12]\d{3}[/\-年])*(0?[1-9]|1[0-2])[/\-月]((0?[1-9]|[12][0-9]|3[01])日?)*(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*'
        )
        self.content_repatter5 = re.compile(
            r'(明治|大正|昭和|平成|令和|㍾|㍽|㍼|㍻|\u32ff)\d{1,2}年(0?[1-9]|1[0-2])月(0?[1-9]|[12][0-9]|3[01])日(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*'
        )
        self.content_repatter6 = re.compile(
            r'((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*億)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*万)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*千)*(0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*(千円|万円|千万円|円|千ドル|万ドル|千万ドル|ドル|千ユーロ|万ユーロ|千万ユーロ|ユーロ)+(\(税込\)|\(税抜\)|\+tax)*'
        )
        keisen = '─━│┃┄┅┆┇┈┉┊┋┌┍┎┏┐┑┒┓└┕┖┗┘┙┚┛├┝┞┟┠┡┢┣┤┥┦┧┨┩┪┫┬┭┮┯┰┱┲┳┴┵┶┷┸┹┺┻┼┽┾┿╀╁╂╃╄╅╆╇╈╉╊╋╌╍╎╏═║╒╓╔╕╖╗╘╙╚╛╜╝╞╟╠╡╢╣╤╥╦╧╨╩╪╫╬╭╮╯╰╱╲╳╴╵╶╷╸╹╺╻╼╽╾╿'
        blocks = '▀▁▂▃▄▅▆▇█▉▊▋▌▍▎▏▐░▒▓▔▕▖▗▘▙▚▛▜▝▞▟'
        self.content_trans1 = str.maketrans({k: '<BLOCK>' for k in keisen + blocks})

    def __len__(self):
        return len(self.ids_to_tokens)

    def clean_text(self, content):
        content = self.content_repatter1.sub('<URL>', content)
        content = self.content_repatter2.sub('<EMAIL>', content)
        content = self.content_repatter3.sub('<TEL>', content)
        content = self.content_repatter4.sub('<DATE>', content)
        content = self.content_repatter5.sub('<DATE>', content)
        content = self.content_repatter6.sub('<PRICE>', content)
        content = content.translate(self.content_trans1)
        while '<BLOCK><BLOCK>' in content:
            content = content.replace('<BLOCK><BLOCK>', '<BLOCK>')
        return content

    def tokenize(self, text, clean=False):
        text = text.replace(' ', '<SP>')
        text = text.replace('　', '<SP>')
        text = text.replace('\r\n', '<BR>')
        text = text.replace('\n', '<BR>')
        text = text.replace('\r', '<BR>')
        text = text.replace('\t', '<TAB>')
        text = text.replace('—', 'ー')
        text = text.replace('−', 'ー')
        for k, v in self.emoji['emoji'].items():
            if k in text:
                text = text.replace(k, v)
        if clean:
            text = self.clean_text(text)

        def check_simbol(x):
            e = x.encode()
            if len(x) == 1 and len(e) == 2:
                c = (int(e[0]) << 8) + int(e[1])
                if (
                    (c >= 0xC2A1 and c <= 0xC2BF)
                    or (c >= 0xC780 and c <= 0xC783)
                    or (c >= 0xCAB9 and c <= 0xCBBF)
                    or (c >= 0xCC80 and c <= 0xCDA2)
                ):
                    return True
            return False

        def checku2e(x):
            e = x.encode()
            if len(x) == 1 and len(e) == 3:
                c = (int(e[0]) << 16) + (int(e[1]) << 8) + int(e[2])
                if c >= 0xE28080 and c <= 0xE2B07F:
                    return True
            return False

        pos = 0
        result = []
        while pos < len(text):
            end = min(len(text), pos + self.maxlen + 1) if text[pos] == '<' else pos + 3
            candidates = []  # (token_id, token, pos)
            for e in range(end, pos, -1):
                wd = text[pos:e]
                if wd in self.vocab:
                    if wd[0] == '<' and len(wd) > 2:
                        candidates = [(self.vocab[wd], wd, e)]
                        break
                    else:
                        candidates.append((self.vocab[wd], wd, e))
            if len(candidates) > 0:
                # the smallest token_id is adopted
                _, wd, e = sorted(candidates, key=lambda x: x[0])[0]
                result.append(wd)
                pos = e
            else:
                end = pos + 1
                wd = text[pos:end]
                if check_simbol(wd):
                    result.append('<KIGOU>')
                elif checku2e(wd):
                    result.append('<U2000U2BFF>')
                else:
                    for i in wd.encode('utf-8'):
                        result.append('<|byte%d|>' % i)
                pos = end
        return result

    def convert_id_to_token(self, index, breakline='\n'):
        words = []
        byte_tokens = []
        word = self.ids_to_tokens[index][0]
        if word[:6] == '<|byte' and word[-2:] == '|>':
            byte_tokens.append(int(word[6:-2]))
        else:
            if len(byte_tokens) > 0:
                words.append(bytearray(byte_tokens).decode('utf-8', errors='replace'))
                byte_tokens = []
            if word[:7] == '<|emoji' and word[-2:] == '|>':
                words.append(self.emoji['emoji_inv'][word])
            elif word == '<SP>':
                words.append(' ')
            elif word == '<BR>':
                words.append(breakline)
            elif word == '<TAB>':
                words.append('\t')
            elif word == '<BLOCK>':
                words.append('▀')
            elif word == '<KIGOU>':
                words.append('ǀ')
            elif word == '<U2000U2BFF>':
                words.append('‖')
            else:
                words.append(word)
        if len(byte_tokens) > 0:
            words.append(bytearray(byte_tokens).decode('utf-8', errors='replace'))
        text = ''.join(words)
        return text
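
# Usage sketch, using the checkpoint referenced in the pretrained maps above:
#
# from transformers import GPTNeoXJapaneseTokenizer
#
# tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained("abeja/gpt-neox-japanese-2.7b")
# ids = tokenizer("こんにちは、世界。")["input_ids"]
# print(tokenizer.decode(ids))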
"""Client-side script that receives a file over a TCP socket."""
import socket


def main():
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    host = socket.gethostname()
    port = 12312

    sock.connect((host, port))
    sock.send(b'Hello server!')

    with open('Received_file', 'wb') as out_file:
        print('File opened')
        print('Receiving data...')
        while True:
            data = sock.recv(1024)
            if not data:
                break
            out_file.write(data)

    print('Successfully received the file')
    sock.close()
    print('Connection closed')


if __name__ == "__main__":
    main()
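
# The client above assumes a sender is already listening on the same host/port.
# A minimal counterpart sketch (the filename "File_to_send" is illustrative):
#
# import socket
#
# server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# server.bind((socket.gethostname(), 12312))
# server.listen(1)
# conn, addr = server.accept()
# conn.recv(1024)  # consume the client's greeting
# with open("File_to_send", "rb") as f:
#     while chunk := f.read(1024):
#         conn.send(chunk)
# conn.close()  # closing the socket signals EOF, which ends the client's recv loop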
"""simple docstring"""
import argparse
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    """simple docstring"""
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size, drop_last=True
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"],
        shuffle=False,
        collate_fn=collate_fn,
        batch_size=EVAL_BATCH_SIZE,
        drop_last=(accelerator.mixed_precision == "fp8"),
    )

    return train_dataloader, eval_dataloader
def training_function(config, args):
    """simple docstring"""
    accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    metric = evaluate.load("glue", "mrpc")

    # If the batch size is too big we use gradient accumulation
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)
    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)

    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer,
        num_warmup_steps=100,
        num_training_steps=(len(train_dataloader) * num_epochs) // gradient_accumulation_steps,
    )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        for step, batch in enumerate(train_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            outputs = model(**batch)
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

        model.eval()
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(
                predictions=predictions,
                references=references,
            )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)
def main():
    """simple docstring"""
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)


if __name__ == "__main__":
    main()
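# A hedged, standalone sketch of the gradient-accumulation pattern used in
# training_function above (toy model and random data; the effective batch size
# is micro_batch_size * gradient_accumulation_steps):
import torch

model = torch.nn.Linear(10, 2)
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
gradient_accumulation_steps = 4

for step in range(16):
    x = torch.randn(8, 10)  # one micro-batch
    loss = model(x).pow(2).mean() / gradient_accumulation_steps  # scale so grads average
    loss.backward()  # gradients accumulate in .grad across micro-batches
    if step % gradient_accumulation_steps == 0:
        optimizer.step()
        optimizer.zero_grad()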
| 102 |
# using dfs for finding eulerian path traversal
def dfs(u, graph, visited_edge, path=None):
    """simple docstring"""
    path = (path or []) + [u]
    for v in graph[u]:
        if visited_edge[u][v] is False:
            visited_edge[u][v], visited_edge[v][u] = True, True
            path = dfs(v, graph, visited_edge, path)
    return path


def check_circuit_or_path(graph, max_node):
    """simple docstring"""
    odd_degree_nodes = 0
    odd_node = -1
    for i in range(max_node):
        if i not in graph.keys():
            continue
        if len(graph[i]) % 2 == 1:
            odd_degree_nodes += 1
            odd_node = i
    if odd_degree_nodes == 0:
        return 1, odd_node
    if odd_degree_nodes == 2:
        return 2, odd_node
    return 3, odd_node


def check_euler(graph, max_node):
    """simple docstring"""
    visited_edge = [[False for _ in range(max_node + 1)] for _ in range(max_node + 1)]
    check, odd_node = check_circuit_or_path(graph, max_node)
    if check == 3:
        print('graph is not Eulerian')
        print('no path')
        return
    start_node = 1
    if check == 2:
        start_node = odd_node
        print('graph has a Euler path')
    if check == 1:
        print('graph has a Euler cycle')
    path = dfs(start_node, graph, visited_edge)
    print(path)


def main():
    """simple docstring"""
    g1 = {1: [2, 3, 4], 2: [1, 3], 3: [1, 2], 4: [1, 5], 5: [4]}
    g2 = {1: [2, 3, 4, 5], 2: [1, 3], 3: [1, 2], 4: [1, 5], 5: [1, 4]}
    g3 = {1: [2, 3, 4], 2: [1, 3, 4], 3: [1, 2], 4: [1, 2, 5], 5: [4]}
    g4 = {1: [2, 3], 2: [1, 3], 3: [1, 2]}
    g5 = {
        1: [],
        2: []
        # all degree is zero
    }
    max_node = 10
    check_euler(g1, max_node)
    check_euler(g2, max_node)
    check_euler(g3, max_node)
    check_euler(g4, max_node)
    check_euler(g5, max_node)


if __name__ == "__main__":
    main()
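# A hedged, standalone check of the degree rule the code above relies on
# (0 odd-degree vertices -> Euler cycle exists, 2 -> Euler path exists):
def count_odd_degrees(graph):
    return sum(1 for neighbors in graph.values() if len(neighbors) % 2 == 1)

_g_path = {1: [2, 3, 4], 2: [1, 3], 3: [1, 2], 4: [1, 5], 5: [4]}
_g_cycle = {1: [2, 3, 4, 5], 2: [1, 3], 3: [1, 2], 4: [1, 5], 5: [1, 4]}
assert count_odd_degrees(_g_path) == 2   # Euler path (graph 1 above)
assert count_odd_degrees(_g_cycle) == 0  # Euler cycle (graph 2 above)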
| 199 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
lowerCamelCase_ = {"configuration_van": ["VAN_PRETRAINED_CONFIG_ARCHIVE_MAP", "VanConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase_ = [
"VAN_PRETRAINED_MODEL_ARCHIVE_LIST",
"VanForImageClassification",
"VanModel",
"VanPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_van import VAN_PRETRAINED_CONFIG_ARCHIVE_MAP, VanConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_van import (
VAN_PRETRAINED_MODEL_ARCHIVE_LIST,
VanForImageClassification,
VanModel,
VanPreTrainedModel,
)
else:
import sys
lowerCamelCase_ = _LazyModule(__name__, globals()["__file__"], _import_structure) | 350 |
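# A minimal sketch of the lazy-import idea behind _LazyModule (illustrative
# only, not the actual transformers implementation): attribute access triggers
# the real submodule import on first use.
import importlib
import types

class _LazySketch(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # invert {module: [attrs]} into {attr: module} for fast lookup
        self._attr_to_module = {
            attr: mod for mod, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr):
        module = importlib.import_module("." + self._attr_to_module[attr], self.__name__)
        return getattr(module, attr)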
"""simple docstring"""
from torch import nn
class _SCREAMING_SNAKE_CASE( nn.Module ):
def __init__( self ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ) -> List[str]:
"""simple docstring"""
super().__init__()
__SCREAMING_SNAKE_CASE :Tuple = class_size
__SCREAMING_SNAKE_CASE :str = embed_size
# self.mlp1 = nn.Linear(embed_size, embed_size)
# self.mlp2 = (nn.Linear(embed_size, class_size))
__SCREAMING_SNAKE_CASE :Optional[Any] = nn.Linear(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self ,SCREAMING_SNAKE_CASE__ ) -> Optional[int]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE :Optional[Any] = self.mlp(SCREAMING_SNAKE_CASE__ )
return logits | 239 | 0 |
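# Usage sketch for the head above (hypothetical sizes): map a batch of 768-d
# pooled embeddings to 5 class logits.
import torch

head = ClassificationHead(class_size=5, embed_size=768)
logits = head(torch.randn(2, 768))
assert logits.shape == (2, 5)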
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/deit-base-distilled-patch16-224": (
        "https://huggingface.co/facebook/deit-base-distilled-patch16-224/resolve/main/config.json"
    ),
    # See all DeiT models at https://huggingface.co/models?filter=deit
}


class DeiTConfig(PretrainedConfig):
    model_type = "deit"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=224,
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        encoder_stride=16,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.encoder_stride = encoder_stride


class DeiTOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
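# Hedged usage sketch for the configs above: instantiate a non-default DeiT
# config and inspect the declared ONNX input axes.
config = DeiTConfig(image_size=384, patch_size=16)
onnx_config = DeiTOnnxConfig(config)
assert config.image_size == 384
assert "pixel_values" in onnx_config.inputs  # dynamic axes: batch/channels/h/w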
| 69 |
def abbr(a: str, b: str) -> bool:
    """
    >>> abbr("daBcd", "ABC")
    True
    >>> abbr("dBcd", "ABC")
    False
    """
    n = len(a)
    m = len(b)
    dp = [[False for _ in range(m + 1)] for _ in range(n + 1)]
    dp[0][0] = True
    for i in range(n):
        for j in range(m + 1):
            if dp[i][j]:
                if j < m and a[i].upper() == b[j]:
                    dp[i + 1][j + 1] = True
                if a[i].islower():
                    dp[i + 1][j] = True
    return dp[n][m]
if __name__ == "__main__":
import doctest
doctest.testmod()
| 182 | 0 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import importlib.metadata
import json
import os
from dataclasses import dataclass
from typing import Any, Dict, Union
from packaging import version
from ..utils import is_torch_available, logging
if is_torch_available():
import torch
logger = logging.get_logger(__name__)


@dataclass
class BitsAndBytesConfig:
    def __init__(
        self,
        load_in_8bit=False,
        load_in_4bit=False,
        llm_int8_threshold=6.0,
        llm_int8_skip_modules=None,
        llm_int8_enable_fp32_cpu_offload=False,
        llm_int8_has_fp16_weight=False,
        bnb_4bit_compute_dtype=None,
        bnb_4bit_quant_type="fp4",
        bnb_4bit_use_double_quant=False,
        **kwargs,
    ):
        self.load_in_8bit = load_in_8bit
        self.load_in_4bit = load_in_4bit
        self.llm_int8_threshold = llm_int8_threshold
        self.llm_int8_skip_modules = llm_int8_skip_modules
        self.llm_int8_enable_fp32_cpu_offload = llm_int8_enable_fp32_cpu_offload
        self.llm_int8_has_fp16_weight = llm_int8_has_fp16_weight
        self.bnb_4bit_quant_type = bnb_4bit_quant_type
        self.bnb_4bit_use_double_quant = bnb_4bit_use_double_quant

        if bnb_4bit_compute_dtype is None:
            self.bnb_4bit_compute_dtype = torch.float32
        elif isinstance(bnb_4bit_compute_dtype, str):
            self.bnb_4bit_compute_dtype = getattr(torch, bnb_4bit_compute_dtype)
        elif isinstance(bnb_4bit_compute_dtype, torch.dtype):
            self.bnb_4bit_compute_dtype = bnb_4bit_compute_dtype
        else:
            raise ValueError('bnb_4bit_compute_dtype must be a string or a torch.dtype')

        self.post_init()

    def post_init(self):
        if not isinstance(self.llm_int8_threshold, float):
            raise ValueError('llm_int8_threshold must be a float')

        if self.llm_int8_skip_modules is not None and not isinstance(self.llm_int8_skip_modules, list):
            raise ValueError('llm_int8_skip_modules must be a list of strings')

        if not isinstance(self.llm_int8_enable_fp32_cpu_offload, bool):
            raise ValueError('llm_int8_enable_fp32_cpu_offload must be a boolean')

        if not isinstance(self.llm_int8_has_fp16_weight, bool):
            raise ValueError('llm_int8_has_fp16_weight must be a boolean')

        if self.bnb_4bit_compute_dtype is not None and not isinstance(self.bnb_4bit_compute_dtype, torch.dtype):
            raise ValueError('bnb_4bit_compute_dtype must be torch.dtype')

        if not isinstance(self.bnb_4bit_quant_type, str):
            raise ValueError('bnb_4bit_quant_type must be a string')

        if not isinstance(self.bnb_4bit_use_double_quant, bool):
            raise ValueError('bnb_4bit_use_double_quant must be a boolean')

        if self.load_in_4bit and not version.parse(importlib.metadata.version('bitsandbytes')) >= version.parse(
            '0.39.0'
        ):
            raise ValueError(
                '4 bit quantization requires bitsandbytes>=0.39.0 - please upgrade your bitsandbytes version')

    def is_quantizable(self):
        return self.load_in_8bit or self.load_in_4bit

    def quantization_method(self):
        if self.load_in_8bit:
            return "llm_int8"
        elif self.load_in_4bit and self.bnb_4bit_quant_type == "fp4":
            return "fp4"
        elif self.load_in_4bit and self.bnb_4bit_quant_type == "nf4":
            return "nf4"
        else:
            return None

    @classmethod
    def from_dict(cls, config_dict, return_unused_kwargs, **kwargs):
        config = cls(**config_dict)

        to_remove = []
        for key, value in kwargs.items():
            if hasattr(config, key):
                setattr(config, key, value)
                to_remove.append(key)
        for key in to_remove:
            kwargs.pop(key, None)

        if return_unused_kwargs:
            return config, kwargs
        else:
            return config

    def to_json_file(self, json_file_path):
        with open(json_file_path, 'w', encoding='utf-8') as writer:
            config_dict = self.to_dict()
            json_string = json.dumps(config_dict, indent=2, sort_keys=True) + '\n'
            writer.write(json_string)

    def to_dict(self) -> Dict[str, Any]:
        output = copy.deepcopy(self.__dict__)
        output['bnb_4bit_compute_dtype'] = str(output['bnb_4bit_compute_dtype']).split('.')[1]
        return output

    def __repr__(self) -> str:
        return F'''{self.__class__.__name__} {self.to_json_string()}'''

    def to_json_string(self, use_diff=True) -> str:
        if use_diff is True:
            config_dict = self.to_diff_dict()
        else:
            config_dict = self.to_dict()
        return json.dumps(config_dict, indent=2, sort_keys=True) + "\n"

    def to_diff_dict(self) -> Dict[str, Any]:
        config_dict = self.to_dict()

        # get the default config dict
        default_config_dict = BitsAndBytesConfig().to_dict()

        serializable_config_dict = {}

        # only serialize values that differ from the default config
        for key, value in config_dict.items():
            if value != default_config_dict[key]:
                serializable_config_dict[key] = value

        return serializable_config_dict
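# Hedged usage sketch (post_init requires bitsandbytes>=0.39.0 installed when
# load_in_4bit=True): build an NF4 4-bit config and serialize only the fields
# that differ from the defaults.
config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_quant_type="nf4",
    bnb_4bit_compute_dtype="bfloat16",
    bnb_4bit_use_double_quant=True,
)
assert config.quantization_method() == "nf4"
print(config.to_json_string())  # diff vs. defaults only (use_diff=True)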
| 368 |
import warnings
from ...utils import logging
from .image_processing_flava import FlavaImageProcessor
logger = logging.get_logger(__name__)


class FlavaFeatureExtractor(FlavaImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            'The class FlavaFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'
            ' use FlavaImageProcessor instead.',
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 295 | 0 |
import torch
from transformers import PreTrainedModel, XLMRobertaConfig, XLMRobertaModel


class MCLIPConfig(XLMRobertaConfig):
    model_type = 'M-CLIP'

    def __init__(self, transformerDimSize=1_024, imageDimSize=768, **kwargs):
        self.transformerDimensions = transformerDimSize
        self.numDims = imageDimSize
        super().__init__(**kwargs)


class MultilingualCLIP(PreTrainedModel):
    config_class = MCLIPConfig

    def __init__(self, config, *args, **kwargs):
        super().__init__(config, *args, **kwargs)
        self.transformer = XLMRobertaModel(config)
        self.LinearTransformation = torch.nn.Linear(
            in_features=config.transformerDimensions, out_features=config.numDims
        )

    def forward(self, input_ids, attention_mask):
        embs = self.transformer(input_ids=input_ids, attention_mask=attention_mask)[0]
        embs2 = (embs * attention_mask.unsqueeze(2)).sum(dim=1) / attention_mask.sum(dim=1)[:, None]
        return self.LinearTransformation(embs2), embs
| 212 |
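# Standalone sketch of the masked mean-pooling step in forward() above
# (made-up tensor sizes): padding positions contribute nothing to the
# sentence embedding.
import torch

embs = torch.randn(2, 5, 8)  # (batch, seq_len, hidden)
attention_mask = torch.tensor([[1, 1, 1, 0, 0], [1, 1, 1, 1, 1]], dtype=torch.float)
pooled = (embs * attention_mask.unsqueeze(2)).sum(dim=1) / attention_mask.sum(dim=1)[:, None]
assert pooled.shape == (2, 8)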
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
    AutoencoderKL,
    DDIMScheduler,
    StableDiffusionAttendAndExcitePipeline,
    UNet2DConditionModel,
)
from diffusers.utils import load_numpy, skip_mps, slow
from diffusers.utils.testing_utils import require_torch_gpu

from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin


torch.backends.cuda.matmul.allow_tf32 = False


@skip_mps
class StableDiffusionAttendAndExcitePipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableDiffusionAttendAndExcitePipeline
    test_attention_slicing = False
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS.union({'token_indices'})
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS

    # Attend-and-excite runs a backward pass at inference time, so strict
    # determinism is relaxed for these tests.
    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        torch.use_deterministic_algorithms(False)

    @classmethod
    def tearDownClass(cls):
        super().tearDownClass()
        torch.use_deterministic_algorithms(True)

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=1,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D'),
            up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D'),
            cross_attention_dim=32,
            attention_head_dim=(2, 4),
            use_linear_projection=True,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule='scaled_linear',
            clip_sample=False,
            set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'],
            up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'],
            latent_channels=4,
            sample_size=128,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1_000,
            hidden_act='gelu',
            projection_dim=512,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip')

        components = {
            'unet': unet,
            'scheduler': scheduler,
            'vae': vae,
            'text_encoder': text_encoder,
            'tokenizer': tokenizer,
            'safety_checker': None,
            'feature_extractor': None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith('mps'):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            'prompt': 'a cat and a frog',
            'token_indices': [2, 5],
            'generator': generator,
            'num_inference_steps': 1,
            'guidance_scale': 6.0,
            'output_type': 'numpy',
            'max_iter_to_alter': 2,
            'thresholds': {0: 0.7},
        }
        return inputs

    def test_inference(self):
        device = 'cpu'
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        self.assertEqual(image.shape, (1, 64, 64, 3))
        expected_slice = np.array(
            [0.63905364, 0.62897307, 0.48599017, 0.5133624, 0.5550048, 0.45769516, 0.50326973, 0.5023139, 0.45384496]
        )
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)

    def test_cpu_offload_forward_pass(self):
        super().test_cpu_offload_forward_pass(expected_max_diff=5e-4)

    def test_inference_batch_consistent(self):
        self._test_inference_batch_consistent(batch_sizes=[1, 2])

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(batch_size=2, expected_max_diff=7e-4)

    def test_dict_tuple_outputs_equivalent(self):
        super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3)

    def test_pt_np_pil_outputs_equivalent(self):
        super().test_pt_np_pil_outputs_equivalent(expected_max_diff=5e-4)

    def test_save_load_local(self):
        super().test_save_load_local(expected_max_difference=5e-4)

    def test_save_load_optional_components(self):
        super().test_save_load_optional_components(expected_max_difference=4e-4)


@require_torch_gpu
@slow
class StableDiffusionAttendAndExcitePipelineIntegrationTests(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        torch.use_deterministic_algorithms(False)

    @classmethod
    def tearDownClass(cls):
        super().tearDownClass()
        torch.use_deterministic_algorithms(True)

    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_attend_and_excite_fp16(self):
        generator = torch.manual_seed(51)

        pipe = StableDiffusionAttendAndExcitePipeline.from_pretrained(
            'CompVis/stable-diffusion-v1-4', safety_checker=None, torch_dtype=torch.float16
        )
        pipe.to('cuda')

        prompt = 'a painting of an elephant with glasses'
        token_indices = [5, 7]

        image = pipe(
            prompt=prompt,
            token_indices=token_indices,
            guidance_scale=7.5,
            generator=generator,
            num_inference_steps=5,
            max_iter_to_alter=5,
            output_type='numpy',
        ).images[0]

        expected_image = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/attend-and-excite/elephant_glasses.npy'
        )
        assert np.abs((expected_image - image).max()) < 5e-1
| 212 | 1 |
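# Hedged usage sketch of Attend-and-Excite outside the test harness:
# token_indices select which prompt tokens the cross-attention maps are
# optimized to "excite" during sampling (requires a CUDA GPU; model id and
# indices are just for illustration).
import torch
from diffusers import StableDiffusionAttendAndExcitePipeline

pipe = StableDiffusionAttendAndExcitePipeline.from_pretrained(
    "CompVis/stable-diffusion-v1-4", torch_dtype=torch.float16
).to("cuda")
prompt = "a cat and a frog"
print(pipe.get_indices(prompt))  # token -> index map, to pick indices from
image = pipe(prompt=prompt, token_indices=[2, 5], num_inference_steps=50).images[0]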
"""simple docstring"""
import importlib
import os
import fsspec
import pytest
from fsspec import register_implementation
from fsspec.registry import _registry as _fsspec_registry
from datasets.filesystems import COMPRESSION_FILESYSTEMS, HfFileSystem, extract_path_from_uri, is_remote_filesystem
from .utils import require_lz4, require_zstandard


def test_mockfs(mockfs):
    assert "mock" in _fsspec_registry
    assert "bz2" in _fsspec_registry


def test_non_mockfs():
    assert "mock" not in _fsspec_registry
    assert "bz2" in _fsspec_registry


def test_extract_path_from_uri():
    mock_bucket = "mock-s3-bucket"
    dataset_path = f"s3://{mock_bucket}"
    dataset_path = extract_path_from_uri(dataset_path)
    assert dataset_path.startswith("s3://") is False

    dataset_path = "./local/path"
    new_dataset_path = extract_path_from_uri(dataset_path)
    assert dataset_path == new_dataset_path


def test_is_remote_filesystem(mockfs):
    is_remote = is_remote_filesystem(mockfs)
    assert is_remote is True

    fs = fsspec.filesystem("file")
    is_remote = is_remote_filesystem(fs)
    assert is_remote is False


@pytest.mark.parametrize("compression_fs_class", COMPRESSION_FILESYSTEMS)
def test_compression_filesystems(compression_fs_class, gz_file, bz2_file, lz4_file, zstd_file, xz_file, text_file):
    input_paths = {"gzip": gz_file, "xz": xz_file, "zstd": zstd_file, "bz2": bz2_file, "lz4": lz4_file}
    input_path = input_paths[compression_fs_class.protocol]
    if input_path is None:
        reason = f"for '{compression_fs_class.protocol}' compression protocol, "
        if compression_fs_class.protocol == "lz4":
            reason += require_lz4.kwargs["reason"]
        elif compression_fs_class.protocol == "zstd":
            reason += require_zstandard.kwargs["reason"]
        pytest.skip(reason)

    fs = fsspec.filesystem(compression_fs_class.protocol, fo=input_path)
    assert isinstance(fs, compression_fs_class)
    expected_filename = os.path.basename(input_path)
    expected_filename = expected_filename[: expected_filename.rindex(".")]
    assert fs.glob("*") == [expected_filename]
    with fs.open(expected_filename, "r", encoding="utf-8") as f, open(text_file, encoding="utf-8") as expected_file:
        assert f.read() == expected_file.read()


@pytest.mark.parametrize("protocol", ["zip", "gzip"])
def test_fs_isfile(protocol, zip_jsonl_path, jsonl_gz_path):
    compressed_file_paths = {"zip": zip_jsonl_path, "gzip": jsonl_gz_path}
    compressed_file_path = compressed_file_paths[protocol]
    member_file_path = "dataset.jsonl"
    path = f"{protocol}://{member_file_path}::{compressed_file_path}"
    fs, *_ = fsspec.get_fs_token_paths(path)
    assert fs.isfile(member_file_path)
    assert not fs.isfile("non_existing_" + member_file_path)


@pytest.mark.integration
def test_hf_filesystem(hf_token, hf_api, hf_private_dataset_repo_txt_data, text_file):
    repo_info = hf_api.dataset_info(hf_private_dataset_repo_txt_data, token=hf_token)
    hffs = HfFileSystem(repo_info=repo_info, token=hf_token)
    assert sorted(hffs.glob("*")) == [".gitattributes", "data"]
    assert hffs.isdir("data")
    assert hffs.isfile(".gitattributes") and hffs.isfile("data/text_data.txt")
    with open(text_file) as f:
        assert hffs.open("data/text_data.txt", "r").read() == f.read()


def test_fs_overwrites():
    protocol = "bz2"

    # Import module
    import datasets.filesystems

    # Overwrite protocol and reload
    register_implementation(protocol, None, clobber=True)
    with pytest.warns(UserWarning) as warning_info:
        importlib.reload(datasets.filesystems)

    assert len(warning_info) == 1
    assert (
        str(warning_info[0].message)
        == f"A filesystem protocol was already set for {protocol} and will be overwritten."
    )
| 64 |
"""simple docstring"""
from typing import List, Optional, Tuple, Union
import torch
from ...schedulers import DDIMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class DDIMPipeline(DiffusionPipeline):
    def __init__(self, unet, scheduler):
        super().__init__()

        # make sure scheduler can always be converted to DDIM
        scheduler = DDIMScheduler.from_config(scheduler.config)

        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        eta: float = 0.0,
        num_inference_steps: int = 50,
        use_clipped_model_output: Optional[bool] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ):
        # Sample gaussian noise to begin loop
        if isinstance(self.unet.config.sample_size, int):
            image_shape = (
                batch_size,
                self.unet.config.in_channels,
                self.unet.config.sample_size,
                self.unet.config.sample_size,
            )
        else:
            image_shape = (batch_size, self.unet.config.in_channels, *self.unet.config.sample_size)

        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"""You have passed a list of generators of length {len(generator)}, but requested an effective batch"""
                f""" size of {batch_size}. Make sure the batch size matches the length of the generators."""
            )

        image = randn_tensor(image_shape, generator=generator, device=self.device, dtype=self.unet.dtype)

        # set step values
        self.scheduler.set_timesteps(num_inference_steps)

        for t in self.progress_bar(self.scheduler.timesteps):
            # 1. predict noise model_output
            model_output = self.unet(image, t).sample

            # 2. predict previous mean of image x_t-1 and add variance depending on eta
            # eta corresponds to η in paper and should be between [0, 1]
            # do x_t -> x_t-1
            image = self.scheduler.step(
                model_output, t, image, eta=eta, use_clipped_model_output=use_clipped_model_output, generator=generator
            ).prev_sample

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
| 64 | 1 |
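# Hedged usage sketch for the pipeline above, with a small pretrained
# unconditional UNet (model id assumed available on the Hub):
from diffusers import DDIMScheduler, UNet2DModel

unet = UNet2DModel.from_pretrained("google/ddpm-cifar10-32")
pipe = DDIMPipeline(unet=unet, scheduler=DDIMScheduler())
image = pipe(batch_size=1, num_inference_steps=50, eta=0.0).images[0]
image.save("ddim_sample.png")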
'''simple docstring'''
from __future__ import annotations
def max_sum_in_array(array: list[int], k: int) -> int:
    """simple docstring"""
    if len(array) < k or k < 0:
        raise ValueError('Invalid Input')
    max_sum = current_sum = sum(array[:k])
    for i in range(len(array) - k):
        current_sum = current_sum - array[i] + array[i + k]
        max_sum = max(max_sum, current_sum)
    return max_sum
if __name__ == "__main__":
from doctest import testmod
from random import randint
testmod()
lowercase__ : List[str] = [randint(-10_00, 10_00) for i in range(1_00)]
lowercase__ : str = randint(0, 1_10)
print(F"""The maximum sum of {k} consecutive elements is {max_sum_in_array(array,k)}""")
| 324 |
import gc
import unittest
import numpy as np
import torch
from torch.backends.cuda import sdp_kernel
from diffusers import (
    CMStochasticIterativeScheduler,
    ConsistencyModelPipeline,
    UNet2DModel,
)
from diffusers.utils import randn_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_2, require_torch_gpu

from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin


enable_full_determinism()


class ConsistencyModelPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = ConsistencyModelPipeline
    params = UNCONDITIONAL_IMAGE_GENERATION_PARAMS
    batch_params = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS
    # Override required_optional_params to remove num_images_per_prompt
    required_optional_params = frozenset(
        [
            "num_inference_steps",
            "generator",
            "latents",
            "output_type",
            "return_dict",
            "callback",
            "callback_steps",
        ]
    )

    @property
    def dummy_uncond_unet(self):
        unet = UNet2DModel.from_pretrained(
            "diffusers/consistency-models-test",
            subfolder="test_unet",
        )
        return unet

    @property
    def dummy_cond_unet(self):
        unet = UNet2DModel.from_pretrained(
            "diffusers/consistency-models-test",
            subfolder="test_unet_class_cond",
        )
        return unet

    def get_dummy_components(self, class_cond=False):
        if class_cond:
            unet = self.dummy_cond_unet
        else:
            unet = self.dummy_uncond_unet

        # Default to CM multistep sampler
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40,
            sigma_min=0.002,
            sigma_max=80.0,
        )

        components = {
            "unet": unet,
            "scheduler": scheduler,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        inputs = {
            "batch_size": 1,
            "num_inference_steps": None,
            "timesteps": [22, 0],
            "generator": generator,
            "output_type": "np",
        }
        return inputs

    def test_consistency_model_pipeline_multistep(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        pipe = ConsistencyModelPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        assert image.shape == (1, 32, 32, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.3572, 0.6273, 0.4031, 0.3961, 0.4321, 0.5730, 0.5266, 0.4780, 0.5004])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_consistency_model_pipeline_multistep_class_cond(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components(class_cond=True)
        pipe = ConsistencyModelPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs["class_labels"] = 0
        image = pipe(**inputs).images
        assert image.shape == (1, 32, 32, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.3572, 0.6273, 0.4031, 0.3961, 0.4321, 0.5730, 0.5266, 0.4780, 0.5004])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_consistency_model_pipeline_onestep(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        pipe = ConsistencyModelPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs["num_inference_steps"] = 1
        inputs["timesteps"] = None
        image = pipe(**inputs).images
        assert image.shape == (1, 32, 32, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.5004, 0.5004, 0.4994, 0.5008, 0.4976, 0.5018, 0.4990, 0.4982, 0.4987])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_consistency_model_pipeline_onestep_class_cond(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components(class_cond=True)
        pipe = ConsistencyModelPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs["num_inference_steps"] = 1
        inputs["timesteps"] = None
        inputs["class_labels"] = 0
        image = pipe(**inputs).images
        assert image.shape == (1, 32, 32, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.5004, 0.5004, 0.4994, 0.5008, 0.4976, 0.5018, 0.4990, 0.4982, 0.4987])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3


@slow
@require_torch_gpu
class ConsistencyModelPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, seed=0, get_fixed_latents=False, device="cpu", dtype=torch.float32, shape=(1, 3, 64, 64)):
        generator = torch.manual_seed(seed)

        inputs = {
            "num_inference_steps": None,
            "timesteps": [22, 0],
            "class_labels": 0,
            "generator": generator,
            "output_type": "np",
        }

        if get_fixed_latents:
            latents = self.get_fixed_latents(seed=seed, device=device, dtype=dtype, shape=shape)
            inputs["latents"] = latents

        return inputs

    def get_fixed_latents(self, seed=0, device="cpu", dtype=torch.float32, shape=(1, 3, 64, 64)):
        if type(device) == str:
            device = torch.device(device)
        generator = torch.Generator(device=device).manual_seed(seed)
        latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        return latents

    def test_consistency_model_cd_multistep(self):
        unet = UNet2DModel.from_pretrained("diffusers/consistency_models", subfolder="diffusers_cd_imagenet64_l2")
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40,
            sigma_min=0.002,
            sigma_max=80.0,
        )
        pipe = ConsistencyModelPipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device=torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs()
        image = pipe(**inputs).images
        assert image.shape == (1, 64, 64, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.0888, 0.0881, 0.0666, 0.0479, 0.0292, 0.0195, 0.0201, 0.0163, 0.0254])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2

    def test_consistency_model_cd_onestep(self):
        unet = UNet2DModel.from_pretrained("diffusers/consistency_models", subfolder="diffusers_cd_imagenet64_l2")
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40,
            sigma_min=0.002,
            sigma_max=80.0,
        )
        pipe = ConsistencyModelPipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device=torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs()
        inputs["num_inference_steps"] = 1
        inputs["timesteps"] = None
        image = pipe(**inputs).images
        assert image.shape == (1, 64, 64, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.0340, 0.0152, 0.0063, 0.0267, 0.0221, 0.0107, 0.0416, 0.0186, 0.0217])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2

    @require_torch_2
    def test_consistency_model_cd_multistep_flash_attn(self):
        unet = UNet2DModel.from_pretrained("diffusers/consistency_models", subfolder="diffusers_cd_imagenet64_l2")
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40,
            sigma_min=0.002,
            sigma_max=80.0,
        )
        pipe = ConsistencyModelPipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device=torch_device, torch_dtype=torch.float16)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(get_fixed_latents=True, device=torch_device)

        # Ensure usage of flash attention in torch 2.0
        with sdp_kernel(enable_flash=True, enable_math=False, enable_mem_efficient=False):
            image = pipe(**inputs).images
        assert image.shape == (1, 64, 64, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.1875, 0.1428, 0.1289, 0.2151, 0.2092, 0.1477, 0.1877, 0.1641, 0.1353])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    @require_torch_2
    def test_consistency_model_cd_onestep_flash_attn(self):
        unet = UNet2DModel.from_pretrained("diffusers/consistency_models", subfolder="diffusers_cd_imagenet64_l2")
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40,
            sigma_min=0.002,
            sigma_max=80.0,
        )
        pipe = ConsistencyModelPipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device=torch_device, torch_dtype=torch.float16)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(get_fixed_latents=True, device=torch_device)
        inputs["num_inference_steps"] = 1
        inputs["timesteps"] = None

        # Ensure usage of flash attention in torch 2.0
        with sdp_kernel(enable_flash=True, enable_math=False, enable_mem_efficient=False):
            image = pipe(**inputs).images
        assert image.shape == (1, 64, 64, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.1663, 0.1948, 0.2275, 0.1680, 0.1204, 0.1245, 0.1858, 0.1338, 0.2095])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
| 275 | 0 |
'''simple docstring'''
import argparse
import torch
from transformers import YosoConfig, YosoForMaskedLM
def rename_key(orig_key):
    '''simple docstring'''
    if "model" in orig_key:
        orig_key = orig_key.replace('model.', '')
    if "norm1" in orig_key:
        orig_key = orig_key.replace('norm1', 'attention.output.LayerNorm')
    if "norm2" in orig_key:
        orig_key = orig_key.replace('norm2', 'output.LayerNorm')
    if "norm" in orig_key:
        orig_key = orig_key.replace('norm', 'LayerNorm')
    if "transformer" in orig_key:
        layer_num = orig_key.split('.')[0].split('_')[-1]
        orig_key = orig_key.replace(f'transformer_{layer_num}', f'encoder.layer.{layer_num}')
    if "mha.attn" in orig_key:
        orig_key = orig_key.replace('mha.attn', 'attention.self')
    if "mha" in orig_key:
        orig_key = orig_key.replace('mha', 'attention')
    if "W_q" in orig_key:
        orig_key = orig_key.replace('W_q', 'self.query')
    if "W_k" in orig_key:
        orig_key = orig_key.replace('W_k', 'self.key')
    if "W_v" in orig_key:
        orig_key = orig_key.replace('W_v', 'self.value')
    if "ff1" in orig_key:
        orig_key = orig_key.replace('ff1', 'intermediate.dense')
    if "ff2" in orig_key:
        orig_key = orig_key.replace('ff2', 'output.dense')
    if "ff" in orig_key:
        orig_key = orig_key.replace('ff', 'output.dense')
    if "mlm_class" in orig_key:
        orig_key = orig_key.replace('mlm.mlm_class', 'cls.predictions.decoder')
    if "mlm" in orig_key:
        orig_key = orig_key.replace('mlm', 'cls.predictions.transform')
    if "cls" not in orig_key:
        orig_key = 'yoso.' + orig_key
    return orig_key


def convert_checkpoint_helper(max_position_embeddings, orig_state_dict):
    '''simple docstring'''
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)
        if ("pooler" in key) or ("sen_class" in key):
            continue
        else:
            orig_state_dict[rename_key(key)] = val

    orig_state_dict['cls.predictions.bias'] = orig_state_dict['cls.predictions.decoder.bias']
    orig_state_dict['yoso.embeddings.position_ids'] = torch.arange(max_position_embeddings).expand((1, -1)) + 2

    return orig_state_dict


def convert_yoso_checkpoint(checkpoint_path, yoso_config_file, pytorch_dump_path):
    '''simple docstring'''
    orig_state_dict = torch.load(checkpoint_path, map_location='cpu')['model_state_dict']
    config = YosoConfig.from_json_file(yoso_config_file)
    model = YosoForMaskedLM(config)

    new_state_dict = convert_checkpoint_helper(config.max_position_embeddings, orig_state_dict)

    print(model.load_state_dict(new_state_dict))
    model.eval()
    model.save_pretrained(pytorch_dump_path)

    print(f'Checkpoint successfully converted. Model saved at {pytorch_dump_path}')


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--pytorch_model_path', default=None, type=str, required=True, help='Path to YOSO pytorch checkpoint.'
    )
    parser.add_argument(
        '--config_file',
        default=None,
        type=str,
        required=True,
        help='The json file for YOSO model config.',
    )
    parser.add_argument(
        '--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
    )
    args = parser.parse_args()
    convert_yoso_checkpoint(args.pytorch_model_path, args.config_file, args.pytorch_dump_path)
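# A tiny, hedged demonstration of the sequential-replace renaming above on a
# toy key (not a real YOSO checkpoint key):
# "model.transformer_0.mha.W_q.weight"
#   -> strip "model."          -> "transformer_0.mha.W_q.weight"
#   -> "transformer_0" mapping -> "encoder.layer.0.mha.W_q.weight"
#   -> "mha" -> "attention"    -> "encoder.layer.0.attention.W_q.weight"
#   -> "W_q" -> "self.query"   -> "encoder.layer.0.attention.self.query.weight"
#   -> prefix "yoso."          -> "yoso.encoder.layer.0.attention.self.query.weight"
assert rename_key("model.transformer_0.mha.W_q.weight") == (
    "yoso.encoder.layer.0.attention.self.query.weight"
)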
| 179 |
'''simple docstring'''
from __future__ import annotations


def maximum_non_adjacent_sum(nums: list[int]) -> int:
    """Return the maximum sum of non-adjacent elements of ``nums``."""
    if not nums:
        return 0
    max_including = nums[0]
    max_excluding = 0
    for num in nums[1:]:
        max_including, max_excluding = (
            max_excluding + num,
            max(max_including, max_excluding),
        )
    return max(max_including, max_excluding)
if __name__ == "__main__":
import doctest
doctest.testmod()
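# Worked example: in [3, 7, 4, 6, 5] the best non-adjacent picks are 7 and 6,
# so the answer is 13.
assert maximum_non_adjacent_sum([3, 7, 4, 6, 5]) == 13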
| 179 | 1 |
def prime_sieve_eratosthenes(num: int) -> list[int]:
    """simple docstring"""
    if num <= 0:
        raise ValueError('Input must be a positive integer')

    primes = [True] * (num + 1)

    p = 2
    while p * p <= num:
        if primes[p]:
            # mark every multiple of p starting at p*p as composite
            for i in range(p * p, num + 1, p):
                primes[i] = False
        p += 1

    return [prime for prime in range(2, num + 1) if primes[prime]]


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    user_num = int(input('Enter a positive integer: ').strip())
    print(prime_sieve_eratosthenes(user_num))
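# Quick sanity check of the sieve against the known primes below 30:
assert prime_sieve_eratosthenes(30) == [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]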
| 154 |
'''simple docstring'''
import os
def solution(filename: str = "input.txt") -> int:
    with open(os.path.join(os.path.dirname(__file__), filename)) as input_file:
        matrix = [
            [int(element) for element in line.split(""",""")]
            for line in input_file.readlines()
        ]

    rows = len(matrix)
    cols = len(matrix[0])

    minimal_path_sums = [[-1 for _ in range(cols)] for _ in range(rows)]
    for i in range(rows):
        minimal_path_sums[i][0] = matrix[i][0]

    for j in range(1, cols):
        # first pass: come from the left
        for i in range(rows):
            minimal_path_sums[i][j] = minimal_path_sums[i][j - 1] + matrix[i][j]

        # second pass: allow moving down within the column
        for i in range(1, rows):
            minimal_path_sums[i][j] = min(
                minimal_path_sums[i][j], minimal_path_sums[i - 1][j] + matrix[i][j]
            )

        # third pass: allow moving up within the column
        for i in range(rows - 2, -1, -1):
            minimal_path_sums[i][j] = min(
                minimal_path_sums[i][j], minimal_path_sums[i + 1][j] + matrix[i][j]
            )

    return min(minimal_path_sums_row[-1] for minimal_path_sums_row in minimal_path_sums)
if __name__ == "__main__":
print(f"""{solution() = }""")
| 239 | 0 |
"""simple docstring"""
import argparse
import json
import os
import fairseq
import torch
from torch import nn
from transformers import (
SpeechaTextaConfig,
SpeechaTextaForCausalLM,
SpeechaTextaTokenizer,
SpeechEncoderDecoderConfig,
SpeechEncoderDecoderModel,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaModel,
logging,
)
logging.set_verbosity_info()
__SCREAMING_SNAKE_CASE : Optional[int] = logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE : Dict = {
'''post_extract_proj''': '''feature_projection.projection''',
'''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''',
'''self_attn.k_proj''': '''encoder.layers.*.attention.k_proj''',
'''self_attn.v_proj''': '''encoder.layers.*.attention.v_proj''',
'''self_attn.q_proj''': '''encoder.layers.*.attention.q_proj''',
'''self_attn.out_proj''': '''encoder.layers.*.attention.out_proj''',
'''self_attn_layer_norm''': '''encoder.layers.*.layer_norm''',
'''fc1''': '''encoder.layers.*.feed_forward.intermediate_dense''',
'''fc2''': '''encoder.layers.*.feed_forward.output_dense''',
'''final_layer_norm''': '''encoder.layers.*.final_layer_norm''',
'''encoder.layer_norm''': '''encoder.layer_norm''',
'''w2v_model.layer_norm''': '''feature_projection.layer_norm''',
'''quantizer.weight_proj''': '''quantizer.weight_proj''',
'''quantizer.vars''': '''quantizer.codevectors''',
'''project_q''': '''project_q''',
'''final_proj''': '''project_hid''',
'''w2v_encoder.proj''': '''lm_head''',
'''mask_emb''': '''masked_spec_embed''',
}
__SCREAMING_SNAKE_CASE : Dict = [
'''lm_head''',
'''quantizer.weight_proj''',
'''quantizer.codevectors''',
'''project_q''',
'''project_hid''',
]
def lowerCAmelCase_( lowercase_ : Tuple , lowercase_ : Any , lowercase_ : Optional[Any] , lowercase_ : int , lowercase_ : List[Any] ) -> List[Any]:
for attribute in key.split('''.''' ):
_lowerCamelCase = getattr(lowercase_ , lowercase_ )
if weight_type is not None:
_lowerCamelCase = getattr(lowercase_ , lowercase_ ).shape
else:
_lowerCamelCase = hf_pointer.shape
assert hf_shape == value.shape, (
F"""Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be"""
F""" {value.shape} for {full_name}"""
)
if weight_type == "weight":
_lowerCamelCase = value
elif weight_type == "weight_g":
_lowerCamelCase = value
elif weight_type == "weight_v":
_lowerCamelCase = value
elif weight_type == "bias":
_lowerCamelCase = value
else:
_lowerCamelCase = value
logger.info(F"""{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.""" )
def lowerCAmelCase_( lowercase_ : str , lowercase_ : Optional[Any] ) -> Any:
_lowerCamelCase = []
_lowerCamelCase = fairseq_model.state_dict()
_lowerCamelCase = hf_model.feature_extractor
# if encoder has different dim to decoder -> use proj_weight
_lowerCamelCase = None
for name, value in fairseq_dict.items():
_lowerCamelCase = False
if "conv_layers" in name:
load_conv_layer(
lowercase_ , lowercase_ , lowercase_ , lowercase_ , hf_model.config.feat_extract_norm == '''group''' , )
_lowerCamelCase = True
elif name.split('''.''' )[0] == "proj":
_lowerCamelCase = fairseq_model.proj
_lowerCamelCase = True
else:
for key, mapped_key in MAPPING.items():
if key in name or key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0]:
_lowerCamelCase = True
if "*" in mapped_key:
_lowerCamelCase = name.split(lowercase_ )[0].split('''.''' )[-2]
_lowerCamelCase = mapped_key.replace('''*''' , lowercase_ )
if "weight_g" in name:
_lowerCamelCase = '''weight_g'''
elif "weight_v" in name:
_lowerCamelCase = '''weight_v'''
elif "bias" in name:
_lowerCamelCase = '''bias'''
elif "weight" in name:
_lowerCamelCase = '''weight'''
else:
_lowerCamelCase = None
set_recursively(lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ )
continue
if not is_used:
unused_weights.append(lowercase_ )
logger.warning(F"""Unused weights: {unused_weights}""" )
return proj_weight
def lowerCAmelCase_( lowercase_ : Optional[int] , lowercase_ : str , lowercase_ : Optional[Any] , lowercase_ : Tuple , lowercase_ : Tuple ) -> Union[str, Any]:
_lowerCamelCase = full_name.split('''conv_layers.''' )[-1]
_lowerCamelCase = name.split('''.''' )
_lowerCamelCase = int(items[0] )
_lowerCamelCase = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."""
)
_lowerCamelCase = value
logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."""
)
_lowerCamelCase = value
logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
F"""{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"""
" found."
)
_lowerCamelCase = value
logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."""
)
_lowerCamelCase = value
logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
else:
unused_weights.append(lowercase_ )
def lowerCAmelCase_( lowercase_ : Any ) -> Any:
_lowerCamelCase , _lowerCamelCase = emb.weight.shape
_lowerCamelCase = nn.Linear(lowercase_ , lowercase_ , bias=lowercase_ )
_lowerCamelCase = emb.weight.data
return lin_layer
def lowerCAmelCase_( lowercase_ : Optional[Any] ) -> List[Any]:
with open(lowercase_ , '''r''' , encoding='''utf-8''' ) as f:
_lowerCamelCase = f.readlines()
_lowerCamelCase = [line.split(''' ''' )[0] for line in lines]
_lowerCamelCase = len(lowercase_ )
_lowerCamelCase = {
'''<s>''': 0,
'''<pad>''': 1,
'''</s>''': 2,
'''<unk>''': 3,
}
vocab_dict.update(dict(zip(lowercase_ , range(4 , num_words + 4 ) ) ) )
return vocab_dict
@torch.no_grad()
def lowerCAmelCase_( lowercase_ : str , lowercase_ : Optional[int] , lowercase_ : str , lowercase_ : List[Any] , lowercase_ : Union[str, Any] , lowercase_ : Tuple , lowercase_ : List[Any] , ) -> Dict:
_lowerCamelCase = WavaVecaConfig.from_pretrained(lowercase_ )
_lowerCamelCase = SpeechaTextaConfig.from_pretrained(
lowercase_ , vocab_size=lowercase_ , decoder_layers=lowercase_ , do_stable_layer_norm=lowercase_ )
_lowerCamelCase = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=1_60_00 , padding_value=0 , do_normalize=lowercase_ , return_attention_mask=lowercase_ , )
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={'''data''': '''/'''.join(dict_path.split('''/''' )[:-1] )} )
_lowerCamelCase = model[0].eval()
# set weights for wav2vec2 encoder
_lowerCamelCase = WavaVecaModel(lowercase_ )
_lowerCamelCase = recursively_load_weights_wavaveca(model.encoder , lowercase_ )
_lowerCamelCase = SpeechaTextaForCausalLM(lowercase_ )
_lowerCamelCase , _lowerCamelCase = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict() , strict=lowercase_ )
# set output linear layer
unexpected_keys.remove('''embed_out''' )
_lowerCamelCase = nn.Parameter(model.decoder.embed_out.detach() )
# layer norm is init to identity matrix so leaving it is fine
logger.warning(F"""The following keys are missing when loading the decoder weights: {missing_keys}""" )
logger.warning(F"""The following keys are unexpected when loading the decoder weights: {unexpected_keys}""" )
_lowerCamelCase = SpeechEncoderDecoderModel(encoder=lowercase_ , decoder=lowercase_ )
_lowerCamelCase = False
# add projection layer
_lowerCamelCase = nn.Parameter(projection_layer.weight )
_lowerCamelCase = nn.Parameter(projection_layer.bias )
_lowerCamelCase = create_vocab_dict(lowercase_ )
with open(os.path.join(lowercase_ , '''vocab.json''' ) , '''w''' ) as fp:
json.dump(lowercase_ , lowercase_ )
_lowerCamelCase = SpeechaTextaTokenizer(os.path.join(lowercase_ , '''vocab.json''' ) )
tokenizer.save_pretrained(lowercase_ )
_lowerCamelCase = hf_wavavec.config.to_dict()
_lowerCamelCase = tokenizer.pad_token_id
_lowerCamelCase = tokenizer.bos_token_id
_lowerCamelCase = tokenizer.eos_token_id
_lowerCamelCase = '''speech_to_text_2'''
_lowerCamelCase = '''wav2vec2'''
_lowerCamelCase = SpeechEncoderDecoderConfig.from_dict(lowercase_ )
hf_wavavec.save_pretrained(lowercase_ )
feature_extractor.save_pretrained(lowercase_ )
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE : Tuple = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''')
parser.add_argument(
'''--encoder_config_path''',
default='''facebook/wav2vec2-large-lv60''',
type=str,
help='''Path to hf encoder wav2vec2 checkpoint config''',
)
parser.add_argument(
'''--decoder_config_path''',
default='''facebook/s2t-small-mustc-en-fr-st''',
type=str,
help='''Path to hf decoder s2t checkpoint config''',
)
    parser.add_argument('''--vocab_size''', default=10_224, type=int, help='''Vocab size of decoder''')
parser.add_argument('''--num_decoder_layers''', default=7, type=int, help='''Number of decoder layers''')
__SCREAMING_SNAKE_CASE : int = parser.parse_args()
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.dict_path,
encoder_config_path=args.encoder_config_path,
decoder_config_path=args.decoder_config_path,
vocab_size=args.vocab_size,
num_decoder_layers=args.num_decoder_layers,
)
| 73 |
"""simple docstring"""
import argparse
import glob
import importlib.util
import os
import re
import black
from doc_builder.style_doc import style_docstrings_in_code
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
__SCREAMING_SNAKE_CASE : List[Any] = '''src/diffusers'''
__SCREAMING_SNAKE_CASE : Union[str, Any] = '''.'''
# This is to make sure the diffusers module imported is the one in the repo.
__SCREAMING_SNAKE_CASE : Tuple = importlib.util.spec_from_file_location(
'''diffusers''',
os.path.join(DIFFUSERS_PATH, '''__init__.py'''),
submodule_search_locations=[DIFFUSERS_PATH],
)
__SCREAMING_SNAKE_CASE : Union[str, Any] = spec.loader.load_module()
def lowerCAmelCase_( lowercase_ : str , lowercase_ : Tuple ) -> int:
return line.startswith(lowercase_ ) or len(lowercase_ ) <= 1 or re.search(r'''^\s*\)(\s*->.*:|:)\s*$''' , lowercase_ ) is not None
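# A code block "continues" while lines are blank, keep the expected indent, or
# merely close a multi-line signature such as `) -> ReturnType:`.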
def lowerCAmelCase_( lowercase_ : Any ) -> Tuple:
_lowerCamelCase = object_name.split('''.''' )
_lowerCamelCase = 0
# First let's find the module where our object lives.
_lowerCamelCase = parts[i]
while i < len(lowercase_ ) and not os.path.isfile(os.path.join(lowercase_ , F"""{module}.py""" ) ):
i += 1
if i < len(lowercase_ ):
_lowerCamelCase = os.path.join(lowercase_ , parts[i] )
if i >= len(lowercase_ ):
raise ValueError(F"""`object_name` should begin with the name of a module of diffusers but got {object_name}.""" )
with open(os.path.join(lowercase_ , F"""{module}.py""" ) , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f:
_lowerCamelCase = f.readlines()
# Now let's find the class / func in the code!
_lowerCamelCase = ''''''
_lowerCamelCase = 0
for name in parts[i + 1 :]:
while (
line_index < len(lowercase_ ) and re.search(rF"""^{indent}(class|def)\s+{name}(\(|\:)""" , lines[line_index] ) is None
):
line_index += 1
indent += " "
line_index += 1
if line_index >= len(lowercase_ ):
raise ValueError(F""" {object_name} does not match any function or class in {module}.""" )
# We found the beginning of the class / func, now let's find the end (when the indent diminishes).
_lowerCamelCase = line_index
while line_index < len(lowercase_ ) and _should_continue(lines[line_index] , lowercase_ ):
line_index += 1
# Clean up empty lines at the end (if any).
while len(lines[line_index - 1] ) <= 1:
line_index -= 1
_lowerCamelCase = lines[start_index:line_index]
return "".join(lowercase_ )
__SCREAMING_SNAKE_CASE : str = re.compile(R'''^(\s*)#\s*Copied from\s+diffusers\.(\S+\.\S+)\s*($|\S.*$)''')
__SCREAMING_SNAKE_CASE : List[Any] = re.compile(R'''^\s*(\S+)->(\S+)(\s+.*|$)''')
__SCREAMING_SNAKE_CASE : List[str] = re.compile(R'''<FILL\s+[^>]*>''')
def lowerCAmelCase_( lowercase_ : List[Any] ) -> str:
_lowerCamelCase = code.split('''\n''' )
_lowerCamelCase = 0
while idx < len(lowercase_ ) and len(lines[idx] ) == 0:
idx += 1
if idx < len(lowercase_ ):
return re.search(r'''^(\s*)\S''' , lines[idx] ).groups()[0]
return ""
def lowerCAmelCase_( lowercase_ : List[Any] ) -> Union[str, Any]:
_lowerCamelCase = len(get_indent(lowercase_ ) ) > 0
if has_indent:
_lowerCamelCase = F"""class Bla:\n{code}"""
    _lowerCamelCase = black.Mode(target_versions={black.TargetVersion.PY37} , line_length=1_19 , preview=lowercase_ )
_lowerCamelCase = black.format_str(lowercase_ , mode=lowercase_ )
_lowerCamelCase , _lowerCamelCase = style_docstrings_in_code(lowercase_ )
return result[len('''class Bla:\n''' ) :] if has_indent else result
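# Wrapping indented snippets in a dummy `class Bla:` makes them valid
# top-level Python so black can format them; the wrapper line is stripped
# from the result afterwards.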
def lowerCAmelCase_( lowercase_ : Dict , lowercase_ : Union[str, Any]=False ) -> str:
with open(lowercase_ , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f:
_lowerCamelCase = f.readlines()
_lowerCamelCase = []
_lowerCamelCase = 0
# Not a for loop cause `lines` is going to change (if `overwrite=True`).
while line_index < len(lowercase_ ):
_lowerCamelCase = _re_copy_warning.search(lines[line_index] )
if search is None:
line_index += 1
continue
# There is some copied code here, let's retrieve the original.
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase = search.groups()
_lowerCamelCase = find_code_in_diffusers(lowercase_ )
_lowerCamelCase = get_indent(lowercase_ )
_lowerCamelCase = line_index + 1 if indent == theoretical_indent else line_index + 2
_lowerCamelCase = theoretical_indent
_lowerCamelCase = start_index
        # Loop to check the observed code, stop when indentation diminishes or if we see an End copy comment.
_lowerCamelCase = True
while line_index < len(lowercase_ ) and should_continue:
line_index += 1
if line_index >= len(lowercase_ ):
break
_lowerCamelCase = lines[line_index]
_lowerCamelCase = _should_continue(lowercase_ , lowercase_ ) and re.search(F"""^{indent}# End copy""" , lowercase_ ) is None
# Clean up empty lines at the end (if any).
while len(lines[line_index - 1] ) <= 1:
line_index -= 1
_lowerCamelCase = lines[start_index:line_index]
_lowerCamelCase = ''''''.join(lowercase_ )
# Remove any nested `Copied from` comments to avoid circular copies
_lowerCamelCase = [line for line in theoretical_code.split('''\n''' ) if _re_copy_warning.search(lowercase_ ) is None]
_lowerCamelCase = '''\n'''.join(lowercase_ )
# Before comparing, use the `replace_pattern` on the original code.
if len(lowercase_ ) > 0:
_lowerCamelCase = replace_pattern.replace('''with''' , '''''' ).split(''',''' )
_lowerCamelCase = [_re_replace_pattern.search(lowercase_ ) for p in patterns]
for pattern in patterns:
if pattern is None:
continue
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase = pattern.groups()
_lowerCamelCase = re.sub(lowercase_ , lowercase_ , lowercase_ )
if option.strip() == "all-casing":
_lowerCamelCase = re.sub(obja.lower() , obja.lower() , lowercase_ )
_lowerCamelCase = re.sub(obja.upper() , obja.upper() , lowercase_ )
# Blackify after replacement. To be able to do that, we need the header (class or function definition)
# from the previous line
_lowerCamelCase = blackify(lines[start_index - 1] + theoretical_code )
_lowerCamelCase = theoretical_code[len(lines[start_index - 1] ) :]
# Test for a diff and act accordingly.
if observed_code != theoretical_code:
diffs.append([object_name, start_index] )
if overwrite:
_lowerCamelCase = lines[:start_index] + [theoretical_code] + lines[line_index:]
_lowerCamelCase = start_index + 1
if overwrite and len(lowercase_ ) > 0:
# Warn the user a file has been modified.
print(F"""Detected changes, rewriting {filename}.""" )
with open(lowercase_ , '''w''' , encoding='''utf-8''' , newline='''\n''' ) as f:
f.writelines(lowercase_ )
return diffs
def lowerCAmelCase_( lowercase_ : bool = False ) -> Union[str, Any]:
_lowerCamelCase = glob.glob(os.path.join(lowercase_ , '''**/*.py''' ) , recursive=lowercase_ )
_lowerCamelCase = []
for filename in all_files:
_lowerCamelCase = is_copy_consistent(lowercase_ , lowercase_ )
diffs += [F"""- {filename}: copy does not match {d[0]} at line {d[1]}""" for d in new_diffs]
if not overwrite and len(lowercase_ ) > 0:
_lowerCamelCase = '''\n'''.join(lowercase_ )
raise Exception(
'''Found the following copy inconsistencies:\n'''
+ diff
+ '''\nRun `make fix-copies` or `python utils/check_copies.py --fix_and_overwrite` to fix them.''' )
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE : str = argparse.ArgumentParser()
parser.add_argument('''--fix_and_overwrite''', action='''store_true''', help='''Whether to fix inconsistencies.''')
__SCREAMING_SNAKE_CASE : str = parser.parse_args()
check_copies(args.fix_and_overwrite)
| 73 | 1 |
'''simple docstring'''
def perfect( number ):
"""simple docstring"""
return sum(i for i in range(1 , number // 2 + 1 ) if number % i == 0 ) == number
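# Added sanity check (values verifiable by hand: 6 = 1 + 2 + 3 and
# 28 = 1 + 2 + 4 + 7 + 14 are perfect, while 12 is not).
assert perfect(6) and perfect(28) and not perfect(12)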
if __name__ == "__main__":
print('''Program to check whether a number is a Perfect number or not...''')
    number = int(input('''Enter number: ''').strip())
print(f'{number} is {"" if perfect(number) else "not "}a Perfect Number.')
| 104 |
from __future__ import annotations
def kmp( pattern : str , text : str ) -> bool:
    '''simple docstring'''
    failure = get_failure_array(pattern )
    # 2) Step through text searching for pattern
    i , j = 0, 0  # index into text, pattern
    while i < len(text ):
        if pattern[j] == text[i]:
            if j == (len(pattern ) - 1):
                return True
            j += 1
        # if this is a prefix in our pattern
        # just go back far enough to continue
        elif j > 0:
            j = failure[j - 1]
            continue
        i += 1
    return False
def get_failure_array( pattern : str ) -> list[int]:
    '''simple docstring'''
    failure = [0]
    i = 0
    j = 1
    while j < len(pattern ):
        if pattern[i] == pattern[j]:
            i += 1
        elif i > 0:
            i = failure[i - 1]
            continue
        j += 1
        failure.append(i )
    return failure
if __name__ == "__main__":
# Test 1)
    pattern = '''abc1abc12'''
    text1 = '''alskfjaldsabc1abc1abc12k23adsfabcabc'''
    text2 = '''alskfjaldsk23adsfabcabc'''
    assert kmp(pattern, text1) and not kmp(pattern, text2)
# Test 2)
    pattern = '''ABABX'''
    text = '''ABABZABABYABABX'''
assert kmp(pattern, text)
# Test 3)
    pattern = '''AAAB'''
    text = '''ABAAAAAB'''
assert kmp(pattern, text)
# Test 4)
    pattern = '''abcdabcy'''
    text = '''abcxabcdabxabcdabcdabcy'''
assert kmp(pattern, text)
# Test 5)
    pattern = '''aabaabaaa'''
assert get_failure_array(pattern) == [0, 1, 0, 1, 2, 3, 4, 5, 2]
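    # Test 6) (added sanity check; failure values computed by hand)
    pattern = '''aaaa'''
    assert get_failure_array(pattern) == [0, 1, 2, 3]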
| 295 | 0 |
'''simple docstring'''
a_ : Optional[int] = "Input must be a string of 8 numbers plus letter"
a_ : List[Any] = "TRWAGMYFPDXBNJZSQVHLCKE"
def _A (spanish_id :str ) -> bool:
    '''simple docstring'''
    if not isinstance(spanish_id , str ):
        _a = f'Expected string as input, found {type(spanish_id ).__name__}'
        raise TypeError(_a )
    spanish_id_clean = spanish_id.replace('-' , '' ).upper()
    if len(spanish_id_clean ) != 9:
        raise ValueError(NUMBERS_PLUS_LETTER )
    try:
        number = int(spanish_id_clean[0:8] )
        letter = spanish_id_clean[8]
    except ValueError as ex:
        raise ValueError(NUMBERS_PLUS_LETTER ) from ex
    if letter.isdigit():
        raise ValueError(NUMBERS_PLUS_LETTER )
    return letter == LOOKUP_LETTERS[number % 23]
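# Added sanity check (letter computed by hand: 12345678 % 23 == 14 -> 'Z').
assert _A('12345678Z')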
if __name__ == "__main__":
import doctest
doctest.testmod()
| 364 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_torch_available
from ...utils import OptionalDependencyNotAvailable
_import_structure = {
"configuration_gpt_neox_japanese": ["GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTNeoXJapaneseConfig"],
"tokenization_gpt_neox_japanese": ["GPTNeoXJapaneseTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_gpt_neox_japanese"] = [
"GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST",
"GPTNeoXJapaneseForCausalLM",
"GPTNeoXJapaneseLayer",
"GPTNeoXJapaneseModel",
"GPTNeoXJapanesePreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_gpt_neox_japanese import GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXJapaneseConfig
from .tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neox_japanese import (
GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoXJapaneseForCausalLM,
GPTNeoXJapaneseLayer,
GPTNeoXJapaneseModel,
GPTNeoXJapanesePreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 104 | 0 |
"""simple docstring"""
import fire
from transformers import AutoConfig, AutoModelForSeq2SeqLM, AutoTokenizer
def save_randomly_initialized_version(config_name : str , save_dir : str , **config_kwargs ):
    """simple docstring"""
    config = AutoConfig.from_pretrained(config_name , **config_kwargs )
    model = AutoModelForSeq2SeqLM.from_config(config )
    model.save_pretrained(save_dir )
    AutoTokenizer.from_pretrained(config_name ).save_pretrained(save_dir )
    return model
if __name__ == "__main__":
fire.Fire(save_randomly_initialized_version)
| 64 |
"""simple docstring"""
from math import factorial
A_ = {str(d): factorial(d) for d in range(10)}
def sum_of_digit_factorial(snake_case__ : int ):
"""simple docstring"""
return sum(DIGIT_FACTORIAL[d] for d in str(snake_case__ ) )
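# Upper bound for the search below: an n-digit number is at least 10**(n - 1)
# while its digit-factorial sum is at most n * 9!, so no candidate can exceed
# 7 * 9! = 2540160.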
def solution():
"""simple docstring"""
    _snake_case = 7 * factorial(9 ) + 1
    return sum(i for i in range(3 , _snake_case ) if sum_of_digit_factorial(i ) == i )
if __name__ == "__main__":
print(F'''{solution() = }''')
| 64 | 1 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__magic_name__ = logging.get_logger(__name__)
__magic_name__ = {
"facebook/vit-mae-base": "https://huggingface.co/facebook/vit-mae-base/resolve/main/config.json",
# See all ViT MAE models at https://huggingface.co/models?filter=vit-mae
}
class lowercase ( A__ ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = """vit_mae"""
def __init__( self , _snake_case=768 , _snake_case=12 , _snake_case=12 , _snake_case=3072 , _snake_case="gelu" , _snake_case=0.0 , _snake_case=0.0 , _snake_case=0.02 , _snake_case=1e-12 , _snake_case=224 , _snake_case=16 , _snake_case=3 , _snake_case=True , _snake_case=16 , _snake_case=512 , _snake_case=8 , _snake_case=2048 , _snake_case=0.75 , _snake_case=False , **_snake_case , ) -> int:
"""simple docstring"""
super().__init__(**_snake_case )
UpperCAmelCase = hidden_size
UpperCAmelCase = num_hidden_layers
UpperCAmelCase = num_attention_heads
UpperCAmelCase = intermediate_size
UpperCAmelCase = hidden_act
UpperCAmelCase = hidden_dropout_prob
UpperCAmelCase = attention_probs_dropout_prob
UpperCAmelCase = initializer_range
UpperCAmelCase = layer_norm_eps
UpperCAmelCase = image_size
UpperCAmelCase = patch_size
UpperCAmelCase = num_channels
UpperCAmelCase = qkv_bias
UpperCAmelCase = decoder_num_attention_heads
UpperCAmelCase = decoder_hidden_size
UpperCAmelCase = decoder_num_hidden_layers
UpperCAmelCase = decoder_intermediate_size
UpperCAmelCase = mask_ratio
UpperCAmelCase = norm_pix_loss
| 360 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__magic_name__ = logging.get_logger(__name__)
__magic_name__ = {
"facebook/deit-base-distilled-patch16-224": (
"https://huggingface.co/facebook/deit-base-patch16-224/resolve/main/config.json"
),
# See all DeiT models at https://huggingface.co/models?filter=deit
}
class lowercase ( A__ ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = """deit"""
def __init__( self , _snake_case=768 , _snake_case=12 , _snake_case=12 , _snake_case=3072 , _snake_case="gelu" , _snake_case=0.0 , _snake_case=0.0 , _snake_case=0.02 , _snake_case=1e-12 , _snake_case=224 , _snake_case=16 , _snake_case=3 , _snake_case=True , _snake_case=16 , **_snake_case , ) -> Optional[Any]:
"""simple docstring"""
super().__init__(**_snake_case )
UpperCAmelCase = hidden_size
UpperCAmelCase = num_hidden_layers
UpperCAmelCase = num_attention_heads
UpperCAmelCase = intermediate_size
UpperCAmelCase = hidden_act
UpperCAmelCase = hidden_dropout_prob
UpperCAmelCase = attention_probs_dropout_prob
UpperCAmelCase = initializer_range
UpperCAmelCase = layer_norm_eps
UpperCAmelCase = image_size
UpperCAmelCase = patch_size
UpperCAmelCase = num_channels
UpperCAmelCase = qkv_bias
UpperCAmelCase = encoder_stride
class lowercase ( A__ ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = version.parse("""1.11""" )
@property
def snake_case_ ( self ) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
] )
@property
def snake_case_ ( self ) -> float:
"""simple docstring"""
return 1e-4
| 152 | 0 |
"""simple docstring"""
import logging
from transformers.configuration_utils import PretrainedConfig
a_ = logging.getLogger(__name__)
class __snake_case ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_lowerCamelCase = """masked_bert"""
def __init__( self , __lowerCamelCase=3_0522 , __lowerCamelCase=768 , __lowerCamelCase=12 , __lowerCamelCase=12 , __lowerCamelCase=3072 , __lowerCamelCase="gelu" , __lowerCamelCase=0.1 , __lowerCamelCase=0.1 , __lowerCamelCase=512 , __lowerCamelCase=2 , __lowerCamelCase=0.0_2 , __lowerCamelCase=1e-1_2 , __lowerCamelCase=0 , __lowerCamelCase="topK" , __lowerCamelCase="constant" , __lowerCamelCase=0.0 , **__lowerCamelCase , ):
'''simple docstring'''
super().__init__(pad_token_id=__lowerCamelCase , **__lowerCamelCase )
__A : Dict = vocab_size
__A : Union[str, Any] = hidden_size
__A : Tuple = num_hidden_layers
__A : Tuple = num_attention_heads
__A : Optional[Any] = hidden_act
__A : List[str] = intermediate_size
__A : Any = hidden_dropout_prob
__A : Optional[Any] = attention_probs_dropout_prob
__A : Any = max_position_embeddings
__A : str = type_vocab_size
__A : List[Any] = initializer_range
__A : str = layer_norm_eps
__A : Optional[int] = pruning_method
__A : str = mask_init
__A : Any = mask_scale
| 179 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
a_ = logging.get_logger(__name__)
a_ = {
"""shi-labs/nat-mini-in1k-224""": """https://huggingface.co/shi-labs/nat-mini-in1k-224/resolve/main/config.json""",
# See all Nat models at https://huggingface.co/models?filter=nat
}
class __snake_case ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_lowerCamelCase = """nat"""
_lowerCamelCase = {
"""num_attention_heads""": """num_heads""",
"""num_hidden_layers""": """num_layers""",
}
def __init__( self , __lowerCamelCase=4 , __lowerCamelCase=3 , __lowerCamelCase=64 , __lowerCamelCase=[3, 4, 6, 5] , __lowerCamelCase=[2, 4, 8, 16] , __lowerCamelCase=7 , __lowerCamelCase=3.0 , __lowerCamelCase=True , __lowerCamelCase=0.0 , __lowerCamelCase=0.0 , __lowerCamelCase=0.1 , __lowerCamelCase="gelu" , __lowerCamelCase=0.0_2 , __lowerCamelCase=1e-5 , __lowerCamelCase=0.0 , __lowerCamelCase=None , __lowerCamelCase=None , **__lowerCamelCase , ):
'''simple docstring'''
super().__init__(**__lowerCamelCase )
__A : Union[str, Any] = patch_size
__A : Optional[Any] = num_channels
__A : Tuple = embed_dim
__A : Dict = depths
__A : str = len(__lowerCamelCase )
__A : Optional[Any] = num_heads
__A : str = kernel_size
__A : Any = mlp_ratio
__A : Optional[int] = qkv_bias
__A : str = hidden_dropout_prob
__A : Any = attention_probs_dropout_prob
__A : int = drop_path_rate
__A : int = hidden_act
__A : Any = layer_norm_eps
__A : Tuple = initializer_range
# we set the hidden_size attribute in order to make Nat work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
__A : int = int(embed_dim * 2 ** (len(__lowerCamelCase ) - 1) )
__A : Union[str, Any] = layer_scale_init_value
__A : List[str] = ['''stem'''] + [F"""stage{idx}""" for idx in range(1 , len(__lowerCamelCase ) + 1 )]
__A , __A : Any = get_aligned_output_features_output_indices(
out_features=__lowerCamelCase , out_indices=__lowerCamelCase , stage_names=self.stage_names )
| 179 | 1 |
"""simple docstring"""
import unittest
import numpy as np
import torch
from diffusers import PNDMPipeline, PNDMScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
@property
def __SCREAMING_SNAKE_CASE ( self : Dict ) -> List[Any]:
torch.manual_seed(0 )
_UpperCamelCase : Any = UNetaDModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=("DownBlock2D", "AttnDownBlock2D") , up_block_types=("AttnUpBlock2D", "UpBlock2D") , )
return model
def __SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Union[str, Any]:
_UpperCamelCase : Union[str, Any] = self.dummy_uncond_unet
_UpperCamelCase : List[Any] = PNDMScheduler()
_UpperCamelCase : Any = PNDMPipeline(unet=__a , scheduler=__a )
pndm.to(__a )
pndm.set_progress_bar_config(disable=__a )
_UpperCamelCase : Optional[int] = torch.manual_seed(0 )
_UpperCamelCase : str = pndm(generator=__a , num_inference_steps=20 , output_type="numpy" ).images
_UpperCamelCase : int = torch.manual_seed(0 )
_UpperCamelCase : Union[str, Any] = pndm(generator=__a , num_inference_steps=20 , output_type="numpy" , return_dict=__a )[0]
_UpperCamelCase : Union[str, Any] = image[0, -3:, -3:, -1]
_UpperCamelCase : str = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
_UpperCamelCase : Optional[int] = np.array([1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
def __SCREAMING_SNAKE_CASE ( self : int ) -> Dict:
_UpperCamelCase : Optional[int] = "google/ddpm-cifar10-32"
_UpperCamelCase : Optional[int] = UNetaDModel.from_pretrained(__a )
_UpperCamelCase : Optional[Any] = PNDMScheduler()
_UpperCamelCase : str = PNDMPipeline(unet=__a , scheduler=__a )
pndm.to(__a )
pndm.set_progress_bar_config(disable=__a )
_UpperCamelCase : Union[str, Any] = torch.manual_seed(0 )
_UpperCamelCase : Tuple = pndm(generator=__a , output_type="numpy" ).images
_UpperCamelCase : Dict = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
_UpperCamelCase : str = np.array([0.15_64, 0.1_46_45, 0.14_06, 0.1_47_15, 0.1_24_25, 0.1_40_45, 0.1_31_15, 0.1_21_75, 0.1_25] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
| 310 |
"""simple docstring"""
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import add_start_docstrings
lowerCamelCase__ = R"\n [`RagConfig`] stores the configuration of a *RagModel*. Configuration objects inherit from [`PretrainedConfig`] and\n can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information.\n\n Args:\n title_sep (`str`, *optional*, defaults to `\" / \"`):\n Separator inserted between the title and the text of the retrieved document when calling [`RagRetriever`].\n doc_sep (`str`, *optional*, defaults to `\" // \"`):\n Separator inserted between the text of the retrieved document and the original input when calling\n [`RagRetriever`].\n n_docs (`int`, *optional*, defaults to 5):\n Number of documents to retrieve.\n max_combined_length (`int`, *optional*, defaults to 300):\n Max length of contextualized input returned by [`~RagRetriever.__call__`].\n retrieval_vector_size (`int`, *optional*, defaults to 768):\n Dimensionality of the document embeddings indexed by [`RagRetriever`].\n retrieval_batch_size (`int`, *optional*, defaults to 8):\n Retrieval batch size, defined as the number of queries issues concurrently to the faiss index encapsulated\n [`RagRetriever`].\n dataset (`str`, *optional*, defaults to `\"wiki_dpr\"`):\n A dataset identifier of the indexed dataset in HuggingFace Datasets (list all available datasets and ids\n using `datasets.list_datasets()`).\n dataset_split (`str`, *optional*, defaults to `\"train\"`)\n Which split of the `dataset` to load.\n index_name (`str`, *optional*, defaults to `\"compressed\"`)\n The index name of the index associated with the `dataset`. One can choose between `\"legacy\"`, `\"exact\"` and\n `\"compressed\"`.\n index_path (`str`, *optional*)\n The path to the serialized faiss index on disk.\n passages_path (`str`, *optional*):\n A path to text passages compatible with the faiss index. Required if using\n [`~models.rag.retrieval_rag.LegacyIndex`]\n use_dummy_dataset (`bool`, *optional*, defaults to `False`)\n Whether to load a \"dummy\" variant of the dataset specified by `dataset`.\n label_smoothing (`float`, *optional*, defaults to 0.0):\n Only relevant if `return_loss` is set to `True`. Controls the `epsilon` parameter value for label smoothing\n in the loss calculation. If set to 0, no label smoothing is performed.\n do_marginalize (`bool`, *optional*, defaults to `False`):\n If `True`, the logits are marginalized over all documents by making use of\n `torch.nn.functional.log_softmax`.\n reduce_loss (`bool`, *optional*, defaults to `False`):\n Whether or not to reduce the NLL loss using the `torch.Tensor.sum` operation.\n do_deduplication (`bool`, *optional*, defaults to `True`):\n Whether or not to deduplicate the generations from different context documents for a given input. Has to be\n set to `False` if used while training with distributed backend.\n exclude_bos_score (`bool`, *optional*, defaults to `False`):\n Whether or not to disregard the BOS token when computing the loss.\n output_retrieved(`bool`, *optional*, defaults to `False`):\n If set to `True`, `retrieved_doc_embeds`, `retrieved_doc_ids`, `context_input_ids` and\n `context_attention_mask` are returned. See returned tensors for more detail.\n use_cache (`bool`, *optional*, defaults to `True`):\n Whether or not the model should return the last key/values attentions (not used by all models).\n forced_eos_token_id (`int`, *optional*):\n The id of the token to force as the last generated token when `max_length` is reached. Usually set to\n `eos_token_id`.\n"
@add_start_docstrings(_UpperCamelCase )
class __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :int = "rag"
SCREAMING_SNAKE_CASE__ :List[str] = True
def __init__( self : List[Any] , __a : Optional[Any]=None , __a : str=True , __a : Tuple=None , __a : Dict=None , __a : Optional[int]=None , __a : Optional[int]=None , __a : List[Any]=None , __a : Dict=" / " , __a : int=" // " , __a : Optional[Any]=5 , __a : Dict=300 , __a : Optional[int]=768 , __a : Tuple=8 , __a : Union[str, Any]="wiki_dpr" , __a : Dict="train" , __a : List[Any]="compressed" , __a : str=None , __a : Tuple=None , __a : int=False , __a : str=False , __a : Optional[int]=0.0 , __a : Dict=True , __a : Tuple=False , __a : Dict=False , __a : str=False , __a : str=True , __a : Optional[Any]=None , **__a : Tuple , ) -> Any:
super().__init__(
bos_token_id=__a , pad_token_id=__a , eos_token_id=__a , decoder_start_token_id=__a , forced_eos_token_id=__a , is_encoder_decoder=__a , prefix=__a , vocab_size=__a , **__a , )
assert (
"question_encoder" in kwargs and "generator" in kwargs
), "Config has to be initialized with question_encoder and generator config"
_UpperCamelCase : Optional[int] = kwargs.pop("question_encoder" )
_UpperCamelCase : str = question_encoder_config.pop("model_type" )
_UpperCamelCase : Tuple = kwargs.pop("generator" )
_UpperCamelCase : str = decoder_config.pop("model_type" )
from ..auto.configuration_auto import AutoConfig
_UpperCamelCase : Union[str, Any] = AutoConfig.for_model(__a , **__a )
_UpperCamelCase : str = AutoConfig.for_model(__a , **__a )
_UpperCamelCase : Optional[int] = reduce_loss
_UpperCamelCase : str = label_smoothing
_UpperCamelCase : int = exclude_bos_score
_UpperCamelCase : List[str] = do_marginalize
_UpperCamelCase : Optional[int] = title_sep
_UpperCamelCase : Optional[int] = doc_sep
_UpperCamelCase : Union[str, Any] = n_docs
_UpperCamelCase : Tuple = max_combined_length
_UpperCamelCase : Union[str, Any] = dataset
_UpperCamelCase : Any = dataset_split
_UpperCamelCase : List[str] = index_name
_UpperCamelCase : int = retrieval_vector_size
_UpperCamelCase : str = retrieval_batch_size
_UpperCamelCase : Dict = passages_path
_UpperCamelCase : str = index_path
_UpperCamelCase : Tuple = use_dummy_dataset
_UpperCamelCase : Union[str, Any] = output_retrieved
_UpperCamelCase : Optional[Any] = do_deduplication
_UpperCamelCase : str = use_cache
if self.forced_eos_token_id is None:
_UpperCamelCase : List[str] = getattr(self.generator , "forced_eos_token_id" , __a )
@classmethod
def __SCREAMING_SNAKE_CASE ( cls : Union[str, Any] , __a : PretrainedConfig , __a : PretrainedConfig , **__a : Optional[int] ) -> PretrainedConfig:
return cls(question_encoder=question_encoder_config.to_dict() , generator=generator_config.to_dict() , **__a )
def __SCREAMING_SNAKE_CASE ( self : Dict ) -> int:
_UpperCamelCase : Dict = copy.deepcopy(self.__dict__ )
_UpperCamelCase : List[Any] = self.question_encoder.to_dict()
_UpperCamelCase : Tuple = self.generator.to_dict()
_UpperCamelCase : Any = self.__class__.model_type
return output
| 310 | 1 |
def power( base : int , exponent : int ) -> float:
    return base * power( base , (exponent - 1) ) if exponent else 1
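# Recursive exponentiation: power(b, e) = b * power(b, e - 1) with base case
# power(b, 0) = 1; negative exponents are handled by the caller below.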
if __name__ == "__main__":
print("""Raise base to the power of exponent using recursion...""")
a =int(input("""Enter the base: """).strip())
a =int(input("""Enter the exponent: """).strip())
a =power(base, abs(exponent))
if exponent < 0: # power() does not properly deal w/ negative exponents
a =1 / result
print(F"""{base} to the power of {exponent} is {result}""")
| 73 |
# This is the module that test_patching.py uses to test patch_submodule()
import os # noqa: this is just for tests
import os as renamed_os # noqa: this is just for tests
from os import path # noqa: this is just for tests
from os import path as renamed_path # noqa: this is just for tests
from os.path import join # noqa: this is just for tests
from os.path import join as renamed_join # noqa: this is just for tests
a =open # noqa: we just need to have a builtin inside this module to test it properly
| 73 | 1 |
'''simple docstring'''
import argparse
import requests
import torch
from PIL import Image
from transformers import CLIPProcessor, GroupViTConfig, GroupViTModel
def SCREAMING_SNAKE_CASE__ ( __A ) -> List[Any]:
if "img_encoder.pos_embed" in name:
_snake_case = name.replace('img_encoder.pos_embed' , 'vision_model.embeddings.position_embeddings' )
if "img_encoder.patch_embed.proj" in name:
_snake_case = name.replace('img_encoder.patch_embed.proj' , 'vision_model.embeddings.patch_embeddings.projection' )
if "img_encoder.patch_embed.norm" in name:
_snake_case = name.replace('img_encoder.patch_embed.norm' , 'vision_model.embeddings.layernorm' )
if "img_encoder.layers" in name:
_snake_case = name.replace('img_encoder.layers' , 'vision_model.encoder.stages' )
if "blocks" in name and "res" not in name:
_snake_case = name.replace('blocks' , 'layers' )
if "attn" in name and "pre_assign" not in name:
_snake_case = name.replace('attn' , 'self_attn' )
if "proj" in name and "self_attn" in name and "text" not in name:
_snake_case = name.replace('proj' , 'out_proj' )
if "pre_assign_attn.attn.proj" in name:
_snake_case = name.replace('pre_assign_attn.attn.proj' , 'pre_assign_attn.attn.out_proj' )
if "norm1" in name:
_snake_case = name.replace('norm1' , 'layer_norm1' )
if "norm2" in name and "pre_assign" not in name:
_snake_case = name.replace('norm2' , 'layer_norm2' )
if "img_encoder.norm" in name:
_snake_case = name.replace('img_encoder.norm' , 'vision_model.layernorm' )
# text encoder
if "text_encoder.token_embedding" in name:
_snake_case = name.replace('text_encoder.token_embedding' , 'text_model.embeddings.token_embedding' )
if "text_encoder.positional_embedding" in name:
_snake_case = name.replace('text_encoder.positional_embedding' , 'text_model.embeddings.position_embedding.weight' )
if "text_encoder.transformer.resblocks." in name:
_snake_case = name.replace('text_encoder.transformer.resblocks.' , 'text_model.encoder.layers.' )
if "ln_1" in name:
_snake_case = name.replace('ln_1' , 'layer_norm1' )
if "ln_2" in name:
_snake_case = name.replace('ln_2' , 'layer_norm2' )
if "c_fc" in name:
_snake_case = name.replace('c_fc' , 'fc1' )
if "c_proj" in name:
_snake_case = name.replace('c_proj' , 'fc2' )
if "text_encoder" in name:
_snake_case = name.replace('text_encoder' , 'text_model' )
if "ln_final" in name:
_snake_case = name.replace('ln_final' , 'final_layer_norm' )
# projection layers
if "img_projector.linear_hidden." in name:
_snake_case = name.replace('img_projector.linear_hidden.' , 'visual_projection.' )
if "img_projector.linear_out." in name:
_snake_case = name.replace('img_projector.linear_out.' , 'visual_projection.3.' )
if "text_projector.linear_hidden" in name:
_snake_case = name.replace('text_projector.linear_hidden' , 'text_projection' )
if "text_projector.linear_out" in name:
_snake_case = name.replace('text_projector.linear_out' , 'text_projection.3' )
return name
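# The renames above map GroupViT research-repo parameter names onto the
# Hugging Face GroupViTModel module layout (vision tower, text tower and the
# two projection heads).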
def SCREAMING_SNAKE_CASE__ ( __A , __A ) -> Any:
for key in orig_state_dict.copy().keys():
_snake_case = orig_state_dict.pop(__A )
if "qkv" in key:
# weights and biases of the key, value and query projections of vision encoder's attention layers require special treatment:
# we need to split them up into separate matrices/vectors
_snake_case = key.split('.' )
_snake_case , _snake_case = int(key_split[2] ), int(key_split[4] )
_snake_case = config.vision_config.hidden_size
if "weight" in key:
_snake_case = val[:dim, :]
_snake_case = val[dim : dim * 2, :]
_snake_case = val[-dim:, :]
else:
_snake_case = val[:dim]
_snake_case = val[dim : dim * 2]
_snake_case = val[-dim:]
elif "in_proj" in key:
# weights and biases of the key, value and query projections of text encoder's attention layers require special treatment:
# we need to split them up into separate matrices/vectors
_snake_case = key.split('.' )
_snake_case = int(key_split[3] )
_snake_case = config.text_config.hidden_size
if "weight" in key:
_snake_case = val[:dim, :]
_snake_case = val[
dim : dim * 2, :
]
_snake_case = val[-dim:, :]
else:
_snake_case = val[:dim]
_snake_case = val[dim : dim * 2]
_snake_case = val[-dim:]
else:
_snake_case = rename_key(__A )
# squeeze if necessary
if (
"text_projection.0" in new_name
or "text_projection.3" in new_name
or "visual_projection.0" in new_name
or "visual_projection.3" in new_name
):
_snake_case = val.squeeze_()
else:
_snake_case = val
return orig_state_dict
def SCREAMING_SNAKE_CASE__ ( ) -> Any:
_snake_case = 'http://images.cocodataset.org/val2017/000000039769.jpg'
_snake_case = Image.open(requests.get(__A , stream=__A ).raw )
return im
@torch.no_grad()
def SCREAMING_SNAKE_CASE__ ( __A , __A , __A="groupvit-gcc-yfcc" , __A=False ) -> Optional[Any]:
_snake_case = GroupViTConfig()
_snake_case = GroupViTModel(__A ).eval()
_snake_case = torch.load(__A , map_location='cpu' )['model']
_snake_case = convert_state_dict(__A , __A )
_snake_case , _snake_case = model.load_state_dict(__A , strict=__A )
assert missing_keys == ["text_model.embeddings.position_ids"]
assert (unexpected_keys == ["multi_label_logit_scale"]) or (len(__A ) == 0)
# verify result
_snake_case = CLIPProcessor.from_pretrained('openai/clip-vit-base-patch32' )
_snake_case = prepare_img()
_snake_case = processor(text=['a photo of a cat', 'a photo of a dog'] , images=__A , padding=__A , return_tensors='pt' )
with torch.no_grad():
_snake_case = model(**__A )
if model_name == "groupvit-gcc-yfcc":
_snake_case = torch.tensor([[1_3.3_5_2_3, 6.3_6_2_9]] )
elif model_name == "groupvit-gcc-redcaps":
_snake_case = torch.tensor([[1_6.1_8_7_3, 8.6_2_3_0]] )
else:
raise ValueError(F'Model name {model_name} not supported.' )
assert torch.allclose(outputs.logits_per_image , __A , atol=1e-3 )
processor.save_pretrained(__A )
model.save_pretrained(__A )
print('Successfully saved processor and model to' , __A )
if push_to_hub:
print('Pushing to the hub...' )
processor.push_to_hub(__A , organization='nielsr' )
model.push_to_hub(__A , organization='nielsr' )
if __name__ == "__main__":
lowercase : Any = argparse.ArgumentParser()
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to dump the processor and PyTorch model."
)
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to GroupViT checkpoint")
parser.add_argument(
"--model_name",
default="groupvit-gccy-fcc",
type=str,
help="Name of the model. Expecting either 'groupvit-gcc-yfcc' or 'groupvit-gcc-redcaps'",
)
parser.add_argument(
"--push_to_hub",
action="store_true",
help="Whether or not to push the converted model and processor to the 🤗 hub using the provided `model_name`.",
)
lowercase : Optional[int] = parser.parse_args()
convert_groupvit_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 357 |
'''simple docstring'''
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import TensorType, is_torch_available, logging
lowercase : Optional[Any] = logging.get_logger(__name__)
lowercase : List[str] = {
"Helsinki-NLP/opus-mt-en-de": "https://huggingface.co/Helsinki-NLP/opus-mt-en-de/resolve/main/config.json",
# See all Marian models at https://huggingface.co/models?filter=marian
}
class __UpperCAmelCase ( _lowerCamelCase ):
__lowercase = """marian"""
__lowercase = ["""past_key_values"""]
__lowercase = {"""num_attention_heads""": """encoder_attention_heads""", """hidden_size""": """d_model"""}
def __init__( self , lowerCAmelCase_=5_81_01 , lowerCAmelCase_=None , lowerCAmelCase_=10_24 , lowerCAmelCase_=12 , lowerCAmelCase_=40_96 , lowerCAmelCase_=16 , lowerCAmelCase_=12 , lowerCAmelCase_=40_96 , lowerCAmelCase_=16 , lowerCAmelCase_=0.0 , lowerCAmelCase_=0.0 , lowerCAmelCase_=True , lowerCAmelCase_=True , lowerCAmelCase_="gelu" , lowerCAmelCase_=10_24 , lowerCAmelCase_=0.1 , lowerCAmelCase_=0.0 , lowerCAmelCase_=0.0 , lowerCAmelCase_=0.02 , lowerCAmelCase_=5_81_00 , lowerCAmelCase_=False , lowerCAmelCase_=5_81_00 , lowerCAmelCase_=0 , lowerCAmelCase_=0 , lowerCAmelCase_=True , **lowerCAmelCase_ , ):
"""simple docstring"""
_snake_case = vocab_size
_snake_case = decoder_vocab_size or vocab_size
_snake_case = max_position_embeddings
_snake_case = d_model
_snake_case = encoder_ffn_dim
_snake_case = encoder_layers
_snake_case = encoder_attention_heads
_snake_case = decoder_ffn_dim
_snake_case = decoder_layers
_snake_case = decoder_attention_heads
_snake_case = dropout
_snake_case = attention_dropout
_snake_case = activation_dropout
_snake_case = activation_function
_snake_case = init_std
_snake_case = encoder_layerdrop
_snake_case = decoder_layerdrop
_snake_case = use_cache
_snake_case = encoder_layers
_snake_case = scale_embedding # scale factor will be sqrt(d_model) if True
_snake_case = share_encoder_decoder_embeddings
super().__init__(
pad_token_id=lowerCAmelCase_ , eos_token_id=lowerCAmelCase_ , is_encoder_decoder=lowerCAmelCase_ , decoder_start_token_id=lowerCAmelCase_ , forced_eos_token_id=lowerCAmelCase_ , **lowerCAmelCase_ , )
class __UpperCAmelCase ( _lowerCamelCase ):
@property
# Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.inputs
def lowerCamelCase ( self ):
"""simple docstring"""
if self.task in ["default", "seq2seq-lm"]:
_snake_case = OrderedDict(
[
('input_ids', {0: 'batch', 1: 'encoder_sequence'}),
('attention_mask', {0: 'batch', 1: 'encoder_sequence'}),
] )
if self.use_past:
_snake_case = {0: 'batch'}
_snake_case = {0: 'batch', 1: 'past_decoder_sequence + sequence'}
else:
_snake_case = {0: 'batch', 1: 'decoder_sequence'}
_snake_case = {0: 'batch', 1: 'decoder_sequence'}
if self.use_past:
self.fill_with_past_key_values_(lowerCAmelCase_ , direction='inputs' )
elif self.task == "causal-lm":
# TODO: figure this case out.
_snake_case = OrderedDict(
[
('input_ids', {0: 'batch', 1: 'encoder_sequence'}),
('attention_mask', {0: 'batch', 1: 'encoder_sequence'}),
] )
if self.use_past:
_snake_case , _snake_case = self.num_layers
for i in range(lowerCAmelCase_ ):
_snake_case = {0: 'batch', 2: 'past_sequence + sequence'}
_snake_case = {0: 'batch', 2: 'past_sequence + sequence'}
else:
_snake_case = OrderedDict(
[
('input_ids', {0: 'batch', 1: 'encoder_sequence'}),
('attention_mask', {0: 'batch', 1: 'encoder_sequence'}),
('decoder_input_ids', {0: 'batch', 1: 'decoder_sequence'}),
('decoder_attention_mask', {0: 'batch', 1: 'decoder_sequence'}),
] )
return common_inputs
@property
# Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.outputs
def lowerCamelCase ( self ):
"""simple docstring"""
if self.task in ["default", "seq2seq-lm"]:
_snake_case = super().outputs
else:
_snake_case = super(lowerCAmelCase_ , self ).outputs
if self.use_past:
_snake_case , _snake_case = self.num_layers
for i in range(lowerCAmelCase_ ):
_snake_case = {0: 'batch', 2: 'past_sequence + sequence'}
_snake_case = {0: 'batch', 2: 'past_sequence + sequence'}
return common_outputs
def lowerCamelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ = -1 , lowerCAmelCase_ = -1 , lowerCAmelCase_ = False , lowerCAmelCase_ = None , ):
"""simple docstring"""
_snake_case = self._generate_dummy_inputs_for_encoder_and_decoder(
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
# Generate decoder inputs
_snake_case = seq_length if not self.use_past else 1
_snake_case = self._generate_dummy_inputs_for_encoder_and_decoder(
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
_snake_case = {F'decoder_{name}': tensor for name, tensor in decoder_inputs.items()}
_snake_case = dict(**lowerCAmelCase_ , **lowerCAmelCase_ )
if self.use_past:
if not is_torch_available():
raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.' )
else:
import torch
_snake_case , _snake_case = common_inputs['input_ids'].shape
_snake_case = common_inputs['decoder_input_ids'].shape[1]
_snake_case , _snake_case = self.num_attention_heads
_snake_case = (
batch,
num_encoder_attention_heads,
encoder_seq_length,
self._config.hidden_size // num_encoder_attention_heads,
)
_snake_case = decoder_seq_length + 3
_snake_case = (
batch,
num_decoder_attention_heads,
decoder_past_length,
self._config.hidden_size // num_decoder_attention_heads,
)
_snake_case = torch.cat(
[common_inputs['decoder_attention_mask'], torch.ones(lowerCAmelCase_ , lowerCAmelCase_ )] , dim=1 )
_snake_case = []
# If the number of encoder and decoder layers are present in the model configuration, both are considered
_snake_case , _snake_case = self.num_layers
_snake_case = min(lowerCAmelCase_ , lowerCAmelCase_ )
_snake_case = max(lowerCAmelCase_ , lowerCAmelCase_ ) - min_num_layers
_snake_case = 'encoder' if num_encoder_layers > num_decoder_layers else 'decoder'
for _ in range(lowerCAmelCase_ ):
common_inputs["past_key_values"].append(
(
torch.zeros(lowerCAmelCase_ ),
torch.zeros(lowerCAmelCase_ ),
torch.zeros(lowerCAmelCase_ ),
torch.zeros(lowerCAmelCase_ ),
) )
# TODO: test this.
_snake_case = encoder_shape if remaining_side_name == 'encoder' else decoder_shape
for _ in range(lowerCAmelCase_ , lowerCAmelCase_ ):
common_inputs["past_key_values"].append((torch.zeros(lowerCAmelCase_ ), torch.zeros(lowerCAmelCase_ )) )
return common_inputs
def lowerCamelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ = -1 , lowerCAmelCase_ = -1 , lowerCAmelCase_ = False , lowerCAmelCase_ = None , ):
"""simple docstring"""
_snake_case = self._generate_dummy_inputs_for_encoder_and_decoder(
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
if self.use_past:
if not is_torch_available():
raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.' )
else:
import torch
_snake_case , _snake_case = common_inputs['input_ids'].shape
# Not using the same length for past_key_values
_snake_case = seqlen + 2
_snake_case , _snake_case = self.num_layers
_snake_case , _snake_case = self.num_attention_heads
_snake_case = (
batch,
num_encoder_attention_heads,
past_key_values_length,
self._config.hidden_size // num_encoder_attention_heads,
)
_snake_case = common_inputs['attention_mask'].dtype
_snake_case = torch.cat(
[common_inputs['attention_mask'], torch.ones(lowerCAmelCase_ , lowerCAmelCase_ , dtype=lowerCAmelCase_ )] , dim=1 )
_snake_case = [
(torch.zeros(lowerCAmelCase_ ), torch.zeros(lowerCAmelCase_ )) for _ in range(lowerCAmelCase_ )
]
return common_inputs
def lowerCamelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ = -1 , lowerCAmelCase_ = -1 , lowerCAmelCase_ = False , lowerCAmelCase_ = None , ):
"""simple docstring"""
_snake_case = compute_effective_axis_dimension(
lowerCAmelCase_ , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
_snake_case = tokenizer.num_special_tokens_to_add(lowerCAmelCase_ )
_snake_case = compute_effective_axis_dimension(
lowerCAmelCase_ , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=lowerCAmelCase_ )
# Generate dummy inputs according to compute batch and sequence
_snake_case = [' '.join([tokenizer.unk_token] ) * seq_length] * batch_size
_snake_case = dict(tokenizer(lowerCAmelCase_ , return_tensors=lowerCAmelCase_ ) )
return common_inputs
def lowerCamelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ = -1 , lowerCAmelCase_ = -1 , lowerCAmelCase_ = False , lowerCAmelCase_ = None , ):
"""simple docstring"""
if self.task in ["default", "seq2seq-lm"]:
_snake_case = self._generate_dummy_inputs_for_default_and_seqaseq_lm(
lowerCAmelCase_ , batch_size=lowerCAmelCase_ , seq_length=lowerCAmelCase_ , is_pair=lowerCAmelCase_ , framework=lowerCAmelCase_ )
else:
_snake_case = self._generate_dummy_inputs_for_causal_lm(
lowerCAmelCase_ , batch_size=lowerCAmelCase_ , seq_length=lowerCAmelCase_ , is_pair=lowerCAmelCase_ , framework=lowerCAmelCase_ )
return common_inputs
def lowerCamelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ):
"""simple docstring"""
if self.task in ["default", "seq2seq-lm"]:
_snake_case = super()._flatten_past_key_values_(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
else:
_snake_case = super(lowerCAmelCase_ , self )._flatten_past_key_values_(
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
@property
def lowerCamelCase ( self ):
"""simple docstring"""
return 1E-4
| 160 | 0 |
'''simple docstring'''
import warnings
from functools import wraps
from typing import Callable
def lowerCamelCase ( fn : Callable ) -> Callable:
    @wraps(fn )
    def _inner_fn(*args , **kwargs ):
        warnings.warn(
            (F'''\'{fn.__name__}\' is experimental and might be subject to breaking changes in the future.''') , UserWarning , )
        return fn(*args , **kwargs )
    return _inner_fn
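# Hedged usage sketch (hypothetical function name):
#
#     @lowerCamelCase
#     def new_feature():
#         ...
#
# Calling `new_feature()` then emits the "is experimental" warning before
# delegating to the wrapped function.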
| 239 |
'''simple docstring'''
import argparse
import fairseq
import torch
from transformers import UniSpeechSatConfig, UniSpeechSatForCTC, UniSpeechSatForPreTraining, logging
logging.set_verbosity_info()
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
'''post_extract_proj''': '''feature_projection.projection''',
'''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''',
'''self_attn.k_proj''': '''encoder.layers.*.attention.k_proj''',
'''self_attn.v_proj''': '''encoder.layers.*.attention.v_proj''',
'''self_attn.q_proj''': '''encoder.layers.*.attention.q_proj''',
'''self_attn.out_proj''': '''encoder.layers.*.attention.out_proj''',
'''self_attn_layer_norm''': '''encoder.layers.*.layer_norm''',
'''fc1''': '''encoder.layers.*.feed_forward.intermediate_dense''',
'''fc2''': '''encoder.layers.*.feed_forward.output_dense''',
'''final_layer_norm''': '''encoder.layers.*.final_layer_norm''',
'''encoder.layer_norm''': '''encoder.layer_norm''',
'''encoder.layer_norm_for_extract''': '''layer_norm_for_extract''',
'''w2v_model.layer_norm''': '''feature_projection.layer_norm''',
'''quantizer.weight_proj''': '''quantizer.weight_proj''',
'''quantizer.vars''': '''quantizer.codevectors''',
'''project_q''': '''project_q''',
'''final_proj''': '''project_hid''',
'''w2v_encoder.proj''': '''lm_head''',
'''label_embs_concat''': '''label_embeddings_concat''',
'''mask_emb''': '''masked_spec_embed''',
'''spk_proj''': '''speaker_proj''',
}
lowerCAmelCase__ = [
'''lm_head''',
'''quantizer.weight_proj''',
'''quantizer.codevectors''',
'''project_q''',
'''project_hid''',
'''label_embeddings_concat''',
'''speaker_proj''',
'''layer_norm_for_extract''',
]
def _A ( A__ , A__ , A__ , A__ , A__ ):
"""simple docstring"""
for attribute in key.split('''.''' ):
__lowercase = getattr(A__ , A__ )
if weight_type is not None:
__lowercase = getattr(A__ , A__ ).shape
else:
__lowercase = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
F"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
F" {value.shape} for {full_name}" )
if weight_type == "weight":
__lowercase = value
elif weight_type == "weight_g":
__lowercase = value
elif weight_type == "weight_v":
__lowercase = value
elif weight_type == "bias":
__lowercase = value
else:
__lowercase = value
logger.info(F"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}." )
def _A ( A__ , A__ ):
"""simple docstring"""
__lowercase = []
__lowercase = fairseq_model.state_dict()
__lowercase = hf_model.unispeech_sat.feature_extractor
for name, value in fairseq_dict.items():
__lowercase = False
if "conv_layers" in name:
load_conv_layer(
A__ , A__ , A__ , A__ , hf_model.config.feat_extract_norm == '''group''' , )
__lowercase = True
else:
for key, mapped_key in MAPPING.items():
__lowercase = '''unispeech_sat.''' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0]:
if "layer_norm_for_extract" in name and (".".join(name.split('''.''' )[:-1] ) != key):
# special case since naming is very similar
continue
__lowercase = True
if "*" in mapped_key:
__lowercase = name.split(A__ )[0].split('''.''' )[-2]
__lowercase = mapped_key.replace('''*''' , A__ )
if "weight_g" in name:
__lowercase = '''weight_g'''
elif "weight_v" in name:
__lowercase = '''weight_v'''
elif "bias" in name:
__lowercase = '''bias'''
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
__lowercase = '''weight'''
else:
__lowercase = None
set_recursively(A__ , A__ , A__ , A__ , A__ )
continue
if not is_used:
unused_weights.append(A__ )
logger.warning(F"Unused weights: {unused_weights}" )
def _A ( A__ , A__ , A__ , A__ , A__ ):
"""simple docstring"""
__lowercase = full_name.split('''conv_layers.''' )[-1]
__lowercase = name.split('''.''' )
__lowercase = int(items[0] )
__lowercase = int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
F"{full_name} has size {value.shape}, but"
F" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found." )
__lowercase = value
logger.info(F"Feat extract conv layer {layer_id} was initialized from {full_name}." )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
F"{full_name} has size {value.shape}, but"
F" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found." )
__lowercase = value
logger.info(F"Feat extract conv layer {layer_id} was initialized from {full_name}." )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
F"{full_name} has size {value.shape}, but"
F" {feature_extractor[layer_id].layer_norm.bias.data.shape} was found." )
__lowercase = value
logger.info(F"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
F"{full_name} has size {value.shape}, but"
F" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found." )
__lowercase = value
logger.info(F"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." )
else:
unused_weights.append(A__ )
@torch.no_grad()
def _A ( A__ , A__ , A__=None , A__=None , A__=True ):
"""simple docstring"""
if config_path is not None:
__lowercase = UniSpeechSatConfig.from_pretrained(A__ )
else:
__lowercase = UniSpeechSatConfig()
__lowercase = ''''''
if is_finetuned:
__lowercase = UniSpeechSatForCTC(A__ )
else:
__lowercase = UniSpeechSatForPreTraining(A__ )
__lowercase , __lowercase , __lowercase = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={'''data''': '''/'''.join(dict_path.split('''/''' )[:-1] )} )
__lowercase = model[0].eval()
recursively_load_weights(A__ , A__ )
hf_wavavec.save_pretrained(A__ )
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
parser.add_argument(
'''--not_finetuned''', action='''store_true''', help='''Whether the model to convert is a fine-tuned model or not'''
)
lowerCAmelCase__ = parser.parse_args()
convert_unispeech_sat_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
| 104 | 0 |
'''simple docstring'''
import argparse
from copy import deepcopy
import numpy as np
from datasets import ClassLabel, DatasetDict, load_dataset
from evaluate import load
from transformers import (
AutoModelForSequenceClassification,
AutoTokenizer,
DataCollatorWithPadding,
Trainer,
TrainerCallback,
TrainingArguments,
set_seed,
)
def a__ ( ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
parser.add_argument("""--model_ckpt""" , type=a__ , default="""microsoft/unixcoder-base-nine""" )
parser.add_argument("""--num_epochs""" , type=a__ , default=5 )
parser.add_argument("""--batch_size""" , type=a__ , default=6 )
parser.add_argument("""--gradient_accumulation_steps""" , type=a__ , default=1 )
parser.add_argument("""--freeze""" , type=a__ , default=a__ )
parser.add_argument("""--learning_rate""" , type=a__ , default=5E-4 )
parser.add_argument("""--seed""" , type=a__ , default=0 )
parser.add_argument("""--lr_scheduler_type""" , type=a__ , default="""cosine""" )
parser.add_argument("""--num_warmup_steps""" , type=a__ , default=10 )
parser.add_argument("""--weight_decay""" , type=a__ , default=0.01 )
parser.add_argument("""--output_dir""" , type=a__ , default="""./results""" )
return parser.parse_args()
UpperCAmelCase : List[Any] = load('accuracy')
def a__ ( a__ ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = eval_pred
__SCREAMING_SNAKE_CASE = np.argmax(a__ , axis=1 )
return metric.compute(predictions=a__ , references=a__ )
class lowerCAmelCase__ ( a ):
"""simple docstring"""
def __init__( self : Dict , __SCREAMING_SNAKE_CASE : Optional[Any] ) -> None:
"""simple docstring"""
super().__init__()
__SCREAMING_SNAKE_CASE = trainer
def UpperCAmelCase__ ( self : Dict , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : Tuple , **__SCREAMING_SNAKE_CASE : Any ) -> Dict:
"""simple docstring"""
if control.should_evaluate:
__SCREAMING_SNAKE_CASE = deepcopy(__SCREAMING_SNAKE_CASE )
self._trainer.evaluate(eval_dataset=self._trainer.train_dataset , metric_key_prefix="""train""" )
return control_copy
def a__ ( ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE = get_args()
set_seed(args.seed )
__SCREAMING_SNAKE_CASE = load_dataset("""codeparrot/codecomplex""" , split="""train""" )
__SCREAMING_SNAKE_CASE = dataset.train_test_split(test_size=0.2 )
__SCREAMING_SNAKE_CASE = train_test["""test"""].train_test_split(test_size=0.5 )
__SCREAMING_SNAKE_CASE = DatasetDict(
{
"""train""": train_test["""train"""],
"""test""": test_validation["""train"""],
"""valid""": test_validation["""test"""],
} )
print("""Loading tokenizer and model""" )
__SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained(args.model_ckpt )
__SCREAMING_SNAKE_CASE = tokenizer.eos_token
__SCREAMING_SNAKE_CASE = AutoModelForSequenceClassification.from_pretrained(args.model_ckpt , num_labels=7 )
__SCREAMING_SNAKE_CASE = model.config.eos_token_id
if args.freeze:
for param in model.roberta.parameters():
__SCREAMING_SNAKE_CASE = False
__SCREAMING_SNAKE_CASE = ClassLabel(num_classes=7 , names=list(set(train_test_validation["""train"""]["""complexity"""] ) ) )
def tokenize(a__ ):
__SCREAMING_SNAKE_CASE = tokenizer(example["""src"""] , truncation=a__ , max_length=10_24 )
__SCREAMING_SNAKE_CASE = labels.straint(example["""complexity"""] )
return {
"input_ids": inputs["input_ids"],
"attention_mask": inputs["attention_mask"],
"label": label,
}
__SCREAMING_SNAKE_CASE = train_test_validation.map(
a__ , batched=a__ , remove_columns=train_test_validation["""train"""].column_names , )
__SCREAMING_SNAKE_CASE = DataCollatorWithPadding(tokenizer=a__ )
__SCREAMING_SNAKE_CASE = TrainingArguments(
output_dir=args.output_dir , learning_rate=args.learning_rate , lr_scheduler_type=args.lr_scheduler_type , evaluation_strategy="""epoch""" , save_strategy="""epoch""" , logging_strategy="""epoch""" , per_device_train_batch_size=args.batch_size , per_device_eval_batch_size=args.batch_size , num_train_epochs=args.num_epochs , gradient_accumulation_steps=args.gradient_accumulation_steps , weight_decay=0.01 , metric_for_best_model="""accuracy""" , run_name="""complexity-java""" , report_to="""wandb""" , )
__SCREAMING_SNAKE_CASE = Trainer(
model=a__ , args=a__ , train_dataset=tokenized_datasets["""train"""] , eval_dataset=tokenized_datasets["""valid"""] , tokenizer=a__ , data_collator=a__ , compute_metrics=a__ , )
print("""Training...""" )
trainer.add_callback(CustomCallback(a__ ) )
trainer.train()
if __name__ == "__main__":
main() | 352 |
'''simple docstring'''
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.generation import DisjunctiveConstraint
@require_torch
class lowerCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase__ ( self : List[Any] ) -> List[str]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = [[1, 2, 4], [1, 2, 3, 4]]
__SCREAMING_SNAKE_CASE = DisjunctiveConstraint(__SCREAMING_SNAKE_CASE )
self.assertTrue(isinstance(dc.token_ids , __SCREAMING_SNAKE_CASE ) )
with self.assertRaises(__SCREAMING_SNAKE_CASE ):
DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]] ) )
with self.assertRaises(__SCREAMING_SNAKE_CASE ):
DisjunctiveConstraint([torch.LongTensor([1, 2, 4] ), torch.LongTensor([1, 2, 3, 4, 5] )] )
def UpperCAmelCase__ ( self : Union[str, Any] ) -> str:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = [[1, 2], [1, 2, 3, 4]]
with self.assertRaises(__SCREAMING_SNAKE_CASE ):
DisjunctiveConstraint(__SCREAMING_SNAKE_CASE ) # fails here
def UpperCAmelCase__ ( self : Optional[Any] ) -> Optional[Any]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = [[1, 2, 3], [1, 2, 4]]
__SCREAMING_SNAKE_CASE = DisjunctiveConstraint(__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = dc.update(1 )
__SCREAMING_SNAKE_CASE = stepped is True and completed is False and reset is False
self.assertTrue(__SCREAMING_SNAKE_CASE )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1] )
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = dc.update(2 )
__SCREAMING_SNAKE_CASE = stepped is True and completed is False and reset is False
self.assertTrue(__SCREAMING_SNAKE_CASE )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1, 2] )
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = dc.update(3 )
__SCREAMING_SNAKE_CASE = stepped is True and completed is True and reset is False
self.assertTrue(__SCREAMING_SNAKE_CASE )
self.assertTrue(dc.completed ) # Completed!
self.assertTrue(dc.current_seq == [1, 2, 3] )
def UpperCAmelCase__ ( self : int ) -> Union[str, Any]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]]
__SCREAMING_SNAKE_CASE = DisjunctiveConstraint(__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = dc.update(1 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1] )
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = dc.update(2 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1, 2] )
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = dc.update(4 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1, 2, 4] )
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = dc.update(5 )
self.assertTrue(dc.completed ) # Completed!
self.assertTrue(dc.current_seq == [1, 2, 4, 5] )
dc.reset()
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = dc.update(1 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.remaining() == 3 )
self.assertTrue(dc.current_seq == [1] )
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = dc.update(2 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.remaining() == 2 )
self.assertTrue(dc.current_seq == [1, 2] )
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = dc.update(5 )
self.assertTrue(dc.completed ) # Completed!
self.assertTrue(dc.remaining() == 0 )
self.assertTrue(dc.current_seq == [1, 2, 5] )
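# A usage sketch outside the test harness above (assuming a transformers
# version that exposes DisjunctiveConstraint; the token ids are illustrative):
#
#   constraint = DisjunctiveConstraint([[1, 2, 3], [1, 2, 4]])
#   stepped, completed, reset = constraint.update(1)  # advance by one token
#   # Passing constraints=[constraint] to model.generate() forces the output
#   # to contain one of the candidate sequences.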
| 331 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
__snake_case : Union[str, Any] = {
'configuration_clip': [
'CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP',
'CLIPConfig',
'CLIPOnnxConfig',
'CLIPTextConfig',
'CLIPVisionConfig',
],
'processing_clip': ['CLIPProcessor'],
'tokenization_clip': ['CLIPTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case : Union[str, Any] = ['CLIPTokenizerFast']
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case : Any = ['CLIPFeatureExtractor']
__snake_case : Optional[Any] = ['CLIPImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case : List[Any] = [
'CLIP_PRETRAINED_MODEL_ARCHIVE_LIST',
'CLIPModel',
'CLIPPreTrainedModel',
'CLIPTextModel',
'CLIPTextModelWithProjection',
'CLIPVisionModel',
'CLIPVisionModelWithProjection',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case : int = [
'TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFCLIPModel',
'TFCLIPPreTrainedModel',
'TFCLIPTextModel',
'TFCLIPVisionModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case : List[Any] = [
'FlaxCLIPModel',
'FlaxCLIPPreTrainedModel',
'FlaxCLIPTextModel',
'FlaxCLIPTextPreTrainedModel',
'FlaxCLIPVisionModel',
'FlaxCLIPVisionPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_clip import (
CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPConfig,
CLIPOnnxConfig,
CLIPTextConfig,
CLIPVisionConfig,
)
from .processing_clip import CLIPProcessor
from .tokenization_clip import CLIPTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_clip_fast import CLIPTokenizerFast
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_clip import CLIPFeatureExtractor
from .image_processing_clip import CLIPImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clip import (
CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPModel,
CLIPPreTrainedModel,
CLIPTextModel,
CLIPTextModelWithProjection,
CLIPVisionModel,
CLIPVisionModelWithProjection,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_clip import (
TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCLIPModel,
TFCLIPPreTrainedModel,
TFCLIPTextModel,
TFCLIPVisionModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_clip import (
FlaxCLIPModel,
FlaxCLIPPreTrainedModel,
FlaxCLIPTextModel,
FlaxCLIPTextPreTrainedModel,
FlaxCLIPVisionModel,
FlaxCLIPVisionPreTrainedModel,
)
else:
import sys
__snake_case : Dict = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) | 269 |
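# A usage sketch for the lazy CLIP package defined above: importing a symbol
# materializes only the submodule that provides it (assuming torch is
# installed when a torch-backed class is requested):
#
#   from transformers.models.clip import CLIPConfig  # loads configuration_clip
#   from transformers.models.clip import CLIPModel   # loads modeling_clip here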
'''simple docstring'''
import importlib
import os
import sys
# This is required to make the module import work (when the python process is running from the root of the repo)
sys.path.append('.')
def _a( UpperCamelCase__ : List[Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : str =test_file.split(os.path.sep )
if components[0:2] != ["tests", "models"]:
raise ValueError(
'''`test_file` should start with `tests/models/` (with `/` being the OS specific path separator). Got '''
f"{test_file} instead." )
SCREAMING_SNAKE_CASE__ : str =components[-1]
if not test_fn.endswith('''py''' ):
raise ValueError(f"`test_file` should be a python file. Got {test_fn} instead." )
if not test_fn.startswith('''test_modeling_''' ):
raise ValueError(
f"`test_file` should point to a file name of the form `test_modeling_*.py`. Got {test_fn} instead." )
SCREAMING_SNAKE_CASE__ : Any =components[:-1] + [test_fn.replace('''.py''', '''''' )]
SCREAMING_SNAKE_CASE__ : List[str] ='''.'''.join(UpperCamelCase__ )
return test_module_path
def _a( UpperCamelCase__ : Dict ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : int =get_module_path(UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ : Union[str, Any] =importlib.import_module(UpperCamelCase__ )
return test_module
def _a( UpperCamelCase__ : Tuple ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : List[str] =[]
SCREAMING_SNAKE_CASE__ : List[Any] =get_test_module(UpperCamelCase__ )
for attr in dir(UpperCamelCase__ ):
if attr.endswith('''ModelTester''' ):
tester_classes.append(getattr(UpperCamelCase__, UpperCamelCase__ ) )
# sort with class names
return sorted(UpperCamelCase__, key=lambda UpperCamelCase__ : UpperCamelCase__.__name__ )
def _a( UpperCamelCase__ : int ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : List[Any] =[]
SCREAMING_SNAKE_CASE__ : Any =get_test_module(UpperCamelCase__ )
for attr in dir(UpperCamelCase__ ):
SCREAMING_SNAKE_CASE__ : List[Any] =getattr(UpperCamelCase__, UpperCamelCase__ )
# (TF/Flax)ModelTesterMixin is also an attribute in specific model test module. Let's exclude them by checking
# `all_model_classes` is not empty (which also excludes other special classes).
SCREAMING_SNAKE_CASE__ : Any =getattr(UpperCamelCase__, '''all_model_classes''', [] )
if len(UpperCamelCase__ ) > 0:
test_classes.append(UpperCamelCase__ )
# sort with class names
return sorted(UpperCamelCase__, key=lambda UpperCamelCase__ : UpperCamelCase__.__name__ )
def _a( UpperCamelCase__ : int ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : List[Any] =get_test_classes(UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ : int =set()
for test_class in test_classes:
model_classes.update(test_class.all_model_classes )
# sort with class names
return sorted(UpperCamelCase__, key=lambda UpperCamelCase__ : UpperCamelCase__.__name__ )
def _a( UpperCamelCase__ : Optional[Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Any =test_class()
if hasattr(UpperCamelCase__, '''setUp''' ):
test.setUp()
SCREAMING_SNAKE_CASE__ : List[Any] =None
if hasattr(UpperCamelCase__, '''model_tester''' ):
# `(TF/Flax)ModelTesterMixin` has this attribute default to `None`. Let's skip this case.
if test.model_tester is not None:
SCREAMING_SNAKE_CASE__ : Union[str, Any] =test.model_tester.__class__
return model_tester
def _a( UpperCamelCase__ : Optional[Any], UpperCamelCase__ : Optional[Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Any =get_test_classes(UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ : str =[]
for test_class in test_classes:
if model_class in test_class.all_model_classes:
target_test_classes.append(UpperCamelCase__ )
# sort with class names
return sorted(UpperCamelCase__, key=lambda UpperCamelCase__ : UpperCamelCase__.__name__ )
def _a( UpperCamelCase__ : Optional[Any], UpperCamelCase__ : Any ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : List[str] =get_test_classes_for_model(UpperCamelCase__, UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ : Any =[]
for test_class in test_classes:
SCREAMING_SNAKE_CASE__ : List[str] =get_model_tester_from_test_class(UpperCamelCase__ )
if tester_class is not None:
tester_classes.append(UpperCamelCase__ )
# sort with class names
return sorted(UpperCamelCase__, key=lambda UpperCamelCase__ : UpperCamelCase__.__name__ )
def _a( UpperCamelCase__ : str ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Dict =get_test_classes(UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ : Optional[Any] ={test_class: get_model_tester_from_test_class(UpperCamelCase__ ) for test_class in test_classes}
return test_tester_mapping
def _a( UpperCamelCase__ : Union[str, Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Tuple =get_model_classes(UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ : int ={
model_class: get_test_classes_for_model(UpperCamelCase__, UpperCamelCase__ ) for model_class in model_classes
}
return model_test_mapping
def _a( UpperCamelCase__ : int ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Optional[Any] =get_model_classes(UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ : Any ={
model_class: get_tester_classes_for_model(UpperCamelCase__, UpperCamelCase__ ) for model_class in model_classes
}
return model_to_tester_mapping
def _a( UpperCamelCase__ : int ):
'''simple docstring'''
if isinstance(UpperCamelCase__, UpperCamelCase__ ):
return o
elif isinstance(UpperCamelCase__, UpperCamelCase__ ):
return o.__name__
elif isinstance(UpperCamelCase__, (list, tuple) ):
return [to_json(UpperCamelCase__ ) for x in o]
elif isinstance(UpperCamelCase__, UpperCamelCase__ ):
return {to_json(UpperCamelCase__ ): to_json(UpperCamelCase__ ) for k, v in o.items()}
else:
return o | 152 | 0 |
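# A usage sketch for the inspection helpers above (upstream they carry names
# such as get_model_to_tester_mapping and to_json; the test file path is a
# hypothetical example):
#
#   mapping = get_model_to_tester_mapping("tests/models/bert/test_modeling_bert.py")
#   print(to_json(mapping))  # e.g. {"BertModel": ["BertModelTester"], ...}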
import json
import os
from typing import Dict, List, Optional, Tuple
import regex as re
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
UpperCamelCase__ : Dict = logging.get_logger(__name__)
UpperCamelCase__ : Any = {
"""vocab_file""": """vocab.json""",
"""merges_file""": """merges.txt""",
"""tokenizer_config_file""": """tokenizer_config.json""",
}
UpperCamelCase__ : Any = {
"""vocab_file""": {
"""facebook/blenderbot_small-90M""": """https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json"""
},
"""merges_file""": {
"""facebook/blenderbot_small-90M""": """https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt"""
},
"""tokenizer_config_file""": {
"""facebook/blenderbot_small-90M""": (
"""https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json"""
)
},
}
UpperCamelCase__ : Optional[int] = {"""facebook/blenderbot_small-90M""": 512}
def SCREAMING_SNAKE_CASE__ ( snake_case_ ) -> Any:
"""simple docstring"""
a = set()
a = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
a = char
a = set(snake_case_ )
return pairs
class lowerCamelCase_ ( a_ ):
SCREAMING_SNAKE_CASE_ = VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE_ = PRETRAINED_VOCAB_FILES_MAP
SCREAMING_SNAKE_CASE_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
SCREAMING_SNAKE_CASE_ = ['input_ids', 'attention_mask']
def __init__( self : Any ,__lowerCamelCase : List[str] ,__lowerCamelCase : List[str] ,__lowerCamelCase : Dict="__start__" ,__lowerCamelCase : Any="__end__" ,__lowerCamelCase : List[Any]="__unk__" ,__lowerCamelCase : Optional[Any]="__null__" ,**__lowerCamelCase : Tuple ,):
'''simple docstring'''
super().__init__(unk_token=__lowerCamelCase ,bos_token=__lowerCamelCase ,eos_token=__lowerCamelCase ,pad_token=__lowerCamelCase ,**__lowerCamelCase )
with open(__lowerCamelCase ,encoding='''utf-8''' ) as vocab_handle:
a = json.load(__lowerCamelCase )
a = {v: k for k, v in self.encoder.items()}
with open(__lowerCamelCase ,encoding='''utf-8''' ) as merges_handle:
a = merges_handle.read().split('''\n''' )[1:-1]
a = [tuple(merge.split() ) for merge in merges]
a = dict(zip(__lowerCamelCase ,range(len(__lowerCamelCase ) ) ) )
a = {}
@property
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ):
'''simple docstring'''
return len(self.encoder )
def SCREAMING_SNAKE_CASE_ ( self : int ):
'''simple docstring'''
return dict(self.encoder ,**self.added_tokens_encoder )
def SCREAMING_SNAKE_CASE_ ( self : List[Any] ,__lowerCamelCase : str ):
'''simple docstring'''
if token in self.cache:
return self.cache[token]
a = re.sub('''([.,!?()])''' ,r''' \1''' ,__lowerCamelCase )
a = re.sub('''(\')''' ,r''' \1 ''' ,__lowerCamelCase )
a = re.sub(r'''\s{2,}''' ,''' ''' ,__lowerCamelCase )
if "\n" in token:
a = token.replace('''\n''' ,''' __newln__''' )
a = token.split(''' ''' )
a = []
for token in tokens:
if not len(__lowerCamelCase ):
continue
a = token.lower()
a = tuple(__lowerCamelCase )
a = tuple(list(word[:-1] ) + [word[-1] + '''</w>'''] )
a = get_pairs(__lowerCamelCase )
if not pairs:
words.append(__lowerCamelCase )
continue
while True:
a = min(__lowerCamelCase ,key=lambda __lowerCamelCase : self.bpe_ranks.get(__lowerCamelCase ,float('''inf''' ) ) )
if bigram not in self.bpe_ranks:
break
a , a = bigram
a = []
a = 0
while i < len(__lowerCamelCase ):
try:
a = word.index(__lowerCamelCase ,__lowerCamelCase )
new_word.extend(word[i:j] )
a = j
except ValueError:
new_word.extend(word[i:] )
break
if word[i] == first and i < len(__lowerCamelCase ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
a = tuple(__lowerCamelCase )
a = new_word
if len(__lowerCamelCase ) == 1:
break
else:
a = get_pairs(__lowerCamelCase )
a = '''@@ '''.join(__lowerCamelCase )
a = word[:-4]
a = word
words.append(__lowerCamelCase )
return " ".join(__lowerCamelCase )
def SCREAMING_SNAKE_CASE_ ( self : Dict ,__lowerCamelCase : str ):
'''simple docstring'''
a = []
a = re.findall(r'''\S+\n?''' ,__lowerCamelCase )
for token in words:
split_tokens.extend(list(self.bpe(__lowerCamelCase ).split(''' ''' ) ) )
return split_tokens
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ,__lowerCamelCase : str ):
'''simple docstring'''
a = token.lower()
return self.encoder.get(__lowerCamelCase ,self.encoder.get(self.unk_token ) )
def SCREAMING_SNAKE_CASE_ ( self : Tuple ,__lowerCamelCase : int ):
'''simple docstring'''
return self.decoder.get(__lowerCamelCase ,self.unk_token )
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ,__lowerCamelCase : List[str] ):
'''simple docstring'''
a = ''' '''.join(__lowerCamelCase ).replace('''@@ ''' ,'''''' ).strip()
return out_string
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ,__lowerCamelCase : str ,__lowerCamelCase : Optional[str] = None ):
'''simple docstring'''
if not os.path.isdir(__lowerCamelCase ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
a = os.path.join(
__lowerCamelCase ,(filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
a = os.path.join(
__lowerCamelCase ,(filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''merges_file'''] )
with open(__lowerCamelCase ,'''w''' ,encoding='''utf-8''' ) as f:
f.write(json.dumps(self.encoder ,indent=2 ,sort_keys=__lowerCamelCase ,ensure_ascii=__lowerCamelCase ) + '''\n''' )
a = 0
with open(__lowerCamelCase ,'''w''' ,encoding='''utf-8''' ) as writer:
writer.write('''#version: 0.2\n''' )
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() ,key=lambda __lowerCamelCase : __lowerCamelCase[1] ):
if index != token_index:
logger.warning(
F"""Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."""
''' Please check that the tokenizer is not corrupted!''' )
a = token_index
writer.write(''' '''.join(__lowerCamelCase ) + '''\n''' )
index += 1
return vocab_file, merge_file
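# A minimal usage sketch for the tokenizer above (upstream this class is
# BlenderbotSmallTokenizer; the hub checkpoint is assumed reachable):
#
#   tok = BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot_small-90M")
#   tok.tokenize("Sample text")  # lowercased BPE pieces, "@@" marks continuations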
| 330 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase__ : Optional[int] = logging.get_logger(__name__)
UpperCamelCase__ : Dict = {
"""facebook/vit-mae-base""": """https://huggingface.co/facebook/vit-mae-base/resolve/main/config.json""",
# See all ViT MAE models at https://huggingface.co/models?filter=vit-mae
}
class lowerCamelCase_ ( a_ ):
SCREAMING_SNAKE_CASE_ = 'vit_mae'
def __init__( self : Dict ,__lowerCamelCase : Any=7_68 ,__lowerCamelCase : Optional[Any]=12 ,__lowerCamelCase : List[str]=12 ,__lowerCamelCase : Optional[int]=30_72 ,__lowerCamelCase : int="gelu" ,__lowerCamelCase : Union[str, Any]=0.0 ,__lowerCamelCase : Optional[int]=0.0 ,__lowerCamelCase : Dict=0.02 ,__lowerCamelCase : List[Any]=1e-12 ,__lowerCamelCase : Dict=2_24 ,__lowerCamelCase : str=16 ,__lowerCamelCase : Union[str, Any]=3 ,__lowerCamelCase : Optional[Any]=True ,__lowerCamelCase : Dict=16 ,__lowerCamelCase : List[str]=5_12 ,__lowerCamelCase : int=8 ,__lowerCamelCase : int=20_48 ,__lowerCamelCase : Optional[Any]=0.75 ,__lowerCamelCase : int=False ,**__lowerCamelCase : Any ,):
'''simple docstring'''
super().__init__(**__lowerCamelCase )
a = hidden_size
a = num_hidden_layers
a = num_attention_heads
a = intermediate_size
a = hidden_act
a = hidden_dropout_prob
a = attention_probs_dropout_prob
a = initializer_range
a = layer_norm_eps
a = image_size
a = patch_size
a = num_channels
a = qkv_bias
a = decoder_num_attention_heads
a = decoder_hidden_size
a = decoder_num_hidden_layers
a = decoder_intermediate_size
a = mask_ratio
a = norm_pix_loss
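# A construction sketch for the config above: the keyword defaults reproduce
# the base ViT-MAE setup, and variants override them (upstream the class is
# exposed as ViTMAEConfig; the sizes below are illustrative assumptions):
#
#   config = ViTMAEConfig(hidden_size=384, num_hidden_layers=6, mask_ratio=0.5)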
| 330 | 1 |
class __lowerCamelCase :
def __init__( self: str,A_: list[int] ):
'''simple docstring'''
__UpperCamelCase = len(A_ )
__UpperCamelCase = [0] * len_array
if len_array > 0:
__UpperCamelCase = array[0]
for i in range(1,A_ ):
__UpperCamelCase = self.prefix_sum[i - 1] + array[i]
def snake_case_ ( self: Tuple,A_: int,A_: int ):
'''simple docstring'''
if start == 0:
return self.prefix_sum[end]
return self.prefix_sum[end] - self.prefix_sum[start - 1]
def snake_case_ ( self: Union[str, Any],A_: int ):
'''simple docstring'''
__UpperCamelCase = {0}
for sum_item in self.prefix_sum:
if sum_item - target_sum in sums:
return True
sums.add(A_ )
return False
if __name__ == "__main__":
import doctest
doctest.testmod()
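# A behavioral sketch for the class above: for the input [1, 2, 3, 4] the
# stored prefix sums are [1, 3, 6, 10], so the range query (start=1, end=3)
# returns 10 - 1 = 9, and the subarray-sum check for target 5 succeeds once
# prefix sum 6 is reached, since 6 - 5 = 1 is already in the running set.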
| 310 |
def _A ( _lowercase ) -> list[int]:
"""simple docstring"""
if length <= 0 or not isinstance(_lowercase , _lowercase ):
raise ValueError('Length must be a positive integer.' )
return [n * (2 * n - 1) for n in range(_lowercase )]
if __name__ == "__main__":
print(hexagonal_numbers(length=5))
print(hexagonal_numbers(length=1_0))
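# Worked check of the formula n * (2n - 1): with n running from 0, the first
# five values are 0, 1, 6, 15, 28, which is what hexagonal_numbers(length=5)
# prints above.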
| 310 | 1 |
'''simple docstring'''
def __snake_case( _lowerCAmelCase = 100 ) -> int:
snake_case__ : List[str] = 0
snake_case__ : Optional[Any] = 0
for i in range(1 , n + 1 ):
sum_of_squares += i**2
sum_of_ints += i
return sum_of_ints**2 - sum_of_squares
if __name__ == "__main__":
print(F"{solution() = }")
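# Closed-form cross-check (standard identities, not taken from this file):
# sum(1..n) = n(n + 1) / 2 and sum(1..n of i^2) = n(n + 1)(2n + 1) / 6, so
# for n = 100 the result is 5050**2 - 338350 = 25164150, matching the loop.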
| 43 |
'''simple docstring'''
import numpy as np
def __snake_case( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = 1e-12 , _lowerCAmelCase = 100 , ) -> tuple[float, np.ndarray]:
assert np.shape(_lowerCAmelCase )[0] == np.shape(_lowerCAmelCase )[1]
# Ensure proper dimensionality.
assert np.shape(_lowerCAmelCase )[0] == np.shape(_lowerCAmelCase )[0]
# Ensure inputs are either both complex or both real
assert np.iscomplexobj(_lowerCAmelCase ) == np.iscomplexobj(_lowerCAmelCase )
snake_case__ : str = np.iscomplexobj(_lowerCAmelCase )
if is_complex:
# Ensure complex input_matrix is Hermitian
assert np.array_equal(_lowerCAmelCase , input_matrix.conj().T )
# Set convergence to False. Will define convergence when we exceed max_iterations
# or when we have small changes from one iteration to next.
snake_case__ : Tuple = False
snake_case__ : Any = 0
snake_case__ : List[str] = 0
snake_case__ : Dict = 1e12
while not convergence:
# Multiple matrix by the vector.
snake_case__ : Optional[int] = np.dot(_lowerCAmelCase , _lowerCAmelCase )
# Normalize the resulting output vector.
snake_case__ : Optional[Any] = w / np.linalg.norm(_lowerCAmelCase )
# Find rayleigh quotient
# (faster than usual b/c we know vector is normalized already)
snake_case__ : List[str] = vector.conj().T if is_complex else vector.T
snake_case__ : Optional[int] = np.dot(_lowerCAmelCase , np.dot(_lowerCAmelCase , _lowerCAmelCase ) )
# Check convergence.
snake_case__ : Union[str, Any] = np.abs(lambda_ - lambda_previous ) / lambda_
iterations += 1
if error <= error_tol or iterations >= max_iterations:
snake_case__ : Dict = True
snake_case__ : Union[str, Any] = lambda_
if is_complex:
snake_case__ : int = np.real(lambda_ )
return lambda_, vector
def __snake_case( ) -> None:
snake_case__ : int = np.array([[41, 4, 20], [4, 26, 30], [20, 30, 50]] )
snake_case__ : Tuple = np.array([41, 4, 20] )
snake_case__ : Dict = real_input_matrix.astype(np.complexaaa )
snake_case__ : Optional[int] = np.triu(1J * complex_input_matrix , 1 )
complex_input_matrix += imag_matrix
complex_input_matrix += -1 * imag_matrix.T
snake_case__ : Dict = np.array([41, 4, 20] ).astype(np.complexaaa )
for problem_type in ["real", "complex"]:
if problem_type == "real":
snake_case__ : Dict = real_input_matrix
snake_case__ : Optional[Any] = real_vector
elif problem_type == "complex":
snake_case__ : Optional[Any] = complex_input_matrix
snake_case__ : Optional[Any] = complex_vector
# Our implementation.
snake_case__ , snake_case__ : Tuple = power_iteration(_lowerCAmelCase , _lowerCAmelCase )
# Numpy implementation.
# Get eigenvalues and eigenvectors using built-in numpy
# eigh (eigh used for symmetric or hermetian matrices).
snake_case__ , snake_case__ : Dict = np.linalg.eigh(_lowerCAmelCase )
# Last eigenvalue is the maximum one.
snake_case__ : Optional[int] = eigen_values[-1]
# Last column in this matrix is eigenvector corresponding to largest eigenvalue.
snake_case__ : Any = eigen_vectors[:, -1]
# Check our implementation and numpy gives close answers.
assert np.abs(eigen_value - eigen_value_max ) <= 1e-6
# Take absolute values element wise of each eigenvector.
# as they are only unique to a minus sign.
assert np.linalg.norm(np.abs(_lowerCAmelCase ) - np.abs(_lowerCAmelCase ) ) <= 1e-6
if __name__ == "__main__":
import doctest
doctest.testmod()
test_power_iteration()
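# A usage sketch for the power-iteration routine defined first above, called
# power_iteration here for readability (numpy only; the example matrix and
# the quoted results are worked assumptions):
#
#   import numpy as np
#   value, vec = power_iteration(np.array([[2.0, 1.0], [1.0, 2.0]]),
#                                np.array([1.0, 0.0]))
#   # value -> ~3.0 (dominant eigenvalue), vec -> ~[0.707, 0.707]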
| 43 | 1 |
'''simple docstring'''
import argparse
import logging
from collections import namedtuple
import torch
from model_bertabs import BertAbsSummarizer
from models.model_builder import AbsSummarizer # The authors' implementation
from transformers import BertTokenizer
logging.basicConfig(level=logging.INFO)
__snake_case =logging.getLogger(__name__)
__snake_case ="""Hello world! cécé herlolip"""
__snake_case =namedtuple(
"""BertAbsConfig""",
[
"""temp_dir""",
"""large""",
"""use_bert_emb""",
"""finetune_bert""",
"""encoder""",
"""share_emb""",
"""max_pos""",
"""enc_layers""",
"""enc_hidden_size""",
"""enc_heads""",
"""enc_ff_size""",
"""enc_dropout""",
"""dec_layers""",
"""dec_hidden_size""",
"""dec_heads""",
"""dec_ff_size""",
"""dec_dropout""",
],
)
def a_ ( lowerCamelCase : Tuple , lowerCamelCase : Dict ):
lowerCAmelCase = BertAbsConfig(
temp_dir='.' , finetune_bert=lowerCamelCase , large=lowerCamelCase , share_emb=lowerCamelCase , use_bert_emb=lowerCamelCase , encoder='bert' , max_pos=512 , enc_layers=6 , enc_hidden_size=512 , enc_heads=8 , enc_ff_size=512 , enc_dropout=0.2 , dec_layers=6 , dec_hidden_size=768 , dec_heads=8 , dec_ff_size=2048 , dec_dropout=0.2 , )
    lowerCAmelCase = torch.load(lowerCamelCase , map_location=lambda storage , loc : storage )
lowerCAmelCase = AbsSummarizer(lowerCamelCase , torch.device('cpu' ) , lowerCamelCase )
original.eval()
lowerCAmelCase = BertAbsSummarizer(lowerCamelCase , torch.device('cpu' ) )
new_model.eval()
# -------------------
# Convert the weights
# -------------------
logging.info('convert the model' )
new_model.bert.load_state_dict(original.bert.state_dict() )
new_model.decoder.load_state_dict(original.decoder.state_dict() )
new_model.generator.load_state_dict(original.generator.state_dict() )
# ----------------------------------
# Make sure the outputs are identical
# ----------------------------------
logging.info('Make sure that the models\' outputs are identical' )
lowerCAmelCase = BertTokenizer.from_pretrained('bert-base-uncased' )
# prepare the model inputs
lowerCAmelCase = tokenizer.encode('This is sample éàalj\'-.' )
encoder_input_ids.extend([tokenizer.pad_token_id] * (512 - len(lowerCamelCase )) )
lowerCAmelCase = torch.tensor(lowerCamelCase ).unsqueeze(0 )
lowerCAmelCase = tokenizer.encode('This is sample 3 éàalj\'-.' )
decoder_input_ids.extend([tokenizer.pad_token_id] * (512 - len(lowerCamelCase )) )
lowerCAmelCase = torch.tensor(lowerCamelCase ).unsqueeze(0 )
# failsafe to make sure the weights reset does not affect the
# loaded weights.
assert torch.max(torch.abs(original.generator[0].weight - new_model.generator[0].weight ) ) == 0
# forward pass
lowerCAmelCase = encoder_input_ids
lowerCAmelCase = decoder_input_ids
lowerCAmelCase = lowerCAmelCase = None
lowerCAmelCase = None
lowerCAmelCase = lowerCAmelCase = None
lowerCAmelCase = lowerCAmelCase = None
lowerCAmelCase = None
# The original model does not apply the generator layer immediately but rather in
# the beam search (where it combines softmax + linear layer). Since we already
# apply the softmax in our generation process we only apply the linear layer here.
# We make sure that the outputs of the full stack are identical
lowerCAmelCase = original(lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase )[0]
lowerCAmelCase = original.generator(lowerCamelCase )
lowerCAmelCase = new_model(
lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase )[0]
lowerCAmelCase = new_model.generator(lowerCamelCase )
lowerCAmelCase = torch.max(torch.abs(output_converted_model - output_original_model ) ).item()
print('Maximum absolute difference between weights: {:.2f}'.format(lowerCamelCase ) )
lowerCAmelCase = torch.max(torch.abs(output_converted_generator - output_original_generator ) ).item()
print('Maximum absolute difference between weights: {:.2f}'.format(lowerCamelCase ) )
lowerCAmelCase = torch.allclose(lowerCamelCase , lowerCamelCase , atol=1e-3 )
if are_identical:
logging.info('all weights are equal up to 1e-3' )
else:
raise ValueError('the weights are different. The new model is likely different from the original one.' )
# The model has been saved with torch.save(model) and this is bound to the exact
# directory structure. We save the state_dict instead.
logging.info('saving the model\'s state dictionary' )
torch.save(
new_model.state_dict() , './bertabs-finetuned-cnndm-extractive-abstractive-summarization/pytorch_model.bin' )
if __name__ == "__main__":
__snake_case =argparse.ArgumentParser()
parser.add_argument(
"""--bertabs_checkpoint_path""",
default=None,
type=str,
required=True,
help="""Path the official PyTorch dump.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default=None,
type=str,
required=True,
help="""Path to the output PyTorch model.""",
)
__snake_case =parser.parse_args()
convert_bertabs_checkpoints(
args.bertabs_checkpoint_path,
args.pytorch_dump_folder_path,
)
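# A minimal invocation sketch for the converter above (the checkpoint filename
# is a hypothetical placeholder for the authors' original BertAbs dump):
#
#   python convert_bertabs_original_pytorch_checkpoint.py \
#       --bertabs_checkpoint_path ./bertext_cnndm_transformer.pt \
#       --pytorch_dump_folder_path ./bertabs-finetuned-cnndm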
| 4 |
"""simple docstring"""
import inspect
import unittest
from transformers import DecisionTransformerConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import DecisionTransformerModel
from transformers.models.decision_transformer.modeling_decision_transformer import (
DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
class __lowercase :
'''simple docstring'''
def __init__( self , _UpperCAmelCase , _UpperCAmelCase=13 , _UpperCAmelCase=7 , _UpperCAmelCase=6 , _UpperCAmelCase=17 , _UpperCAmelCase=23 , _UpperCAmelCase=11 , _UpperCAmelCase=True , ):
__a : str = parent
__a : Any = batch_size
__a : Any = seq_length
__a : Optional[int] = act_dim
__a : Any = state_dim
__a : str = hidden_size
__a : List[str] = max_length
__a : Dict = is_training
def _lowerCamelCase ( self ):
__a : Dict = floats_tensor((self.batch_size, self.seq_length, self.state_dim) )
__a : Union[str, Any] = floats_tensor((self.batch_size, self.seq_length, self.act_dim) )
__a : int = floats_tensor((self.batch_size, self.seq_length, 1) )
__a : List[str] = floats_tensor((self.batch_size, self.seq_length, 1) )
__a : Optional[Any] = ids_tensor((self.batch_size, self.seq_length) , vocab_size=1000 )
__a : str = random_attention_mask((self.batch_size, self.seq_length) )
__a : Dict = self.get_config()
return (
config,
states,
actions,
rewards,
returns_to_go,
timesteps,
attention_mask,
)
def _lowerCamelCase ( self ):
return DecisionTransformerConfig(
batch_size=self.batch_size , seq_length=self.seq_length , act_dim=self.act_dim , state_dim=self.state_dim , hidden_size=self.hidden_size , max_length=self.max_length , )
def _lowerCamelCase ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , ):
__a : Union[str, Any] = DecisionTransformerModel(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
__a : int = model(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
self.parent.assertEqual(result.state_preds.shape , states.shape )
self.parent.assertEqual(result.action_preds.shape , actions.shape )
self.parent.assertEqual(result.return_preds.shape , returns_to_go.shape )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.seq_length * 3, self.hidden_size) ) # seq length *3 as there are 3 modelities: states, returns and actions
def _lowerCamelCase ( self ):
__a : Optional[Any] = self.prepare_config_and_inputs()
__a , __a , __a , __a , __a , __a , __a = config_and_inputs
__a : Dict = {
'''states''': states,
'''actions''': actions,
'''rewards''': rewards,
'''returns_to_go''': returns_to_go,
'''timesteps''': timesteps,
'''attention_mask''': attention_mask,
}
return config, inputs_dict
@require_torch
class __lowercase ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
__lowerCAmelCase = (DecisionTransformerModel,) if is_torch_available() else ()
__lowerCAmelCase = ()
__lowerCAmelCase = {'''feature-extraction''': DecisionTransformerModel} if is_torch_available() else {}
# Ignoring of a failing test from GenerationTesterMixin, as the model does not use inputs_ids
__lowerCAmelCase = False
# Ignoring of a failing tests from ModelTesterMixin, as the model does not implement these features
__lowerCAmelCase = False
__lowerCAmelCase = False
__lowerCAmelCase = False
__lowerCAmelCase = False
__lowerCAmelCase = False
__lowerCAmelCase = False
__lowerCAmelCase = False
__lowerCAmelCase = False
__lowerCAmelCase = False
def _lowerCamelCase ( self ):
__a : str = DecisionTransformerModelTester(self )
__a : Any = ConfigTester(self , config_class=_UpperCAmelCase , hidden_size=37 )
def _lowerCamelCase ( self ):
self.config_tester.run_common_tests()
def _lowerCamelCase ( self ):
__a : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_UpperCAmelCase )
@slow
def _lowerCamelCase ( self ):
for model_name in DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__a : Dict = DecisionTransformerModel.from_pretrained(_UpperCAmelCase )
self.assertIsNotNone(_UpperCAmelCase )
def _lowerCamelCase ( self ):
__a , __a : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__a : Optional[int] = model_class(_UpperCAmelCase )
__a : Any = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__a : Optional[int] = [*signature.parameters.keys()]
__a : Union[str, Any] = [
'''states''',
'''actions''',
'''rewards''',
'''returns_to_go''',
'''timesteps''',
'''attention_mask''',
]
self.assertListEqual(arg_names[: len(_UpperCAmelCase )] , _UpperCAmelCase )
@require_torch
class __lowercase ( unittest.TestCase ):
'''simple docstring'''
@slow
def _lowerCamelCase ( self ):
__a : Dict = 2 # number of steps of autoregressive prediction we will perform
__a : List[str] = 10 # defined by the RL environment, may be normalized
__a : Union[str, Any] = DecisionTransformerModel.from_pretrained('''edbeeching/decision-transformer-gym-hopper-expert''' )
__a : str = model.to(_UpperCAmelCase )
__a : str = model.config
torch.manual_seed(0 )
__a : List[str] = torch.randn(1 , 1 , config.state_dim ).to(device=_UpperCAmelCase , dtype=torch.floataa ) # env.reset()
__a : List[str] = torch.tensor(
[[0.2_4_2_7_9_3, -0.2_8_6_9_3_0_7_4, 0.8_7_4_2_6_1_3], [0.6_7_8_1_5_2_7_4, -0.0_8_1_0_1_0_8_5, -0.1_2_9_5_2_1_4_7]] , device=_UpperCAmelCase )
__a : str = torch.tensor(_UpperCAmelCase , device=_UpperCAmelCase , dtype=torch.floataa ).reshape(1 , 1 , 1 )
__a : str = state
__a : List[Any] = torch.zeros(1 , 0 , config.act_dim , device=_UpperCAmelCase , dtype=torch.floataa )
__a : List[Any] = torch.zeros(1 , 0 , device=_UpperCAmelCase , dtype=torch.floataa )
__a : int = torch.tensor(0 , device=_UpperCAmelCase , dtype=torch.long ).reshape(1 , 1 )
for step in range(_UpperCAmelCase ):
__a : Optional[int] = torch.cat([actions, torch.zeros(1 , 1 , config.act_dim , device=_UpperCAmelCase )] , dim=1 )
__a : Any = torch.cat([rewards, torch.zeros(1 , 1 , device=_UpperCAmelCase )] , dim=1 )
__a : Any = torch.ones(1 , states.shape[1] ).to(dtype=torch.long , device=states.device )
with torch.no_grad():
__a , __a , __a : int = model(
states=_UpperCAmelCase , actions=_UpperCAmelCase , rewards=_UpperCAmelCase , returns_to_go=_UpperCAmelCase , timesteps=_UpperCAmelCase , attention_mask=_UpperCAmelCase , return_dict=_UpperCAmelCase , )
self.assertEqual(action_pred.shape , actions.shape )
self.assertTrue(torch.allclose(action_pred[0, -1] , expected_outputs[step] , atol=1e-4 ) )
__a , __a , __a , __a : Dict = ( # env.step(action)
torch.randn(1 , 1 , config.state_dim ).to(device=_UpperCAmelCase , dtype=torch.floataa ),
1.0,
False,
{},
)
__a : int = action_pred[0, -1]
__a : int = torch.cat([states, state] , dim=1 )
__a : Any = returns_to_go[0, -1] - reward
__a : Dict = torch.cat([returns_to_go, pred_return.reshape(1 , 1 , 1 )] , dim=1 )
__a : Optional[Any] = torch.cat(
[timesteps, torch.ones((1, 1) , device=_UpperCAmelCase , dtype=torch.long ) * (step + 1)] , dim=1 ) | 160 | 0 |
'''simple docstring'''
import doctest
import sys
import warnings
from os.path import abspath, dirname, join
import _pytest
from transformers.testing_utils import HfDoctestModule, HfDocTestParser
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
__UpperCAmelCase = abspath(join(dirname(__file__), '''src'''))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action='''ignore''', category=FutureWarning)
def _snake_case ( A ) -> Any:
config.addinivalue_line(
'''markers''' , '''is_pt_tf_cross_test: mark test to run only when PT and TF interactions are tested''' )
config.addinivalue_line(
'''markers''' , '''is_pt_flax_cross_test: mark test to run only when PT and FLAX interactions are tested''' )
config.addinivalue_line('''markers''' , '''is_pipeline_test: mark test to run only when pipelines are tested''' )
config.addinivalue_line('''markers''' , '''is_staging_test: mark test to run only in the staging environment''' )
config.addinivalue_line('''markers''' , '''accelerate_tests: mark test that require accelerate''' )
config.addinivalue_line('''markers''' , '''tool_tests: mark the tool tests that are run on their specific schedule''' )
def _snake_case ( A ) -> Dict:
from transformers.testing_utils import pytest_addoption_shared
pytest_addoption_shared(A )
def _snake_case ( A ) -> List[str]:
from transformers.testing_utils import pytest_terminal_summary_main
lowerCAmelCase__ = terminalreporter.config.getoption('''--make-reports''' )
if make_reports:
pytest_terminal_summary_main(A , id=A )
def _snake_case ( A , A ) -> Dict:
# If no tests are collected, pytest exists with code 5, which makes the CI fail.
if exitstatus == 5:
lowerCAmelCase__ = 0
# Doctest custom flag to ignore output.
__UpperCAmelCase = doctest.register_optionflag('''IGNORE_RESULT''')
__UpperCAmelCase = doctest.OutputChecker
class a__ ( a__ ):
'''simple docstring'''
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) -> Optional[Any]:
if IGNORE_RESULT & optionflags:
return True
return OutputChecker.check_output(self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
__UpperCAmelCase = CustomOutputChecker
__UpperCAmelCase = HfDoctestModule
__UpperCAmelCase = HfDocTestParser | 356 |
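# A usage sketch for the IGNORE_RESULT doctest flag registered above: tagging
# an example with the flag makes the custom checker accept any output
# (do_side_effect is a hypothetical function):
#
#   >>> do_side_effect()  # doctest: +IGNORE_RESULT
#   'whatever is printed here is accepted'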
'''simple docstring'''
import argparse
import json
import os
import re
import torch
from transformers import BloomConfig, BloomModel
from transformers.file_utils import CONFIG_NAME, WEIGHTS_NAME
from transformers.utils import logging
logging.set_verbosity_info()
__UpperCAmelCase = [
'''word_embeddings_layernorm.weight''',
'''word_embeddings_layernorm.bias''',
'''input_layernorm.weight''',
'''input_layernorm.bias''',
'''post_attention_layernorm.weight''',
'''post_attention_layernorm.bias''',
'''self_attention.dense.bias''',
'''mlp.dense_4h_to_h.bias''',
'''ln_f.weight''',
'''ln_f.bias''',
]
__UpperCAmelCase = [
'''mlp.dense_4h_to_h.weight''',
'''self_attention.dense.weight''',
]
def _snake_case ( A , A ) -> Optional[Any]:
lowerCAmelCase__ = {
'''word_embeddings.weight''': '''word_embeddings.weight''',
'''word_embeddings.norm.weight''': '''word_embeddings_layernorm.weight''',
'''word_embeddings.norm.bias''': '''word_embeddings_layernorm.bias''',
'''weight''': '''ln_f.weight''',
'''bias''': '''ln_f.bias''',
}
if key in layer_rename_map:
return layer_rename_map[key]
# Handle transformer blocks
lowerCAmelCase__ = int(re.match(R'''.*layer_(\d*).*''' , A )[1] )
layer_number -= 3
return F"""h.{layer_number}.""" + key
def _snake_case ( A ) -> Optional[int]:
if dtype == torch.bool:
return 1 / 8
lowerCAmelCase__ = re.search(R'''[^\d](\d+)$''' , str(A ) )
if bit_search is None:
raise ValueError(F"""`dtype` is not a valid dtype: {dtype}.""" )
lowerCAmelCase__ = int(bit_search.groups()[0] )
return bit_size // 8
def _snake_case ( A , A , A , A , A ) -> Dict:
# Construct model
if bloom_config_file == "":
lowerCAmelCase__ = BloomConfig()
else:
lowerCAmelCase__ = BloomConfig.from_json_file(A )
if shard_model:
lowerCAmelCase__ = os.listdir(A )
lowerCAmelCase__ = sorted(filter(lambda A : A.startswith('''layer''' ) and "model_00" in A , A ) )
lowerCAmelCase__ = {'''weight_map''': {}, '''metadata''': {}}
lowerCAmelCase__ = 0
lowerCAmelCase__ = None
lowerCAmelCase__ = BloomConfig()
for j, file in enumerate(A ):
print('''Processing file: {}'''.format(A ) )
lowerCAmelCase__ = None
for i in range(A ):
# load all TP files
lowerCAmelCase__ = file.replace('''model_00''' , F"""model_0{i}""" )
lowerCAmelCase__ = torch.load(os.path.join(A , A ) , map_location='''cpu''' )
# Rename keys in the transformers names
lowerCAmelCase__ = list(temp.keys() )
for key in keys:
lowerCAmelCase__ = temp.pop(A )
if tensors is None:
lowerCAmelCase__ = temp
else:
for key in tensors.keys():
if any(key.endswith(end ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ):
# We average (sum and then divide) some weights across TP ranks (see https://github.com/bigscience-workshop/Megatron-DeepSpeed/blob/olruwase/sync_layer_norms/megatron/training.py#L425)
tensors[key] += temp[key]
else:
# Some weights are RowParallelLinear in Megatron-Deepspeed, others are ColumnParallel
lowerCAmelCase__ = 1 if any(text in key for text in WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN ) else 0
# We concatenate these weights across TP ranks
lowerCAmelCase__ = torch.cat([tensors[key], temp[key]] , dim=A )
# Divide by the number of TP the weights we want to average
for key in tensors.keys():
if any(key.endswith(end ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ):
lowerCAmelCase__ = tensors[key] / pretraining_tp
torch.save(
A , os.path.join(
A , '''pytorch_model_{}-of-{}.bin'''.format(str(j + 1 ).zfill(5 ) , str(len(A ) ).zfill(5 ) ) , ) , )
for key in tensors.keys():
lowerCAmelCase__ = tensors[key]
total_size += value.numel() * get_dtype_size(value.dtype )
if key not in index_dict["weight_map"]:
lowerCAmelCase__ = '''pytorch_model_{}-of-{}.bin'''.format(
str(j + 1 ).zfill(5 ) , str(len(A ) ).zfill(5 ) )
lowerCAmelCase__ = BloomConfig()
lowerCAmelCase__ = pytorch_dump_folder_path + '''/''' + CONFIG_NAME
lowerCAmelCase__ = total_size
with open(A , '''w''' , encoding='''utf-8''' ) as f:
f.write(config.to_json_string() )
with open(os.path.join(A , WEIGHTS_NAME + '''.index.json''' ) , '''w''' , encoding='''utf-8''' ) as f:
lowerCAmelCase__ = json.dumps(A , indent=2 , sort_keys=A ) + '''\n'''
f.write(A )
else:
lowerCAmelCase__ = BloomModel(A )
lowerCAmelCase__ = os.listdir(A )
lowerCAmelCase__ = sorted(filter(lambda A : A.startswith('''layer''' ) and "model_00" in A , A ) )
lowerCAmelCase__ = None
for i, file in enumerate(A ):
lowerCAmelCase__ = None
for i in range(A ):
# load all TP files
lowerCAmelCase__ = file.replace('''model_00''' , F"""model_0{i}""" )
lowerCAmelCase__ = torch.load(os.path.join(A , A ) , map_location='''cpu''' )
# Rename keys in the transformers names
lowerCAmelCase__ = list(temp.keys() )
for key in keys:
lowerCAmelCase__ = temp.pop(A )
if tensors is None:
lowerCAmelCase__ = temp
else:
for key in tensors.keys():
# We average (sum and then divide) some weights across TP ranks (see https://github.com/bigscience-workshop/Megatron-DeepSpeed/blob/olruwase/sync_layer_norms/megatron/training.py#L425)
if any(key.endswith(end ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ):
tensors[key] += temp[key]
else:
# Some weights are RowParallelLinear in Megatron-Deepspeed, others are ColumnParallel
lowerCAmelCase__ = 1 if any(text in key for text in WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN ) else 0
# We concatenate these weights across TP ranks
lowerCAmelCase__ = torch.cat([tensors[key], temp[key]] , dim=A )
# Divide by the number of TP the weights we want to average
for key in tensors.keys():
if any(key.endswith(end ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ):
lowerCAmelCase__ = tensors[key] / pretraining_tp
lowerCAmelCase__ = model.load_state_dict(A , strict=A )
assert not other_keys.unexpected_keys, F"""The keys {other_keys.unexpected_keys} are unexpected"""
if missing_keys is None:
lowerCAmelCase__ = set(other_keys.missing_keys )
else:
lowerCAmelCase__ = missing_keys.intersection(set(other_keys.missing_keys ) )
assert not missing_keys, F"""The keys {missing_keys} are missing"""
# Save pytorch-model
os.makedirs(A , exist_ok=A )
lowerCAmelCase__ = pytorch_dump_folder_path + '''/''' + WEIGHTS_NAME
lowerCAmelCase__ = pytorch_dump_folder_path + '''/''' + CONFIG_NAME
print(F"""Save PyTorch model to {pytorch_weights_dump_path} with dtype {config.torch_dtype}""" )
if config.torch_dtype is not None:
lowerCAmelCase__ = model.to(config.torch_dtype )
torch.save(model.state_dict() , A )
print(F"""Save configuration file to {pytorch_config_dump_path}""" )
with open(A , '''w''' , encoding='''utf-8''' ) as f:
f.write(config.to_json_string() )
if __name__ == "__main__":
__UpperCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--bloom_checkpoint_path''',
default=None,
type=str,
required=True,
help='''Path to the Megatron-LM checkpoint path.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--bloom_config_file''',
default='''''',
type=str,
help=(
'''An optional config json file corresponding to the pre-trained model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--shard_model''',
action='''store_true''',
help='''An optional setting to shard the output model \nThis enables sharding the converted checkpoint''',
)
parser.add_argument(
'''--pretraining_tp''',
default=4,
type=int,
help='''Pretraining TP rank that has been used when training the model in Megatron-LM \n''',
)
__UpperCAmelCase = parser.parse_args()
convert_bloom_checkpoint_to_pytorch(
args.bloom_checkpoint_path,
args.bloom_config_file,
args.pytorch_dump_folder_path,
args.shard_model,
args.pretraining_tp,
) | 228 | 0 |
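# A minimal invocation sketch for the converter above (paths are hypothetical
# placeholders for a Megatron-style BLOOM dump with layer_* shard files):
#
#   python convert_bloom_original_checkpoint_to_pytorch.py \
#       --bloom_checkpoint_path ./bloom-megatron \
#       --pytorch_dump_folder_path ./bloom-hf \
#       --shard_model --pretraining_tp 4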
import argparse
from copy import deepcopy
import numpy as np
from datasets import ClassLabel, DatasetDict, load_dataset
from evaluate import load
from transformers import (
AutoModelForSequenceClassification,
AutoTokenizer,
DataCollatorWithPadding,
Trainer,
TrainerCallback,
TrainingArguments,
set_seed,
)
def __lowerCamelCase ( ):
"""simple docstring"""
a :Any = argparse.ArgumentParser()
parser.add_argument('''--model_ckpt''' , type=UpperCAmelCase_ , default='''microsoft/unixcoder-base-nine''' )
parser.add_argument('''--num_epochs''' , type=UpperCAmelCase_ , default=5 )
parser.add_argument('''--batch_size''' , type=UpperCAmelCase_ , default=6 )
parser.add_argument('''--gradient_accumulation_steps''' , type=UpperCAmelCase_ , default=1 )
parser.add_argument('''--freeze''' , type=UpperCAmelCase_ , default=UpperCAmelCase_ )
parser.add_argument('''--learning_rate''' , type=UpperCAmelCase_ , default=5E-4 )
parser.add_argument('''--seed''' , type=UpperCAmelCase_ , default=0 )
parser.add_argument('''--lr_scheduler_type''' , type=UpperCAmelCase_ , default='''cosine''' )
parser.add_argument('''--num_warmup_steps''' , type=UpperCAmelCase_ , default=10 )
parser.add_argument('''--weight_decay''' , type=UpperCAmelCase_ , default=0.01 )
parser.add_argument('''--output_dir''' , type=UpperCAmelCase_ , default='''./results''' )
return parser.parse_args()
snake_case : str = load('''accuracy''')
def __lowerCamelCase ( UpperCAmelCase_ : List[Any] ):
"""simple docstring"""
a :List[Any] = eval_pred
a :List[Any] = np.argmax(UpperCAmelCase_ , axis=1 )
return metric.compute(predictions=UpperCAmelCase_ , references=UpperCAmelCase_ )
class _snake_case ( lowercase__ ):
def __init__( self , _lowerCamelCase ):
super().__init__()
a :List[Any] = trainer
def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , **_lowerCamelCase ):
if control.should_evaluate:
a :Optional[int] = deepcopy(_A )
self._trainer.evaluate(eval_dataset=self._trainer.train_dataset , metric_key_prefix='''train''' )
return control_copy
def __lowerCamelCase ( ):
"""simple docstring"""
a :str = get_args()
set_seed(args.seed )
a :str = load_dataset('''codeparrot/codecomplex''' , split='''train''' )
a :Tuple = dataset.train_test_split(test_size=0.2 )
a :str = train_test['test'].train_test_split(test_size=0.5 )
a :List[str] = DatasetDict(
{
'''train''': train_test['''train'''],
'''test''': test_validation['''train'''],
'''valid''': test_validation['''test'''],
} )
print('''Loading tokenizer and model''' )
a :Optional[Any] = AutoTokenizer.from_pretrained(args.model_ckpt )
a :Dict = tokenizer.eos_token
a :int = AutoModelForSequenceClassification.from_pretrained(args.model_ckpt , num_labels=7 )
a :List[Any] = model.config.eos_token_id
if args.freeze:
for param in model.roberta.parameters():
a :Any = False
a :Union[str, Any] = ClassLabel(num_classes=7 , names=list(set(train_test_validation['''train''']['''complexity'''] ) ) )
def tokenize(UpperCAmelCase_ : Dict ):
a :Union[str, Any] = tokenizer(example['''src'''] , truncation=UpperCAmelCase_ , max_length=1024 )
a :Optional[Any] = labels.straint(example['''complexity'''] )
return {
"input_ids": inputs["input_ids"],
"attention_mask": inputs["attention_mask"],
"label": label,
}
a :Optional[Any] = train_test_validation.map(
UpperCAmelCase_ , batched=UpperCAmelCase_ , remove_columns=train_test_validation['''train'''].column_names , )
a :Optional[Any] = DataCollatorWithPadding(tokenizer=UpperCAmelCase_ )
a :int = TrainingArguments(
output_dir=args.output_dir , learning_rate=args.learning_rate , lr_scheduler_type=args.lr_scheduler_type , evaluation_strategy='''epoch''' , save_strategy='''epoch''' , logging_strategy='''epoch''' , per_device_train_batch_size=args.batch_size , per_device_eval_batch_size=args.batch_size , num_train_epochs=args.num_epochs , gradient_accumulation_steps=args.gradient_accumulation_steps , weight_decay=0.01 , metric_for_best_model='''accuracy''' , run_name='''complexity-java''' , report_to='''wandb''' , )
a :Union[str, Any] = Trainer(
model=UpperCAmelCase_ , args=UpperCAmelCase_ , train_dataset=tokenized_datasets['''train'''] , eval_dataset=tokenized_datasets['''valid'''] , tokenizer=UpperCAmelCase_ , data_collator=UpperCAmelCase_ , compute_metrics=UpperCAmelCase_ , )
print('''Training...''' )
trainer.add_callback(CustomCallback(UpperCAmelCase_ ) )
trainer.train()
if __name__ == "__main__":
main()
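# A minimal invocation sketch for the training script above (a hypothetical
# command line; wandb credentials are assumed, since report_to="wandb"):
#
#   python train_complexity_predictor.py \
#       --model_ckpt microsoft/unixcoder-base-nine --num_epochs 5 --batch_size 6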
| 94 |
'''simple docstring'''
import re
import tempfile
from pathlib import Path
import pytest
import yaml
from datasets.utils.readme import ReadMe
# @pytest.fixture
# def example_yaml_structure():
lowerCAmelCase :List[str] = yaml.safe_load(
'''\
name: ""
allow_empty: false
allow_empty_text: true
subsections:
- name: "Dataset Card for X" # First-level markdown heading
allow_empty: false
allow_empty_text: true
subsections:
- name: "Table of Contents"
allow_empty: false
allow_empty_text: false
subsections: null
- name: "Dataset Description"
allow_empty: false
allow_empty_text: false
subsections:
- name: "Dataset Summary"
allow_empty: false
allow_empty_text: false
subsections: null
- name: "Supported Tasks and Leaderboards"
allow_empty: true
allow_empty_text: true
subsections: null
- name: Languages
allow_empty: false
allow_empty_text: true
subsections: null
'''
)
CORRECT_DICT = {
'''name''': '''root''',
'''text''': '''''',
'''is_empty_text''': True,
'''subsections''': [
{
'''name''': '''Dataset Card for My Dataset''',
'''text''': '''''',
'''is_empty_text''': True,
'''subsections''': [
{'''name''': '''Table of Contents''', '''text''': '''Some text here.''', '''is_empty_text''': False, '''subsections''': []},
{
'''name''': '''Dataset Description''',
'''text''': '''Some text here.''',
'''is_empty_text''': False,
'''subsections''': [
{
'''name''': '''Dataset Summary''',
'''text''': '''Some text here.''',
'''is_empty_text''': False,
'''subsections''': [],
},
{
'''name''': '''Supported Tasks and Leaderboards''',
'''text''': '''''',
'''is_empty_text''': True,
'''subsections''': [],
},
{'''name''': '''Languages''', '''text''': '''Language Text''', '''is_empty_text''': False, '''subsections''': []},
],
},
],
}
],
}
README_CORRECT = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
README_CORRECT_FOUR_LEVEL = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
#### Extra Ignored Subsection
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
CORRECT_DICT_FOUR_LEVEL = {
'''name''': '''root''',
'''text''': '''''',
'''is_empty_text''': True,
'''subsections''': [
{
'''name''': '''Dataset Card for My Dataset''',
'''text''': '''''',
'''is_empty_text''': True,
'''subsections''': [
{'''name''': '''Table of Contents''', '''text''': '''Some text here.''', '''is_empty_text''': False, '''subsections''': []},
{
'''name''': '''Dataset Description''',
'''text''': '''Some text here.''',
'''is_empty_text''': False,
'''subsections''': [
{
'''name''': '''Dataset Summary''',
'''text''': '''Some text here.''',
'''is_empty_text''': False,
'''subsections''': [
{
'''name''': '''Extra Ignored Subsection''',
'''text''': '''''',
'''is_empty_text''': True,
'''subsections''': [],
}
],
},
{
'''name''': '''Supported Tasks and Leaderboards''',
'''text''': '''''',
'''is_empty_text''': True,
'''subsections''': [],
},
{'''name''': '''Languages''', '''text''': '''Language Text''', '''is_empty_text''': False, '''subsections''': []},
],
},
],
}
],
}
README_EMPTY_YAML = '''\
---
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
EXPECTED_ERROR_README_EMPTY_YAML = (
'''The following issues were found for the README at `{path}`:\n-\tEmpty YAML markers are present in the README.'''
)
README_NO_YAML = '''\
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
EXPECTED_ERROR_README_NO_YAML = (
'''The following issues were found for the README at `{path}`:\n-\tNo YAML markers are present in the README.'''
)
README_INCORRECT_YAML = '''\
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
EXPECTED_ERROR_README_INCORRECT_YAML = '''The following issues were found for the README at `{path}`:\n-\tOnly the start of YAML tags present in the README.'''
README_MISSING_TEXT = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
EXPECTED_ERROR_README_MISSING_TEXT = '''The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Dataset Summary` but it is empty.\n-\tExpected some text in section `Dataset Summary` but it is empty (text in subsections are ignored).'''
README_NONE_SUBSECTION = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
'''
EXPECTED_ERROR_README_NONE_SUBSECTION = '''The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Dataset Card for My Dataset` but it is empty.\n-\tSection `Dataset Card for My Dataset` expected the following subsections: `Table of Contents`, `Dataset Description`. Found \'None\'.'''
README_MISSING_SUBSECTION = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Languages
Language Text
'''
EXPECTED_ERROR_README_MISSING_SUBSECTION = '''The following issues were found for the README at `{path}`:\n-\tSection `Dataset Description` is missing subsection: `Supported Tasks and Leaderboards`.'''
README_MISSING_CONTENT = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
'''
EXPECTED_ERROR_README_MISSING_CONTENT = '''The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Languages` but it is empty.'''
README_MISSING_FIRST_LEVEL = '''\
---
language:
- zh
- en
---
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
EXPECTED_ERROR_README_MISSING_FIRST_LEVEL = '''The following issues were found for the README at `{path}`:\n-\tThe README has no first-level headings. One heading is expected. Skipping further validation for this README.'''
README_MULTIPLE_WRONG_FIRST_LEVEL = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
# Dataset Card My Dataset
'''
EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL = '''The following issues were found for the README at `{path}`:\n-\tThe README has several first-level headings: `Dataset Card for My Dataset`, `Dataset Card My Dataset`. Only one heading is expected. Skipping further validation for this README.'''
README_WRONG_FIRST_LEVEL = '''\
---
language:
- zh
- en
---
# Dataset Card My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
EXPECTED_ERROR_README_WRONG_FIRST_LEVEL = '''The following issues were found for the README at `{path}`:\n-\tNo first-level heading starting with `Dataset Card for` found in README. Skipping further validation for this README.'''
README_EMPTY = ''''''
EXPECTED_ERROR_README_EMPTY = '''The following issues were found for the README at `{path}`:\n-\tThe README has no first-level headings. One heading is expected. Skipping further validation for this README.\n-\tNo YAML markers are present in the README.'''
README_MULTIPLE_SAME_HEADING_1 = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1 = '''The following issues were found while parsing the README at `{path}`:\n-\tMultiple sections with the same heading `Dataset Card for My Dataset` have been found. Please keep only one of these sections.'''
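# The parametrized tests below exercise ReadMe.from_string / from_readme against
# each README variant above, covering both the parsed dict and the error paths.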
@pytest.mark.parametrize(
'readme_md, expected_dict' , [
(README_CORRECT, CORRECT_DICT),
(README_CORRECT_FOUR_LEVEL, CORRECT_DICT_FOUR_LEVEL),
] , )
def test_readme_from_string_correct(readme_md , expected_dict ):
    assert ReadMe.from_string(readme_md , example_yaml_structure ).to_dict() == expected_dict
@pytest.mark.parametrize(
'readme_md, expected_error' , [
(README_NO_YAML, EXPECTED_ERROR_README_NO_YAML),
(README_EMPTY_YAML, EXPECTED_ERROR_README_EMPTY_YAML),
(README_INCORRECT_YAML, EXPECTED_ERROR_README_INCORRECT_YAML),
(README_EMPTY, EXPECTED_ERROR_README_EMPTY),
(README_NONE_SUBSECTION, EXPECTED_ERROR_README_NONE_SUBSECTION),
(README_MISSING_FIRST_LEVEL, EXPECTED_ERROR_README_MISSING_FIRST_LEVEL),
(README_MISSING_SUBSECTION, EXPECTED_ERROR_README_MISSING_SUBSECTION),
(README_MISSING_TEXT, EXPECTED_ERROR_README_MISSING_TEXT),
(README_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_WRONG_FIRST_LEVEL),
(README_MULTIPLE_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL),
(README_MISSING_CONTENT, EXPECTED_ERROR_README_MISSING_CONTENT),
] , )
def test_readme_from_string_validation_errors(readme_md , expected_error ):
    with pytest.raises(ValueError , match=re.escape(expected_error.format(path='root' ) ) ):
        readme = ReadMe.from_string(readme_md , example_yaml_structure )
        readme.validate()
@pytest.mark.parametrize(
'readme_md, expected_error' , [
(README_MULTIPLE_SAME_HEADING_1, EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1),
] , )
def test_readme_from_string_parsing_errors(readme_md , expected_error ):
    with pytest.raises(ValueError , match=re.escape(expected_error.format(path='root' ) ) ):
        ReadMe.from_string(readme_md , example_yaml_structure )
@pytest.mark.parametrize(
'readme_md,' , [
(README_MULTIPLE_SAME_HEADING_1),
] , )
def test_readme_from_string_suppress_parsing_errors(readme_md ):
    ReadMe.from_string(readme_md , example_yaml_structure , suppress_parsing_errors=True )
@pytest.mark.parametrize(
'readme_md, expected_dict' , [
(README_CORRECT, CORRECT_DICT),
(README_CORRECT_FOUR_LEVEL, CORRECT_DICT_FOUR_LEVEL),
] , )
def test_readme_from_readme_correct(readme_md , expected_dict ):
    with tempfile.TemporaryDirectory() as tmp_dir:
        path = Path(tmp_dir ) / 'README.md'
        with open(path , 'w+' ) as readme_file:
            readme_file.write(readme_md )
        out = ReadMe.from_readme(path , example_yaml_structure ).to_dict()
        assert out["name"] == path
        assert out["text"] == ""
        assert out["is_empty_text"]
        assert out["subsections"] == expected_dict["subsections"]
@pytest.mark.parametrize(
'readme_md, expected_error' , [
(README_NO_YAML, EXPECTED_ERROR_README_NO_YAML),
(README_EMPTY_YAML, EXPECTED_ERROR_README_EMPTY_YAML),
(README_INCORRECT_YAML, EXPECTED_ERROR_README_INCORRECT_YAML),
(README_EMPTY, EXPECTED_ERROR_README_EMPTY),
(README_NONE_SUBSECTION, EXPECTED_ERROR_README_NONE_SUBSECTION),
(README_MISSING_FIRST_LEVEL, EXPECTED_ERROR_README_MISSING_FIRST_LEVEL),
(README_MISSING_SUBSECTION, EXPECTED_ERROR_README_MISSING_SUBSECTION),
(README_MISSING_TEXT, EXPECTED_ERROR_README_MISSING_TEXT),
(README_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_WRONG_FIRST_LEVEL),
(README_MULTIPLE_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL),
(README_MISSING_CONTENT, EXPECTED_ERROR_README_MISSING_CONTENT),
] , )
def test_readme_from_readme_error(readme_md , expected_error ):
    with tempfile.TemporaryDirectory() as tmp_dir:
        path = Path(tmp_dir ) / 'README.md'
        with open(path , 'w+' ) as readme_file:
            readme_file.write(readme_md )
        expected_error = expected_error.format(path=path )
        with pytest.raises(ValueError , match=re.escape(expected_error ) ):
            readme = ReadMe.from_readme(path , example_yaml_structure )
            readme.validate()
@pytest.mark.parametrize(
'readme_md, expected_error' , [
(README_MULTIPLE_SAME_HEADING_1, EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1),
] , )
def test_readme_from_readme_parsing_errors(readme_md , expected_error ):
    with tempfile.TemporaryDirectory() as tmp_dir:
        path = Path(tmp_dir ) / 'README.md'
        with open(path , 'w+' ) as readme_file:
            readme_file.write(readme_md )
        expected_error = expected_error.format(path=path )
        with pytest.raises(ValueError , match=re.escape(expected_error ) ):
            ReadMe.from_readme(path , example_yaml_structure )
@pytest.mark.parametrize(
'readme_md,' , [
(README_MULTIPLE_SAME_HEADING_1),
] , )
def test_readme_from_readme_suppress_parsing_errors(readme_md ):
    with tempfile.TemporaryDirectory() as tmp_dir:
        path = Path(tmp_dir ) / 'README.md'
        with open(path , 'w+' ) as readme_file:
            readme_file.write(readme_md )
        ReadMe.from_readme(path , example_yaml_structure , suppress_parsing_errors=True ) | 331 | 0 |
"""simple docstring"""
import contextlib
import csv
import json
import os
import sqlite3
import tarfile
import textwrap
import zipfile
import pyarrow as pa
import pyarrow.parquet as pq
import pytest
import datasets
import datasets.config
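# Session-scoped fixtures for the datasets test suite: each one writes a small
# dataset (or an archive wrapping one) into a pytest tmp directory.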
@pytest.fixture(scope="session" )
def _lowerCamelCase( ):
__a = 1_0
__a = datasets.Features(
{
"tokens": datasets.Sequence(datasets.Value("string" ) ),
"labels": datasets.Sequence(datasets.ClassLabel(names=["negative", "positive"] ) ),
"answers": datasets.Sequence(
{
"text": datasets.Value("string" ),
"answer_start": datasets.Value("int32" ),
} ),
"id": datasets.Value("int64" ),
} )
__a = datasets.Dataset.from_dict(
{
"tokens": [["foo"] * 5] * n,
"labels": [[1] * 5] * n,
"answers": [{"answer_start": [9_7], "text": ["1976"]}] * 1_0,
"id": list(range(_a ) ),
} , features=_a , )
return dataset
@pytest.fixture(scope="session" )
def _lowerCamelCase( a , a ):
__a = str(tmp_path_factory.mktemp("data" ) / "file.arrow" )
dataset.map(cache_file_name=_a )
return filename
# FILE_CONTENT + files
SCREAMING_SNAKE_CASE__:List[str] = """\
Text data.
Second line of data."""
@pytest.fixture(scope="session" )
def _lowerCamelCase( a ):
__a = tmp_path_factory.mktemp("data" ) / """file.txt"""
__a = FILE_CONTENT
with open(_a , "w" ) as f:
f.write(_a )
return filename
@pytest.fixture(scope="session" )
def _lowerCamelCase( a ):
    import bz2
__a = tmp_path_factory.mktemp("data" ) / """file.txt.bz2"""
__a = bytes(_a , "utf-8" )
with bza.open(_a , "wb" ) as f:
f.write(_a )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase( a ):
import gzip
__a = str(tmp_path_factory.mktemp("data" ) / "file.txt.gz" )
__a = bytes(_a , "utf-8" )
with gzip.open(_a , "wb" ) as f:
f.write(_a )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase( a ):
if datasets.config.LZ4_AVAILABLE:
        import lz4.frame
__a = tmp_path_factory.mktemp("data" ) / """file.txt.lz4"""
__a = bytes(_a , "utf-8" )
with lza.frame.open(_a , "wb" ) as f:
f.write(_a )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase( a , a ):
if datasets.config.PY7ZR_AVAILABLE:
        import py7zr
__a = tmp_path_factory.mktemp("data" ) / """file.txt.7z"""
with pyazr.SevenZipFile(_a , "w" ) as archive:
archive.write(_a , arcname=os.path.basename(_a ) )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase( a , a ):
import tarfile
__a = tmp_path_factory.mktemp("data" ) / """file.txt.tar"""
with tarfile.TarFile(_a , "w" ) as f:
f.add(_a , arcname=os.path.basename(_a ) )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase( a ):
import lzma
__a = tmp_path_factory.mktemp("data" ) / """file.txt.xz"""
__a = bytes(_a , "utf-8" )
with lzma.open(_a , "wb" ) as f:
f.write(_a )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase( a , a ):
import zipfile
__a = tmp_path_factory.mktemp("data" ) / """file.txt.zip"""
with zipfile.ZipFile(_a , "w" ) as f:
f.write(_a , arcname=os.path.basename(_a ) )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase( a ):
if datasets.config.ZSTANDARD_AVAILABLE:
import zstandard as zstd
__a = tmp_path_factory.mktemp("data" ) / """file.txt.zst"""
__a = bytes(_a , "utf-8" )
with zstd.open(_a , "wb" ) as f:
f.write(_a )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase( a ):
__a = tmp_path_factory.mktemp("data" ) / """file.xml"""
__a = textwrap.dedent(
"\\n <?xml version=\"1.0\" encoding=\"UTF-8\" ?>\n <tmx version=\"1.4\">\n <header segtype=\"sentence\" srclang=\"ca\" />\n <body>\n <tu>\n <tuv xml:lang=\"ca\"><seg>Contingut 1</seg></tuv>\n <tuv xml:lang=\"en\"><seg>Content 1</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang=\"ca\"><seg>Contingut 2</seg></tuv>\n <tuv xml:lang=\"en\"><seg>Content 2</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang=\"ca\"><seg>Contingut 3</seg></tuv>\n <tuv xml:lang=\"en\"><seg>Content 3</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang=\"ca\"><seg>Contingut 4</seg></tuv>\n <tuv xml:lang=\"en\"><seg>Content 4</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang=\"ca\"><seg>Contingut 5</seg></tuv>\n <tuv xml:lang=\"en\"><seg>Content 5</seg></tuv>\n </tu>\n </body>\n </tmx>" )
with open(_a , "w" ) as f:
f.write(_a )
return filename
SCREAMING_SNAKE_CASE__:List[str] = [
{"""col_1""": """0""", """col_2""": 0, """col_3""": 0.0},
{"""col_1""": """1""", """col_2""": 1, """col_3""": 1.0},
{"""col_1""": """2""", """col_2""": 2, """col_3""": 2.0},
{"""col_1""": """3""", """col_2""": 3, """col_3""": 3.0},
]
SCREAMING_SNAKE_CASE__:Any = [
{"""col_1""": """4""", """col_2""": 4, """col_3""": 4.0},
{"""col_1""": """5""", """col_2""": 5, """col_3""": 5.0},
]
SCREAMING_SNAKE_CASE__:Dict = {
"""col_1""": ["""0""", """1""", """2""", """3"""],
"""col_2""": [0, 1, 2, 3],
"""col_3""": [0.0, 1.0, 2.0, 3.0],
}
SCREAMING_SNAKE_CASE__:Union[str, Any] = [
{"""col_3""": 0.0, """col_1""": """0""", """col_2""": 0},
{"""col_3""": 1.0, """col_1""": """1""", """col_2""": 1},
]
SCREAMING_SNAKE_CASE__:Optional[Any] = [
{"""col_1""": """s0""", """col_2""": 0, """col_3""": 0.0},
{"""col_1""": """s1""", """col_2""": 1, """col_3""": 1.0},
{"""col_1""": """s2""", """col_2""": 2, """col_3""": 2.0},
{"""col_1""": """s3""", """col_2""": 3, """col_3""": 3.0},
]
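# The DATA* constants above are the canonical rows that every file-format
# fixture below (csv/json/parquet/sqlite/text) serializes, so loader tests can
# compare parsed output against a known ground truth.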
@pytest.fixture(scope="session" )
def _lowerCamelCase( ):
return DATA_DICT_OF_LISTS
@pytest.fixture(scope="session" )
def _lowerCamelCase( a ):
__a = datasets.Dataset.from_dict(_a )
__a = str(tmp_path_factory.mktemp("data" ) / "dataset.arrow" )
dataset.map(cache_file_name=_a )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase( a ):
__a = str(tmp_path_factory.mktemp("data" ) / "dataset.sqlite" )
    with contextlib.closing(sqlite3.connect(_a ) ) as con:
__a = con.cursor()
cur.execute("CREATE TABLE dataset(col_1 text, col_2 int, col_3 real)" )
for item in DATA:
cur.execute("INSERT INTO dataset(col_1, col_2, col_3) VALUES (?, ?, ?)" , tuple(item.values() ) )
con.commit()
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase( a ):
__a = str(tmp_path_factory.mktemp("data" ) / "dataset.csv" )
with open(_a , "w" , newline="" ) as f:
__a = csv.DictWriter(_a , fieldnames=["col_1", "col_2", "col_3"] )
writer.writeheader()
for item in DATA:
writer.writerow(_a )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase( a ):
__a = str(tmp_path_factory.mktemp("data" ) / "dataset2.csv" )
with open(_a , "w" , newline="" ) as f:
__a = csv.DictWriter(_a , fieldnames=["col_1", "col_2", "col_3"] )
writer.writeheader()
for item in DATA:
writer.writerow(_a )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase( a , a ):
    import bz2
__a = tmp_path_factory.mktemp("data" ) / """dataset.csv.bz2"""
with open(_a , "rb" ) as f:
__a = f.read()
# data = bytes(FILE_CONTENT, "utf-8")
with bza.open(_a , "wb" ) as f:
f.write(_a )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase( a , a , a ):
__a = tmp_path_factory.mktemp("data" ) / """dataset.csv.zip"""
with zipfile.ZipFile(_a , "w" ) as f:
f.write(_a , arcname=os.path.basename(_a ) )
f.write(_a , arcname=os.path.basename(_a ) )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase( a , a , a ):
__a = tmp_path_factory.mktemp("data" ) / """dataset.csv.zip"""
with zipfile.ZipFile(_a , "w" ) as f:
f.write(_a , arcname=os.path.basename(csv_path.replace(".csv" , ".CSV" ) ) )
f.write(_a , arcname=os.path.basename(csva_path.replace(".csv" , ".CSV" ) ) )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase( a , a , a ):
__a = tmp_path_factory.mktemp("data" ) / """dataset_with_dir.csv.zip"""
with zipfile.ZipFile(_a , "w" ) as f:
f.write(_a , arcname=os.path.join("main_dir" , os.path.basename(_a ) ) )
f.write(_a , arcname=os.path.join("main_dir" , os.path.basename(_a ) ) )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase( a ):
__a = str(tmp_path_factory.mktemp("data" ) / "dataset.parquet" )
__a = pa.schema(
{
"col_1": pa.string(),
"col_2": pa.intaa(),
"col_3": pa.floataa(),
} )
with open(_a , "wb" ) as f:
__a = pq.ParquetWriter(_a , schema=_a )
__a = pa.Table.from_pydict({k: [DATA[i][k] for i in range(len(_a ) )] for k in DATA[0]} , schema=_a )
writer.write_table(_a )
writer.close()
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase( a ):
__a = str(tmp_path_factory.mktemp("data" ) / "dataset.json" )
__a = {"""data""": DATA}
with open(_a , "w" ) as f:
json.dump(_a , _a )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase( a ):
__a = str(tmp_path_factory.mktemp("data" ) / "dataset.json" )
__a = {"""data""": DATA_DICT_OF_LISTS}
with open(_a , "w" ) as f:
json.dump(_a , _a )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase( a ):
__a = str(tmp_path_factory.mktemp("data" ) / "dataset.jsonl" )
with open(_a , "w" ) as f:
for item in DATA:
f.write(json.dumps(_a ) + "\n" )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase( a ):
__a = str(tmp_path_factory.mktemp("data" ) / "dataset2.jsonl" )
with open(_a , "w" ) as f:
for item in DATA:
f.write(json.dumps(_a ) + "\n" )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase( a ):
__a = str(tmp_path_factory.mktemp("data" ) / "dataset_312.jsonl" )
with open(_a , "w" ) as f:
for item in DATA_312:
f.write(json.dumps(_a ) + "\n" )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase( a ):
__a = str(tmp_path_factory.mktemp("data" ) / "dataset-str.jsonl" )
with open(_a , "w" ) as f:
for item in DATA_STR:
f.write(json.dumps(_a ) + "\n" )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase( a , a ):
import gzip
__a = str(tmp_path_factory.mktemp("data" ) / "dataset.txt.gz" )
with open(_a , "rb" ) as orig_file:
with gzip.open(_a , "wb" ) as zipped_file:
zipped_file.writelines(_a )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase( a , a ):
import gzip
__a = str(tmp_path_factory.mktemp("data" ) / "dataset.jsonl.gz" )
with open(_a , "rb" ) as orig_file:
with gzip.open(_a , "wb" ) as zipped_file:
zipped_file.writelines(_a )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase( a , a , a ):
__a = tmp_path_factory.mktemp("data" ) / """dataset.jsonl.zip"""
with zipfile.ZipFile(_a , "w" ) as f:
f.write(_a , arcname=os.path.basename(_a ) )
f.write(_a , arcname=os.path.basename(_a ) )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase( a , a , a , a ):
__a = tmp_path_factory.mktemp("data" ) / """dataset_nested.jsonl.zip"""
with zipfile.ZipFile(_a , "w" ) as f:
f.write(_a , arcname=os.path.join("nested" , os.path.basename(_a ) ) )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase( a , a , a ):
__a = tmp_path_factory.mktemp("data" ) / """dataset_with_dir.jsonl.zip"""
with zipfile.ZipFile(_a , "w" ) as f:
f.write(_a , arcname=os.path.join("main_dir" , os.path.basename(_a ) ) )
f.write(_a , arcname=os.path.join("main_dir" , os.path.basename(_a ) ) )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase( a , a , a ):
__a = tmp_path_factory.mktemp("data" ) / """dataset.jsonl.tar"""
with tarfile.TarFile(_a , "w" ) as f:
f.add(_a , arcname=os.path.basename(_a ) )
f.add(_a , arcname=os.path.basename(_a ) )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase( a , a , a , a ):
__a = tmp_path_factory.mktemp("data" ) / """dataset_nested.jsonl.tar"""
with tarfile.TarFile(_a , "w" ) as f:
f.add(_a , arcname=os.path.join("nested" , os.path.basename(_a ) ) )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase( a ):
__a = ["""0""", """1""", """2""", """3"""]
__a = str(tmp_path_factory.mktemp("data" ) / "dataset.txt" )
with open(_a , "w" ) as f:
for item in data:
f.write(item + "\n" )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase( a ):
__a = ["""0""", """1""", """2""", """3"""]
__a = str(tmp_path_factory.mktemp("data" ) / "dataset2.txt" )
with open(_a , "w" ) as f:
for item in data:
f.write(item + "\n" )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase( a ):
__a = ["""0""", """1""", """2""", """3"""]
__a = tmp_path_factory.mktemp("data" ) / """dataset.abc"""
with open(_a , "w" ) as f:
for item in data:
f.write(item + "\n" )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase( a , a , a ):
__a = tmp_path_factory.mktemp("data" ) / """dataset.text.zip"""
with zipfile.ZipFile(_a , "w" ) as f:
f.write(_a , arcname=os.path.basename(_a ) )
f.write(_a , arcname=os.path.basename(_a ) )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase( a , a , a ):
__a = tmp_path_factory.mktemp("data" ) / """dataset_with_dir.text.zip"""
with zipfile.ZipFile(_a , "w" ) as f:
f.write(_a , arcname=os.path.join("main_dir" , os.path.basename(_a ) ) )
f.write(_a , arcname=os.path.join("main_dir" , os.path.basename(_a ) ) )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase( a , a , a ):
__a = tmp_path_factory.mktemp("data" ) / """dataset.ext.zip"""
with zipfile.ZipFile(_a , "w" ) as f:
f.write(_a , arcname=os.path.basename("unsupported.ext" ) )
f.write(_a , arcname=os.path.basename("unsupported_2.ext" ) )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase( a ):
__a = """\n""".join(["First", "Second\u2029with Unicode new line", "Third"] )
__a = str(tmp_path_factory.mktemp("data" ) / "dataset_with_unicode_new_lines.txt" )
with open(_a , "w" , encoding="utf-8" ) as f:
f.write(_a )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase( ):
return os.path.join("tests" , "features" , "data" , "test_image_rgb.jpg" )
@pytest.fixture(scope="session" )
def _lowerCamelCase( ):
return os.path.join("tests" , "features" , "data" , "test_audio_44100.wav" )
@pytest.fixture(scope="session" )
def _lowerCamelCase( a , a ):
__a = tmp_path_factory.mktemp("data" ) / """dataset.img.zip"""
with zipfile.ZipFile(_a , "w" ) as f:
f.write(_a , arcname=os.path.basename(_a ) )
f.write(_a , arcname=os.path.basename(_a ).replace(".jpg" , "2.jpg" ) )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase( a ):
__a = tmp_path_factory.mktemp("data_dir" )
(data_dir / "subdir").mkdir()
with open(data_dir / "subdir" / "train.txt" , "w" ) as f:
f.write("foo\n" * 1_0 )
with open(data_dir / "subdir" / "test.txt" , "w" ) as f:
f.write("bar\n" * 1_0 )
# hidden file
with open(data_dir / "subdir" / ".test.txt" , "w" ) as f:
f.write("bar\n" * 1_0 )
# hidden directory
(data_dir / ".subdir").mkdir()
with open(data_dir / ".subdir" / "train.txt" , "w" ) as f:
f.write("foo\n" * 1_0 )
with open(data_dir / ".subdir" / "test.txt" , "w" ) as f:
f.write("bar\n" * 1_0 )
return data_dir
| 365 | """simple docstring"""
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
SCREAMING_SNAKE_CASE__:Optional[Any] = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__:str = {
"""asapp/sew-d-tiny-100k""": """https://huggingface.co/asapp/sew-d-tiny-100k/resolve/main/config.json""",
# See all SEW-D models at https://huggingface.co/models?filter=sew-d
}
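# SEW-D is SEW with DeBERTa-style disentangled attention; the extra options in
# the signature below (position_buckets, share_att_key, pos_att_type,
# norm_rel_ebd) configure that attention variant.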
class snake_case__ ( snake_case_ ):
_snake_case : str = """sew-d"""
def __init__( self , lowerCamelCase=32 , lowerCamelCase=768 , lowerCamelCase=12 , lowerCamelCase=12 , lowerCamelCase=3072 , lowerCamelCase=2 , lowerCamelCase=512 , lowerCamelCase=256 , lowerCamelCase=True , lowerCamelCase=True , lowerCamelCase=("p2c", "c2p") , lowerCamelCase="layer_norm" , lowerCamelCase="gelu_python" , lowerCamelCase=0.1 , lowerCamelCase=0.1 , lowerCamelCase=0.1 , lowerCamelCase=0.0 , lowerCamelCase=0.1 , lowerCamelCase=0.02 , lowerCamelCase=1E-7 , lowerCamelCase=1E-5 , lowerCamelCase="group" , lowerCamelCase="gelu" , lowerCamelCase=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512) , lowerCamelCase=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) , lowerCamelCase=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1) , lowerCamelCase=False , lowerCamelCase=128 , lowerCamelCase=16 , lowerCamelCase=True , lowerCamelCase=0.05 , lowerCamelCase=10 , lowerCamelCase=2 , lowerCamelCase=0.0 , lowerCamelCase=10 , lowerCamelCase=0 , lowerCamelCase="mean" , lowerCamelCase=False , lowerCamelCase=False , lowerCamelCase=256 , lowerCamelCase=0 , lowerCamelCase=1 , lowerCamelCase=2 , **lowerCamelCase , ):
super().__init__(**lowerCamelCase , pad_token_id=lowerCamelCase , bos_token_id=lowerCamelCase , eos_token_id=lowerCamelCase )
__a = hidden_size
__a = feat_extract_norm
__a = feat_extract_activation
__a = list(lowerCamelCase )
__a = list(lowerCamelCase )
__a = list(lowerCamelCase )
__a = conv_bias
__a = num_conv_pos_embeddings
__a = num_conv_pos_embedding_groups
__a = len(self.conv_dim )
__a = num_hidden_layers
__a = intermediate_size
__a = squeeze_factor
__a = max_position_embeddings
__a = position_buckets
__a = share_att_key
__a = relative_attention
__a = norm_rel_ebd
__a = list(lowerCamelCase )
__a = hidden_act
__a = num_attention_heads
__a = hidden_dropout
__a = attention_dropout
__a = activation_dropout
__a = feat_proj_dropout
__a = final_dropout
__a = layer_norm_eps
__a = feature_layer_norm_eps
__a = initializer_range
__a = vocab_size
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
"Configuration for convolutional layers is incorrect."
"It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`,"
F"but is `len(config.conv_dim) = {len(self.conv_dim )}`, `len(config.conv_stride)"
F"= {len(self.conv_stride )}`, `len(config.conv_kernel) = {len(self.conv_kernel )}`." )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
__a = apply_spec_augment
__a = mask_time_prob
__a = mask_time_length
__a = mask_time_min_masks
__a = mask_feature_prob
__a = mask_feature_length
__a = mask_feature_min_masks
# ctc loss
__a = ctc_loss_reduction
__a = ctc_zero_infinity
# sequence classification
__a = use_weighted_layer_sum
__a = classifier_proj_size
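    # Downsampling factor of the conv feature encoder: the product of all conv
    # strides, i.e. how many input samples collapse into one logit frame.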
@property
    def inputs_to_logits_ratio( self ):
        return functools.reduce(operator.mul , self.conv_stride , 1 )
| 268 | 0 |
import argparse
import torch
from transformers import (
SpeechTaConfig,
SpeechTaFeatureExtractor,
SpeechTaForSpeechToSpeech,
SpeechTaForSpeechToText,
SpeechTaForTextToSpeech,
SpeechTaProcessor,
SpeechTaTokenizer,
logging,
)
from transformers.tokenization_utils import AddedToken
logging.set_verbosity_info()
a_ = logging.get_logger("""transformers.models.speecht5""")
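# Each MAPPING_* dict translates fairseq parameter names (keys) into HF SpeechT5
# names (values); '*' is a wildcard for the layer index, resolved at load time.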
a_ = {
"""speech_encoder_prenet.layer_norm""": """speecht5.encoder.prenet.feature_projection.layer_norm""",
"""speech_encoder_prenet.post_extract_proj""": """speecht5.encoder.prenet.feature_projection.projection""",
"""speech_encoder_prenet.pos_conv.0""": """speecht5.encoder.prenet.pos_conv_embed.conv""",
"""speech_encoder_prenet.mask_emb""": """speecht5.encoder.prenet.masked_spec_embed""",
}
a_ = {
"""text_encoder_prenet.encoder_prenet.0""": """speecht5.encoder.prenet.embed_tokens""",
"""text_encoder_prenet.encoder_prenet.1.alpha""": """speecht5.encoder.prenet.encode_positions.alpha""",
}
a_ = {
"""speech_decoder_prenet.decoder_prenet.0.0.prenet.0.0""": """speecht5.decoder.prenet.layers.0""",
"""speech_decoder_prenet.decoder_prenet.0.0.prenet.1.0""": """speecht5.decoder.prenet.layers.1""",
"""speech_decoder_prenet.decoder_prenet.0.1""": """speecht5.decoder.prenet.final_layer""",
"""speech_decoder_prenet.decoder_prenet.1.alpha""": """speecht5.decoder.prenet.encode_positions.alpha""",
"""speech_decoder_prenet.spkembs_layer.0""": """speecht5.decoder.prenet.speaker_embeds_layer""",
}
a_ = {
"""speech_decoder_postnet.feat_out""": """speech_decoder_postnet.feat_out""",
"""speech_decoder_postnet.prob_out""": """speech_decoder_postnet.prob_out""",
"""speech_decoder_postnet.postnet.postnet.0.0""": """speech_decoder_postnet.layers.0.conv""",
"""speech_decoder_postnet.postnet.postnet.0.1""": """speech_decoder_postnet.layers.0.batch_norm""",
"""speech_decoder_postnet.postnet.postnet.1.0""": """speech_decoder_postnet.layers.1.conv""",
"""speech_decoder_postnet.postnet.postnet.1.1""": """speech_decoder_postnet.layers.1.batch_norm""",
"""speech_decoder_postnet.postnet.postnet.2.0""": """speech_decoder_postnet.layers.2.conv""",
"""speech_decoder_postnet.postnet.postnet.2.1""": """speech_decoder_postnet.layers.2.batch_norm""",
"""speech_decoder_postnet.postnet.postnet.3.0""": """speech_decoder_postnet.layers.3.conv""",
"""speech_decoder_postnet.postnet.postnet.3.1""": """speech_decoder_postnet.layers.3.batch_norm""",
"""speech_decoder_postnet.postnet.postnet.4.0""": """speech_decoder_postnet.layers.4.conv""",
"""speech_decoder_postnet.postnet.postnet.4.1""": """speech_decoder_postnet.layers.4.batch_norm""",
}
a_ = {
"""text_decoder_prenet.embed_tokens""": """speecht5.decoder.prenet.embed_tokens""",
}
a_ = {
"""text_decoder_postnet.output_projection""": """text_decoder_postnet.lm_head""",
}
a_ = {
"""encoder.layers.*.self_attn.k_proj""": """speecht5.encoder.wrapped_encoder.layers.*.attention.k_proj""",
"""encoder.layers.*.self_attn.v_proj""": """speecht5.encoder.wrapped_encoder.layers.*.attention.v_proj""",
"""encoder.layers.*.self_attn.q_proj""": """speecht5.encoder.wrapped_encoder.layers.*.attention.q_proj""",
"""encoder.layers.*.self_attn.out_proj""": """speecht5.encoder.wrapped_encoder.layers.*.attention.out_proj""",
"""encoder.layers.*.self_attn_layer_norm""": """speecht5.encoder.wrapped_encoder.layers.*.layer_norm""",
"""encoder.layers.*.fc1""": """speecht5.encoder.wrapped_encoder.layers.*.feed_forward.intermediate_dense""",
"""encoder.layers.*.fc2""": """speecht5.encoder.wrapped_encoder.layers.*.feed_forward.output_dense""",
"""encoder.layers.*.final_layer_norm""": """speecht5.encoder.wrapped_encoder.layers.*.final_layer_norm""",
"""encoder.layer_norm""": """speecht5.encoder.wrapped_encoder.layer_norm""",
"""encoder.pos_emb.pe_k""": """speecht5.encoder.wrapped_encoder.embed_positions.pe_k""",
}
a_ = {
"""decoder.layers.*.self_attn.k_proj""": """speecht5.decoder.wrapped_decoder.layers.*.self_attn.k_proj""",
"""decoder.layers.*.self_attn.v_proj""": """speecht5.decoder.wrapped_decoder.layers.*.self_attn.v_proj""",
"""decoder.layers.*.self_attn.q_proj""": """speecht5.decoder.wrapped_decoder.layers.*.self_attn.q_proj""",
"""decoder.layers.*.self_attn.out_proj""": """speecht5.decoder.wrapped_decoder.layers.*.self_attn.out_proj""",
"""decoder.layers.*.self_attn_layer_norm""": """speecht5.decoder.wrapped_decoder.layers.*.self_attn_layer_norm""",
"""decoder.layers.*.encoder_attn.k_proj""": """speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.k_proj""",
"""decoder.layers.*.encoder_attn.v_proj""": """speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.v_proj""",
"""decoder.layers.*.encoder_attn.q_proj""": """speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.q_proj""",
"""decoder.layers.*.encoder_attn.out_proj""": """speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.out_proj""",
"""decoder.layers.*.encoder_attn_layer_norm""": """speecht5.decoder.wrapped_decoder.layers.*.encoder_attn_layer_norm""",
"""decoder.layers.*.fc1""": """speecht5.decoder.wrapped_decoder.layers.*.feed_forward.intermediate_dense""",
"""decoder.layers.*.fc2""": """speecht5.decoder.wrapped_decoder.layers.*.feed_forward.output_dense""",
"""decoder.layers.*.final_layer_norm""": """speecht5.decoder.wrapped_decoder.layers.*.final_layer_norm""",
}
a_ = {
**MAPPING_SPEECH_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_TEXT_DECODER_PRENET,
**MAPPING_TEXT_DECODER_POSTNET,
}
a_ = {
**MAPPING_TEXT_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_SPEECH_DECODER_PRENET,
**MAPPING_SPEECH_DECODER_POSTNET,
}
a_ = {
**MAPPING_SPEECH_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_SPEECH_DECODER_PRENET,
**MAPPING_SPEECH_DECODER_POSTNET,
}
a_ = []
a_ = [
"""encoder.version""",
"""encoder.layers.*.norm_k.weight""",
"""encoder.layers.*.norm_k.bias""",
"""decoder.version""",
"""decoder.layers.*.norm_k.weight""",
"""decoder.layers.*.norm_k.bias""",
"""decoder.pos_emb.pe_k""",
"""speech_encoder_prenet.embed_positions._float_tensor""",
"""text_decoder_prenet.embed_positions._float_tensor""",
]
a_ = IGNORE_KEYS + [
"""encoder.proj""",
"""text_encoder_prenet.*""",
"""speech_decoder_prenet.*""",
"""speech_decoder_postnet.*""",
]
a_ = IGNORE_KEYS + [
"""encoder.proj""",
"""speech_encoder_prenet.*""",
"""text_decoder_prenet.*""",
"""text_decoder_postnet.*""",
]
a_ = IGNORE_KEYS + [
"""encoder.proj""",
"""text_encoder_prenet.*""",
"""text_decoder_prenet.*""",
"""text_decoder_postnet.*""",
]
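# Copies one checkpoint tensor into the HF module at the dotted attribute path
# `key`, validating shapes first; the weight_type argument selects the leaf
# attribute (weight / weight_g / weight_v / bias / batch-norm statistics).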
def a__ ( _UpperCamelCase : Optional[Any] ,_UpperCamelCase : Union[str, Any] ,_UpperCamelCase : Any ,_UpperCamelCase : Optional[int] ,_UpperCamelCase : Union[str, Any] ):
for attribute in key.split('''.''' ):
__lowerCamelCase = getattr(_UpperCamelCase ,_UpperCamelCase )
if weight_type is not None:
__lowerCamelCase = getattr(_UpperCamelCase ,_UpperCamelCase ).shape
else:
__lowerCamelCase = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
F"""Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be"""
F""" {value.shape} for {full_name}""" )
if weight_type == "weight":
__lowerCamelCase = value
elif weight_type == "weight_g":
__lowerCamelCase = value
elif weight_type == "weight_v":
__lowerCamelCase = value
elif weight_type == "bias":
__lowerCamelCase = value
elif weight_type == "running_mean":
__lowerCamelCase = value
elif weight_type == "running_var":
__lowerCamelCase = value
elif weight_type == "num_batches_tracked":
__lowerCamelCase = value
else:
__lowerCamelCase = value
logger.info(F"""{key + ("." + weight_type if weight_type is not None else "")} was initialized from {full_name}.""" )
def a__ ( _UpperCamelCase : int ,_UpperCamelCase : str ):
for key in ignore_keys:
if key.endswith('''.*''' ):
if name.startswith(key[:-1] ):
return True
elif ".*." in key:
__lowerCamelCase ,__lowerCamelCase = key.split('''.*.''' )
if prefix in name and suffix in name:
return True
elif key in name:
return True
return False
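# Walks every fairseq weight, skips entries in the task's IGNORE list, routes
# conv feature-extractor weights to load_conv_layer, and maps the rest through
# the task-specific MAPPING table.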
def a__ ( _UpperCamelCase : Dict ,_UpperCamelCase : str ,_UpperCamelCase : int ):
__lowerCamelCase = []
if task == "s2t":
__lowerCamelCase = hf_model.speechta.encoder.prenet.feature_encoder
__lowerCamelCase = MAPPING_S2T
__lowerCamelCase = IGNORE_KEYS_S2T
elif task == "t2s":
__lowerCamelCase = None
__lowerCamelCase = MAPPING_T2S
__lowerCamelCase = IGNORE_KEYS_T2S
elif task == "s2s":
__lowerCamelCase = hf_model.speechta.encoder.prenet.feature_encoder
__lowerCamelCase = MAPPING_S2S
__lowerCamelCase = IGNORE_KEYS_S2S
else:
raise ValueError(F"""Unsupported task: {task}""" )
for name, value in fairseq_dict.items():
if should_ignore(_UpperCamelCase ,_UpperCamelCase ):
logger.info(F"""{name} was ignored""" )
continue
__lowerCamelCase = False
if "conv_layers" in name:
load_conv_layer(
_UpperCamelCase ,_UpperCamelCase ,_UpperCamelCase ,_UpperCamelCase ,hf_model.config.feat_extract_norm == '''group''' ,)
__lowerCamelCase = True
else:
for key, mapped_key in MAPPING.items():
# mapped_key = "speecht5." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if "*" in key:
__lowerCamelCase ,__lowerCamelCase = key.split('''.*.''' )
if prefix in name and suffix in name:
__lowerCamelCase = suffix
# if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
if key in name:
__lowerCamelCase = True
if "*" in mapped_key:
__lowerCamelCase = name.split(_UpperCamelCase )[0].split('''.''' )[-2]
__lowerCamelCase = mapped_key.replace('''*''' ,_UpperCamelCase )
if "weight_g" in name:
__lowerCamelCase = '''weight_g'''
elif "weight_v" in name:
__lowerCamelCase = '''weight_v'''
elif "bias" in name:
__lowerCamelCase = '''bias'''
elif "weight" in name:
__lowerCamelCase = '''weight'''
elif "running_mean" in name:
__lowerCamelCase = '''running_mean'''
elif "running_var" in name:
__lowerCamelCase = '''running_var'''
elif "num_batches_tracked" in name:
__lowerCamelCase = '''num_batches_tracked'''
else:
__lowerCamelCase = None
set_recursively(_UpperCamelCase ,_UpperCamelCase ,_UpperCamelCase ,_UpperCamelCase ,_UpperCamelCase )
continue
if not is_used:
unused_weights.append(_UpperCamelCase )
logger.warning(F"""Unused weights: {unused_weights}""" )
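# Conv feature-extractor weights are addressed as conv_layers.<layer>.<type>:
# type_id 0 is the convolution itself, type_id 2 a group/layer norm.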
def a__ ( _UpperCamelCase : Any ,_UpperCamelCase : Tuple ,_UpperCamelCase : Union[str, Any] ,_UpperCamelCase : Union[str, Any] ,_UpperCamelCase : List[str] ):
__lowerCamelCase = full_name.split('''conv_layers.''' )[-1]
__lowerCamelCase = name.split('''.''' )
__lowerCamelCase = int(items[0] )
__lowerCamelCase = int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" )
__lowerCamelCase = value
logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" )
__lowerCamelCase = value
logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.""" )
__lowerCamelCase = value
logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.""" )
__lowerCamelCase = value
logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
else:
unused_weights.append(_UpperCamelCase )
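# Conversion entry point: builds the SpeechT5 head matching `task`, optionally
# assembles a processor from the SentencePiece vocab, then loads the weights.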
@torch.no_grad()
def a__ ( _UpperCamelCase : List[str] ,_UpperCamelCase : str ,_UpperCamelCase : Any ,_UpperCamelCase : Union[str, Any]=None ,_UpperCamelCase : List[Any]=None ,_UpperCamelCase : str=None ,):
if config_path is not None:
__lowerCamelCase = SpeechTaConfig.from_pretrained(_UpperCamelCase )
else:
__lowerCamelCase = SpeechTaConfig()
if task == "s2t":
__lowerCamelCase = config.max_text_positions
__lowerCamelCase = SpeechTaForSpeechToText(_UpperCamelCase )
elif task == "t2s":
__lowerCamelCase = 18_76
__lowerCamelCase = 6_00
__lowerCamelCase = config.max_speech_positions
__lowerCamelCase = SpeechTaForTextToSpeech(_UpperCamelCase )
elif task == "s2s":
__lowerCamelCase = 18_76
__lowerCamelCase = config.max_speech_positions
__lowerCamelCase = SpeechTaForSpeechToSpeech(_UpperCamelCase )
else:
raise ValueError(F"""Unknown task name: {task}""" )
if vocab_path:
__lowerCamelCase = SpeechTaTokenizer(_UpperCamelCase ,model_max_length=config.max_text_positions )
# Mask token behaves like a normal word, i.e. include the space before it
__lowerCamelCase = AddedToken('''<mask>''' ,lstrip=_UpperCamelCase ,rstrip=_UpperCamelCase )
__lowerCamelCase = mask_token
tokenizer.add_special_tokens({'''mask_token''': mask_token} )
tokenizer.add_tokens(['''<ctc_blank>'''] )
__lowerCamelCase = SpeechTaFeatureExtractor()
__lowerCamelCase = SpeechTaProcessor(tokenizer=_UpperCamelCase ,feature_extractor=_UpperCamelCase )
processor.save_pretrained(_UpperCamelCase )
__lowerCamelCase = torch.load(_UpperCamelCase )
recursively_load_weights(fairseq_checkpoint['''model'''] ,_UpperCamelCase ,_UpperCamelCase )
model.save_pretrained(_UpperCamelCase )
if repo_id:
print('''Pushing to the hub...''' )
processor.push_to_hub(_UpperCamelCase )
model.push_to_hub(_UpperCamelCase )
if __name__ == "__main__":
a_ = argparse.ArgumentParser()
parser.add_argument(
"""--task""",
default="""s2t""",
type=str,
help="""Type of the SpeechT5 model you'd like to convert. Should be one of 's2t', 't2s', 's2s'.""",
)
parser.add_argument("""--checkpoint_path""", required=True, default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--vocab_path""", default=None, type=str, help="""Path to SentencePiece model""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--pytorch_dump_folder_path""", required=True, default=None, type=str, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--push_to_hub""", default=None, type=str, help="""Where to upload the converted model on the 🤗 hub."""
)
a_ = parser.parse_args()
convert_speechta_checkpoint(
args.task,
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.vocab_path,
args.push_to_hub,
)
| 330 |
import argparse
import os
from pathlib import Path
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import PegasusConfig, PegasusForConditionalGeneration, PegasusTokenizer
from transformers.models.pegasus.configuration_pegasus import DEFAULTS, task_specific_params
a_ = [
# replace left string with right string to get the relevant state_dict key (identical state dict to bart)
["""memory_attention""", """encoder_attn"""],
["""attention""", """attn"""],
["""/""", """."""],
[""".LayerNorm.gamma""", """_layer_norm.weight"""],
[""".LayerNorm.beta""", """_layer_norm.bias"""],
["""r.layer_""", """r.layers."""],
["""output_proj""", """out_proj"""],
["""ffn.dense_1.""", """fc2."""],
["""ffn.dense.""", """fc1."""],
["""ffn_layer_norm""", """final_layer_norm"""],
["""kernel""", """weight"""],
["""encoder_layer_norm.""", """encoder.layer_norm."""],
["""decoder_layer_norm.""", """decoder.layer_norm."""],
["""embeddings.weights""", """shared.weight"""],
]
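# Applies the PATTERNS substitutions left to right, translating a TF variable
# name into the corresponding Bart-style state_dict key.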
def a__ ( _UpperCamelCase : int ):
for pegasus_name, hf_name in PATTERNS:
__lowerCamelCase = k.replace(_UpperCamelCase ,_UpperCamelCase )
return k
def a__ ( _UpperCamelCase : dict ,_UpperCamelCase : dict ):
__lowerCamelCase = DEFAULTS.copy()
cfg_kwargs.update(_UpperCamelCase )
__lowerCamelCase = PegasusConfig(**_UpperCamelCase )
__lowerCamelCase = PegasusForConditionalGeneration(_UpperCamelCase )
__lowerCamelCase = torch_model.model.state_dict()
__lowerCamelCase = {}
for k, v in tf_weights.items():
__lowerCamelCase = rename_state_dict_key(_UpperCamelCase )
if new_k not in sd:
raise ValueError(F"""could not find new key {new_k} in state dict. (converted from {k})""" )
if "dense" in k or "proj" in new_k:
__lowerCamelCase = v.T
__lowerCamelCase = torch.tensor(_UpperCamelCase ,dtype=sd[new_k].dtype )
assert v.shape == sd[new_k].shape, F"""{new_k}, {k}, {v.shape}, {sd[new_k].shape}"""
# make sure embedding.padding_idx is respected
__lowerCamelCase = torch.zeros_like(mapping['''shared.weight'''][cfg.pad_token_id + 1] )
__lowerCamelCase = mapping['''shared.weight''']
__lowerCamelCase = mapping['''shared.weight''']
__lowerCamelCase = {k: torch.zeros_like(_UpperCamelCase ) for k, v in sd.items() if k.endswith('''bias''' ) and k not in mapping}
mapping.update(**_UpperCamelCase )
__lowerCamelCase ,__lowerCamelCase = torch_model.model.load_state_dict(_UpperCamelCase ,strict=_UpperCamelCase )
__lowerCamelCase = [
k for k in missing if k not in ['''encoder.embed_positions.weight''', '''decoder.embed_positions.weight''']
]
assert unexpected_missing == [], F"""no matches found for the following torch keys {unexpected_missing}"""
assert extra == [], F"""no matches found for the following tf keys {extra}"""
return torch_model
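# Reads every variable in the TF checkpoint into a {name: ndarray} dict,
# skipping Adafactor optimizer slots and the global step.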
def a__ ( _UpperCamelCase : str="./ckpt/aeslc/model.ckpt-32000" ):
__lowerCamelCase = tf.train.list_variables(_UpperCamelCase )
__lowerCamelCase = {}
__lowerCamelCase = ['''Adafactor''', '''global_step''']
for name, shape in tqdm(_UpperCamelCase ,desc='''converting tf checkpoint to dict''' ):
__lowerCamelCase = any(pat in name for pat in ignore_name )
if skip_key:
continue
__lowerCamelCase = tf.train.load_variable(_UpperCamelCase ,_UpperCamelCase )
__lowerCamelCase = array
return tf_weights
def a__ ( _UpperCamelCase : str ,_UpperCamelCase : str ):
# save tokenizer first
__lowerCamelCase = Path(_UpperCamelCase ).parent.name
__lowerCamelCase = task_specific_params[F"""summarization_{dataset}"""]['''max_position_embeddings''']
__lowerCamelCase = PegasusTokenizer.from_pretrained('''sshleifer/pegasus''' ,model_max_length=_UpperCamelCase )
assert tok.model_max_length == desired_max_model_length
tok.save_pretrained(_UpperCamelCase )
# convert model
__lowerCamelCase = get_tf_weights_as_numpy(_UpperCamelCase )
__lowerCamelCase = task_specific_params[F"""summarization_{dataset}"""]
if dataset == "large":
__lowerCamelCase = task_specific_params
__lowerCamelCase = convert_pegasus(_UpperCamelCase ,_UpperCamelCase )
torch_model.save_pretrained(_UpperCamelCase )
__lowerCamelCase = torch_model.state_dict()
sd.pop('''model.decoder.embed_positions.weight''' )
sd.pop('''model.encoder.embed_positions.weight''' )
torch.save(_UpperCamelCase ,Path(_UpperCamelCase ) / '''pytorch_model.bin''' )
if __name__ == "__main__":
a_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument("""tf_ckpt_path""", type=str, help="""passed to tf.train.list_variables""")
parser.add_argument("""save_dir""", default=None, type=str, help="""Path to the output PyTorch model.""")
a_ = parser.parse_args()
if args.save_dir is None:
a_ = Path(args.tf_ckpt_path).parent.name
a_ = os.path.join("""pegasus""", dataset)
convert_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir)
| 330 | 1 |
"""simple docstring"""
from __future__ import annotations
import typing
from collections.abc import Iterable
import numpy as np
Vector = typing.Union[Iterable[float], Iterable[int], np.ndarray]  # noqa: UP007
VectorOut = typing.Union[np.float64, int, float]  # noqa: UP007
def euclidean_distance(vector_1: Vector , vector_2: Vector ) -> VectorOut:
    '''Euclidean distance via numpy: sqrt of the summed squared differences.'''
    return np.sqrt(np.sum((np.asarray(vector_1 ) - np.asarray(vector_2 )) ** 2 ) )
def euclidean_distance_no_np(vector_1: Vector , vector_2: Vector ) -> VectorOut:
    '''Pure-Python Euclidean distance, for benchmarking against the numpy version.'''
    return sum((va - vb) ** 2 for va, vb in zip(vector_1 , vector_2 ) ) ** (1 / 2)
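# Example: both implementations agree on a 3-4-5 triangle, e.g.
# euclidean_distance((0, 0), (3, 4)) == euclidean_distance_no_np((0, 0), (3, 4)) == 5.0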
if __name__ == "__main__":
    def benchmark() -> None:
        '''Time both implementations on the same inputs.'''
        from timeit import timeit
print("Without Numpy" )
print(
timeit(
"euclidean_distance_no_np([1, 2, 3], [4, 5, 6])" , number=1_0000 , globals=globals() , ) )
print("With Numpy" )
print(
timeit(
"euclidean_distance([1, 2, 3], [4, 5, 6])" , number=1_0000 , globals=globals() , ) )
benchmark()
| 79 |
"""simple docstring"""
from transformers import HfArgumentParser, TensorFlowBenchmark, TensorFlowBenchmarkArguments
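# Runs the TensorFlow benchmark CLI; deprecated `--no_*` flags are translated
# into a helpful error pointing at the new `--no-*` spelling.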
def main():
    parser = HfArgumentParser(TensorFlowBenchmarkArguments )
    benchmark_args = parser.parse_args_into_dataclasses()[0]
    benchmark = TensorFlowBenchmark(args=benchmark_args )
    try:
        benchmark_args = parser.parse_args_into_dataclasses()[0]
    except ValueError as e:
        arg_error_msg = "Arg --no_{0} is no longer used, please use --no-{0} instead."
        begin_error_msg = " ".join(str(e ).split(" " )[:-1] )
        full_error_msg = ""
        depreciated_args = eval(str(e ).split(" " )[-1] )
        wrong_args = []
        for arg in depreciated_args:
            # arg[2:] removes '--'
            if arg[2:] in TensorFlowBenchmark.deprecated_args:
                # arg[5:] removes '--no_'
                full_error_msg += arg_error_msg.format(arg[5:] )
            else:
                wrong_args.append(arg )
        if len(wrong_args ) > 0:
            full_error_msg = full_error_msg + begin_error_msg + str(wrong_args )
        raise ValueError(full_error_msg )
    benchmark.run()
if __name__ == "__main__":
main()
| 79 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
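# Lazy import structure: torch-backed symbols are resolved on first attribute
# access via _LazyModule, so importing the package stays cheap without torch.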
__lowercase = {
'''configuration_biogpt''': ['''BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''BioGptConfig'''],
'''tokenization_biogpt''': ['''BioGptTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase = [
'''BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''BioGptForCausalLM''',
'''BioGptForTokenClassification''',
'''BioGptForSequenceClassification''',
'''BioGptModel''',
'''BioGptPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_biogpt import BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP, BioGptConfig
from .tokenization_biogpt import BioGptTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_biogpt import (
BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST,
BioGptForCausalLM,
BioGptForSequenceClassification,
BioGptForTokenClassification,
BioGptModel,
BioGptPreTrainedModel,
)
else:
import sys
__lowercase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 43 | import math
from typing import Any, Callable, List, Optional, Tuple, Union
import numpy as np
import torch
from ...models import T5FilmDecoder
from ...schedulers import DDPMScheduler
from ...utils import is_onnx_available, logging, randn_tensor
if is_onnx_available():
from ..onnx_utils import OnnxRuntimeModel
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
from .continous_encoder import SpectrogramContEncoder
from .notes_encoder import SpectrogramNotesEncoder
__lowercase = logging.get_logger(__name__) # pylint: disable=invalid-name
__lowercase = 256
class lowerCamelCase_ ( UpperCAmelCase_ ):
'''simple docstring'''
a__ : Tuple = ["""melgan"""]
    def __init__( self , notes_encoder: SpectrogramNotesEncoder , continuous_encoder: SpectrogramContEncoder , decoder: T5FilmDecoder , scheduler: DDPMScheduler , melgan: OnnxRuntimeModel if is_onnx_available() else Any , ) -> None:
super().__init__()
# From MELGAN
        self.min_value = math.log(1E-5) # Matches MelGAN training.
        self.max_value = 4.0 # Largest value for most examples
        self.n_dims = 128
self.register_modules(
notes_encoder=__lowercase , continuous_encoder=__lowercase , decoder=__lowercase , scheduler=__lowercase , melgan=__lowercase , )
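    # scale_features / scale_to_features below map mel features between the
    # model's [min_value, max_value] range and a normalized [-1, 1] range.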
    def scale_features( self , features , output_range=(-1.0, 1.0) , clip=False):
        min_out , max_out = output_range
        if clip:
            features = torch.clip(features , self.min_value , self.max_value)
        # Scale to [0, 1].
        zero_one = (features - self.min_value) / (self.max_value - self.min_value)
        # Scale to [min_out, max_out].
        return zero_one * (max_out - min_out) + min_out
    def scale_to_features( self , outputs , input_range=(-1.0, 1.0) , clip=False):
        min_out , max_out = input_range
        outputs = torch.clip(outputs , min_out , max_out) if clip else outputs
        # Scale to [0, 1].
        zero_one = (outputs - min_out) / (max_out - min_out)
        # Scale to [self.min_value, self.max_value].
        return zero_one * (self.max_value - self.min_value) + self.min_value
    def encode( self , input_tokens , continuous_inputs , continuous_mask):
        tokens_mask = input_tokens > 0
        tokens_encoded , tokens_mask = self.notes_encoder(
            encoder_input_tokens=input_tokens , encoder_inputs_mask=tokens_mask)
        continuous_encoded , continuous_mask = self.continuous_encoder(
            encoder_inputs=continuous_inputs , encoder_inputs_mask=continuous_mask)
        return [(tokens_encoded, tokens_mask), (continuous_encoded, continuous_mask)]
    def decode( self , encodings_and_masks , input_tokens , noise_time):
        timesteps = noise_time
        if not torch.is_tensor(timesteps):
            timesteps = torch.tensor([timesteps] , dtype=torch.long , device=input_tokens.device)
        elif torch.is_tensor(timesteps) and len(timesteps.shape) == 0:
            timesteps = timesteps[None].to(input_tokens.device)
        # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
        timesteps = timesteps * torch.ones(input_tokens.shape[0] , dtype=timesteps.dtype , device=timesteps.device)
        logits = self.decoder(
            encodings_and_masks=encodings_and_masks , decoder_input_tokens=input_tokens , decoder_noise_time=timesteps)
        return logits
@torch.no_grad()
def __call__( self , __lowercase , __lowercase = None , __lowercase = 100 , __lowercase = True , __lowercase = "numpy" , __lowercase = None , __lowercase = 1 , ) -> Union[AudioPipelineOutput, Tuple]:
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(__lowercase , __lowercase) or callback_steps <= 0)
):
raise ValueError(
f"""`callback_steps` has to be a positive integer but is {callback_steps} of type"""
f""" {type(__lowercase)}.""")
__UpperCamelCase :Union[str, Any] = np.zeros([1, TARGET_FEATURE_LENGTH, self.n_dims] , dtype=np.floataa)
__UpperCamelCase :Union[str, Any] = np.zeros([1, 0, self.n_dims] , np.floataa)
__UpperCamelCase :Union[str, Any] = torch.ones((1, TARGET_FEATURE_LENGTH) , dtype=__lowercase , device=self.device)
for i, encoder_input_tokens in enumerate(__lowercase):
if i == 0:
__UpperCamelCase :int = torch.from_numpy(pred_mel[:1].copy()).to(
device=self.device , dtype=self.decoder.dtype)
# The first chunk has no previous context.
__UpperCamelCase :int = torch.zeros((1, TARGET_FEATURE_LENGTH) , dtype=__lowercase , device=self.device)
else:
# The full song pipeline does not feed in a context feature, so the mask
# will be all 0s after the feature converter. Because we know we're
# feeding in a full context chunk from the previous prediction, set it
# to all 1s.
__UpperCamelCase :Tuple = ones
__UpperCamelCase :Optional[Any] = self.scale_features(
__lowercase , output_range=[-1.0, 1.0] , clip=__lowercase)
__UpperCamelCase :int = self.encode(
input_tokens=torch.IntTensor([encoder_input_tokens]).to(device=self.device) , continuous_inputs=__lowercase , continuous_mask=__lowercase , )
# Sample encoder_continuous_inputs shaped gaussian noise to begin loop
__UpperCamelCase :int = randn_tensor(
shape=encoder_continuous_inputs.shape , generator=__lowercase , device=self.device , dtype=self.decoder.dtype , )
# set step values
self.scheduler.set_timesteps(__lowercase)
# Denoising diffusion loop
for j, t in enumerate(self.progress_bar(self.scheduler.timesteps)):
__UpperCamelCase :Optional[int] = self.decode(
encodings_and_masks=__lowercase , input_tokens=__lowercase , noise_time=t / self.scheduler.config.num_train_timesteps , )
# Compute previous output: x_t -> x_t-1
__UpperCamelCase :int = self.scheduler.step(__lowercase , __lowercase , __lowercase , generator=__lowercase).prev_sample
__UpperCamelCase :Tuple = self.scale_to_features(__lowercase , input_range=[-1.0, 1.0])
__UpperCamelCase :List[Any] = mel[:1]
__UpperCamelCase :Optional[Any] = mel.cpu().float().numpy()
__UpperCamelCase :Any = np.concatenate([full_pred_mel, pred_mel[:1]] , axis=1)
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(__lowercase , __lowercase)
logger.info('''Generated segment''' , __lowercase)
if output_type == "numpy" and not is_onnx_available():
raise ValueError(
'''Cannot return output in \'np\' format if ONNX is not available. Make sure to have ONNX installed or set \'output_type\' to \'mel\'.''')
elif output_type == "numpy" and self.melgan is None:
raise ValueError(
'''Cannot return output in \'np\' format if melgan component is not defined. Make sure to define `self.melgan` or set \'output_type\' to \'mel\'.''')
if output_type == "numpy":
__UpperCamelCase :Optional[Any] = self.melgan(input_features=full_pred_mel.astype(np.floataa))
else:
__UpperCamelCase :List[str] = full_pred_mel
if not return_dict:
return (output,)
return AudioPipelineOutput(audios=__lowercase)
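# scale_features and scale_to_features above are linear inverses: the first
# maps spectrogram values from [min_value, max_value] into the model's range
# (typically [-1, 1]), the second maps model outputs back. A NumPy sketch of
# that round trip, using the same constants the pipeline registers
# (math.log(1e-5) and 4.0):
import math

import numpy as np

MIN_VALUE = math.log(1e-5)  # matches MelGAN training
MAX_VALUE = 4.0


def scale_features(features, output_range=(-1.0, 1.0)):
    min_out, max_out = output_range
    zero_one = (features - MIN_VALUE) / (MAX_VALUE - MIN_VALUE)  # -> [0, 1]
    return zero_one * (max_out - min_out) + min_out  # -> [min_out, max_out]


def scale_to_features(outputs, input_range=(-1.0, 1.0)):
    min_out, max_out = input_range
    zero_one = (outputs - min_out) / (max_out - min_out)  # -> [0, 1]
    return zero_one * (MAX_VALUE - MIN_VALUE) + MIN_VALUE  # back to feature range


mel = np.random.uniform(MIN_VALUE, MAX_VALUE, size=(1, 256, 128))
assert np.allclose(scale_to_features(scale_features(mel)), mel)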
| 43 | 1 |
'''simple docstring'''
import torch
from transformers import CamembertForMaskedLM, CamembertTokenizer
def fill_mask(masked_input: str, model, tokenizer, topk: int = 5):
    """Return the top-k fillings of the single <mask> token in masked_input."""
    assert masked_input.count('<mask>') == 1
    input_ids = torch.tensor(tokenizer.encode(masked_input, add_special_tokens=True)).unsqueeze(0)  # Batch size 1
    logits = model(input_ids)[0]  # The last hidden-state is the first element of the output tuple
    masked_index = (input_ids.squeeze() == tokenizer.mask_token_id).nonzero().item()
    logits = logits[0, masked_index, :]
    prob = logits.softmax(dim=0)
    values, indices = prob.topk(k=topk, dim=0)
    topk_predicted_token_bpe = ' '.join(
        [tokenizer.convert_ids_to_tokens(indices[i].item()) for i in range(len(indices))])
    masked_token = tokenizer.mask_token
    topk_filled_outputs = []
    for index, predicted_token_bpe in enumerate(topk_predicted_token_bpe.split(' ')):
        predicted_token = predicted_token_bpe.replace('\u2581', ' ')
        if " {0}".format(masked_token) in masked_input:
            topk_filled_outputs.append(
                (
                    masked_input.replace(' {0}'.format(masked_token), predicted_token),
                    values[index].item(),
                    predicted_token,
                ))
        else:
            topk_filled_outputs.append(
                (
                    masked_input.replace(masked_token, predicted_token),
                    values[index].item(),
                    predicted_token,
                ))
    return topk_filled_outputs


tokenizer = CamembertTokenizer.from_pretrained("camembert-base")
model = CamembertForMaskedLM.from_pretrained("camembert-base")
model.eval()
masked_input = "Le camembert est <mask> :)"
print(fill_mask(masked_input, model, tokenizer, topk=3))
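# fill_mask ranks candidates by softmax probability at the masked position and
# keeps the top k. The ranking step in isolation, on a dummy logits vector (no
# model required):
logits_demo = torch.tensor([2.0, 0.5, -1.0, 3.0])
prob_demo = logits_demo.softmax(dim=0)
values_demo, indices_demo = prob_demo.topk(k=2, dim=0)
assert indices_demo.tolist() == [3, 0]  # the two most likely vocabulary ids
assert values_demo[0] > values_demo[1]  # probabilities come back in descending order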
| 264 |
'''simple docstring'''
import sys
_lowercase : Union[str, Any] = (
"73167176531330624919225119674426574742355349194934"
"96983520312774506326239578318016984801869478851843"
"85861560789112949495459501737958331952853208805511"
"12540698747158523863050715693290963295227443043557"
"66896648950445244523161731856403098711121722383113"
"62229893423380308135336276614282806444486645238749"
"30358907296290491560440772390713810515859307960866"
"70172427121883998797908792274921901699720888093776"
"65727333001053367881220235421809751254540594752243"
"52584907711670556013604839586446706324415722155397"
"53697817977846174064955149290862569321978468622482"
"83972241375657056057490261407972968652414535100474"
"82166370484403199890008895243450658541227588666881"
"16427171479924442928230863465674813919123162824586"
"17866458359124566529476545682848912883142607690042"
"24219022671055626321111109370544217506941658960408"
"07198403850962455444362981230987879927244284909188"
"84580156166097919133875499200524063689912560717606"
"05886116467109405077541002256983155200055935729725"
"71636269561882670428252483600823257530420752963450"
)
def str_eval(s: str) -> int:
    """Return the product of the digits in s."""
    product = 1
    for digit in s:
        product *= int(digit)
    return product


def solution(n: str = N) -> int:
    """Find the greatest product of thirteen adjacent digits in n."""
    largest_product = -sys.maxsize - 1
    substr = n[:13]
    cur_index = 13
    while cur_index < len(n) - 13:
        if int(n[cur_index]) >= int(substr[0]):
            substr = substr[1:] + n[cur_index]
            cur_index += 1
        else:
            largest_product = max(largest_product, str_eval(substr))
            substr = n[cur_index : cur_index + 13]
            cur_index += 13
    return largest_product
if __name__ == "__main__":
print(f"""{solution() = }""")
| 264 | 1 |
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import doctest
import sys
import warnings
from os.path import abspath, dirname, join
import _pytest
from transformers.testing_utils import HfDoctestModule, HfDocTestParser
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(__file__), "src"))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action='''ignore''', category=FutureWarning)
def pytest_configure(config):
    config.addinivalue_line(
        'markers', 'is_pt_tf_cross_test: mark test to run only when PT and TF interactions are tested')
    config.addinivalue_line(
        'markers', 'is_pt_flax_cross_test: mark test to run only when PT and FLAX interactions are tested')
    config.addinivalue_line('markers', 'is_pipeline_test: mark test to run only when pipelines are tested')
    config.addinivalue_line('markers', 'is_staging_test: mark test to run only in the staging environment')
    config.addinivalue_line('markers', 'accelerate_tests: mark test that require accelerate')
    config.addinivalue_line('markers', 'tool_tests: mark the tool tests that are run on their specific schedule')


def pytest_addoption(parser):
    from transformers.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser)


def pytest_terminal_summary(terminalreporter):
    from transformers.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption('--make-reports')
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)


def pytest_sessionfinish(session, exitstatus):
    # If no tests are collected, pytest exits with code 5, which makes the CI fail.
    if exitstatus == 5:
        session.exitstatus = 0


# Doctest custom flag to ignore output.
IGNORE_RESULT = doctest.register_optionflag('IGNORE_RESULT')

OutputChecker = doctest.OutputChecker


class CustomOutputChecker(OutputChecker):
    def check_output(self, want, got, optionflags):
        if IGNORE_RESULT & optionflags:
            return True
        return OutputChecker.check_output(self, want, got, optionflags)


doctest.OutputChecker = CustomOutputChecker
_pytest.doctest.DoctestModule = HfDoctestModule
doctest.DocTestParser = HfDocTestParser
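# The conftest above registers an IGNORE_RESULT doctest flag so a doctest can
# opt out of output comparison. A self-contained demo of the same mechanism,
# independent of the transformers helpers:
SKIP_OUTPUT = doctest.register_optionflag("SKIP_OUTPUT")


class AnyOutputChecker(doctest.OutputChecker):
    def check_output(self, want, got, optionflags):
        if SKIP_OUTPUT & optionflags:
            return True
        return doctest.OutputChecker.check_output(self, want, got, optionflags)


_demo = '''
>>> import random
>>> random.random()  # doctest: +SKIP_OUTPUT
0.0
'''
_runner = doctest.DocTestRunner(checker=AnyOutputChecker())
_test = doctest.DocTestParser().get_doctest(_demo, {}, "demo", None, 0)
_runner.run(_test)
assert _runner.failures == 0  # passes even though the output differs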
| 119 |
from __future__ import annotations
import unittest
from transformers import LEDConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFLEDForConditionalGeneration, TFLEDModel
@require_tf
class __lowerCAmelCase :
UpperCamelCase__ = LEDConfig
UpperCamelCase__ = {}
UpperCamelCase__ = '''gelu'''
def __init__( self :Optional[int] , __magic_name__ :Dict , __magic_name__ :List[str]=13 , __magic_name__ :Union[str, Any]=7 , __magic_name__ :str=True , __magic_name__ :Union[str, Any]=False , __magic_name__ :Union[str, Any]=99 , __magic_name__ :List[Any]=32 , __magic_name__ :str=2 , __magic_name__ :List[str]=4 , __magic_name__ :str=37 , __magic_name__ :Any=0.1 , __magic_name__ :Dict=0.1 , __magic_name__ :List[str]=20 , __magic_name__ :Union[str, Any]=2 , __magic_name__ :List[Any]=1 , __magic_name__ :Optional[int]=0 , __magic_name__ :Optional[int]=4 , ):
'''simple docstring'''
a = parent
a = batch_size
a = seq_length
a = is_training
a = use_labels
a = vocab_size
a = hidden_size
a = num_hidden_layers
a = num_attention_heads
a = intermediate_size
a = hidden_dropout_prob
a = attention_probs_dropout_prob
a = max_position_embeddings
a = eos_token_id
a = pad_token_id
a = bos_token_id
a = attention_window
# `ModelTesterMixin.test_attention_outputs` is expecting attention tensors to be of size
# [num_attention_heads, encoder_seq_length, encoder_key_length], but TFLongformerSelfAttention
# returns attention of shape [num_attention_heads, encoder_seq_length, self.attention_window + 1]
# because its local attention only attends to `self.attention_window` and one before and one after
a = self.attention_window + 2
# because of padding `encoder_seq_length`, is different from `seq_length`. Relevant for
# the `test_attention_outputs` and `test_hidden_states_output` tests
a = (
self.seq_length + (self.attention_window - self.seq_length % self.attention_window) % self.attention_window
)
def lowerCamelCase__ ( self :Tuple ):
'''simple docstring'''
a = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
a = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
a = tf.concat([input_ids, eos_tensor] , axis=1 )
a = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
a = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , attention_window=self.attention_window , **self.config_updates , )
a = prepare_led_inputs_dict(__magic_name__ , __magic_name__ , __magic_name__ )
a = tf.concat(
[tf.zeros_like(__magic_name__ )[:, :-1], tf.ones_like(__magic_name__ )[:, -1:]] , axis=-1 , )
a = global_attention_mask
return config, inputs_dict
def lowerCamelCase__ ( self :Union[str, Any] , __magic_name__ :Optional[int] , __magic_name__ :List[Any] ):
'''simple docstring'''
a = TFLEDModel(config=__magic_name__ ).get_decoder()
a = inputs_dict["""input_ids"""]
a = input_ids[:1, :]
a = inputs_dict["""attention_mask"""][:1, :]
a = 1
# first forward pass
a = model(__magic_name__ , attention_mask=__magic_name__ , use_cache=__magic_name__ )
a , a = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
a = ids_tensor((self.batch_size, 3) , config.vocab_size )
a = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta )
# append to next input_ids and
a = tf.concat([input_ids, next_tokens] , axis=-1 )
a = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
a = model(__magic_name__ , attention_mask=__magic_name__ )[0]
a = model(__magic_name__ , attention_mask=__magic_name__ , past_key_values=__magic_name__ )[0]
self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
# select random slice
a = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
a = output_from_no_past[:, -3:, random_slice_idx]
a = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(__magic_name__ , __magic_name__ , rtol=1E-3 )
def prepare_led_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
return {
"input_ids": input_ids,
"attention_mask": attention_mask,
"decoder_input_ids": decoder_input_ids,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
}
@require_tf
class __lowerCAmelCase ( __magic_name__ , __magic_name__ , unittest.TestCase ):
UpperCamelCase__ = (TFLEDForConditionalGeneration, TFLEDModel) if is_tf_available() else ()
UpperCamelCase__ = (TFLEDForConditionalGeneration,) if is_tf_available() else ()
UpperCamelCase__ = (
{
'''conversational''': TFLEDForConditionalGeneration,
'''feature-extraction''': TFLEDModel,
'''summarization''': TFLEDForConditionalGeneration,
'''text2text-generation''': TFLEDForConditionalGeneration,
'''translation''': TFLEDForConditionalGeneration,
}
if is_tf_available()
else {}
)
UpperCamelCase__ = True
UpperCamelCase__ = False
UpperCamelCase__ = False
UpperCamelCase__ = False
def lowerCamelCase__ ( self :Tuple ):
'''simple docstring'''
a = TFLEDModelTester(self )
a = ConfigTester(self , config_class=__magic_name__ )
def lowerCamelCase__ ( self :int ):
'''simple docstring'''
self.config_tester.run_common_tests()
def lowerCamelCase__ ( self :str ):
'''simple docstring'''
a = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*__magic_name__ )
def lowerCamelCase__ ( self :Optional[int] ):
'''simple docstring'''
a , a = self.model_tester.prepare_config_and_inputs_for_common()
a = tf.zeros_like(inputs_dict["""attention_mask"""] )
a = 2
a = tf.where(
tf.range(self.model_tester.seq_length )[None, :] < num_global_attn_indices , 1 , inputs_dict["""global_attention_mask"""] , )
a = True
a = self.model_tester.seq_length
a = self.model_tester.encoder_seq_length
def check_decoder_attentions_output(__magic_name__ :int ):
a = outputs.decoder_attentions
self.assertEqual(len(__magic_name__ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_length, seq_length] , )
def check_encoder_attentions_output(__magic_name__ :Any ):
a = [t.numpy() for t in outputs.encoder_attentions]
a = [t.numpy() for t in outputs.encoder_global_attentions]
self.assertEqual(len(__magic_name__ ) , self.model_tester.num_hidden_layers )
self.assertEqual(len(__magic_name__ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_length, seq_length] , )
self.assertListEqual(
list(global_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, num_global_attn_indices] , )
for model_class in self.all_model_classes:
a = True
a = False
a = False
a = model_class(__magic_name__ )
a = model(self._prepare_for_class(__magic_name__ , __magic_name__ ) )
a = len(__magic_name__ )
self.assertEqual(config.output_hidden_states , __magic_name__ )
check_encoder_attentions_output(__magic_name__ )
if self.is_encoder_decoder:
a = model_class(__magic_name__ )
a = model(self._prepare_for_class(__magic_name__ , __magic_name__ ) )
self.assertEqual(config.output_hidden_states , __magic_name__ )
check_decoder_attentions_output(__magic_name__ )
# Check that output attentions can also be changed via the config
del inputs_dict["output_attentions"]
a = True
a = model_class(__magic_name__ )
a = model(self._prepare_for_class(__magic_name__ , __magic_name__ ) )
self.assertEqual(config.output_hidden_states , __magic_name__ )
check_encoder_attentions_output(__magic_name__ )
# Check attention is always last and order is fine
a = True
a = True
a = model_class(__magic_name__ )
a = model(self._prepare_for_class(__magic_name__ , __magic_name__ ) )
self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(__magic_name__ ) )
self.assertEqual(model.config.output_hidden_states , __magic_name__ )
check_encoder_attentions_output(__magic_name__ )
@unittest.skip("""LED keeps using potentially symbolic tensors in conditionals and breaks tracing.""" )
def lowerCamelCase__ ( self :Union[str, Any] ):
'''simple docstring'''
pass
def lowerCamelCase__ ( self :int ):
'''simple docstring'''
pass
def _long_tensor(tok_lst):
    return tf.constant(tok_lst, dtype=tf.int32)


TOLERANCE = 1E-4
@slow
@require_tf
class __lowerCAmelCase ( unittest.TestCase ):
def lowerCamelCase__ ( self :Union[str, Any] ):
'''simple docstring'''
a = TFLEDForConditionalGeneration.from_pretrained("""allenai/led-base-16384""" ).led
# change to intended input here
a = _long_tensor([512 * [0, 3_1414, 232, 328, 740, 1140, 1_2695, 69]] )
a = _long_tensor([128 * [0, 3_1414, 232, 328, 740, 1140, 1_2695, 69]] )
a = prepare_led_inputs_dict(model.config , __magic_name__ , __magic_name__ )
a = model(**__magic_name__ )[0]
a = (1, 1024, 768)
self.assertEqual(output.shape , __magic_name__ )
# change to expected output here
a = tf.convert_to_tensor(
[[2.3050, 2.8279, 0.6531], [-1.8457, -0.1455, -3.5661], [-1.0186, 0.4586, -2.2043]] , )
tf.debugging.assert_near(output[:, :3, :3] , __magic_name__ , atol=1E-3 )
def lowerCamelCase__ ( self :str ):
'''simple docstring'''
a = TFLEDForConditionalGeneration.from_pretrained("""allenai/led-base-16384""" )
# change to intended input here
a = _long_tensor([512 * [0, 3_1414, 232, 328, 740, 1140, 1_2695, 69]] )
a = _long_tensor([128 * [0, 3_1414, 232, 328, 740, 1140, 1_2695, 69]] )
a = prepare_led_inputs_dict(model.config , __magic_name__ , __magic_name__ )
a = model(**__magic_name__ )[0]
a = (1, 1024, model.config.vocab_size)
self.assertEqual(output.shape , __magic_name__ )
# change to expected output here
a = tf.convert_to_tensor(
[[33.6507, 6.4572, 16.8089], [5.8739, -2.4238, 11.2902], [-3.2139, -4.3149, 4.2783]] , )
tf.debugging.assert_near(output[:, :3, :3] , __magic_name__ , atol=1E-3 , rtol=1E-3 )
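# The attention test above promotes the first num_global_attn_indices
# positions of every sequence to global attention. The tf.where broadcast
# pattern in isolation (shapes here are illustrative):
seq_length_demo = 8
num_global_demo = 2
base_mask = tf.zeros((3, seq_length_demo), dtype=tf.int32)  # batch of 3
global_mask = tf.where(tf.range(seq_length_demo)[None, :] < num_global_demo, 1, base_mask)
# every row of global_mask is now [1, 1, 0, 0, 0, 0, 0, 0]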
| 228 | 0 |
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import AutoTokenizer, BarkProcessor
from transformers.testing_utils import require_torch, slow
@require_torch
class a_ ( unittest.TestCase ):
def __a ( self :Dict) -> str:
UpperCAmelCase_ = '''ylacombe/bark-small'''
UpperCAmelCase_ = tempfile.mkdtemp()
UpperCAmelCase_ = '''en_speaker_1'''
UpperCAmelCase_ = '''This is a test string'''
UpperCAmelCase_ = '''speaker_embeddings_path.json'''
UpperCAmelCase_ = '''speaker_embeddings'''
def __a ( self :Tuple , **_lowercase :Tuple) -> str:
return AutoTokenizer.from_pretrained(self.checkpoint , **_lowercase)
def __a ( self :List[str]) -> Union[str, Any]:
shutil.rmtree(self.tmpdirname)
def __a ( self :str) -> str:
UpperCAmelCase_ = self.get_tokenizer()
UpperCAmelCase_ = BarkProcessor(tokenizer=_lowercase)
processor.save_pretrained(self.tmpdirname)
UpperCAmelCase_ = BarkProcessor.from_pretrained(self.tmpdirname)
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab())
@slow
def __a ( self :str) -> Optional[int]:
UpperCAmelCase_ = BarkProcessor.from_pretrained(
pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , )
processor.save_pretrained(
self.tmpdirname , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , speaker_embeddings_directory=self.speaker_embeddings_directory , )
UpperCAmelCase_ = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''')
UpperCAmelCase_ = BarkProcessor.from_pretrained(
self.tmpdirname , self.speaker_embeddings_dict_path , bos_token='''(BOS)''' , eos_token='''(EOS)''' , )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab())
def __a ( self :Any) -> Union[str, Any]:
UpperCAmelCase_ = BarkProcessor.from_pretrained(
pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , )
UpperCAmelCase_ = 35
UpperCAmelCase_ = 2
UpperCAmelCase_ = 8
UpperCAmelCase_ = {
'''semantic_prompt''': np.ones(_lowercase),
'''coarse_prompt''': np.ones((nb_codebooks_coarse, seq_len)),
'''fine_prompt''': np.ones((nb_codebooks_total, seq_len)),
}
# test providing already loaded voice_preset
UpperCAmelCase_ = processor(text=self.input_string , voice_preset=_lowercase)
UpperCAmelCase_ = inputs['''history_prompt''']
for key in voice_preset:
self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(_lowercase , np.array([])).tolist())
# test loading voice preset from npz file
UpperCAmelCase_ = os.path.join(self.tmpdirname , '''file.npz''')
np.savez(_lowercase , **_lowercase)
UpperCAmelCase_ = processor(text=self.input_string , voice_preset=_lowercase)
UpperCAmelCase_ = inputs['''history_prompt''']
for key in voice_preset:
self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(_lowercase , np.array([])).tolist())
# test loading voice preset from the hub
UpperCAmelCase_ = processor(text=self.input_string , voice_preset=self.voice_preset)
def __a ( self :str) -> Tuple:
UpperCAmelCase_ = self.get_tokenizer()
UpperCAmelCase_ = BarkProcessor(tokenizer=_lowercase)
UpperCAmelCase_ = processor(text=self.input_string)
UpperCAmelCase_ = tokenizer(
self.input_string , padding='''max_length''' , max_length=256 , add_special_tokens=_lowercase , return_attention_mask=_lowercase , return_token_type_ids=_lowercase , )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key].squeeze().tolist())
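# The voice-preset test above relies on np.savez/np.load round-tripping the
# three prompt arrays. That round trip on its own (array shapes are arbitrary
# placeholders matching the test's constants):
import tempfile


def _npz_round_trip_demo():
    preset = {
        "semantic_prompt": np.ones(35),
        "coarse_prompt": np.ones((2, 35)),
        "fine_prompt": np.ones((8, 35)),
    }
    with tempfile.TemporaryDirectory() as tmpdir:
        path = os.path.join(tmpdir, "file.npz")
        np.savez(path, **preset)
        loaded = np.load(path)
        for key in preset:
            assert np.array_equal(loaded[key], preset[key])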
| 344 |
def solution(limit: int = 100_0000) -> int:
    """Sum Euler's totient phi(n) for 2 <= n <= limit, via a sieve."""
    phi = [i - 1 for i in range(limit + 1)]
    for i in range(2, limit + 1):
        if phi[i] == i - 1:  # i is prime
            for j in range(2 * i, limit + 1, i):
                phi[j] -= phi[j] // i
    return sum(phi[2 : limit + 1])
if __name__ == "__main__":
print(solution())
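# The sieve above runs in O(n log log n): when phi[i] is still i - 1, i is
# prime, and every multiple j then sheds a 1/i fraction of its coprime
# candidates. A quick cross-check against a naive gcd count:
from math import gcd


def naive_phi(n: int) -> int:
    return sum(1 for k in range(1, n + 1) if gcd(k, n) == 1)


assert solution(50) == sum(naive_phi(n) for n in range(2, 51))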
| 344 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
"""configuration_owlvit""": [
"""OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""OwlViTConfig""",
"""OwlViTOnnxConfig""",
"""OwlViTTextConfig""",
"""OwlViTVisionConfig""",
],
"""processing_owlvit""": ["""OwlViTProcessor"""],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_owlvit"] = ["""OwlViTFeatureExtractor"""]
    _import_structure["image_processing_owlvit"] = ["""OwlViTImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_owlvit"] = [
"""OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""OwlViTModel""",
"""OwlViTPreTrainedModel""",
"""OwlViTTextModel""",
"""OwlViTVisionModel""",
"""OwlViTForObjectDetection""",
]
if TYPE_CHECKING:
from .configuration_owlvit import (
OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
OwlViTConfig,
OwlViTOnnxConfig,
OwlViTTextConfig,
OwlViTVisionConfig,
)
from .processing_owlvit import OwlViTProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_owlvit import OwlViTFeatureExtractor
from .image_processing_owlvit import OwlViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_owlvit import (
OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
OwlViTForObjectDetection,
OwlViTModel,
OwlViTPreTrainedModel,
OwlViTTextModel,
OwlViTVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 52 |
"""simple docstring"""
from __future__ import annotations
import inspect
import unittest
from typing import List, Tuple
from transformers import RegNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST, TFRegNetForImageClassification, TFRegNetModel
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class UpperCamelCase_ :
def __init__( self : Optional[Any] , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : Optional[int]=3 , lowerCAmelCase_ : Optional[int]=32 , lowerCAmelCase_ : List[Any]=3 , lowerCAmelCase_ : List[Any]=10 , lowerCAmelCase_ : Any=[10, 20, 30, 40] , lowerCAmelCase_ : Any=[1, 1, 2, 1] , lowerCAmelCase_ : Union[str, Any]=True , lowerCAmelCase_ : Union[str, Any]=True , lowerCAmelCase_ : int="relu" , lowerCAmelCase_ : Tuple=3 , lowerCAmelCase_ : Optional[int]=None , ) -> str:
UpperCAmelCase_ : Tuple = parent
UpperCAmelCase_ : int = batch_size
UpperCAmelCase_ : str = image_size
UpperCAmelCase_ : List[Any] = num_channels
UpperCAmelCase_ : Tuple = embeddings_size
UpperCAmelCase_ : Union[str, Any] = hidden_sizes
UpperCAmelCase_ : int = depths
UpperCAmelCase_ : Optional[Any] = is_training
UpperCAmelCase_ : Dict = use_labels
UpperCAmelCase_ : str = hidden_act
UpperCAmelCase_ : str = num_labels
UpperCAmelCase_ : str = scope
UpperCAmelCase_ : str = len(lowerCAmelCase_ )
def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> Dict:
UpperCAmelCase_ : Any = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCAmelCase_ : Union[str, Any] = None
if self.use_labels:
UpperCAmelCase_ : List[str] = ids_tensor([self.batch_size] , self.num_labels )
UpperCAmelCase_ : Optional[Any] = self.get_config()
return config, pixel_values, labels
def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Union[str, Any]:
return RegNetConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , )
def _SCREAMING_SNAKE_CASE ( self : Optional[int] , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : str , lowerCAmelCase_ : Dict ) -> str:
UpperCAmelCase_ : List[Any] = TFRegNetModel(config=lowerCAmelCase_ )
UpperCAmelCase_ : List[Any] = model(lowerCAmelCase_ , training=lowerCAmelCase_ )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def _SCREAMING_SNAKE_CASE ( self : Optional[int] , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : List[Any] ) -> Optional[Any]:
UpperCAmelCase_ : Union[str, Any] = self.num_labels
UpperCAmelCase_ : List[Any] = TFRegNetForImageClassification(lowerCAmelCase_ )
UpperCAmelCase_ : Optional[int] = model(lowerCAmelCase_ , labels=lowerCAmelCase_ , training=lowerCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Dict:
UpperCAmelCase_ : Any = self.prepare_config_and_inputs()
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Dict = config_and_inputs
UpperCAmelCase_ : List[str] = {"pixel_values": pixel_values}
return config, inputs_dict
@require_tf
class UpperCamelCase_ (__A , __A , unittest.TestCase ):
__magic_name__ = (TFRegNetModel, TFRegNetForImageClassification) if is_tf_available() else ()
__magic_name__ = (
{'''feature-extraction''': TFRegNetModel, '''image-classification''': TFRegNetForImageClassification}
if is_tf_available()
else {}
)
__magic_name__ = False
__magic_name__ = False
__magic_name__ = False
__magic_name__ = False
__magic_name__ = False
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Union[str, Any]:
UpperCAmelCase_ : Optional[int] = TFRegNetModelTester(self )
UpperCAmelCase_ : Optional[Any] = ConfigTester(self , config_class=lowerCAmelCase_ , has_text_modality=lowerCAmelCase_ )
def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[int]:
return
@unittest.skip(reason="RegNet does not use inputs_embeds" )
def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Union[str, Any]:
pass
@unittest.skipIf(
not is_tf_available() or len(tf.config.list_physical_devices("GPU" ) ) == 0 , reason="TF does not support backprop for grouped convolutions on CPU." , )
@slow
def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Dict:
super().test_keras_fit()
@unittest.skip(reason="RegNet does not support input and output embeddings" )
def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Any:
pass
def _SCREAMING_SNAKE_CASE ( self : int ) -> List[str]:
UpperCAmelCase_ , UpperCAmelCase_ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase_ : Dict = model_class(lowerCAmelCase_ )
UpperCAmelCase_ : Tuple = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase_ : List[Any] = [*signature.parameters.keys()]
UpperCAmelCase_ : int = ["pixel_values"]
self.assertListEqual(arg_names[:1] , lowerCAmelCase_ )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Union[str, Any]:
UpperCAmelCase_ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCAmelCase_ )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Optional[int]:
def check_hidden_states_output(lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Dict , lowerCAmelCase_ : int ):
UpperCAmelCase_ : str = model_class(lowerCAmelCase_ )
UpperCAmelCase_ : Any = model(**self._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_ ) , training=lowerCAmelCase_ )
UpperCAmelCase_ : str = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
UpperCAmelCase_ : Optional[Any] = self.model_tester.num_stages
self.assertEqual(len(lowerCAmelCase_ ) , expected_num_stages + 1 )
# RegNet's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 2, self.model_tester.image_size // 2] , )
UpperCAmelCase_ , UpperCAmelCase_ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase_ : Tuple = ["basic", "bottleneck"]
for model_class in self.all_model_classes:
for layer_type in layers_type:
UpperCAmelCase_ : List[Any] = layer_type
UpperCAmelCase_ : int = True
check_hidden_states_output(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
UpperCAmelCase_ : Optional[int] = True
check_hidden_states_output(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Any:
UpperCAmelCase_ , UpperCAmelCase_ : Any = self.model_tester.prepare_config_and_inputs_for_common()
def check_equivalence(lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : List[str]={} ):
UpperCAmelCase_ : Tuple = model(lowerCAmelCase_ , return_dict=lowerCAmelCase_ , **lowerCAmelCase_ )
UpperCAmelCase_ : Optional[Any] = model(lowerCAmelCase_ , return_dict=lowerCAmelCase_ , **lowerCAmelCase_ ).to_tuple()
def recursive_check(lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Any ):
if isinstance(lowerCAmelCase_ , (List, Tuple) ):
for tuple_iterable_value, dict_iterable_value in zip(lowerCAmelCase_ , lowerCAmelCase_ ):
recursive_check(lowerCAmelCase_ , lowerCAmelCase_ )
elif tuple_object is None:
return
else:
self.assertTrue(
all(tf.equal(lowerCAmelCase_ , lowerCAmelCase_ ) ) , msg=(
"Tuple and dict output are not equal. Difference:"
f""" {tf.math.reduce_max(tf.abs(tuple_object - dict_object ) )}"""
) , )
recursive_check(lowerCAmelCase_ , lowerCAmelCase_ )
for model_class in self.all_model_classes:
UpperCAmelCase_ : Union[str, Any] = model_class(lowerCAmelCase_ )
UpperCAmelCase_ : List[Any] = self._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_ )
UpperCAmelCase_ : str = self._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_ )
check_equivalence(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
UpperCAmelCase_ : Any = self._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_ , return_labels=lowerCAmelCase_ )
UpperCAmelCase_ : List[Any] = self._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_ , return_labels=lowerCAmelCase_ )
check_equivalence(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
UpperCAmelCase_ : Union[str, Any] = self._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_ )
UpperCAmelCase_ : str = self._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_ )
check_equivalence(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , {"output_hidden_states": True} )
UpperCAmelCase_ : List[str] = self._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_ , return_labels=lowerCAmelCase_ )
UpperCAmelCase_ : Tuple = self._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_ , return_labels=lowerCAmelCase_ )
check_equivalence(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , {"output_hidden_states": True} )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> str:
UpperCAmelCase_ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCAmelCase_ )
@slow
def _SCREAMING_SNAKE_CASE ( self : str ) -> Union[str, Any]:
for model_name in TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase_ : Any = TFRegNetModel.from_pretrained(lowerCAmelCase_ )
self.assertIsNotNone(lowerCAmelCase_ )
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_tf
@require_vision
class UpperCamelCase_ (unittest.TestCase ):
@cached_property
def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> List[Any]:
return (
AutoImageProcessor.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> Dict:
UpperCAmelCase_ : Any = TFRegNetForImageClassification.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
UpperCAmelCase_ : Union[str, Any] = self.default_image_processor
UpperCAmelCase_ : int = prepare_img()
UpperCAmelCase_ : List[Any] = image_processor(images=lowerCAmelCase_ , return_tensors="tf" )
# forward pass
UpperCAmelCase_ : Tuple = model(**lowerCAmelCase_ , training=lowerCAmelCase_ )
# verify the logits
UpperCAmelCase_ : List[str] = tf.TensorShape((1, 1_000) )
self.assertEqual(outputs.logits.shape , lowerCAmelCase_ )
UpperCAmelCase_ : Optional[Any] = tf.constant([-0.4_1_8_0, -1.5_0_5_1, -3.4_8_3_6] )
tf.debugging.assert_near(outputs.logits[0, :3] , lowerCAmelCase_ , atol=1e-4 )
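# check_equivalence above compares return_dict=True and return_dict=False
# outputs by recursing through nested tuples. The comparison logic on its own,
# stripped of the model (NumPy stands in for TF tensors):
import numpy as np


def recursive_check_demo(tuple_object, dict_object):
    if isinstance(tuple_object, (list, tuple)):
        for t, d in zip(tuple_object, dict_object):
            recursive_check_demo(t, d)
    elif tuple_object is None:
        return
    else:
        assert np.array_equal(tuple_object, dict_object)


recursive_check_demo((np.ones(3), (None, np.zeros(2))), (np.ones(3), (None, np.zeros(2))))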
| 268 | 0 |
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel
from diffusers import DDIMScheduler, LDMPipeline, UNetaDModel, VQModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class lowerCAmelCase_ ( unittest.TestCase ):
'''simple docstring'''
@property
def UpperCamelCase__ ( self ):
torch.manual_seed(0 )
snake_case_ = UNetaDModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=('''DownBlock2D''', '''AttnDownBlock2D''') , up_block_types=('''AttnUpBlock2D''', '''UpBlock2D''') , )
return model
@property
def UpperCamelCase__ ( self ):
torch.manual_seed(0 )
snake_case_ = VQModel(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=3 , )
return model
@property
def UpperCamelCase__ ( self ):
torch.manual_seed(0 )
snake_case_ = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , )
return CLIPTextModel(UpperCamelCase__ )
def UpperCamelCase__ ( self ):
snake_case_ = self.dummy_uncond_unet
snake_case_ = DDIMScheduler()
snake_case_ = self.dummy_vq_model
snake_case_ = LDMPipeline(unet=UpperCamelCase__ , vqvae=UpperCamelCase__ , scheduler=UpperCamelCase__ )
ldm.to(UpperCamelCase__ )
ldm.set_progress_bar_config(disable=UpperCamelCase__ )
snake_case_ = torch.manual_seed(0 )
snake_case_ = ldm(generator=UpperCamelCase__ , num_inference_steps=2 , output_type='''numpy''' ).images
snake_case_ = torch.manual_seed(0 )
snake_case_ = ldm(generator=UpperCamelCase__ , num_inference_steps=2 , output_type='''numpy''' , return_dict=UpperCamelCase__ )[0]
snake_case_ = image[0, -3:, -3:, -1]
snake_case_ = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
snake_case_ = np.array([0.8_512, 0.818, 0.6_411, 0.6_808, 0.4_465, 0.5_618, 0.46, 0.6_231, 0.5_172] )
snake_case_ = 1E-2 if torch_device != '''mps''' else 3E-2
assert np.abs(image_slice.flatten() - expected_slice ).max() < tolerance
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < tolerance
@slow
@require_torch
class lowerCAmelCase_ ( unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase__ ( self ):
snake_case_ = LDMPipeline.from_pretrained('''CompVis/ldm-celebahq-256''' )
ldm.to(UpperCamelCase__ )
ldm.set_progress_bar_config(disable=UpperCamelCase__ )
snake_case_ = torch.manual_seed(0 )
snake_case_ = ldm(generator=UpperCamelCase__ , num_inference_steps=5 , output_type='''numpy''' ).images
snake_case_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 2_56, 2_56, 3)
snake_case_ = np.array([0.4_399, 0.44_975, 0.46_825, 0.474, 0.4_359, 0.4_581, 0.45_095, 0.4_341, 0.4_447] )
snake_case_ = 1E-2 if torch_device != '''mps''' else 3E-2
assert np.abs(image_slice.flatten() - expected_slice ).max() < tolerance | 367 |
def find_minimum_change(denominations: list[int], value: str) -> list[int]:
    """Greedily build change for value from the largest denominations down."""
    total_value = int(value)
    # Initialize Result
    answer = []
    # Traverse denominations from largest to smallest
    for denomination in reversed(denominations):
        # Take as many of this denomination as still fit
        while total_value >= denomination:
            total_value -= denomination
            answer.append(denomination)  # Append to the answer list
    return answer
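# Greedy is optimal here because the default Indian denominations form a
# canonical coin system; for arbitrary denominations it can overshoot, e.g.
# coins [1, 3, 4] and value 6: greedy returns [4, 1, 1] while [3, 3] is
# optimal. Quick checks of both behaviours:
assert find_minimum_change([1, 2, 5, 10, 20, 50, 100, 500, 2000], "987") == [
    500, 100, 100, 100, 100, 50, 20, 10, 5, 2,
]
assert find_minimum_change([1, 3, 4], "6") == [4, 1, 1]  # greedy, not the optimal [3, 3]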
# Driver Code
if __name__ == "__main__":
UpperCAmelCase = []
UpperCAmelCase = """0"""
if (
input("""Do you want to enter your denominations ? (yY/n): """).strip().lower()
== "y"
):
UpperCAmelCase = int(input("""Enter the number of denominations you want to add: """).strip())
for i in range(0, n):
denominations.append(int(input(f'''Denomination {i}: ''').strip()))
UpperCAmelCase = input("""Enter the change you want to make in Indian Currency: """).strip()
else:
# All denominations of Indian Currency if user does not enter
UpperCAmelCase = [1, 2, 5, 10, 20, 50, 100, 500, 2000]
UpperCAmelCase = input("""Enter the change you want to make: """).strip()
if int(value) == 0 or int(value) < 0:
print("""The total value cannot be zero or negative.""")
else:
print(f'''Following is minimal change for {value}: ''')
UpperCAmelCase = find_minimum_change(denominations, value)
# Print result
for i in range(len(answer)):
print(answer[i], end=""" """) | 267 | 0 |
'''simple docstring'''
import requests
def send_slack_message(message_body: str, slack_url: str) -> None:
    """Post message_body to the given Slack incoming-webhook URL."""
    headers = {"Content-Type": "application/json"}
    response = requests.post(slack_url, json={"text": message_body}, headers=headers)
    if response.status_code != 200:
        error_message = (
            "Request to slack returned an error "
            F'''{response.status_code}, the response is:\n{response.text}'''
        )
        raise ValueError(error_message)
if __name__ == "__main__":
# Set the slack url to the one provided by Slack when you create the webhook at
# https://my.slack.com/services/new/incoming-webhook/
send_slack_message('''<YOUR MESSAGE BODY>''', '''<SLACK CHANNEL URL>''')
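# A slightly more defensive variant of the helper above: add a request timeout
# and let raise_for_status() handle non-2xx replies instead of checking the
# status code manually (a sketch with the same webhook payload format):
def send_slack_message_strict(message_body: str, slack_url: str) -> None:
    response = requests.post(
        slack_url,
        json={"text": message_body},
        headers={"Content-Type": "application/json"},
        timeout=10,
    )
    response.raise_for_status()  # raises requests.HTTPError on 4xx/5xx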
| 79 |
'''simple docstring'''
from typing import List
import numpy as np
def _number_of_shards_in_gen_kwargs(gen_kwargs) -> int:
    """Return the number of possible shards according to the input gen_kwargs."""
    # Lists of different sizes make sharding ambiguous, hence the check below.
    lists_lengths = {key: len(value) for key, value in gen_kwargs.items() if isinstance(value, list)}
    if len(set(lists_lengths.values())) > 1:
        raise RuntimeError(
            (
                "Sharding is ambiguous for this dataset: "
                + "we found several data sources lists of different lengths, and we don't know over which list we should parallelize:\n"
                + "\n".join(f"\t- key {key} has length {length}" for key, length in lists_lengths.items())
                + "\nTo fix this, check the 'gen_kwargs' and make sure to use lists only for data sources, "
                + "and use tuples otherwise. In the end there should only be one single list, or several lists with the same length."
            )
        )
    max_length = max(lists_lengths.values(), default=0)
    return max(1, max_length)


def _distribute_shards(num_shards, max_num_jobs) -> List[range]:
    """Assign each of num_shards shards to one of max_num_jobs contiguous groups."""
    shards_indices_per_group = []
    for group_idx in range(max_num_jobs):
        num_shards_to_add = num_shards // max_num_jobs + (group_idx < (num_shards % max_num_jobs))
        if num_shards_to_add == 0:
            break
        start = shards_indices_per_group[-1].stop if shards_indices_per_group else 0
        shard_indices = range(start, start + num_shards_to_add)
        shards_indices_per_group.append(shard_indices)
    return shards_indices_per_group


def _split_gen_kwargs(gen_kwargs, max_num_jobs) -> List[dict]:
    """Split gen_kwargs into at most max_num_jobs dicts, sharding every list value."""
    num_shards = _number_of_shards_in_gen_kwargs(gen_kwargs)
    if num_shards == 1:
        return [dict(gen_kwargs)]
    else:
        shard_indices_per_group = _distribute_shards(num_shards=num_shards, max_num_jobs=max_num_jobs)
        return [
            {
                key: [value[shard_idx] for shard_idx in shard_indices_per_group[group_idx]]
                if isinstance(value, list)
                else value
                for key, value in gen_kwargs.items()
            }
            for group_idx in range(len(shard_indices_per_group))
        ]


def _merge_gen_kwargs(gen_kwargs_list) -> dict:
    """Inverse of _split_gen_kwargs: concatenate the list values back together."""
    return {
        key: [value for gen_kwargs in gen_kwargs_list for value in gen_kwargs[key]]
        if isinstance(gen_kwargs_list[0][key], list)
        else gen_kwargs_list[0][key]
        for key in gen_kwargs_list[0]
    }


def _shuffle_gen_kwargs(rng, gen_kwargs) -> dict:
    """Shuffle all list values of gen_kwargs; lists of equal size share one permutation."""
    list_sizes = {len(value) for value in gen_kwargs.values() if isinstance(value, list)}
    indices_per_size = {}
    for size in list_sizes:
        indices_per_size[size] = list(range(size))
        rng.shuffle(indices_per_size[size])
    # Now let's copy the gen_kwargs and shuffle the lists based on their sizes
    shuffled_kwargs = dict(gen_kwargs)
    for key, value in shuffled_kwargs.items():
        if isinstance(value, list):
            shuffled_kwargs[key] = [value[i] for i in indices_per_size[len(value)]]
    return shuffled_kwargs
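# How the helpers fit together: 10 shards over 3 jobs split 4/3/3, and
# splitting then merging gen_kwargs round-trips the shard lists:
if __name__ == "__main__":
    assert _distribute_shards(num_shards=10, max_num_jobs=3) == [range(0, 4), range(4, 7), range(7, 10)]
    demo_kwargs = {"files": [f"shard_{i}.txt" for i in range(10)], "split": "train"}
    splits = _split_gen_kwargs(demo_kwargs, max_num_jobs=3)
    assert [len(s["files"]) for s in splits] == [4, 3, 3]
    assert _merge_gen_kwargs(splits) == demo_kwargs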
| 79 | 1 |
# XXX: we want transformers master here - in the absense of conftest manipulating sys.path:
# hack it in for now:
import sys
from pathlib import Path
git_repo_path = Path(__file__).resolve().parents[3] / 'src'
sys.path.insert(1, str(git_repo_path))
import dataclasses # noqa
import io # noqa
import itertools # noqa
import json # noqa
import os # noqa
import unittest # noqa
from copy import deepcopy # noqa
from parameterized import parameterized # noqa
from transformers import TrainingArguments, is_torch_available # noqa
from transformers.deepspeed import is_deepspeed_available # noqa
from transformers.file_utils import WEIGHTS_NAME # noqa
from transformers.testing_utils import ( # noqa
CaptureLogger,
ExtendSysPath,
TestCasePlus,
execute_subprocess_async,
get_gpu_count,
mockenv_context,
require_deepspeed,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
from transformers.trainer_utils import set_seed # noqa
set_seed(42)
models = {'base': 'patrickvonplaten/wav2vec2_tiny_random', 'robust': 'patrickvonplaten/wav2vec2_tiny_random_robust'}
ZERO2 = 'zero2'
ZERO3 = 'zero3'
stages = [ZERO2, ZERO3]
def custom_name_func(func, param_num, param) -> str:
    # customize the test name generator function as we want both params to appear in the sub-test
    # name, as by default it shows only the first param
    param_based_name = parameterized.to_safe_name('_'.join(str(x) for x in param.args))
    return f'{func.__name__}_{param_based_name}'
# Cartesian-product of zero stages with models to test
params = list(itertools.product(stages, models.keys()))
@slow
@require_deepspeed
@require_torch_gpu
class __a ( __UpperCamelCase ):
@parameterized.expand(lowerCAmelCase__ , name_func=lowerCAmelCase__ )
def SCREAMING_SNAKE_CASE__ ( self , lowerCAmelCase__ , lowerCAmelCase__ ) -> Dict:
'''simple docstring'''
self.run_and_check(
stage=lowerCAmelCase__ , model=lowerCAmelCase__ , distributed=lowerCAmelCase__ , fpaa=lowerCAmelCase__ , )
@require_torch_multi_gpu
@parameterized.expand(lowerCAmelCase__ , name_func=lowerCAmelCase__ )
def SCREAMING_SNAKE_CASE__ ( self , lowerCAmelCase__ , lowerCAmelCase__ ) -> Tuple:
'''simple docstring'''
self.run_and_check(
stage=lowerCAmelCase__ , model=lowerCAmelCase__ , distributed=lowerCAmelCase__ , fpaa=lowerCAmelCase__ , )
@parameterized.expand(lowerCAmelCase__ , name_func=lowerCAmelCase__ )
def SCREAMING_SNAKE_CASE__ ( self , lowerCAmelCase__ , lowerCAmelCase__ ) -> int:
'''simple docstring'''
self.run_and_check(
stage=lowerCAmelCase__ , model=lowerCAmelCase__ , distributed=lowerCAmelCase__ , fpaa=lowerCAmelCase__ , )
@require_torch_multi_gpu
@parameterized.expand(lowerCAmelCase__ , name_func=lowerCAmelCase__ )
def SCREAMING_SNAKE_CASE__ ( self , lowerCAmelCase__ , lowerCAmelCase__ ) -> str:
'''simple docstring'''
self.run_and_check(
stage=lowerCAmelCase__ , model=lowerCAmelCase__ , distributed=lowerCAmelCase__ , fpaa=lowerCAmelCase__ , )
def SCREAMING_SNAKE_CASE__ ( self , lowerCAmelCase__ ) -> Optional[int]:
'''simple docstring'''
pass
def SCREAMING_SNAKE_CASE__ ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = 10 , lowerCAmelCase__ = True , lowerCAmelCase__ = True , lowerCAmelCase__ = True , ) -> Tuple:
'''simple docstring'''
lowercase__: Dict = models[model]
lowercase__: Union[str, Any] = self.run_trainer(
stage=lowerCAmelCase__ , model_name=lowerCAmelCase__ , eval_steps=lowerCAmelCase__ , num_train_epochs=1 , distributed=lowerCAmelCase__ , fpaa=lowerCAmelCase__ , )
self.do_checks(lowerCAmelCase__ )
return output_dir
def SCREAMING_SNAKE_CASE__ ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = 10 , lowerCAmelCase__ = 1 , lowerCAmelCase__ = True , lowerCAmelCase__ = True , ) -> str:
'''simple docstring'''
lowercase__: int = self.get_auto_remove_tmp_dir('./xxx' , after=lowerCAmelCase__ )
lowercase__: str = F'\n --model_name_or_path {model_name}\n --dataset_name hf-internal-testing/librispeech_asr_dummy\n --dataset_config_name clean\n --train_split_name validation\n --validation_split_name validation\n --output_dir {output_dir}\n --num_train_epochs {str(lowerCAmelCase__ )}\n --per_device_train_batch_size 2\n --per_device_eval_batch_size 2\n --evaluation_strategy steps\n --learning_rate 5e-4\n --warmup_steps 8\n --orthography timit\n --preprocessing_num_workers 1\n --group_by_length\n --freeze_feature_extractor\n --report_to none\n --save_steps 0\n --eval_steps {eval_steps}\n --report_to none\n '.split()
if fpaa:
args.extend(['--fp16'] )
# currently ds_config_wav2vec2_zero.json requires "zero_optimization.find_unused_parameters": true,
# hence the separate config files
lowercase__: Dict = F'--deepspeed {self.test_file_dir_str}/ds_config_wav2vec2_{stage}.json'.split()
lowercase__: Optional[int] = [F'{self.examples_dir_str}/research_projects/wav2vec2/run_asr.py']
lowercase__: Optional[Any] = self.get_launcher(lowerCAmelCase__ )
lowercase__: str = launcher + script + args + ds_args
# keep for quick debug
# print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die
execute_subprocess_async(lowerCAmelCase__ , env=self.get_env() )
return output_dir
def SCREAMING_SNAKE_CASE__ ( self , lowerCAmelCase__=False ) -> Union[str, Any]:
'''simple docstring'''
lowercase__: Tuple = min(2 , get_gpu_count() ) if distributed else 1
return F'deepspeed --num_nodes 1 --num_gpus {num_gpus}'.split()
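# Sanity sketch of the test matrix built above: the cartesian product of ZeRO
# stages and model variants yields one sub-test per combination, and
# custom_name_func embeds both values in the generated test name:
assert list(itertools.product(["zero2", "zero3"], ["base", "robust"])) == [
    ("zero2", "base"), ("zero2", "robust"), ("zero3", "base"), ("zero3", "robust"),
]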
| 350 |
from __future__ import annotations
from numpy import array, cos, cross, float64, radians, sin
from numpy.typing import NDArray


def polar_force(magnitude: float, angle: float, radian_mode: bool = False) -> list[float]:
    """Resolve a force given as (magnitude, angle) into components [Fx, Fy]."""
    if radian_mode:
        return [magnitude * cos(angle), magnitude * sin(angle)]
    return [magnitude * cos(radians(angle)), magnitude * sin(radians(angle))]


def in_static_equilibrium(forces: NDArray[float64], location: NDArray[float64], eps: float = 10**-1) -> bool:
    """Check that the net moment of a planar force system is numerically zero."""
    # moment of each force about the origin: z-component of r x F
    moments: NDArray[float64] = cross(location, forces)
    sum_moments: float = sum(moments)
    return abs(sum_moments) < eps
if __name__ == "__main__":
# Test to check if it works
__lowerCAmelCase = array(
[
polar_force(718.4, 1_80 - 30),
polar_force(879.54, 45),
polar_force(1_00, -90),
]
)
__lowerCAmelCase = array([[0, 0], [0, 0], [0, 0]])
assert in_static_equilibrium(forces, location)
# Problem 1 in image_data/2D_problems.jpg
__lowerCAmelCase = array(
[
polar_force(30 * 9.81, 15),
polar_force(2_15, 1_80 - 45),
polar_force(2_64, 90 - 30),
]
)
__lowerCAmelCase = array([[0, 0], [0, 0], [0, 0]])
assert in_static_equilibrium(forces, location)
# Problem in image_data/2D_problems_1.jpg
__lowerCAmelCase = array([[0, -20_00], [0, -12_00], [0, 1_56_00], [0, -1_24_00]])
__lowerCAmelCase = array([[0, 0], [6, 0], [10, 0], [12, 0]])
assert in_static_equilibrium(forces, location)
import doctest
doctest.testmod()
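# For contrast with the asserts above, a force couple that is *not* in
# equilibrium: equal and opposite forces applied 5 units apart leave a net
# moment of magnitude 50, far above eps:
couple_forces = array([[0, 10.0], [0, -10.0]])
couple_location = array([[0, 0], [5.0, 0]])
assert not in_static_equilibrium(couple_forces, couple_location)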
| 288 | 0 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import LEDConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFLEDForConditionalGeneration, TFLEDModel
@require_tf
class _UpperCAmelCase :
_lowerCAmelCase : Tuple = LEDConfig
_lowerCAmelCase : List[Any] = {}
_lowerCAmelCase : Tuple = """gelu"""
def __init__( self : Any , lowercase_ : Optional[int] , lowercase_ : Tuple=13 , lowercase_ : Union[str, Any]=7 , lowercase_ : str=True , lowercase_ : Tuple=False , lowercase_ : List[str]=99 , lowercase_ : str=32 , lowercase_ : Any=2 , lowercase_ : str=4 , lowercase_ : List[str]=37 , lowercase_ : List[str]=0.1 , lowercase_ : List[str]=0.1 , lowercase_ : Dict=20 , lowercase_ : Any=2 , lowercase_ : int=1 , lowercase_ : Union[str, Any]=0 , lowercase_ : Optional[Any]=4 , ):
snake_case_ : List[str] = parent
snake_case_ : Union[str, Any] = batch_size
snake_case_ : Dict = seq_length
snake_case_ : Optional[Any] = is_training
snake_case_ : List[Any] = use_labels
snake_case_ : Any = vocab_size
snake_case_ : str = hidden_size
snake_case_ : Union[str, Any] = num_hidden_layers
snake_case_ : List[Any] = num_attention_heads
snake_case_ : List[str] = intermediate_size
snake_case_ : Union[str, Any] = hidden_dropout_prob
snake_case_ : int = attention_probs_dropout_prob
snake_case_ : Union[str, Any] = max_position_embeddings
snake_case_ : List[Any] = eos_token_id
snake_case_ : Any = pad_token_id
snake_case_ : Tuple = bos_token_id
snake_case_ : Union[str, Any] = attention_window
# `ModelTesterMixin.test_attention_outputs` is expecting attention tensors to be of size
# [num_attention_heads, encoder_seq_length, encoder_key_length], but TFLongformerSelfAttention
# returns attention of shape [num_attention_heads, encoder_seq_length, self.attention_window + 1]
# because its local attention only attends to `self.attention_window` and one before and one after
snake_case_ : Optional[int] = self.attention_window + 2
# because of padding `encoder_seq_length`, is different from `seq_length`. Relevant for
# the `test_attention_outputs` and `test_hidden_states_output` tests
snake_case_ : int = (
self.seq_length + (self.attention_window - self.seq_length % self.attention_window) % self.attention_window
)
    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)
        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        config = self.config_cls(
            vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_ids=[2], bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.pad_token_id, attention_window=self.attention_window, **self.config_updates,
        )
        inputs_dict = prepare_led_inputs_dict(config, input_ids, decoder_input_ids)
        global_attention_mask = tf.concat(
            [tf.zeros_like(input_ids)[:, :-1], tf.ones_like(input_ids)[:, -1:]], axis=-1,
        )
        inputs_dict["global_attention_mask"] = global_attention_mask
        return config, inputs_dict
    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFLEDModel(config=config).get_decoder()
        input_ids = inputs_dict["input_ids"]
        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        self.batch_size = 1
        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, use_cache=True)
        output, past_key_values = outputs.to_tuple()
        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)
        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)
        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[0]
        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])
        # select random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]
        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_past_slice, output_from_no_past_slice, rtol=1e-3)
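        # Note: the two forward passes above must agree -- running the full
        # sequence at once and running only the new tokens with the cached
        # past_key_values should produce (numerically) the same logits.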
def prepare_led_inputs_dict(config, input_ids, decoder_input_ids, attention_mask=None, decoder_attention_mask=None, head_mask=None, decoder_head_mask=None):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "attention_mask": attention_mask,
        "decoder_input_ids": decoder_input_ids,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
    }
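# Hedged usage sketch (illustrative, not part of the original tests): the helper
# returns a dict with keys input_ids, attention_mask, decoder_input_ids,
# decoder_attention_mask, head_mask and decoder_head_mask, with the masks
# derived from `config.pad_token_id` whenever they are not supplied.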
@require_tf
class TFLEDModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFLEDForConditionalGeneration, TFLEDModel) if is_tf_available() else ()
    all_generative_model_classes = (TFLEDForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": TFLEDForConditionalGeneration,
            "feature-extraction": TFLEDModel,
            "summarization": TFLEDForConditionalGeneration,
            "text2text-generation": TFLEDForConditionalGeneration,
            "translation": TFLEDForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_headmasking = False
    test_onnx = False
    def setUp(self):
        self.model_tester = TFLEDModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LEDConfig)
    def test_config(self):
        self.config_tester.run_common_tests()
    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)
    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        inputs_dict["global_attention_mask"] = tf.zeros_like(inputs_dict["attention_mask"])
        num_global_attn_indices = 2
        inputs_dict["global_attention_mask"] = tf.where(
            tf.range(self.model_tester.seq_length)[None, :] < num_global_attn_indices, 1, inputs_dict["global_attention_mask"],
        )
        config.return_dict = True
        seq_length = self.model_tester.seq_length
        encoder_seq_length = self.model_tester.encoder_seq_length
        def check_decoder_attentions_output(outputs):
            decoder_attentions = outputs.decoder_attentions
            self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(decoder_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, seq_length, seq_length],
            )
        def check_encoder_attentions_output(outputs):
            attentions = [t.numpy() for t in outputs.encoder_attentions]
            global_attentions = [t.numpy() for t in outputs.encoder_global_attentions]
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
            self.assertEqual(len(global_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, seq_length, seq_length],
            )
            self.assertListEqual(
                list(global_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, encoder_seq_length, num_global_attn_indices],
            )
        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = False
            config.output_hidden_states = False
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            out_len = len(outputs)
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)
            if self.is_encoder_decoder:
                model = model_class(config)
                outputs = model(self._prepare_for_class(inputs_dict, model_class))
                self.assertEqual(config.output_hidden_states, False)
                check_decoder_attentions_output(outputs)
            # Check that output attentions can also be changed via the config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)
            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            config.output_hidden_states = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1), len(outputs))
            self.assertEqual(model.config.output_hidden_states, True)
            check_encoder_attentions_output(outputs)
    @unittest.skip("LED keeps using potentially symbolic tensors in conditionals and breaks tracing.")
    def test_saved_model_creation(self):
        pass
    def test_generate_with_headmasking(self):
        # TODO: Head-masking not yet implement
        pass
def _long_tensor(tok_lst):
    return tf.constant(tok_lst, dtype=tf.int32)
TOLERANCE = 1e-4
@slow
@require_tf
class TFLEDModelIntegrationTest(unittest.TestCase):
    def test_inference_no_head(self):
        model = TFLEDForConditionalGeneration.from_pretrained("allenai/led-base-16384").led
        # change to intended input here
        input_ids = _long_tensor([512 * [0, 31414, 232, 328, 740, 1140, 12695, 69]])
        decoder_input_ids = _long_tensor([128 * [0, 31414, 232, 328, 740, 1140, 12695, 69]])
        inputs_dict = prepare_led_inputs_dict(model.config, input_ids, decoder_input_ids)
        output = model(**inputs_dict)[0]
        expected_shape = (1, 1024, 768)
        self.assertEqual(output.shape, expected_shape)
        # change to expected output here
        expected_slice = tf.convert_to_tensor(
            [[2.3050, 2.8279, 0.6531], [-1.8457, -0.1455, -3.5661], [-1.0186, 0.4586, -2.2043]],
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-3)
    def test_inference_with_lm_head(self):
        model = TFLEDForConditionalGeneration.from_pretrained("allenai/led-base-16384")
        # change to intended input here
        input_ids = _long_tensor([512 * [0, 31414, 232, 328, 740, 1140, 12695, 69]])
        decoder_input_ids = _long_tensor([128 * [0, 31414, 232, 328, 740, 1140, 12695, 69]])
        inputs_dict = prepare_led_inputs_dict(model.config, input_ids, decoder_input_ids)
        output = model(**inputs_dict)[0]
        expected_shape = (1, 1024, model.config.vocab_size)
        self.assertEqual(output.shape, expected_shape)
        # change to expected output here
        expected_slice = tf.convert_to_tensor(
            [[33.6507, 6.4572, 16.8089], [5.8739, -2.4238, 11.2902], [-3.2139, -4.3149, 4.2783]],
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-3, rtol=1e-3)
| 264 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_pegasus import PegasusTokenizer
else:
    PegasusTokenizer = None
logger = logging.get_logger(__name__)
SPIECE_UNDERLINE = "▁"
VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {"google/pegasus-xsum": "https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model"},
    "tokenizer_file": {
        "google/pegasus-xsum": "https://huggingface.co/google/pegasus-xsum/resolve/main/tokenizer.json"
    },
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/pegasus-xsum": 512,
}
class PegasusTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = PegasusTokenizer
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(self, vocab_file=None, tokenizer_file=None, pad_token="<pad>", eos_token="</s>", unk_token="<unk>", mask_token="<mask_2>", mask_token_sent="<mask_1>", additional_special_tokens=None, offset=103, **kwargs):
        self.offset = offset
        if additional_special_tokens is not None:
            if not isinstance(additional_special_tokens, list):
                raise TypeError(
                    f"additional_special_tokens should be of type {type(list)}, but is"
                    f" {type(additional_special_tokens)}"
                )
            additional_special_tokens_extended = (
                ([mask_token_sent] + additional_special_tokens)
                if mask_token_sent not in additional_special_tokens and mask_token_sent is not None
                else additional_special_tokens
            )
            # fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken
            additional_special_tokens_extended += [
                f"<unk_{i}>" for i in range(len(additional_special_tokens_extended), self.offset - 1)
            ]
            if len(set(additional_special_tokens_extended)) != len(additional_special_tokens_extended):
                raise ValueError(
                    "Please make sure that the provided additional_special_tokens do not contain an incorrectly"
                    f" shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}."
                )
            additional_special_tokens = additional_special_tokens_extended
        else:
            additional_special_tokens = [mask_token_sent] if mask_token_sent is not None else []
            additional_special_tokens += [f"<unk_{i}>" for i in range(2, self.offset)]
        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, pad_token=pad_token, eos_token=eos_token, unk_token=unk_token, mask_token=mask_token, mask_token_sent=mask_token_sent, offset=offset, additional_special_tokens=additional_special_tokens, **kwargs,
        )
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
    def _special_token_mask(self, seq):
        all_special_ids = set(self.all_special_ids)  # call it once instead of inside list comp
        all_special_ids.remove(self.unk_token_id)  # <unk> is only sometimes special
        if all_special_ids != set(range(len(self.additional_special_tokens) + 3)):
            raise ValueError(
                "There should be 3 special tokens: mask_token, pad_token, and eos_token +"
                f" {len(self.additional_special_tokens)} additional_special_tokens, but got {all_special_ids}"
            )
        return [1 if x in all_special_ids else 0 for x in seq]
    def get_special_tokens_mask(self, token_ids_0: List, token_ids_1: Optional[List] = None, already_has_special_tokens: bool = False) -> List[int]:
        if already_has_special_tokens:
            return self._special_token_mask(token_ids_0)
        elif token_ids_1 is None:
            return self._special_token_mask(token_ids_0) + [1]
        else:
            return self._special_token_mask(token_ids_0 + token_ids_1) + [1]
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        if token_ids_1 is None:
            return token_ids_0 + [self.eos_token_id]
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return token_ids_0 + token_ids_1 + [self.eos_token_id]
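    # Illustrative behavior: build_inputs_with_special_tokens([5, 6]) returns
    # [5, 6, eos_token_id]; the (rarely used) pair form returns
    # token_ids_0 + token_ids_1 + [eos_token_id].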
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
| 264 | 1 |
from typing import List
import jiwer
import jiwer.transforms as tr
from packaging import version
import datasets
from datasets.config import PY_VERSION
if PY_VERSION < version.parse('''3.8'''):
import importlib_metadata
else:
import importlib.metadata as importlib_metadata
SENTENCE_DELIMITER = ""
if version.parse(importlib_metadata.version("jiwer")) < version.parse("2.3.0"):
    class SentencesToListOfCharacters(tr.AbstractTransform):
        def __init__(self, sentence_delimiter: str = " "):
            self.sentence_delimiter = sentence_delimiter
        def process_string(self, s: str):
            return list(s)
        def process_list(self, inp: List[str]):
            chars = []
            for sent_idx, sentence in enumerate(inp):
                chars.extend(self.process_string(sentence))
                if self.sentence_delimiter is not None and self.sentence_delimiter != "" and sent_idx < len(inp) - 1:
                    chars.append(self.sentence_delimiter)
            return chars
    cer_transform = tr.Compose(
        [tr.RemoveMultipleSpaces(), tr.Strip(), SentencesToListOfCharacters(SENTENCE_DELIMITER)]
    )
else:
    cer_transform = tr.Compose(
        [
            tr.RemoveMultipleSpaces(),
            tr.Strip(),
            tr.ReduceToSingleSentence(SENTENCE_DELIMITER),
            tr.ReduceToListOfListOfChars(),
        ]
    )
_CITATION = '''\
@inproceedings{inproceedings,
author = {Morris, Andrew and Maier, Viktoria and Green, Phil},
year = {2004},
month = {01},
pages = {},
title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}
}
'''
_DESCRIPTION = '''\
Character error rate (CER) is a common metric of the performance of an automatic speech recognition system.
CER is similar to Word Error Rate (WER), but operates on character instead of word. Please refer to docs of WER for further information.
Character error rate can be computed as:
CER = (S + D + I) / N = (S + D + I) / (S + D + C)
where
S is the number of substitutions,
D is the number of deletions,
I is the number of insertions,
C is the number of correct characters,
N is the number of characters in the reference (N=S+D+C).
CER\'s output is not always a number between 0 and 1, in particular when there is a high number of insertions. This value is often associated to the percentage of characters that were incorrectly predicted. The lower the value, the better the
performance of the ASR system with a CER of 0 being a perfect score.
'''
_KWARGS_DESCRIPTION = '''
Computes CER score of transcribed segments against references.
Args:
references: list of references for each speech input.
predictions: list of transcriptions to score.
concatenate_texts: Whether or not to concatenate sentences before evaluation, set to True for more accurate result.
Returns:
(float): the character error rate
Examples:
>>> predictions = ["this is the prediction", "there is an other sample"]
>>> references = ["this is the reference", "there is another one"]
>>> cer = datasets.load_metric("cer")
>>> cer_score = cer.compute(predictions=predictions, references=references)
>>> print(cer_score)
0.34146341463414637
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class CER(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION, citation=_CITATION, inputs_description=_KWARGS_DESCRIPTION, features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ), codebase_urls=["https://github.com/jitsi/jiwer/"], reference_urls=[
                "https://en.wikipedia.org/wiki/Word_error_rate",
                "https://sites.google.com/site/textdigitisation/qualitymeasures/computingerrorrates",
            ],
        )
    def _compute(self, predictions, references, concatenate_texts=False):
        if concatenate_texts:
            return jiwer.compute_measures(
                references, predictions, truth_transform=cer_transform, hypothesis_transform=cer_transform,
            )["wer"]
        incorrect = 0
        total = 0
        for prediction, reference in zip(predictions, references):
            measures = jiwer.compute_measures(
                reference, prediction, truth_transform=cer_transform, hypothesis_transform=cer_transform,
            )
            incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
            total += measures["substitutions"] + measures["deletions"] + measures["hits"]
        return incorrect / total
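# Worked example for the CER formula in the docstring above (illustrative):
# reference "abc" vs. prediction "axcd" gives S=1 ("b" -> "x"), D=0, I=1 ("d"),
# and N=3 reference characters, so CER = (1 + 0 + 1) / 3 = 2/3.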
| 78 | from collections import OrderedDict
from typing import List, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__snake_case = logging.get_logger(__name__)
__snake_case = {
'''google/efficientnet-b7''': '''https://huggingface.co/google/efficientnet-b7/resolve/main/config.json''',
}
class EfficientNetConfig(PretrainedConfig):
    model_type = "efficientnet"
    def __init__(self, num_channels: int = 3, image_size: int = 600, width_coefficient: float = 2.0, depth_coefficient: float = 3.1, depth_divisor: int = 8, kernel_sizes: List[int] = [3, 3, 5, 3, 5, 5, 3], in_channels: List[int] = [32, 16, 24, 40, 80, 112, 192], out_channels: List[int] = [16, 24, 40, 80, 112, 192, 320], depthwise_padding: List[int] = [], strides: List[int] = [1, 2, 2, 2, 1, 2, 1], num_block_repeats: List[int] = [1, 2, 2, 3, 3, 4, 1], expand_ratios: List[int] = [1, 6, 6, 6, 6, 6, 6], squeeze_expansion_ratio: float = 0.25, hidden_act: str = "swish", hidden_dim: int = 2560, pooling_type: str = "mean", initializer_range: float = 0.02, batch_norm_eps: float = 0.001, batch_norm_momentum: float = 0.99, dropout_rate: float = 0.5, drop_connect_rate: float = 0.2, **kwargs) -> None:
        super().__init__(**kwargs)
        self.num_channels = num_channels
        self.image_size = image_size
        self.width_coefficient = width_coefficient
        self.depth_coefficient = depth_coefficient
        self.depth_divisor = depth_divisor
        self.kernel_sizes = kernel_sizes
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.depthwise_padding = depthwise_padding
        self.strides = strides
        self.num_block_repeats = num_block_repeats
        self.expand_ratios = expand_ratios
        self.squeeze_expansion_ratio = squeeze_expansion_ratio
        self.hidden_act = hidden_act
        self.hidden_dim = hidden_dim
        self.pooling_type = pooling_type
        self.initializer_range = initializer_range
        self.batch_norm_eps = batch_norm_eps
        self.batch_norm_momentum = batch_norm_momentum
        self.dropout_rate = dropout_rate
        self.drop_connect_rate = drop_connect_rate
        self.num_hidden_layers = sum(num_block_repeats) * 4
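# Hedged usage sketch: the defaults above match the EfficientNet-B7 checkpoint
# referenced in the archive map, e.g.
#   config = EfficientNetConfig(width_coefficient=2.0, depth_coefficient=3.1)
#   config.num_hidden_layers  # -> sum([1, 2, 2, 3, 3, 4, 1]) * 4 == 64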
class EfficientNetOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )
    @property
    def atol_for_validation(self) -> float:
        return 1e-5
| 78 | 1 |
'''simple docstring'''
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import AutoTokenizer, BarkProcessor
from transformers.testing_utils import require_torch, slow
@require_torch
class BarkProcessorTest(unittest.TestCase):
    def setUp(self):
        self.checkpoint = "ylacombe/bark-small"
        self.tmpdirname = tempfile.mkdtemp()
        self.voice_preset = "en_speaker_1"
        self.input_string = "This is a test string"
        self.speaker_embeddings_dict_path = "speaker_embeddings_path.json"
        self.speaker_embeddings_directory = "speaker_embeddings"
    def get_tokenizer(self, **kwargs):
        return AutoTokenizer.from_pretrained(self.checkpoint, **kwargs)
    def tearDown(self):
        shutil.rmtree(self.tmpdirname)
    def test_save_load_pretrained_default(self):
        tokenizer = self.get_tokenizer()
        processor = BarkProcessor(tokenizer=tokenizer)
        processor.save_pretrained(self.tmpdirname)
        processor = BarkProcessor.from_pretrained(self.tmpdirname)
        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab())
    @slow
    def test_save_load_pretrained_additional_features(self):
        processor = BarkProcessor.from_pretrained(
            pretrained_processor_name_or_path=self.checkpoint, speaker_embeddings_dict_path=self.speaker_embeddings_dict_path,
        )
        processor.save_pretrained(
            self.tmpdirname, speaker_embeddings_dict_path=self.speaker_embeddings_dict_path, speaker_embeddings_directory=self.speaker_embeddings_directory,
        )
        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        processor = BarkProcessor.from_pretrained(
            self.tmpdirname, self.speaker_embeddings_dict_path, bos_token="(BOS)", eos_token="(EOS)",
        )
        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
    def test_speaker_embeddings(self):
        processor = BarkProcessor.from_pretrained(
            pretrained_processor_name_or_path=self.checkpoint, speaker_embeddings_dict_path=self.speaker_embeddings_dict_path,
        )
        seq_len = 35
        nb_codebooks_coarse = 2
        nb_codebooks_total = 8
        voice_preset = {
            "semantic_prompt": np.ones(seq_len),
            "coarse_prompt": np.ones((nb_codebooks_coarse, seq_len)),
            "fine_prompt": np.ones((nb_codebooks_total, seq_len)),
        }
        # test providing already loaded voice_preset
        inputs = processor(text=self.input_string, voice_preset=voice_preset)
        processed_voice_preset = inputs["history_prompt"]
        for key in voice_preset:
            self.assertListEqual(voice_preset[key].tolist(), processed_voice_preset.get(key, np.array([])).tolist())
        # test loading voice preset from npz file
        file_path = os.path.join(self.tmpdirname, "file.npz")
        np.savez(file_path, **voice_preset)
        inputs = processor(text=self.input_string, voice_preset=file_path)
        processed_voice_preset = inputs["history_prompt"]
        for key in voice_preset:
            self.assertListEqual(voice_preset[key].tolist(), processed_voice_preset.get(key, np.array([])).tolist())
        # test loading voice preset from the hub
        inputs = processor(text=self.input_string, voice_preset=self.voice_preset)
    def test_tokenizer(self):
        tokenizer = self.get_tokenizer()
        processor = BarkProcessor(tokenizer=tokenizer)
        encoded_processor = processor(text=self.input_string)
        encoded_tok = tokenizer(
            self.input_string, padding="max_length", max_length=256, add_special_tokens=False, return_attention_mask=True, return_token_type_ids=False,
        )
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key].squeeze().tolist())
| 344 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {'processing_layoutxlm': ['LayoutXLMProcessor']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_layoutxlm'] = ['LayoutXLMTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_layoutxlm_fast'] = ['LayoutXLMTokenizerFast']
if TYPE_CHECKING:
from .processing_layoutxlm import LayoutXLMProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutxlm import LayoutXLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutxlm_fast import LayoutXLMTokenizerFast
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 344 | 1 |
import numpy as np
def sigmoid(vector: np.ndarray) -> np.ndarray:
    return 1 / (1 + np.exp(-vector))
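# Illustrative values (computed from the formula above):
#   sigmoid(np.array([-1.0, 0.0, 2.0])) -> array([0.26894142, 0.5, 0.88079708])
# i.e. 0 maps to 0.5 and the output saturates toward 0 and 1 for large |x|.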
if __name__ == "__main__":
import doctest
doctest.testmod() | 351 |
import warnings
from ...utils import logging
from .image_processing_deformable_detr import DeformableDetrImageProcessor
lowercase : Optional[Any] = logging.get_logger(__name__)
class DeformableDetrFeatureExtractor(DeformableDetrImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class DeformableDetrFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use DeformableDetrImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs) | 171 | 0 |
import gc
import unittest
from diffusers import FlaxDPMSolverMultistepScheduler, FlaxStableDiffusionPipeline
from diffusers.utils import is_flax_available, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class FlaxStableDiffusion2PipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
    def test_stable_diffusion_flax(self):
        sd_pipe, params = FlaxStableDiffusionPipeline.from_pretrained(
            "stabilityai/stable-diffusion-2", revision="bf16", dtype=jnp.bfloat16,
        )
        prompt = "A painting of a squirrel eating a burger"
        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = sd_pipe.prepare_inputs(prompt)
        params = replicate(params)
        prompt_ids = shard(prompt_ids)
        prng_seed = jax.random.PRNGKey(0)
        prng_seed = jax.random.split(prng_seed, jax.device_count())
        images = sd_pipe(prompt_ids, params, prng_seed, num_inference_steps=25, jit=True)[0]
        assert images.shape == (jax.device_count(), 1, 768, 768, 3)
        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
        image_slice = images[0, 253:256, 253:256, -1]
        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array([0.4238, 0.4414, 0.4395, 0.4453, 0.4629, 0.4590, 0.4531, 0.45508, 0.4512])
        print(f"output_slice: {output_slice}")
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2
    def test_stable_diffusion_dpm_flax(self):
        model_id = "stabilityai/stable-diffusion-2"
        scheduler, scheduler_params = FlaxDPMSolverMultistepScheduler.from_pretrained(model_id, subfolder="scheduler")
        sd_pipe, params = FlaxStableDiffusionPipeline.from_pretrained(
            model_id, scheduler=scheduler, revision="bf16", dtype=jnp.bfloat16,
        )
        params["scheduler"] = scheduler_params
        prompt = "A painting of a squirrel eating a burger"
        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = sd_pipe.prepare_inputs(prompt)
        params = replicate(params)
        prompt_ids = shard(prompt_ids)
        prng_seed = jax.random.PRNGKey(0)
        prng_seed = jax.random.split(prng_seed, jax.device_count())
        images = sd_pipe(prompt_ids, params, prng_seed, num_inference_steps=25, jit=True)[0]
        assert images.shape == (jax.device_count(), 1, 768, 768, 3)
        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
        image_slice = images[0, 253:256, 253:256, -1]
        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array([0.4336, 0.42969, 0.4453, 0.4199, 0.4297, 0.4531, 0.4434, 0.4434, 0.4297])
        print(f"output_slice: {output_slice}")
assert jnp.abs(output_slice - expected_slice ).max() < 1e-2 | 76 |
'''simple docstring'''
import unittest
import numpy as np
import torch
from diffusers import PNDMPipeline, PNDMScheduler, UNet2DModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class PNDMPipelineFastTests(unittest.TestCase):
    @property
    def dummy_uncond_unet(self):
        torch.manual_seed(0)
        model = UNet2DModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=3, out_channels=3, down_block_types=("DownBlock2D", "AttnDownBlock2D"), up_block_types=("AttnUpBlock2D", "UpBlock2D"),
        )
        return model
    def test_inference(self):
        unet = self.dummy_uncond_unet
        scheduler = PNDMScheduler()
        pndm = PNDMPipeline(unet=unet, scheduler=scheduler)
        pndm.to(torch_device)
        pndm.set_progress_bar_config(disable=None)
        generator = torch.manual_seed(0)
        image = pndm(generator=generator, num_inference_steps=20, output_type="numpy").images
        generator = torch.manual_seed(0)
        image_from_tuple = pndm(generator=generator, num_inference_steps=20, output_type="numpy", return_dict=False)[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
@slow
@require_torch
class PNDMPipelineIntegrationTests(unittest.TestCase):
    def test_inference_cifar10(self):
        model_id = "google/ddpm-cifar10-32"
        unet = UNet2DModel.from_pretrained(model_id)
        scheduler = PNDMScheduler()
        pndm = PNDMPipeline(unet=unet, scheduler=scheduler)
        pndm.to(torch_device)
        pndm.set_progress_bar_config(disable=None)
        generator = torch.manual_seed(0)
        image = pndm(generator=generator, output_type="numpy").images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.1564, 0.14645, 0.1406, 0.14715, 0.12425, 0.14045, 0.13115, 0.12175, 0.125])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
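# Hedged usage sketch (mirrors the integration test above; outputs will vary by hardware):
#   pipe = PNDMPipeline(unet=UNet2DModel.from_pretrained("google/ddpm-cifar10-32"), scheduler=PNDMScheduler())
#   image = pipe(generator=torch.manual_seed(0), output_type="numpy").images[0]  # (32, 32, 3) array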
| 267 | 0 |
import importlib.util
import json
import os
import warnings
from dataclasses import dataclass, field
import torch
from ..training_args import TrainingArguments
from ..utils import cached_property, is_sagemaker_dp_enabled, logging
logger = logging.get_logger(__name__)
def is_sagemaker_model_parallel_available():
    # Get the sagemaker specific mp parameters from smp_options variable.
    smp_options = os.getenv("SM_HP_MP_PARAMETERS", "{}")
    try:
        # Parse it and check the field "partitions" is included, it is required for model parallel.
        smp_options = json.loads(smp_options)
        if "partitions" not in smp_options:
            return False
    except json.JSONDecodeError:
        return False
    # Get the sagemaker specific framework parameters from mpi_options variable.
    mpi_options = os.getenv("SM_FRAMEWORK_PARAMS", "{}")
    try:
        # Parse it and check the field "sagemaker_distributed_dataparallel_enabled".
        mpi_options = json.loads(mpi_options)
        if not mpi_options.get("sagemaker_mpi_enabled", False):
            return False
    except json.JSONDecodeError:
        return False
    # Lastly, check if the `smdistributed` module is present.
    return importlib.util.find_spec("smdistributed") is not None
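# Illustrative environment values (hypothetical, for exposition only):
#   SM_HP_MP_PARAMETERS='{"partitions": 2}'               -> model-parallel check passes
#   SM_FRAMEWORK_PARAMS='{"sagemaker_mpi_enabled": true}' -> MPI check passes
# Both must hold (and `smdistributed` must be importable) for this to return True.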
if is_sagemaker_model_parallel_available():
import smdistributed.modelparallel.torch as smp
smp.init()
@dataclass
class SageMakerTrainingArguments(TrainingArguments):
    mp_parameters: str = field(
        default="", metadata={"help": "Used by the SageMaker launcher to send mp-specific args. Ignored in SageMakerTrainer"}
    )
    def __post_init__(self):
        super().__post_init__()
        warnings.warn(
            "`SageMakerTrainingArguments` is deprecated and will be removed in v5 of Transformers. You can use "
            "`TrainingArguments` instead.",
            FutureWarning,
        )
    @cached_property
    def _setup_devices(self) -> "torch.device":
        logger.info("PyTorch: setting up devices")
        if torch.distributed.is_available() and torch.distributed.is_initialized() and self.local_rank == -1:
            logger.warning(
                "torch.distributed process group is initialized, but local_rank == -1. "
                "In order to use Torch DDP, launch your script with `python -m torch.distributed.launch`"
            )
        if self.no_cuda:
            device = torch.device("cpu")
            self._n_gpu = 0
        elif is_sagemaker_model_parallel_available():
            local_rank = smp.local_rank()
            device = torch.device("cuda", local_rank)
            self._n_gpu = 1
        elif is_sagemaker_dp_enabled():
            import smdistributed.dataparallel.torch.torch_smddp  # noqa: F401
            torch.distributed.init_process_group(backend="smddp", timeout=self.ddp_timeout_delta)
            self.local_rank = int(os.getenv("SMDATAPARALLEL_LOCAL_RANK"))
            device = torch.device("cuda", self.local_rank)
            self._n_gpu = 1
        elif self.local_rank == -1:
            # if n_gpu is > 1 we'll use nn.DataParallel.
            # If you only want to use a specific subset of GPUs use `CUDA_VISIBLE_DEVICES=0`
            # Explicitly set CUDA to the first (index 0) CUDA device, otherwise `set_device` will
            # trigger an error that a device index is missing. Index 0 takes into account the
            # GPUs available in the environment, so `CUDA_VISIBLE_DEVICES=1,2` with `cuda:0`
            # will use the first GPU in that env, i.e. GPU#1
            device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
            # Sometimes the line in the postinit has not been run before we end up here, so just checking we're not at
            # the default value.
            self._n_gpu = torch.cuda.device_count()
        else:
            # Here, we'll use torch.distributed.
            # Initializes the distributed backend which will take care of synchronizing nodes/GPUs
            if not torch.distributed.is_initialized():
                torch.distributed.init_process_group(backend="nccl", timeout=self.ddp_timeout_delta)
            device = torch.device("cuda", self.local_rank)
            self._n_gpu = 1
        if device.type == "cuda":
            torch.cuda.set_device(device)
        return device
    @property
    def world_size(self):
        if is_sagemaker_model_parallel_available():
            return smp.dp_size()
        return super().world_size
    @property
    def place_model_on_device(self):
        return not is_sagemaker_model_parallel_available()
    @property
    def _no_sync_in_gradient_accumulation(self):
        return False
| 333 | def greatest_common_divisor(a: int, b: int) -> int:
    while a != 0:
        a, b = b % a, a
    return b
def find_mod_inverse(a: int, m: int) -> int:
    if greatest_common_divisor(a, m) != 1:
        msg = f"mod inverse of {a!r} and {m!r} does not exist"
        raise ValueError(msg)
    u1, u2, u3 = 1, 0, a
    v1, v2, v3 = 0, 1, m
    while v3 != 0:
        q = u3 // v3
        v1, v2, v3, u1, u2, u3 = (u1 - q * v1), (u2 - q * v2), (u3 - q * v3), v1, v2, v3
    return u1 % m
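# Worked example: greatest_common_divisor(3, 11) == 1 and (3 * 4) % 11 == 1,
# so find_mod_inverse(3, 11) returns 4.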
| 333 | 1 |
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
GIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/git-base": "https://huggingface.co/microsoft/git-base/resolve/main/config.json",
}
class GitVisionConfig(PretrainedConfig):
    model_type = "git_vision_model"
    def __init__(self, hidden_size=768, intermediate_size=3072, num_hidden_layers=12, num_attention_heads=12, num_channels=3, image_size=224, patch_size=16, hidden_act="quick_gelu", layer_norm_eps=1e-5, attention_dropout=0.0, initializer_range=0.02, **kwargs):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act
    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        # get the vision config dict if we are loading from GITConfig
        if config_dict.get("model_type") == "git":
            config_dict = config_dict["vision_config"]
        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )
        return cls.from_dict(config_dict, **kwargs)
class GitConfig(PretrainedConfig):
    model_type = "git"
    def __init__(self, vision_config=None, vocab_size=30522, hidden_size=768, num_hidden_layers=6, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=1024, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=0, position_embedding_type="absolute", use_cache=True, tie_word_embeddings=False, bos_token_id=101, eos_token_id=102, num_image_with_embedding=None, **kwargs):
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, pad_token_id=pad_token_id, **kwargs)
        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. initializing the GitVisionConfig with default values.")
        self.vision_config = GitVisionConfig(**vision_config)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.tie_word_embeddings = tie_word_embeddings
        self.num_image_with_embedding = num_image_with_embedding
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
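# Hedged usage sketch:
#   config = GitConfig()               # default text settings plus a default GitVisionConfig
#   config.vision_config.image_size    # -> 224 with the defaults above
#   config.to_dict()["model_type"]     # -> "git"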
| 236 |
"""simple docstring"""
import json
import os
from pathlib import Path
import pytest
from datasets.download.download_config import DownloadConfig
from datasets.download.download_manager import DownloadManager
from datasets.utils.file_utils import hash_url_to_filename
URL = 'http://www.mocksite.com/file1.txt'
CONTENT = '"text": ["foo", "foo"]'
HASH = '6d8ce9aa78a471c7477201efbeabd3bb01ac2e7d100a6dc024ba1608361f90a8'
class MockResponse:
    status_code = 200
    headers = {"Content-Length": "100"}
    cookies = {}
    def iter_content(self, **kwargs):
        return [bytes(CONTENT, "utf-8")]
def mock_request(*args, **kwargs):
    return MockResponse()
@pytest.mark.parametrize("urls_type", [str, list, dict])
def test_download_manager_download(urls_type, tmp_path, monkeypatch):
    import requests
    monkeypatch.setattr(requests, "request", mock_request)
    url = URL
    if issubclass(urls_type, str):
        urls = url
    elif issubclass(urls_type, list):
        urls = [url]
    elif issubclass(urls_type, dict):
        urls = {"train": url}
    dataset_name = "dummy"
    cache_subdir = "downloads"
    cache_dir_root = tmp_path
    download_config = DownloadConfig(
        cache_dir=os.path.join(cache_dir_root, cache_subdir), use_etag=False,
    )
    dl_manager = DownloadManager(dataset_name=dataset_name, download_config=download_config)
    downloaded_paths = dl_manager.download(urls)
    input_urls = urls
    for downloaded_paths in [downloaded_paths]:
        if isinstance(urls, str):
            downloaded_paths = [downloaded_paths]
            input_urls = [urls]
        elif isinstance(urls, dict):
            assert "train" in downloaded_paths.keys()
            downloaded_paths = downloaded_paths.values()
            input_urls = urls.values()
        assert downloaded_paths
        for downloaded_path, input_url in zip(downloaded_paths, input_urls):
            assert downloaded_path == dl_manager.downloaded_paths[input_url]
            downloaded_path = Path(downloaded_path)
            parts = downloaded_path.parts
            assert parts[-1] == HASH
            assert parts[-2] == cache_subdir
            assert downloaded_path.exists()
            content = downloaded_path.read_text()
            assert content == CONTENT
            metadata_downloaded_path = downloaded_path.with_suffix(".json")
            assert metadata_downloaded_path.exists()
            metadata_content = json.loads(metadata_downloaded_path.read_text())
            assert metadata_content == {"url": URL, "etag": None}
@pytest.mark.parametrize("paths_type", [str, list, dict])
def test_download_manager_extract(paths_type, xz_file, text_file):
    filename = str(xz_file)
    if issubclass(paths_type, str):
        paths = filename
    elif issubclass(paths_type, list):
        paths = [filename]
    elif issubclass(paths_type, dict):
        paths = {"train": filename}
    dataset_name = "dummy"
    cache_dir = xz_file.parent
    extracted_subdir = "extracted"
    download_config = DownloadConfig(
        cache_dir=cache_dir, use_etag=False,
    )
    dl_manager = DownloadManager(dataset_name=dataset_name, download_config=download_config)
    extracted_paths = dl_manager.extract(paths)
    input_paths = paths
    for extracted_paths in [extracted_paths]:
        if isinstance(paths, str):
            extracted_paths = [extracted_paths]
            input_paths = [paths]
        elif isinstance(paths, dict):
            assert "train" in extracted_paths.keys()
            extracted_paths = extracted_paths.values()
            input_paths = paths.values()
        assert extracted_paths
        for extracted_path, input_path in zip(extracted_paths, input_paths):
            assert extracted_path == dl_manager.extracted_paths[input_path]
            extracted_path = Path(extracted_path)
            parts = extracted_path.parts
            assert parts[-1] == hash_url_to_filename(input_path, etag=None)
            assert parts[-2] == extracted_subdir
            assert extracted_path.exists()
            extracted_file_content = extracted_path.read_text()
            expected_file_content = text_file.read_text()
            assert extracted_file_content == expected_file_content
def _test_jsonl(path, file):
    assert path.endswith(".jsonl")
    for num_items, line in enumerate(file, start=1):
        item = json.loads(line.decode("utf-8"))
        assert item.keys() == {"col_1", "col_2", "col_3"}
    assert num_items == 4
@pytest.mark.parametrize("archive_jsonl", ["tar_jsonl_path", "zip_jsonl_path"])
def test_iter_archive_path(archive_jsonl, request):
    archive_jsonl_path = request.getfixturevalue(archive_jsonl)
    dl_manager = DownloadManager()
    for num_jsonl, (path, file) in enumerate(dl_manager.iter_archive(archive_jsonl_path), start=1):
        _test_jsonl(path, file)
    assert num_jsonl == 2
@pytest.mark.parametrize("archive_nested_jsonl", ["tar_nested_jsonl_path", "zip_nested_jsonl_path"])
def test_iter_archive_file(archive_nested_jsonl, request):
    archive_nested_jsonl_path = request.getfixturevalue(archive_nested_jsonl)
    dl_manager = DownloadManager()
    for num_tar, (path, file) in enumerate(dl_manager.iter_archive(archive_nested_jsonl_path), start=1):
        for num_jsonl, (subpath, subfile) in enumerate(dl_manager.iter_archive(file), start=1):
            _test_jsonl(subpath, subfile)
    assert num_tar == 1
    assert num_jsonl == 2
def test_iter_files(data_dir_with_hidden_files):
    dl_manager = DownloadManager()
    for num_file, file in enumerate(dl_manager.iter_files(data_dir_with_hidden_files), start=1):
        assert os.path.basename(file) == ("test.txt" if num_file == 1 else "train.txt")
    assert num_file == 2
| 288 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
'configuration_poolformer': [
'POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'PoolFormerConfig',
'PoolFormerOnnxConfig',
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['feature_extraction_poolformer'] = ['PoolFormerFeatureExtractor']
    _import_structure['image_processing_poolformer'] = ['PoolFormerImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_poolformer'] = [
'POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'PoolFormerForImageClassification',
'PoolFormerModel',
'PoolFormerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_poolformer import (
POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
PoolFormerConfig,
PoolFormerOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_poolformer import PoolFormerFeatureExtractor
from .image_processing_poolformer import PoolFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_poolformer import (
POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
PoolFormerForImageClassification,
PoolFormerModel,
PoolFormerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure) | 362 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_instructblip': [
'INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP',
'InstructBlipConfig',
'InstructBlipQFormerConfig',
'InstructBlipVisionConfig',
],
'processing_instructblip': ['InstructBlipProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_instructblip'] = [
'INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST',
'InstructBlipQFormerModel',
'InstructBlipPreTrainedModel',
'InstructBlipForConditionalGeneration',
'InstructBlipVisionModel',
]
if TYPE_CHECKING:
from .configuration_instructblip import (
INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
InstructBlipConfig,
InstructBlipQFormerConfig,
InstructBlipVisionConfig,
)
from .processing_instructblip import InstructBlipProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_instructblip import (
INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
InstructBlipForConditionalGeneration,
InstructBlipPreTrainedModel,
InstructBlipQFormerModel,
InstructBlipVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) | 33 | 0 |
"""simple docstring"""
import importlib.metadata
from typing import Union
from packaging.version import Version, parse
from .constants import STR_OPERATION_TO_FUNC
torch_version = parse(importlib.metadata.version("torch"))
def compare_versions(library_or_version: Union[str, Version], operation: str, requirement_version: str):
    if operation not in STR_OPERATION_TO_FUNC.keys():
        raise ValueError(f"""`operation` must be one of {list(STR_OPERATION_TO_FUNC.keys() )}, received {operation}""")
    operation = STR_OPERATION_TO_FUNC[operation]
    if isinstance(library_or_version, str):
        library_or_version = parse(importlib.metadata.version(library_or_version))
    return operation(library_or_version, parse(requirement_version))
def is_torch_version(operation: str, version: str):
    return compare_versions(torch_version, operation, version)
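# Hedged usage sketch (assumes the packages are installed):
#   compare_versions("torch", ">=", "1.12")  # parses the installed torch version first
#   is_torch_version("<", "2.0")             # same comparison against the cached torch_version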
| 78 |
"""simple docstring"""
from typing import Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING
logger = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS)
class VisualQuestionAnsweringPipeline(Pipeline):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.check_model_type(MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING)
    def _sanitize_parameters(self, top_k=None, padding=None, truncation=None, **kwargs):
        preprocess_params, postprocess_params = {}, {}
        if padding is not None:
            preprocess_params["padding"] = padding
        if truncation is not None:
            preprocess_params["truncation"] = truncation
        if top_k is not None:
            postprocess_params["top_k"] = top_k
        return preprocess_params, {}, postprocess_params
    def __call__(self, image: Union["Image.Image", str], question: str = None, **kwargs):
        if isinstance(image, (Image.Image, str)) and isinstance(question, str):
            inputs = {"image": image, "question": question}
        else:
            # a dict (or list of dicts) of the form {"image": ..., "question": ...}
            inputs = image
        results = super().__call__(inputs, **kwargs)
        return results
    def preprocess(self, inputs, padding=False, truncation=False):
        image = load_image(inputs["image"])
        model_inputs = self.tokenizer(
            inputs["question"], return_tensors=self.framework, padding=padding, truncation=truncation
        )
        image_features = self.image_processor(images=image, return_tensors=self.framework)
        model_inputs.update(image_features)
        return model_inputs
    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs
    def postprocess(self, model_outputs, top_k=5):
        if top_k > self.model.config.num_labels:
            top_k = self.model.config.num_labels
        if self.framework == "pt":
            probs = model_outputs.logits.sigmoid()[0]
            scores, ids = probs.topk(top_k)
        else:
            raise ValueError(f"""Unsupported framework: {self.framework}""")
        scores = scores.tolist()
        ids = ids.tolist()
        return [{"score": score, "answer": self.model.config.id2label[_id]} for score, _id in zip(scores, ids)]
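# Hedged usage sketch (checkpoint assumed to exist on the Hub):
#   from transformers import pipeline
#   vqa = pipeline("visual-question-answering", model="dandelin/vilt-b32-finetuned-vqa")
#   vqa(image="http://images.cocodataset.org/val2017/000000039769.jpg", question="How many cats are there?")
#   # -> [{"score": ..., "answer": "2"}, ...] (top_k answers, highest score first)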
| 78 | 1 |
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.utils import is_vision_available
from transformers.utils.generic import TensorType
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_valid_image,
to_numpy_array,
valid_images,
)
from ...utils import logging
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)
def make_batched(videos) -> List[List[ImageInput]]:
    if isinstance(videos, (list, tuple)) and isinstance(videos[0], (list, tuple)) and is_valid_image(videos[0][0]):
        return videos
    elif isinstance(videos, (list, tuple)) and is_valid_image(videos[0]):
        return [videos]
    elif is_valid_image(videos):
        return [[videos]]
    raise ValueError(f"""Could not make batched video from {videos}""")
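# Illustrative behavior (toy inputs): a single PIL image becomes [[image]],
# a flat list of frames becomes [frames], and a list of frame lists is returned unchanged.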
class VivitImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]
    def __init__(self, do_resize: bool = True, size: Dict[str, int] = None, resample: PILImageResampling = PILImageResampling.BILINEAR, do_center_crop: bool = True, crop_size: Dict[str, int] = None, do_rescale: bool = True, rescale_factor: Union[int, float] = 1 / 255, offset: bool = True, do_normalize: bool = True, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, **kwargs) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 256}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.offset = offset
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" in size:
            output_size = get_resize_output_image_size(image, size["shortest_edge"], default_to_square=False)
        elif "height" in size and "width" in size:
            output_size = (size["height"], size["width"])
        else:
            raise ValueError(f"Size must have 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}")
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)
    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"Size must have 'height' and 'width' as keys. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)
    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        offset: bool = True,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        image = image.astype(np.float32)
        if offset:
            image = image - (scale / 2)
        return rescale(image, scale=scale, data_format=data_format, **kwargs)
    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def _preprocess_image(
        self,
        image: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        offset: bool = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        data_format: Optional[ChannelDimension] = ChannelDimension.FIRST,
    ) -> np.ndarray:
        if do_resize and size is None or resample is None:
            raise ValueError("Size and resample must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")
        if offset and not do_rescale:
            raise ValueError("For offset, do_rescale must also be set to True.")
        # All transformations expect numpy arrays.
        image = to_numpy_array(image)
        if do_resize:
            image = self.resize(image=image, size=size, resample=resample)
        if do_center_crop:
            image = self.center_crop(image, size=crop_size)
        if do_rescale:
            image = self.rescale(image=image, scale=rescale_factor, offset=offset)
        if do_normalize:
            image = self.normalize(image=image, mean=image_mean, std=image_std)
        image = to_channel_dimension_format(image, data_format)
        return image
    def preprocess(
        self,
        videos: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        offset: bool = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        offset = offset if offset is not None else self.offset
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        if not valid_images(videos):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )
        videos = make_batched(videos)
        videos = [
            [
                self._preprocess_image(
                    image=img,
                    do_resize=do_resize,
                    size=size,
                    resample=resample,
                    do_center_crop=do_center_crop,
                    crop_size=crop_size,
                    do_rescale=do_rescale,
                    rescale_factor=rescale_factor,
                    offset=offset,
                    do_normalize=do_normalize,
                    image_mean=image_mean,
                    image_std=image_std,
                    data_format=data_format,
                )
                for img in video
            ]
            for video in videos
        ]
        data = {"pixel_values": videos}
        return BatchFeature(data=data, tensor_type=return_tensors)
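# Usage sketch (assumes `frames` is a list of PIL frames; default config, no checkpoint):
#     processor = VivitImageProcessor()
#     inputs = processor.preprocess(frames, return_tensors="np")
#     inputs["pixel_values"]  # shape: (batch, num_frames, channels, height, width)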
| 371 |
'''simple docstring'''
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_gpt2 import GPT2Tokenizer
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""gpt2""": """https://huggingface.co/gpt2/resolve/main/vocab.json""",
"""gpt2-medium""": """https://huggingface.co/gpt2-medium/resolve/main/vocab.json""",
"""gpt2-large""": """https://huggingface.co/gpt2-large/resolve/main/vocab.json""",
"""gpt2-xl""": """https://huggingface.co/gpt2-xl/resolve/main/vocab.json""",
"""distilgpt2""": """https://huggingface.co/distilgpt2/resolve/main/vocab.json""",
},
"""merges_file""": {
"""gpt2""": """https://huggingface.co/gpt2/resolve/main/merges.txt""",
"""gpt2-medium""": """https://huggingface.co/gpt2-medium/resolve/main/merges.txt""",
"""gpt2-large""": """https://huggingface.co/gpt2-large/resolve/main/merges.txt""",
"""gpt2-xl""": """https://huggingface.co/gpt2-xl/resolve/main/merges.txt""",
"""distilgpt2""": """https://huggingface.co/distilgpt2/resolve/main/merges.txt""",
},
"""tokenizer_file""": {
"""gpt2""": """https://huggingface.co/gpt2/resolve/main/tokenizer.json""",
"""gpt2-medium""": """https://huggingface.co/gpt2-medium/resolve/main/tokenizer.json""",
"""gpt2-large""": """https://huggingface.co/gpt2-large/resolve/main/tokenizer.json""",
"""gpt2-xl""": """https://huggingface.co/gpt2-xl/resolve/main/tokenizer.json""",
"""distilgpt2""": """https://huggingface.co/distilgpt2/resolve/main/tokenizer.json""",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""gpt2""": 1024,
"""gpt2-medium""": 1024,
"""gpt2-large""": 1024,
"""gpt2-xl""": 1024,
"""distilgpt2""": 1024,
}
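# Rust-backed "fast" GPT-2 BPE tokenizer; behaviour mirrors the slow GPT2Tokenizer imported above.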
class GPT2TokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = GPT2Tokenizer
    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        unk_token="<|endoftext|>",
        bos_token="<|endoftext|>",
        eos_token="<|endoftext|>",
        add_prefix_space=False,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )
        self.add_bos_token = kwargs.pop("add_bos_token", False)
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)
        self.add_prefix_space = add_prefix_space
    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        # Concatenate every turn of the conversation, each followed by the EOS token,
        # then truncate from the left to fit the model's maximum length.
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text, add_special_tokens=False) + [self.eos_token_id])
        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
| 91 | 0 |
"""simple docstring"""
import unittest
import numpy as np
import timeout_decorator # noqa
from transformers import BlenderbotConfig, is_flax_available
from transformers.testing_utils import jax_device, require_flax, slow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
    os.environ["XLA_PYTHON_CLIENT_ALLOCATOR"] = "platform"
import jax
import jax.numpy as jnp
from transformers import BlenderbotTokenizer
from transformers.models.blenderbot.modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
shift_tokens_right,
)
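# Helper that fills in any attention/head masks the test did not specify,
# derived from the config's pad token and layer/head counts.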
def prepare_blenderbot_inputs_dict(
    config,
    input_ids,
    decoder_input_ids=None,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    if attention_mask is None:
        attention_mask = np.where(input_ids != config.pad_token_id, 1, 0)
    if decoder_attention_mask is None:
        decoder_attention_mask = np.where(decoder_input_ids != config.pad_token_id, 1, 0)
    if head_mask is None:
        head_mask = np.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": attention_mask,
    }
class FlaxBlenderbotModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=16,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=4,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=32,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
        initializer_range=0.02,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.initializer_range = initializer_range
    def prepare_config_and_inputs(self):
        input_ids = np.clip(ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size), 3, self.vocab_size)
        input_ids = np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1), dtype=np.int64)), -1)
        decoder_input_ids = shift_tokens_right(input_ids, 1, 2)
        config = BlenderbotConfig(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_id=self.eos_token_id,
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            initializer_range=self.initializer_range,
            use_cache=False,
        )
        inputs_dict = prepare_blenderbot_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict

    def prepare_config_and_inputs_for_common(self):
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict
    def check_use_cache_forward(self, model_class_name, config, inputs_dict):
        max_decoder_length = 20
        model = model_class_name(config)
        encoder_outputs = model.encode(inputs_dict["input_ids"])
        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )
        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_attention_mask = jnp.ones((decoder_input_ids.shape[0], max_decoder_length), dtype="i4")
        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :],
            (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1),
        )
        outputs_cache = model.decode(
            decoder_input_ids[:, :-1],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask,
            past_key_values=past_key_values,
            decoder_position_ids=decoder_position_ids,
        )
        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask,
            past_key_values=outputs_cache.past_key_values,
            decoder_position_ids=decoder_position_ids,
        )
        outputs = model.decode(decoder_input_ids, encoder_outputs)
        diff = np.max(np.abs(outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")

    def check_use_cache_forward_with_attn_mask(self, model_class_name, config, inputs_dict):
        max_decoder_length = 20
        model = model_class_name(config)
        encoder_outputs = model.encode(inputs_dict["input_ids"])
        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )
        decoder_attention_mask_cache = jnp.concatenate(
            [
                decoder_attention_mask,
                jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1])),
            ],
            axis=-1,
        )
        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :],
            (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1),
        )
        outputs_cache = model.decode(
            decoder_input_ids[:, :-1],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask_cache,
            past_key_values=past_key_values,
            decoder_position_ids=decoder_position_ids,
        )
        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:],
            encoder_outputs,
            past_key_values=outputs_cache.past_key_values,
            decoder_attention_mask=decoder_attention_mask_cache,
            decoder_position_ids=decoder_position_ids,
        )
        outputs = model.decode(decoder_input_ids, encoder_outputs, decoder_attention_mask=decoder_attention_mask)
        diff = np.max(np.abs(outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")
@require_flax
class BlenderbotHeadTests(unittest.TestCase):
    vocab_size = 99
    def _get_config_and_data(self):
        input_ids = np.array(
[
[71, 82, 18, 33, 46, 91, 2],
[68, 34, 26, 58, 30, 82, 2],
[5, 97, 17, 39, 94, 40, 2],
[76, 83, 94, 25, 70, 78, 2],
[87, 59, 41, 35, 48, 66, 2],
[55, 13, 16, 58, 5, 2, 1], # note padding
[64, 27, 31, 51, 12, 75, 2],
[52, 64, 86, 17, 83, 39, 2],
[48, 61, 9, 24, 71, 82, 2],
[26, 1, 60, 48, 22, 13, 2],
[21, 5, 62, 28, 14, 76, 2],
[45, 98, 37, 86, 59, 48, 2],
[70, 70, 50, 9, 28, 0, 2],
            ],
            dtype=np.int64,
        )
        batch_size = input_ids.shape[0]
        config = BlenderbotConfig(
            vocab_size=self.vocab_size,
            d_model=24,
            encoder_layers=2,
            decoder_layers=2,
            encoder_attention_heads=2,
            decoder_attention_heads=2,
            encoder_ffn_dim=32,
            decoder_ffn_dim=32,
            max_position_embeddings=48,
            eos_token_id=2,
            pad_token_id=1,
            bos_token_id=0,
        )
        return config, input_ids, batch_size
    def test_lm_forward(self):
        config, input_ids, batch_size = self._get_config_and_data()
        lm_model = FlaxBlenderbotForConditionalGeneration(config)
        outputs = lm_model(input_ids=input_ids)
        expected_shape = (batch_size, input_ids.shape[1], config.vocab_size)
        self.assertEqual(outputs["logits"].shape, expected_shape)
    def test_lm_uneven_forward(self):
        config = BlenderbotConfig(
            vocab_size=self.vocab_size,
            d_model=14,
            encoder_layers=2,
            decoder_layers=2,
            encoder_attention_heads=2,
            decoder_attention_heads=2,
            encoder_ffn_dim=8,
            decoder_ffn_dim=8,
            max_position_embeddings=48,
        )
        lm_model = FlaxBlenderbotForConditionalGeneration(config)
        context = np.array([[71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 2, 1]], dtype=np.int64)
        summary = np.array([[82, 71, 82, 18, 2], [58, 68, 2, 1, 1]], dtype=np.int64)
        outputs = lm_model(input_ids=context, decoder_input_ids=summary)
        expected_shape = (*summary.shape, config.vocab_size)
        self.assertEqual(outputs["logits"].shape, expected_shape)
    def test_shift_tokens_right(self):
        input_ids = np.array([[71, 82, 18, 33, 2, 1, 1], [68, 34, 26, 58, 30, 82, 2]], dtype=np.int64)
        shifted = shift_tokens_right(input_ids, 1, 2)
        n_pad_before = np.equal(input_ids, 1).astype(np.float32).sum()
        n_pad_after = np.equal(shifted, 1).astype(np.float32).sum()
        self.assertEqual(shifted.shape, input_ids.shape)
        self.assertEqual(n_pad_after, n_pad_before - 1)
        self.assertTrue(np.equal(shifted[:, 0], 2).all())
@require_flax
class FlaxBlenderbotModelTest(FlaxModelTesterMixin, unittest.TestCase, FlaxGenerationTesterMixin):
    is_encoder_decoder = True
    all_model_classes = (
        (
            FlaxBlenderbotModel,
            FlaxBlenderbotForConditionalGeneration,
        )
        if is_flax_available()
        else ()
    )
    all_generative_model_classes = (FlaxBlenderbotForConditionalGeneration,) if is_flax_available() else ()
    def setUp(self):
        self.model_tester = FlaxBlenderbotModelTester(self)

    def test_use_cache_forward(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward(model_class, config, inputs_dict)

    def test_use_cache_forward_with_attn_mask(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward_with_attn_mask(model_class, config, inputs_dict)
    def test_encode(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def encode_jitted(input_ids, attention_mask=None, **kwargs):
                    return model.encode(input_ids=input_ids, attention_mask=attention_mask)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = encode_jitted(**prepared_inputs_dict).to_tuple()
                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = encode_jitted(**prepared_inputs_dict).to_tuple()
                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)
    def test_decode(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                model = model_class(config)
                encoder_outputs = model.encode(inputs_dict["input_ids"], inputs_dict["attention_mask"])
                prepared_inputs_dict = {
                    "decoder_input_ids": inputs_dict["decoder_input_ids"],
                    "decoder_attention_mask": inputs_dict["decoder_attention_mask"],
                    "encoder_outputs": encoder_outputs,
                }

                @jax.jit
                def decode_jitted(decoder_input_ids, decoder_attention_mask, encoder_outputs):
                    return model.decode(
                        decoder_input_ids=decoder_input_ids,
                        decoder_attention_mask=decoder_attention_mask,
                        encoder_outputs=encoder_outputs,
                    )

                with self.subTest("JIT Enabled"):
                    jitted_outputs = decode_jitted(**prepared_inputs_dict).to_tuple()
                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = decode_jitted(**prepared_inputs_dict).to_tuple()
                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)
    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("facebook/blenderbot-400M-distill")
            # FlaxBlenderbotForSequenceClassification expects eos token in input_ids
            input_ids = np.ones((1, 1)) * model.config.eos_token_id
            outputs = model(input_ids)
            self.assertIsNotNone(outputs)
    @unittest.skipUnless(jax_device != "cpu", "3B test too slow on CPU.")
    @slow
    def test_generation_from_short_input_same_as_parlai_3B(self):
        FASTER_GEN_KWARGS = {"num_beams": 1, "early_stopping": True, "min_length": 15, "max_length": 25}
        TOK_DECODE_KW = {"skip_special_tokens": True, "clean_up_tokenization_spaces": True}
        model = FlaxBlenderbotForConditionalGeneration.from_pretrained("facebook/blenderbot-3B", from_pt=True)
        tokenizer = BlenderbotTokenizer.from_pretrained("facebook/blenderbot-3B")
        src_text = ["Sam"]
        model_inputs = tokenizer(src_text, return_tensors="jax")
        generated_utterances = model.generate(**model_inputs, **FASTER_GEN_KWARGS)
        tgt_text = 'Sam is a great name. It means "sun" in Gaelic.'
        generated_txt = tokenizer.batch_decode(generated_utterances, **TOK_DECODE_KW)
        assert generated_txt[0].strip() == tgt_text
| 64 |
"""simple docstring"""
import unittest
from transformers import (
MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TextGenerationPipeline,
logging,
pipeline,
)
from transformers.testing_utils import (
CaptureLogger,
is_pipeline_test,
require_accelerate,
require_tf,
require_torch,
require_torch_gpu,
require_torch_or_tf,
)
from .test_pipelines_common import ANY
@is_pipeline_test
@require_torch_or_tf
class TextGenerationPipelineTests(unittest.TestCase):
'''simple docstring'''
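    # The pipeline is exercised against every causal-LM architecture registered in these mappings.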
    model_mapping = MODEL_FOR_CAUSAL_LM_MAPPING
    tf_model_mapping = TF_MODEL_FOR_CAUSAL_LM_MAPPING
@require_torch
    def test_small_model_pt(self):
        text_generator = pipeline(task="text-generation", model="sshleifer/tiny-ctrl", framework="pt")
        # Using `do_sample=False` to force deterministic output
        outputs = text_generator("This is a test", do_sample=False)
        self.assertEqual(
            outputs,
            [
                {
                    "generated_text": (
                        "This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope."
                        " oscope. FiliFili@@"
                    )
                }
            ],
        )
        outputs = text_generator(["This is a test", "This is a second test"])
        self.assertEqual(
            outputs,
            [
                [
                    {
                        "generated_text": (
                            "This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope."
                            " oscope. FiliFili@@"
                        )
                    }
                ],
                [
                    {
                        "generated_text": (
                            "This is a second test ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy"
                            " oscope. oscope. FiliFili@@"
                        )
                    }
                ],
            ],
        )
        outputs = text_generator("This is a test", do_sample=True, num_return_sequences=2, return_tensors=True)
        self.assertEqual(
            outputs,
            [
                {"generated_token_ids": ANY(list)},
                {"generated_token_ids": ANY(list)},
            ],
        )
        text_generator.tokenizer.pad_token_id = text_generator.model.config.eos_token_id
        text_generator.tokenizer.pad_token = "<pad>"
        outputs = text_generator(
            ["This is a test", "This is a second test"],
            do_sample=True,
            num_return_sequences=2,
            batch_size=2,
            return_tensors=True,
        )
        self.assertEqual(
            outputs,
            [
                [
                    {"generated_token_ids": ANY(list)},
                    {"generated_token_ids": ANY(list)},
                ],
                [
                    {"generated_token_ids": ANY(list)},
                    {"generated_token_ids": ANY(list)},
                ],
            ],
        )
    @require_tf
    def test_small_model_tf(self):
        text_generator = pipeline(task="text-generation", model="sshleifer/tiny-ctrl", framework="tf")
        # Using `do_sample=False` to force deterministic output
        outputs = text_generator("This is a test", do_sample=False)
        self.assertEqual(
            outputs,
            [
                {
                    "generated_text": (
                        "This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵"
                        " please,"
                    )
                }
            ],
        )
        outputs = text_generator(["This is a test", "This is a second test"], do_sample=False)
        self.assertEqual(
            outputs,
            [
                [
                    {
                        "generated_text": (
                            "This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵"
                            " please,"
                        )
                    }
                ],
                [
                    {
                        "generated_text": (
                            "This is a second test Chieftain Chieftain prefecture prefecture prefecture Cannes Cannes"
                            " Cannes 閲閲Cannes Cannes Cannes 攵 please,"
                        )
                    }
                ],
            ],
        )
    def get_test_pipeline(self, model, tokenizer, processor):
        text_generator = TextGenerationPipeline(model=model, tokenizer=tokenizer)
        return text_generator, ["This is a test", "Another test"]
    def test_stop_sequence_stopping_criteria(self):
        prompt = """Hello I believe in"""
        text_generator = pipeline("text-generation", model="hf-internal-testing/tiny-random-gpt2")
        output = text_generator(prompt)
        self.assertEqual(
            output,
            [{"generated_text": "Hello I believe in fe fe fe fe fe fe fe fe fe fe fe fe"}],
        )
        output = text_generator(prompt, stop_sequence=" fe")
        self.assertEqual(output, [{"generated_text": "Hello I believe in fe"}])
    def run_pipeline_test(self, text_generator, _):
        model = text_generator.model
        tokenizer = text_generator.tokenizer
        outputs = text_generator("This is a test")
        self.assertEqual(outputs, [{"generated_text": ANY(str)}])
        self.assertTrue(outputs[0]["generated_text"].startswith("This is a test"))
        outputs = text_generator("This is a test", return_full_text=False)
        self.assertEqual(outputs, [{"generated_text": ANY(str)}])
        self.assertNotIn("This is a test", outputs[0]["generated_text"])
        text_generator = pipeline(task="text-generation", model=model, tokenizer=tokenizer, return_full_text=False)
        outputs = text_generator("This is a test")
        self.assertEqual(outputs, [{"generated_text": ANY(str)}])
        self.assertNotIn("This is a test", outputs[0]["generated_text"])
        outputs = text_generator("This is a test", return_full_text=True)
        self.assertEqual(outputs, [{"generated_text": ANY(str)}])
        self.assertTrue(outputs[0]["generated_text"].startswith("This is a test"))
        outputs = text_generator(["This is great !", "Something else"], num_return_sequences=2, do_sample=True)
        self.assertEqual(
            outputs,
            [
                [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
                [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
            ],
        )
        if text_generator.tokenizer.pad_token is not None:
            outputs = text_generator(
                ["This is great !", "Something else"], num_return_sequences=2, batch_size=2, do_sample=True
            )
            self.assertEqual(
                outputs,
                [
                    [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
                    [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
                ],
            )
        with self.assertRaises(ValueError):
            outputs = text_generator("test", return_full_text=True, return_text=True)
        with self.assertRaises(ValueError):
            outputs = text_generator("test", return_full_text=True, return_tensors=True)
        with self.assertRaises(ValueError):
            outputs = text_generator("test", return_text=True, return_tensors=True)
        # Empty prompt is slightly special
        # it requires BOS token to exist.
        # Special case for Pegasus which will always append EOS so will
        # work even without BOS.
        if (
            text_generator.tokenizer.bos_token_id is not None
            or "Pegasus" in tokenizer.__class__.__name__
            or "Git" in model.__class__.__name__
        ):
            outputs = text_generator("")
            self.assertEqual(outputs, [{"generated_text": ANY(str)}])
        else:
            with self.assertRaises((ValueError, AssertionError)):
                outputs = text_generator("")
        if text_generator.framework == "tf":
            # TF generation does not support max_new_tokens, and it's impossible
            # to control long generation with only max_length without
            # fancy calculation, dismissing tests for now.
            return
        # We don't care about infinite range models.
        # They already work.
        # Skip this test for XGLM, since it uses sinusoidal positional embeddings which are resized on-the-fly.
        EXTRA_MODELS_CAN_HANDLE_LONG_INPUTS = ["RwkvForCausalLM", "XGLMForCausalLM", "GPTNeoXForCausalLM"]
        if (
            tokenizer.model_max_length < 10000
            and text_generator.model.__class__.__name__ not in EXTRA_MODELS_CAN_HANDLE_LONG_INPUTS
        ):
            # Handling of large generations
            with self.assertRaises((RuntimeError, IndexError, ValueError, AssertionError)):
                text_generator("This is a test" * 500, max_new_tokens=20)
            outputs = text_generator("This is a test" * 500, handle_long_generation="hole", max_new_tokens=20)
            # Hole strategy cannot work
            with self.assertRaises(ValueError):
                text_generator(
                    "This is a test" * 500,
                    handle_long_generation="hole",
                    max_new_tokens=tokenizer.model_max_length + 10,
                )
    @require_torch
    @require_accelerate
    @require_torch_gpu
    def test_small_model_pt_bloom_accelerate(self):
        import torch

        # Classic `model_kwargs`
        pipe = pipeline(
            model="hf-internal-testing/tiny-random-bloom",
            model_kwargs={"device_map": "auto", "torch_dtype": torch.bfloat16},
        )
        self.assertEqual(pipe.model.device, torch.device(0))
        self.assertEqual(pipe.model.lm_head.weight.dtype, torch.bfloat16)
        out = pipe("This is a test")
        self.assertEqual(
            out,
            [
                {
                    "generated_text": (
                        "This is a test test test test test test test test test test test test test test test test"
                        " test"
                    )
                }
            ],
        )
        # Upgraded those two to real pipeline arguments (they just get sent for the model as they're unlikely to mean anything else.)
        pipe = pipeline(model="hf-internal-testing/tiny-random-bloom", device_map="auto", torch_dtype=torch.bfloat16)
        self.assertEqual(pipe.model.device, torch.device(0))
        self.assertEqual(pipe.model.lm_head.weight.dtype, torch.bfloat16)
        out = pipe("This is a test")
        self.assertEqual(
            out,
            [
                {
                    "generated_text": (
                        "This is a test test test test test test test test test test test test test test test test"
                        " test"
                    )
                }
            ],
        )
        # torch_dtype will be automatically set to float32 if not provided - check: https://github.com/huggingface/transformers/pull/20602
        pipe = pipeline(model="hf-internal-testing/tiny-random-bloom", device_map="auto")
        self.assertEqual(pipe.model.device, torch.device(0))
        self.assertEqual(pipe.model.lm_head.weight.dtype, torch.float32)
        out = pipe("This is a test")
        self.assertEqual(
            out,
            [
                {
                    "generated_text": (
                        "This is a test test test test test test test test test test test test test test test test"
                        " test"
                    )
                }
            ],
        )
    @require_torch
    @require_torch_gpu
    def test_small_model_fp16(self):
        import torch

        pipe = pipeline(model="hf-internal-testing/tiny-random-bloom", device=0, torch_dtype=torch.float16)
        pipe("This is a test")
    @require_torch
    @require_accelerate
    @require_torch_gpu
    def test_pipeline_accelerate_top_p(self):
        import torch

        pipe = pipeline(model="hf-internal-testing/tiny-random-bloom", device_map="auto", torch_dtype=torch.float16)
        pipe("This is a test", do_sample=True, top_p=0.5)
    def test_pipeline_length_setting_warning(self):
        prompt = """Hello world"""
        text_generator = pipeline("text-generation", model="hf-internal-testing/tiny-random-gpt2")
        if text_generator.model.framework == "tf":
            logger = logging.get_logger("transformers.generation.tf_utils")
        else:
            logger = logging.get_logger("transformers.generation.utils")
        logger_msg = "Both `max_new_tokens`"  # The beginning of the message to be checked in this test
        # Both are set by the user -> log warning
        with CaptureLogger(logger) as cl:
            _ = text_generator(prompt, max_length=10, max_new_tokens=1)
        self.assertIn(logger_msg, cl.out)
        # The user only sets one -> no warning
        with CaptureLogger(logger) as cl:
            _ = text_generator(prompt, max_new_tokens=1)
        self.assertNotIn(logger_msg, cl.out)
        with CaptureLogger(logger) as cl:
            _ = text_generator(prompt, max_length=10)
        self.assertNotIn(logger_msg, cl.out)
| 171 | 0 |
'''simple docstring'''
import unittest
import numpy as np
from diffusers import LMSDiscreteScheduler, OnnxStableDiffusionInpaintPipeline
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
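# The integration tests below run the full ONNX Stable Diffusion inpainting pipeline on GPU.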
class OnnxStableDiffusionPipelineFastTests(OnnxPipelineTesterMixin, unittest.TestCase):
    # FIXME: add fast tests
    pass
@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionInpaintPipelineIntegrationTests(unittest.TestCase):
    @property
    def gpu_provider(self):
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def gpu_options(self):
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options
    def test_inpainting_default_pndm(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo_mask.png"
        )
        pipe = OnnxStableDiffusionInpaintPipeline.from_pretrained(
            "runwayml/stable-diffusion-inpainting",
            revision="onnx",
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)
        prompt = "A red cat sitting on a park bench"
        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            mask_image=mask_image,
            guidance_scale=7.5,
            num_inference_steps=10,
            generator=generator,
            output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 255:258, -1]
        assert images.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.2514, 0.3007, 0.3517, 0.1790, 0.2382, 0.3167, 0.1944, 0.2273, 0.2464])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
    def test_inpainting_k_lms(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo_mask.png"
        )
        lms_scheduler = LMSDiscreteScheduler.from_pretrained(
            "runwayml/stable-diffusion-inpainting", subfolder="scheduler", revision="onnx"
        )
        pipe = OnnxStableDiffusionInpaintPipeline.from_pretrained(
            "runwayml/stable-diffusion-inpainting",
            revision="onnx",
            scheduler=lms_scheduler,
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)
        prompt = "A red cat sitting on a park bench"
        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            mask_image=mask_image,
            guidance_scale=7.5,
            num_inference_steps=20,
            generator=generator,
            output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 255:258, -1]
        assert images.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.0086, 0.0077, 0.0083, 0.0093, 0.0107, 0.0139, 0.0094, 0.0097, 0.0125])
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3 | 96 |
'''simple docstring'''
import os
import pickle
import unittest
from transformers import AutoTokenizer
from transformers.models.bert.tokenization_bert import BertTokenizer
from transformers.models.bert_japanese.tokenization_bert_japanese import (
VOCAB_FILES_NAMES,
BertJapaneseTokenizer,
CharacterTokenizer,
JumanppTokenizer,
MecabTokenizer,
SudachiTokenizer,
WordpieceTokenizer,
)
from transformers.testing_utils import custom_tokenizers, require_jumanpp, require_sudachi
from ...test_tokenization_common import TokenizerTesterMixin
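# Tests cover the word-level tokenizers (MeCab, Sudachi, Juman++) and the subword
# tokenizers (WordPiece, character, SentencePiece) used by BertJapaneseTokenizer.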
@custom_tokenizers
class BertJapaneseTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BertJapaneseTokenizer
    test_rust_tokenizer = False
    space_between_special_tokens = True
    def setUp(self):
        super().setUp()
        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "こんにちは",
            "こん",
            "にちは",
            "ばんは",
            "##こん",
            "##にちは",
            "##ばんは",
            "世界",
            "##世界",
            "、",
            "##、",
            "。",
            "##。",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
    def get_input_output_texts(self, tokenizer):
        input_text = "こんにちは、世界。 \nこんばんは、世界。"
        output_text = "こんにちは 、 世界 。 こんばんは 、 世界 。"
        return input_text, output_text

    def get_clean_sequence(self, tokenizer):
        input_text, output_text = self.get_input_output_texts(tokenizer)
        ids = tokenizer.encode(output_text, add_special_tokens=False)
        text = tokenizer.decode(ids, clean_up_tokenization_spaces=False)
        return text, ids
def lowercase_ ( self : Optional[Any] ):
pass # TODO add if relevant
def lowercase_ ( self : List[Any] ):
pass # TODO add if relevant
def lowercase_ ( self : Dict ):
pass # TODO add if relevant
    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)
        tokens = tokenizer.tokenize("こんにちは、世界。\nこんばんは、世界。")
        self.assertListEqual(tokens, ["こんにちは", "、", "世界", "。", "こん", "##ばんは", "、", "世界", "。"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [3, 12, 10, 14, 4, 9, 12, 10, 14])

    def test_pickle_mecab_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file, word_tokenizer_type="mecab")
        self.assertIsNotNone(tokenizer)
        text = "こんにちは、世界。\nこんばんは、世界。"
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, ["こんにちは", "、", "世界", "。", "こん", "##ばんは", "、", "世界", "。"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [3, 12, 10, 14, 4, 9, 12, 10, 14])
        filename = os.path.join(self.tmpdirname, "tokenizer.bin")
        with open(filename, "wb") as handle:
            pickle.dump(tokenizer, handle)
        with open(filename, "rb") as handle:
            tokenizer_new = pickle.load(handle)
        tokens_loaded = tokenizer_new.tokenize(text)
        self.assertListEqual(tokens, tokens_loaded)
    def test_mecab_tokenizer(self):
        tokenizer = MecabTokenizer(mecab_dic="ipadic")
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , ['アップルストア', 'で', 'iPhone', '8', 'が', '発売', 'さ', 'れ', 'た', '。'] , )
    def test_mecab_tokenizer_unidic_lite(self):
        try:
            tokenizer = MecabTokenizer(mecab_dic="unidic_lite")
except ModuleNotFoundError:
return
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , ['アップル', 'ストア', 'で', 'iPhone', '8', 'が', '発売', 'さ', 'れ', 'た', '。'] , )
    def test_mecab_tokenizer_unidic(self):
        try:
            tokenizer = MecabTokenizer(mecab_dic="unidic")
except ModuleNotFoundError:
return
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , ['アップル', 'ストア', 'で', 'iPhone', '8', 'が', '発売', 'さ', 'れ', 'た', '。'] , )
    def test_mecab_tokenizer_lower(self):
        tokenizer = MecabTokenizer(do_lower_case=True, mecab_dic="ipadic")
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , ['アップルストア', 'で', 'iphone', '8', 'が', '発売', 'さ', 'れ', 'た', '。'] , )
    def test_mecab_tokenizer_with_option(self):
        try:
            tokenizer = MecabTokenizer(
                do_lower_case=False, normalize_text=False, mecab_option="-d /usr/local/lib/mecab/dic/jumandic"
            )
except RuntimeError:
# if dict doesn't exist in the system, previous code raises this error.
return
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , ['アップルストア', 'で', 'iPhone', '8', 'が', '発売', 'さ', 'れた', '\u3000', '。'] , )
    def test_mecab_tokenizer_no_normalize(self):
        tokenizer = MecabTokenizer(normalize_text=False, mecab_dic="ipadic")
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , ['アップルストア', 'で', 'iPhone', '8', 'が', '発売', 'さ', 'れ', 'た', ' ', '。'] , )
@require_sudachi
    def test_pickle_sudachi_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file, word_tokenizer_type="sudachi")
        self.assertIsNotNone(tokenizer)
        text = "こんにちは、世界。\nこんばんは、世界。"
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, ["こんにちは", "、", "世界", "。", "こん", "##ばんは", "、", "世界", "。"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [3, 12, 10, 14, 4, 9, 12, 10, 14])
        filename = os.path.join(self.tmpdirname, "tokenizer.bin")
        with open(filename, "wb") as handle:
            pickle.dump(tokenizer, handle)
        with open(filename, "rb") as handle:
            tokenizer_new = pickle.load(handle)
        tokens_loaded = tokenizer_new.tokenize(text)
        self.assertListEqual(tokens, tokens_loaded)
@require_sudachi
    def test_sudachi_tokenizer_core(self):
        tokenizer = SudachiTokenizer(sudachi_dict_type="core")
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , [' ', '\t', 'アップル', 'ストア', 'で', 'iPhone', '8', ' ', 'が', ' ', ' ', '\n ', '発売', 'さ', 'れ', 'た', ' ', '。', ' ', ' '] , )
@require_sudachi
    def test_sudachi_tokenizer_split_mode_A(self):
        tokenizer = SudachiTokenizer(sudachi_dict_type="core", sudachi_split_mode="A")
self.assertListEqual(tokenizer.tokenize('外国人参政権' ) , ['外国', '人', '参政', '権'] )
@require_sudachi
    def test_sudachi_tokenizer_split_mode_B(self):
        tokenizer = SudachiTokenizer(sudachi_dict_type="core", sudachi_split_mode="B")
self.assertListEqual(tokenizer.tokenize('外国人参政権' ) , ['外国人', '参政権'] )
@require_sudachi
    def test_sudachi_tokenizer_split_mode_C(self):
        tokenizer = SudachiTokenizer(sudachi_dict_type="core", sudachi_split_mode="C")
self.assertListEqual(tokenizer.tokenize('外国人参政権' ) , ['外国人参政権'] )
@require_sudachi
    def test_sudachi_tokenizer_lower(self):
        tokenizer = SudachiTokenizer(do_lower_case=True, sudachi_dict_type="core")
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , [' ', '\t', 'アップル', 'ストア', 'で', 'iphone', '8', ' ', 'が', ' ', ' ', '\n ', '発売', 'さ', 'れ', 'た', ' ', '。', ' ', ' '] , )
@require_sudachi
    def test_sudachi_tokenizer_no_normalize(self):
        tokenizer = SudachiTokenizer(normalize_text=False, sudachi_dict_type="core")
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , [' ', '\t', 'アップル', 'ストア', 'で', 'iPhone', '8', ' ', 'が', ' ', ' ', '\n ', '発売', 'さ', 'れ', 'た', '\u3000', '。', ' ', ' '] , )
@require_sudachi
    def test_sudachi_tokenizer_trim_whitespace(self):
        tokenizer = SudachiTokenizer(trim_whitespace=True, sudachi_dict_type="core")
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , ['アップル', 'ストア', 'で', 'iPhone', '8', 'が', '発売', 'さ', 'れ', 'た', '。'] , )
@require_jumanpp
    def test_pickle_jumanpp_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file, word_tokenizer_type="jumanpp")
        self.assertIsNotNone(tokenizer)
        text = "こんにちは、世界。\nこんばんは、世界。"
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, ["こんにちは", "、", "世界", "。", "こん", "##ばんは", "、", "世界", "。"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [3, 12, 10, 14, 4, 9, 12, 10, 14])
        filename = os.path.join(self.tmpdirname, "tokenizer.bin")
        with open(filename, "wb") as handle:
            pickle.dump(tokenizer, handle)
        with open(filename, "rb") as handle:
            tokenizer_new = pickle.load(handle)
        tokens_loaded = tokenizer_new.tokenize(text)
        self.assertListEqual(tokens, tokens_loaded)
@require_jumanpp
    def test_jumanpp_tokenizer(self):
        tokenizer = JumanppTokenizer()
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , ['アップル', 'ストア', 'で', 'iPhone', '8', '\u3000', 'が', '\u3000', '\u3000', '\u3000', '発売', 'さ', 'れた', '\u3000', '。'] , )
@require_jumanpp
    def test_jumanpp_tokenizer_lower(self):
        tokenizer = JumanppTokenizer(do_lower_case=True)
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , ['アップル', 'ストア', 'で', 'iphone', '8', '\u3000', 'が', '\u3000', '\u3000', '\u3000', '発売', 'さ', 'れた', '\u3000', '。'] , )
@require_jumanpp
    def test_jumanpp_tokenizer_no_normalize(self):
        tokenizer = JumanppTokenizer(normalize_text=False)
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , ['ア', 'ッ', 'フ', '゚', 'ル', 'ストア', 'で', 'iPhone', '8', '\u3000', 'が', '\u3000', '\u3000', '\u3000', '発売', 'さ', 'れた', '\u3000', '。'] , )
@require_jumanpp
    def test_jumanpp_tokenizer_trim_whitespace(self):
        tokenizer = JumanppTokenizer(trim_whitespace=True)
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , ['アップル', 'ストア', 'で', 'iPhone', '8', 'が', '発売', 'さ', 'れた', '。'] , )
@require_jumanpp
    def test_jumanpp_tokenizer_ext(self):
        tokenizer = JumanppTokenizer()
self.assertListEqual(
tokenizer.tokenize('ありがとうございますm(_ _)m見つけるのが大変です。' ) , ['ありがとう', 'ございます', 'm(_ _)m', '見つける', 'の', 'が', '大変です', '。'] , )
    def test_wordpiece_tokenizer(self):
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "こんにちは", "こん", "にちは", "ばんは", "##こん", "##にちは", "##ばんは"]
        vocab = {}
        for i, token in enumerate(vocab_tokens):
            vocab[token] = i
        tokenizer = WordpieceTokenizer(vocab=vocab, unk_token="[UNK]")
self.assertListEqual(tokenizer.tokenize('' ) , [] )
self.assertListEqual(tokenizer.tokenize('こんにちは' ) , ['こんにちは'] )
self.assertListEqual(tokenizer.tokenize('こんばんは' ) , ['こん', '##ばんは'] )
self.assertListEqual(tokenizer.tokenize('こんばんは こんばんにちは こんにちは' ) , ['こん', '##ばんは', '[UNK]', 'こんにちは'] )
    def test_sentencepiece_tokenizer(self):
        tokenizer = BertJapaneseTokenizer.from_pretrained("nlp-waseda/roberta-base-japanese-with-auto-jumanpp")
        subword_tokenizer = tokenizer.subword_tokenizer
        tokens = subword_tokenizer.tokenize("国境 の 長い トンネル を 抜ける と 雪国 であった 。")
        self.assertListEqual(tokens, ["▁国境", "▁の", "▁長い", "▁トンネル", "▁を", "▁抜ける", "▁と", "▁雪", "国", "▁であった", "▁。"])
        tokens = subword_tokenizer.tokenize("こんばんは こんばん にち は こんにちは")
        self.assertListEqual(tokens, ["▁こん", "ばん", "は", "▁こん", "ばん", "▁に", "ち", "▁は", "▁こんにちは"])
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained("cl-tohoku/bert-base-japanese")
        text = tokenizer.encode("ありがとう。", add_special_tokens=False)
        text_2 = tokenizer.encode("どういたしまして。", add_special_tokens=False)
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)
        # 2 is for "[CLS]", 3 is for "[SEP]"
        assert encoded_sentence == [2] + text + [3]
        assert encoded_pair == [2] + text + [3] + text_2 + [3]
@custom_tokenizers
class BertJapaneseCharacterTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BertJapaneseTokenizer
    test_rust_tokenizer = False
    def setUp(self):
        super().setUp()
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "こ", "ん", "に", "ち", "は", "ば", "世", "界", "、", "。"]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
    def get_tokenizer(self, **kwargs):
        return BertJapaneseTokenizer.from_pretrained(self.tmpdirname, subword_tokenizer_type="character", **kwargs)
    def get_input_output_texts(self, tokenizer):
        input_text = "こんにちは、世界。 \nこんばんは、世界。"
        output_text = "こ ん に ち は 、 世 界 。 こ ん ば ん は 、 世 界 。"
        return input_text, output_text
def lowercase_ ( self : str ):
pass # TODO add if relevant
def lowercase_ ( self : List[str] ):
pass # TODO add if relevant
def lowercase_ ( self : Any ):
pass # TODO add if relevant
    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file, subword_tokenizer_type="character")
        tokens = tokenizer.tokenize("こんにちは、世界。 \nこんばんは、世界。")
        self.assertListEqual(
            tokens, ["こ", "ん", "に", "ち", "は", "、", "世", "界", "。", "こ", "ん", "ば", "ん", "は", "、", "世", "界", "。"]
        )
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens), [3, 4, 5, 6, 7, 11, 9, 10, 12, 3, 4, 8, 4, 7, 11, 9, 10, 12]
        )
    def test_character_tokenizer(self):
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "こ", "ん", "に", "ち", "は", "ば", "世", "界", "、", "。"]
        vocab = {}
        for i, token in enumerate(vocab_tokens):
            vocab[token] = i
        tokenizer = CharacterTokenizer(vocab=vocab, unk_token="[UNK]")
self.assertListEqual(tokenizer.tokenize('' ) , [] )
self.assertListEqual(tokenizer.tokenize('こんにちは' ) , ['こ', 'ん', 'に', 'ち', 'は'] )
self.assertListEqual(tokenizer.tokenize('こんにちほ' ) , ['こ', 'ん', 'に', 'ち', '[UNK]'] )
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained("cl-tohoku/bert-base-japanese-char")
        text = tokenizer.encode("ありがとう。", add_special_tokens=False)
        text_2 = tokenizer.encode("どういたしまして。", add_special_tokens=False)
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)
        # 2 is for "[CLS]", 3 is for "[SEP]"
        assert encoded_sentence == [2] + text + [3]
        assert encoded_pair == [2] + text + [3] + text_2 + [3]
@custom_tokenizers
class AutoBertJapaneseTokenizerTest(unittest.TestCase):
    def test_tokenizer_bert_japanese(self):
        EXAMPLE_BERT_JAPANESE_ID = "cl-tohoku/bert-base-japanese"
        tokenizer = AutoTokenizer.from_pretrained(EXAMPLE_BERT_JAPANESE_ID)
        self.assertIsInstance(tokenizer, BertJapaneseTokenizer)
class BertTokenizerMismatchTest(unittest.TestCase):
    def test_tokenizer_mismatch_warning(self):
        EXAMPLE_BERT_JAPANESE_ID = "cl-tohoku/bert-base-japanese"
        with self.assertLogs("transformers", level="WARNING") as cm:
            BertTokenizer.from_pretrained(EXAMPLE_BERT_JAPANESE_ID)
            self.assertTrue(
                cm.records[0].message.startswith(
                    "The tokenizer class you load from this checkpoint is not the same type as the class this function"
                    " is called from."
                )
            )
        EXAMPLE_BERT_ID = "bert-base-cased"
        with self.assertLogs("transformers", level="WARNING") as cm:
            BertJapaneseTokenizer.from_pretrained(EXAMPLE_BERT_ID)
            self.assertTrue(
                cm.records[0].message.startswith(
                    "The tokenizer class you load from this checkpoint is not the same type as the class this function"
                    " is called from."
                )
            )
 | 96 | 1 |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import warnings
from typing import List
from unittest.mock import Mock
import torch
from torch.utils.data import DataLoader, IterableDataset, TensorDataset
from accelerate.accelerator import Accelerator
from accelerate.utils.dataclasses import DistributedType
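# This script is meant to be launched with two processes; it checks how
# `even_batches` and `Accelerator.join_uneven_inputs` affect per-process batches.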
class DummyIterableDataset(IterableDataset):
    def __init__(self, data):
        self.data = data

    def __iter__(self):
        for element in self.data:
            yield element
def create_accelerator(even_batches=True):
    accelerator = Accelerator(even_batches=even_batches)
    assert accelerator.num_processes == 2, "this script expects that two GPUs are available"
    return accelerator
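# Builds a dataloader over `range(dataset_size)` and runs it through `accelerator.prepare`.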
def create_dataloader(accelerator: Accelerator, dataset_size: int, batch_size: int, iterable: bool = False):
    if iterable:
        dataset = DummyIterableDataset(torch.as_tensor(range(dataset_size)))
    else:
        dataset = TensorDataset(torch.as_tensor(range(dataset_size)))
    dl = DataLoader(dataset, batch_size=batch_size)
    dl = accelerator.prepare(dl)
    return dl
def verify_dataloader_batch_sizes(
    accelerator: Accelerator,
    dataset_size: int,
    batch_size: int,
    process_0_expected_batch_sizes: List[int],
    process_1_expected_batch_sizes: List[int],
):
    dl = create_dataloader(accelerator=accelerator, dataset_size=dataset_size, batch_size=batch_size)
    batch_sizes = [len(batch[0]) for batch in dl]
    if accelerator.process_index == 0:
        assert batch_sizes == process_0_expected_batch_sizes
    elif accelerator.process_index == 1:
        assert batch_sizes == process_1_expected_batch_sizes
def __a ( ) -> str:
'''simple docstring'''
__UpperCAmelCase = create_accelerator()
# without padding, we would expect a different number of batches
verify_dataloader_batch_sizes(
SCREAMING_SNAKE_CASE , dataset_size=3 , batch_size=1 , process_0_expected_batch_sizes=[1, 1] , process_1_expected_batch_sizes=[1, 1] , )
# without padding, we would expect the same number of batches, but different sizes
verify_dataloader_batch_sizes(
SCREAMING_SNAKE_CASE , dataset_size=7 , batch_size=2 , process_0_expected_batch_sizes=[2, 2] , process_1_expected_batch_sizes=[2, 2] , )
def __a ( ) -> List[str]:
'''simple docstring'''
__UpperCAmelCase = create_accelerator(even_batches=SCREAMING_SNAKE_CASE )
verify_dataloader_batch_sizes(
SCREAMING_SNAKE_CASE , dataset_size=3 , batch_size=1 , process_0_expected_batch_sizes=[1, 1] , process_1_expected_batch_sizes=[1] , )
verify_dataloader_batch_sizes(
SCREAMING_SNAKE_CASE , dataset_size=7 , batch_size=2 , process_0_expected_batch_sizes=[2, 2] , process_1_expected_batch_sizes=[2, 1] , )
def __a ( ) -> int:
'''simple docstring'''
__UpperCAmelCase = create_accelerator(even_batches=SCREAMING_SNAKE_CASE )
__UpperCAmelCase = torch.nn.Linear(1 , 1 )
__UpperCAmelCase = accelerator.prepare(SCREAMING_SNAKE_CASE )
__UpperCAmelCase = create_dataloader(SCREAMING_SNAKE_CASE , dataset_size=3 , batch_size=1 )
__UpperCAmelCase = []
with accelerator.join_uneven_inputs([ddp_model] ):
for batch_idx, batch in enumerate(SCREAMING_SNAKE_CASE ):
__UpperCAmelCase = ddp_model(batch[0].float() )
__UpperCAmelCase = output.sum()
loss.backward()
batch_idxs.append(SCREAMING_SNAKE_CASE )
accelerator.wait_for_everyone()
if accelerator.process_index == 0:
assert batch_idxs == [0, 1]
elif accelerator.process_index == 1:
assert batch_idxs == [0]
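# `join_uneven_inputs` wraps PyTorch's `Join` context manager so that ranks
# which exhaust their dataloader early keep participating in DDP collectives
# instead of deadlocking the ranks that still have batches left.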
def __a ( SCREAMING_SNAKE_CASE ) -> List[str]:
'''simple docstring'''
with warnings.catch_warnings(record=SCREAMING_SNAKE_CASE ) as w:
with accelerator.join_uneven_inputs([Mock()] ):
pass
assert issubclass(w[-1].category , SCREAMING_SNAKE_CASE )
assert "only supported for multi-GPU" in str(w[-1].message )
def __a ( ) -> Tuple:
'''simple docstring'''
__UpperCAmelCase = True
__UpperCAmelCase = False
__UpperCAmelCase = create_accelerator(even_batches=SCREAMING_SNAKE_CASE )
__UpperCAmelCase = torch.nn.Linear(1 , 1 )
__UpperCAmelCase = accelerator.prepare(SCREAMING_SNAKE_CASE )
__UpperCAmelCase = create_dataloader(SCREAMING_SNAKE_CASE , dataset_size=3 , batch_size=1 )
__UpperCAmelCase = create_dataloader(SCREAMING_SNAKE_CASE , dataset_size=3 , batch_size=1 )
with accelerator.join_uneven_inputs([ddp_model] , even_batches=SCREAMING_SNAKE_CASE ):
__UpperCAmelCase = train_dl.batch_sampler.even_batches
__UpperCAmelCase = valid_dl.batch_sampler.even_batches
assert train_dl_overridden_value == overridden_even_batches
assert valid_dl_overridden_value == overridden_even_batches
assert train_dl.batch_sampler.even_batches == default_even_batches
assert valid_dl.batch_sampler.even_batches == default_even_batches
def __a ( ) -> int:
'''simple docstring'''
__UpperCAmelCase = True
__UpperCAmelCase = False
__UpperCAmelCase = create_accelerator(even_batches=SCREAMING_SNAKE_CASE )
__UpperCAmelCase = torch.nn.Linear(1 , 1 )
__UpperCAmelCase = accelerator.prepare(SCREAMING_SNAKE_CASE )
create_dataloader(SCREAMING_SNAKE_CASE , dataset_size=3 , batch_size=1 , iterable=SCREAMING_SNAKE_CASE )
__UpperCAmelCase = create_dataloader(SCREAMING_SNAKE_CASE , dataset_size=3 , batch_size=1 )
with warnings.catch_warnings():
warnings.filterwarnings('''ignore''' )
try:
with accelerator.join_uneven_inputs([ddp_model] , even_batches=SCREAMING_SNAKE_CASE ):
__UpperCAmelCase = batch_dl.batch_sampler.even_batches
except AttributeError:
# ensure attribute error is not raised when processing iterable dl
raise AssertionError
assert batch_dl_overridden_value == overridden_even_batches
assert batch_dl.batch_sampler.even_batches == default_even_batches
def __a ( ) -> Optional[int]:
'''simple docstring'''
__UpperCAmelCase = create_accelerator()
__UpperCAmelCase = torch.nn.Linear(1 , 1 )
__UpperCAmelCase = accelerator.prepare(SCREAMING_SNAKE_CASE )
create_dataloader(SCREAMING_SNAKE_CASE , dataset_size=3 , batch_size=1 , iterable=SCREAMING_SNAKE_CASE )
with warnings.catch_warnings(record=SCREAMING_SNAKE_CASE ) as w:
with accelerator.join_uneven_inputs([ddp_model] , even_batches=SCREAMING_SNAKE_CASE ):
pass
assert issubclass(w[-1].category , SCREAMING_SNAKE_CASE )
assert "only supported for map-style datasets" in str(w[-1].message )
def __a ( ) -> Union[str, Any]:
'''simple docstring'''
__UpperCAmelCase = create_accelerator()
accelerator.print('''Test that even_batches variable ensures uniform batches across processes''' )
test_default_ensures_even_batch_sizes()
accelerator.print('''Run tests with even_batches disabled''' )
test_can_disable_even_batches()
accelerator.print('''Test joining uneven inputs''' )
test_can_join_uneven_inputs()
accelerator.print('''Test overriding even_batches when joining uneven inputs''' )
test_join_can_override_even_batches()
accelerator.print('''Test overriding even_batches for mixed dataloader types''' )
test_join_can_override_for_mixed_type_dataloaders()
accelerator.print('''Test overriding even_batches raises a warning for iterable dataloaders''' )
test_join_raises_warning_for_iterable_when_overriding_even_batches()
accelerator.print('''Test join with non DDP distributed raises warning''' )
__UpperCAmelCase = accelerator.state.distributed_type
__UpperCAmelCase = DistributedType.FSDP
test_join_raises_warning_for_non_ddp_distributed(SCREAMING_SNAKE_CASE )
__UpperCAmelCase = original_state
if __name__ == "__main__":
main()
| 333 |
import math
from enum import Enum
from typing import Optional, Union
from torch.optim import Optimizer
from torch.optim.lr_scheduler import LambdaLR
from .utils import logging
A_ : Tuple = logging.get_logger(__name__)
class A_ ( _a ):
'''simple docstring'''
a__ = "linear"
a__ = "cosine"
a__ = "cosine_with_restarts"
a__ = "polynomial"
a__ = "constant"
a__ = "constant_with_warmup"
a__ = "piecewise_constant"
def __a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = -1 ) -> Tuple:
'''simple docstring'''
return LambdaLR(SCREAMING_SNAKE_CASE , lambda SCREAMING_SNAKE_CASE : 1 , last_epoch=SCREAMING_SNAKE_CASE )
def __a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = -1 ) -> Union[str, Any]:
'''simple docstring'''
def lr_lambda(SCREAMING_SNAKE_CASE ):
if current_step < num_warmup_steps:
return float(SCREAMING_SNAKE_CASE ) / float(max(1.0 , SCREAMING_SNAKE_CASE ) )
return 1.0
return LambdaLR(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , last_epoch=SCREAMING_SNAKE_CASE )
def __a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = -1 ) -> List[Any]:
'''simple docstring'''
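    # `step_rules` has the form "multiplier:step,...,final_multiplier", e.g.
    # "1:10,0.1:20,0.01:30,0.005" multiplies the lr by 1 until step 10, by 0.1
    # until step 20, by 0.01 until step 30, and by 0.005 for all later steps.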
__UpperCAmelCase = {}
__UpperCAmelCase = step_rules.split(''',''' )
for rule_str in rule_list[:-1]:
__UpperCAmelCase , __UpperCAmelCase = rule_str.split(''':''' )
__UpperCAmelCase = int(SCREAMING_SNAKE_CASE )
__UpperCAmelCase = float(SCREAMING_SNAKE_CASE )
__UpperCAmelCase = value
__UpperCAmelCase = float(rule_list[-1] )
def create_rules_function(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
def rule_func(SCREAMING_SNAKE_CASE ) -> float:
__UpperCAmelCase = sorted(rules_dict.keys() )
for i, sorted_step in enumerate(SCREAMING_SNAKE_CASE ):
if steps < sorted_step:
return rules_dict[sorted_steps[i]]
return last_lr_multiple
return rule_func
__UpperCAmelCase = create_rules_function(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
return LambdaLR(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , last_epoch=SCREAMING_SNAKE_CASE )
def __a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=-1 ) -> Optional[Any]:
'''simple docstring'''
def lr_lambda(SCREAMING_SNAKE_CASE ):
if current_step < num_warmup_steps:
return float(SCREAMING_SNAKE_CASE ) / float(max(1 , SCREAMING_SNAKE_CASE ) )
return max(
0.0 , float(num_training_steps - current_step ) / float(max(1 , num_training_steps - num_warmup_steps ) ) )
return LambdaLR(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
def __a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = 0.5 , SCREAMING_SNAKE_CASE = -1 ) -> int:
'''simple docstring'''
def lr_lambda(SCREAMING_SNAKE_CASE ):
if current_step < num_warmup_steps:
return float(SCREAMING_SNAKE_CASE ) / float(max(1 , SCREAMING_SNAKE_CASE ) )
__UpperCAmelCase = float(current_step - num_warmup_steps ) / float(max(1 , num_training_steps - num_warmup_steps ) )
return max(0.0 , 0.5 * (1.0 + math.cos(math.pi * float(SCREAMING_SNAKE_CASE ) * 2.0 * progress )) )
return LambdaLR(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
def __a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = 1 , SCREAMING_SNAKE_CASE = -1 ) -> Dict:
'''simple docstring'''
def lr_lambda(SCREAMING_SNAKE_CASE ):
if current_step < num_warmup_steps:
return float(SCREAMING_SNAKE_CASE ) / float(max(1 , SCREAMING_SNAKE_CASE ) )
__UpperCAmelCase = float(current_step - num_warmup_steps ) / float(max(1 , num_training_steps - num_warmup_steps ) )
if progress >= 1.0:
return 0.0
return max(0.0 , 0.5 * (1.0 + math.cos(math.pi * ((float(SCREAMING_SNAKE_CASE ) * progress) % 1.0) )) )
return LambdaLR(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
def __a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=1e-7 , SCREAMING_SNAKE_CASE=1.0 , SCREAMING_SNAKE_CASE=-1 ) -> List[str]:
'''simple docstring'''
__UpperCAmelCase = optimizer.defaults['''lr''']
if not (lr_init > lr_end):
        raise ValueError(f'''lr_end ({lr_end}) must be smaller than initial lr ({lr_init})''' )
def lr_lambda(SCREAMING_SNAKE_CASE ):
if current_step < num_warmup_steps:
return float(SCREAMING_SNAKE_CASE ) / float(max(1 , SCREAMING_SNAKE_CASE ) )
elif current_step > num_training_steps:
return lr_end / lr_init # as LambdaLR multiplies by lr_init
else:
__UpperCAmelCase = lr_init - lr_end
__UpperCAmelCase = num_training_steps - num_warmup_steps
__UpperCAmelCase = 1 - (current_step - num_warmup_steps) / decay_steps
__UpperCAmelCase = lr_range * pct_remaining**power + lr_end
return decay / lr_init # as LambdaLR multiplies by lr_init
return LambdaLR(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
A_ : Optional[Any] = {
SchedulerType.LINEAR: get_linear_schedule_with_warmup,
SchedulerType.COSINE: get_cosine_schedule_with_warmup,
SchedulerType.COSINE_WITH_RESTARTS: get_cosine_with_hard_restarts_schedule_with_warmup,
SchedulerType.POLYNOMIAL: get_polynomial_decay_schedule_with_warmup,
SchedulerType.CONSTANT: get_constant_schedule,
SchedulerType.CONSTANT_WITH_WARMUP: get_constant_schedule_with_warmup,
SchedulerType.PIECEWISE_CONSTANT: get_piecewise_constant_schedule,
}
def __a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = 1 , SCREAMING_SNAKE_CASE = 1.0 , SCREAMING_SNAKE_CASE = -1 , ) -> Union[str, Any]:
'''simple docstring'''
__UpperCAmelCase = SchedulerType(SCREAMING_SNAKE_CASE )
__UpperCAmelCase = TYPE_TO_SCHEDULER_FUNCTION[name]
if name == SchedulerType.CONSTANT:
return schedule_func(SCREAMING_SNAKE_CASE , last_epoch=SCREAMING_SNAKE_CASE )
if name == SchedulerType.PIECEWISE_CONSTANT:
return schedule_func(SCREAMING_SNAKE_CASE , step_rules=SCREAMING_SNAKE_CASE , last_epoch=SCREAMING_SNAKE_CASE )
# All other schedulers require `num_warmup_steps`
if num_warmup_steps is None:
raise ValueError(f'''{name} requires `num_warmup_steps`, please provide that argument.''' )
if name == SchedulerType.CONSTANT_WITH_WARMUP:
return schedule_func(SCREAMING_SNAKE_CASE , num_warmup_steps=SCREAMING_SNAKE_CASE , last_epoch=SCREAMING_SNAKE_CASE )
# All other schedulers require `num_training_steps`
if num_training_steps is None:
raise ValueError(f'''{name} requires `num_training_steps`, please provide that argument.''' )
if name == SchedulerType.COSINE_WITH_RESTARTS:
return schedule_func(
SCREAMING_SNAKE_CASE , num_warmup_steps=SCREAMING_SNAKE_CASE , num_training_steps=SCREAMING_SNAKE_CASE , num_cycles=SCREAMING_SNAKE_CASE , last_epoch=SCREAMING_SNAKE_CASE , )
if name == SchedulerType.POLYNOMIAL:
return schedule_func(
SCREAMING_SNAKE_CASE , num_warmup_steps=SCREAMING_SNAKE_CASE , num_training_steps=SCREAMING_SNAKE_CASE , power=SCREAMING_SNAKE_CASE , last_epoch=SCREAMING_SNAKE_CASE , )
return schedule_func(
SCREAMING_SNAKE_CASE , num_warmup_steps=SCREAMING_SNAKE_CASE , num_training_steps=SCREAMING_SNAKE_CASE , last_epoch=SCREAMING_SNAKE_CASE )
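# Minimal usage sketch (illustrative only: `model`, the learning rate and the
# step counts are placeholders, and the dispatcher above is referred to by its
# conventional name `get_scheduler`):
#
#   optimizer = torch.optim.AdamW(model.parameters(), lr=1e-4)
#   scheduler = get_scheduler(
#       SchedulerType.COSINE, optimizer, num_warmup_steps=500, num_training_steps=10_000
#   )
#   for _ in range(10_000):
#       ...  # forward/backward, optimizer.step(), optimizer.zero_grad()
#       scheduler.step()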
| 333 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase_ = logging.get_logger(__name__)
UpperCamelCase_ = {
"RWKV/rwkv-4-169m-pile": "https://huggingface.co/RWKV/rwkv-4-169m-pile/resolve/main/config.json",
"RWKV/rwkv-4-430m-pile": "https://huggingface.co/RWKV/rwkv-4-430m-pile/resolve/main/config.json",
"RWKV/rwkv-4-1b5-pile": "https://huggingface.co/RWKV/rwkv-4-1b5-pile/resolve/main/config.json",
"RWKV/rwkv-4-3b-pile": "https://huggingface.co/RWKV/rwkv-4-3b-pile/resolve/main/config.json",
"RWKV/rwkv-4-7b-pile": "https://huggingface.co/RWKV/rwkv-4-7b-pile/resolve/main/config.json",
"RWKV/rwkv-4-14b-pile": "https://huggingface.co/RWKV/rwkv-4-14b-pile/resolve/main/config.json",
"RWKV/rwkv-raven-1b5": "https://huggingface.co/RWKV/rwkv-raven-1b5/resolve/main/config.json",
"RWKV/rwkv-raven-3b": "https://huggingface.co/RWKV/rwkv-raven-3b/resolve/main/config.json",
"RWKV/rwkv-raven-7b": "https://huggingface.co/RWKV/rwkv-raven-7b/resolve/main/config.json",
"RWKV/rwkv-raven-14b": "https://huggingface.co/RWKV/rwkv-raven-14b/resolve/main/config.json",
}
class a_ ( _snake_case ):
UpperCamelCase__ : Tuple ="rwkv"
UpperCamelCase__ : str ={"max_position_embeddings": "context_length"}
def __init__( self :List[Any] , _lowercase :Optional[int]=50277 , _lowercase :Optional[Any]=1024 , _lowercase :int=4096 , _lowercase :Union[str, Any]=32 , _lowercase :Tuple=None , _lowercase :Tuple=None , _lowercase :Optional[int]=1E-5 , _lowercase :Optional[Any]=0 , _lowercase :int=0 , _lowercase :int=6 , _lowercase :Optional[int]=False , _lowercase :Optional[Any]=True , **_lowercase :Union[str, Any] , ) -> List[str]:
UpperCAmelCase_ = vocab_size
UpperCAmelCase_ = context_length
UpperCAmelCase_ = hidden_size
UpperCAmelCase_ = num_hidden_layers
UpperCAmelCase_ = attention_hidden_size if attention_hidden_size is not None else hidden_size
UpperCAmelCase_ = intermediate_size if intermediate_size is not None else 4 * hidden_size
UpperCAmelCase_ = layer_norm_epsilon
UpperCAmelCase_ = rescale_every
UpperCAmelCase_ = use_cache
UpperCAmelCase_ = bos_token_id
UpperCAmelCase_ = eos_token_id
super().__init__(
tie_word_embeddings=_lowercase , bos_token_id=_lowercase , eos_token_id=_lowercase , **_lowercase)
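# Illustrative sketch only (assuming the class's original name `RwkvConfig`;
# the sizes are made up). The two fallbacks above are the only derived logic:
#
#   config = RwkvConfig(hidden_size=256)
#   assert config.attention_hidden_size == 256   # defaults to hidden_size
#   assert config.intermediate_size == 4 * 256   # defaults to 4 * hidden_size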
| 357 |
import unittest
from transformers import BigBirdTokenizer, BigBirdTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
UpperCamelCase_ = "▁"
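# "▁" (U+2581, LOWER ONE EIGHTH BLOCK) is SentencePiece's marker for a piece
# that begins a new word.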
UpperCamelCase_ = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
@require_tokenizers
class a_ ( _snake_case , unittest.TestCase ):
UpperCamelCase__ : str =BigBirdTokenizer
UpperCamelCase__ : Tuple =BigBirdTokenizerFast
UpperCamelCase__ : Union[str, Any] =True
UpperCamelCase__ : List[str] =True
def __a ( self :Any) -> List[str]:
super().setUp()
UpperCAmelCase_ = self.tokenizer_class(_lowercase , keep_accents=_lowercase)
tokenizer.save_pretrained(self.tmpdirname)
def __a ( self :Optional[int]) -> str:
UpperCAmelCase_ = '''<s>'''
UpperCAmelCase_ = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_lowercase) , _lowercase)
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_lowercase) , _lowercase)
def __a ( self :str) -> str:
UpperCAmelCase_ = list(self.get_tokenizer().get_vocab().keys())
self.assertEqual(vocab_keys[0] , '''<unk>''')
self.assertEqual(vocab_keys[1] , '''<s>''')
self.assertEqual(vocab_keys[-1] , '''[MASK]''')
self.assertEqual(len(_lowercase) , 1004)
def __a ( self :List[str]) -> int:
self.assertEqual(self.get_tokenizer().vocab_size , 1000)
def __a ( self :Tuple) -> int:
if not self.test_rust_tokenizer:
return
UpperCAmelCase_ = self.get_tokenizer()
UpperCAmelCase_ = self.get_rust_tokenizer()
UpperCAmelCase_ = '''I was born in 92000, and this is falsé.'''
UpperCAmelCase_ = tokenizer.tokenize(_lowercase)
UpperCAmelCase_ = rust_tokenizer.tokenize(_lowercase)
self.assertListEqual(_lowercase , _lowercase)
UpperCAmelCase_ = tokenizer.encode(_lowercase , add_special_tokens=_lowercase)
UpperCAmelCase_ = rust_tokenizer.encode(_lowercase , add_special_tokens=_lowercase)
self.assertListEqual(_lowercase , _lowercase)
UpperCAmelCase_ = self.get_rust_tokenizer()
UpperCAmelCase_ = tokenizer.encode(_lowercase)
UpperCAmelCase_ = rust_tokenizer.encode(_lowercase)
self.assertListEqual(_lowercase , _lowercase)
def __a ( self :Optional[Any]) -> List[str]:
UpperCAmelCase_ = BigBirdTokenizer(_lowercase , keep_accents=_lowercase)
UpperCAmelCase_ = tokenizer.tokenize('''This is a test''')
self.assertListEqual(_lowercase , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''])
self.assertListEqual(
tokenizer.convert_tokens_to_ids(_lowercase) , [285, 46, 10, 170, 382] , )
UpperCAmelCase_ = tokenizer.tokenize('''I was born in 92000, and this is falsé.''')
self.assertListEqual(
_lowercase , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] , )
UpperCAmelCase_ = tokenizer.convert_tokens_to_ids(_lowercase)
self.assertListEqual(
_lowercase , [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4] , )
UpperCAmelCase_ = tokenizer.convert_ids_to_tokens(_lowercase)
self.assertListEqual(
_lowercase , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''<unk>''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''<unk>''',
'''.''',
] , )
@cached_property
def __a ( self :Any) -> List[Any]:
return BigBirdTokenizer.from_pretrained('''google/bigbird-roberta-base''')
@slow
def __a ( self :int) -> List[Any]:
UpperCAmelCase_ = '''Hello World!'''
UpperCAmelCase_ = [65, 18536, 2260, 101, 66]
self.assertListEqual(_lowercase , self.big_tokenizer.encode(_lowercase))
@slow
def __a ( self :int) -> Any:
UpperCAmelCase_ = (
'''This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'''
''' add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth'''
)
# fmt: off
UpperCAmelCase_ = [65, 871, 419, 358, 946, 991, 2521, 452, 358, 1357, 387, 7751, 3536, 112, 985, 456, 126, 865, 938, 5400, 5734, 458, 1368, 467, 786, 2462, 5246, 1159, 633, 865, 4519, 457, 582, 852, 2557, 427, 916, 508, 405, 34324, 497, 391, 408, 11342, 1244, 385, 100, 938, 985, 456, 574, 362, 12597, 3200, 3129, 1172, 66] # noqa: E231
# fmt: on
self.assertListEqual(_lowercase , self.big_tokenizer.encode(_lowercase))
@require_torch
@slow
def __a ( self :Dict) -> Union[str, Any]:
import torch
from transformers import BigBirdConfig, BigBirdModel
# Build sequence
UpperCAmelCase_ = list(self.big_tokenizer.get_vocab().keys())[:10]
UpperCAmelCase_ = ''' '''.join(_lowercase)
UpperCAmelCase_ = self.big_tokenizer.encode_plus(_lowercase , return_tensors='''pt''' , return_token_type_ids=_lowercase)
UpperCAmelCase_ = self.big_tokenizer.batch_encode_plus(
[sequence + ''' ''' + sequence] , return_tensors='''pt''' , return_token_type_ids=_lowercase)
UpperCAmelCase_ = BigBirdConfig(attention_type='''original_full''')
UpperCAmelCase_ = BigBirdModel(_lowercase)
assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size
with torch.no_grad():
model(**_lowercase)
model(**_lowercase)
@slow
def __a ( self :Optional[int]) -> Any:
UpperCAmelCase_ = BigBirdTokenizer.from_pretrained('''google/bigbird-roberta-base''')
UpperCAmelCase_ = tokenizer.decode(tokenizer('''Paris is the [MASK].''').input_ids)
self.assertTrue(decoded_text == '''[CLS] Paris is the[MASK].[SEP]''')
@slow
def __a ( self :Dict) -> List[str]:
# fmt: off
UpperCAmelCase_ = {'''input_ids''': [[65, 39286, 458, 36335, 2001, 456, 13073, 13266, 455, 113, 7746, 1741, 11157, 391, 13073, 13266, 455, 113, 3967, 35412, 113, 4936, 109, 3870, 2377, 113, 30084, 45720, 458, 134, 17496, 112, 503, 11672, 113, 118, 112, 5665, 13347, 38687, 112, 1496, 31389, 112, 3268, 47264, 134, 962, 112, 16377, 8035, 23130, 430, 12169, 15518, 28592, 458, 146, 41697, 109, 391, 12169, 15518, 16689, 458, 146, 41358, 109, 452, 726, 4034, 111, 763, 35412, 5082, 388, 1903, 111, 9051, 391, 2870, 48918, 1900, 1123, 550, 998, 112, 9586, 15985, 455, 391, 410, 22955, 37636, 114, 66], [65, 448, 17496, 419, 3663, 385, 763, 113, 27533, 2870, 3283, 13043, 1639, 24713, 523, 656, 24013, 18550, 2521, 517, 27014, 21244, 420, 1212, 1465, 391, 927, 4833, 388, 578, 11786, 114, 66, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [65, 484, 2169, 7687, 21932, 18146, 726, 363, 17032, 3391, 114, 66, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_lowercase , model_name='''google/bigbird-roberta-base''' , revision='''215c99f1600e06f83acce68422f2035b2b5c3510''' , )
| 344 | 0 |
'''simple docstring'''
import tempfile
import torch
from diffusers import PNDMScheduler
from .test_schedulers import SchedulerCommonTest
class a ( _lowerCamelCase ):
snake_case_ = (PNDMScheduler,)
snake_case_ = (("num_inference_steps", 50),)
def A_ ( self : Tuple , **lowercase_ : Tuple ):
snake_case_ = {
'''num_train_timesteps''': 1000,
'''beta_start''': 0.0001,
'''beta_end''': 0.02,
'''beta_schedule''': '''linear''',
}
config.update(**lowercase_ )
return config
def A_ ( self : Any , lowercase_ : Optional[int]=0 , **lowercase_ : int ):
snake_case_ = dict(self.forward_default_kwargs )
snake_case_ = kwargs.pop('''num_inference_steps''' , lowercase_ )
snake_case_ = self.dummy_sample
snake_case_ = 0.1 * sample
snake_case_ = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
snake_case_ = self.get_scheduler_config(**lowercase_ )
snake_case_ = scheduler_class(**lowercase_ )
scheduler.set_timesteps(lowercase_ )
# copy over dummy past residuals
snake_case_ = dummy_past_residuals[:]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(lowercase_ )
snake_case_ = scheduler_class.from_pretrained(lowercase_ )
new_scheduler.set_timesteps(lowercase_ )
# copy over dummy past residuals
snake_case_ = dummy_past_residuals[:]
snake_case_ = scheduler.step_prk(lowercase_ , lowercase_ , lowercase_ , **lowercase_ ).prev_sample
snake_case_ = new_scheduler.step_prk(lowercase_ , lowercase_ , lowercase_ , **lowercase_ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
snake_case_ = scheduler.step_plms(lowercase_ , lowercase_ , lowercase_ , **lowercase_ ).prev_sample
snake_case_ = new_scheduler.step_plms(lowercase_ , lowercase_ , lowercase_ , **lowercase_ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def A_ ( self : Any ):
pass
def A_ ( self : Any , lowercase_ : Dict=0 , **lowercase_ : Optional[int] ):
snake_case_ = dict(self.forward_default_kwargs )
snake_case_ = kwargs.pop('''num_inference_steps''' , lowercase_ )
snake_case_ = self.dummy_sample
snake_case_ = 0.1 * sample
snake_case_ = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
snake_case_ = self.get_scheduler_config()
snake_case_ = scheduler_class(**lowercase_ )
scheduler.set_timesteps(lowercase_ )
# copy over dummy past residuals (must be after setting timesteps)
snake_case_ = dummy_past_residuals[:]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(lowercase_ )
snake_case_ = scheduler_class.from_pretrained(lowercase_ )
# copy over dummy past residuals
new_scheduler.set_timesteps(lowercase_ )
# copy over dummy past residual (must be after setting timesteps)
snake_case_ = dummy_past_residuals[:]
snake_case_ = scheduler.step_prk(lowercase_ , lowercase_ , lowercase_ , **lowercase_ ).prev_sample
snake_case_ = new_scheduler.step_prk(lowercase_ , lowercase_ , lowercase_ , **lowercase_ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
snake_case_ = scheduler.step_plms(lowercase_ , lowercase_ , lowercase_ , **lowercase_ ).prev_sample
snake_case_ = new_scheduler.step_plms(lowercase_ , lowercase_ , lowercase_ , **lowercase_ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def A_ ( self : Dict , **lowercase_ : str ):
snake_case_ = self.scheduler_classes[0]
snake_case_ = self.get_scheduler_config(**lowercase_ )
snake_case_ = scheduler_class(**lowercase_ )
snake_case_ = 10
snake_case_ = self.dummy_model()
snake_case_ = self.dummy_sample_deter
scheduler.set_timesteps(lowercase_ )
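        # PNDM warms up with a few Runge-Kutta (PRK) steps before switching to
        # the cheaper pseudo linear multistep (PLMS) updates.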
for i, t in enumerate(scheduler.prk_timesteps ):
snake_case_ = model(lowercase_ , lowercase_ )
snake_case_ = scheduler.step_prk(lowercase_ , lowercase_ , lowercase_ ).prev_sample
for i, t in enumerate(scheduler.plms_timesteps ):
snake_case_ = model(lowercase_ , lowercase_ )
snake_case_ = scheduler.step_plms(lowercase_ , lowercase_ , lowercase_ ).prev_sample
return sample
def A_ ( self : Dict ):
snake_case_ = dict(self.forward_default_kwargs )
snake_case_ = kwargs.pop('''num_inference_steps''' , lowercase_ )
for scheduler_class in self.scheduler_classes:
snake_case_ = self.get_scheduler_config()
snake_case_ = scheduler_class(**lowercase_ )
snake_case_ = self.dummy_sample
snake_case_ = 0.1 * sample
if num_inference_steps is not None and hasattr(lowercase_ , '''set_timesteps''' ):
scheduler.set_timesteps(lowercase_ )
elif num_inference_steps is not None and not hasattr(lowercase_ , '''set_timesteps''' ):
snake_case_ = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
snake_case_ = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
snake_case_ = dummy_past_residuals[:]
snake_case_ = scheduler.step_prk(lowercase_ , 0 , lowercase_ , **lowercase_ ).prev_sample
snake_case_ = scheduler.step_prk(lowercase_ , 1 , lowercase_ , **lowercase_ ).prev_sample
            self.assertEqual(output_0.shape , sample.shape )
            self.assertEqual(output_0.shape , output_1.shape )
snake_case_ = scheduler.step_plms(lowercase_ , 0 , lowercase_ , **lowercase_ ).prev_sample
snake_case_ = scheduler.step_plms(lowercase_ , 1 , lowercase_ , **lowercase_ ).prev_sample
            self.assertEqual(output_0.shape , sample.shape )
            self.assertEqual(output_0.shape , output_1.shape )
def A_ ( self : List[Any] ):
for timesteps in [100, 1000]:
self.check_over_configs(num_train_timesteps=lowercase_ )
def A_ ( self : List[str] ):
for steps_offset in [0, 1]:
self.check_over_configs(steps_offset=lowercase_ )
snake_case_ = self.scheduler_classes[0]
snake_case_ = self.get_scheduler_config(steps_offset=1 )
snake_case_ = scheduler_class(**lowercase_ )
scheduler.set_timesteps(10 )
assert torch.equal(
scheduler.timesteps , torch.LongTensor(
[901, 851, 851, 801, 801, 751, 751, 701, 701, 651, 651, 601, 601, 501, 401, 301, 201, 101, 1] ) , )
def A_ ( self : Any ):
for beta_start, beta_end in zip([0.0001, 0.001] , [0.002, 0.02] ):
self.check_over_configs(beta_start=lowercase_ , beta_end=lowercase_ )
def A_ ( self : List[str] ):
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=lowercase_ )
def A_ ( self : str ):
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=lowercase_ )
def A_ ( self : str ):
for t in [1, 5, 10]:
self.check_over_forward(time_step=lowercase_ )
def A_ ( self : str ):
for t, num_inference_steps in zip([1, 5, 10] , [10, 50, 100] ):
self.check_over_forward(num_inference_steps=lowercase_ )
def A_ ( self : Any ):
# earlier version of set_timesteps() caused an error indexing alpha's with inference steps as power of 3
snake_case_ = 27
for scheduler_class in self.scheduler_classes:
snake_case_ = self.dummy_sample
snake_case_ = 0.1 * sample
snake_case_ = self.get_scheduler_config()
snake_case_ = scheduler_class(**lowercase_ )
scheduler.set_timesteps(lowercase_ )
# before power of 3 fix, would error on first step, so we only need to do two
for i, t in enumerate(scheduler.prk_timesteps[:2] ):
snake_case_ = scheduler.step_prk(lowercase_ , lowercase_ , lowercase_ ).prev_sample
def A_ ( self : Tuple ):
with self.assertRaises(lowercase_ ):
snake_case_ = self.scheduler_classes[0]
snake_case_ = self.get_scheduler_config()
snake_case_ = scheduler_class(**lowercase_ )
scheduler.step_plms(self.dummy_sample , 1 , self.dummy_sample ).prev_sample
def A_ ( self : str ):
snake_case_ = self.full_loop()
snake_case_ = torch.sum(torch.abs(lowercase_ ) )
snake_case_ = torch.mean(torch.abs(lowercase_ ) )
assert abs(result_sum.item() - 198.1318 ) < 1e-2
assert abs(result_mean.item() - 0.2580 ) < 1e-3
def A_ ( self : Any ):
snake_case_ = self.full_loop(prediction_type='''v_prediction''' )
snake_case_ = torch.sum(torch.abs(lowercase_ ) )
snake_case_ = torch.mean(torch.abs(lowercase_ ) )
assert abs(result_sum.item() - 67.3986 ) < 1e-2
assert abs(result_mean.item() - 0.0878 ) < 1e-3
def A_ ( self : Optional[Any] ):
# We specify different beta, so that the first alpha is 0.99
snake_case_ = self.full_loop(set_alpha_to_one=lowercase_ , beta_start=0.01 )
snake_case_ = torch.sum(torch.abs(lowercase_ ) )
snake_case_ = torch.mean(torch.abs(lowercase_ ) )
assert abs(result_sum.item() - 230.0399 ) < 1e-2
assert abs(result_mean.item() - 0.2995 ) < 1e-3
def A_ ( self : str ):
# We specify different beta, so that the first alpha is 0.99
snake_case_ = self.full_loop(set_alpha_to_one=lowercase_ , beta_start=0.01 )
snake_case_ = torch.sum(torch.abs(lowercase_ ) )
snake_case_ = torch.mean(torch.abs(lowercase_ ) )
assert abs(result_sum.item() - 186.9482 ) < 1e-2
assert abs(result_mean.item() - 0.2434 ) < 1e-3
| 56 |
"""simple docstring"""
def lowercase ( __snake_case : int ):
    if n == 1 or not isinstance(n , int ):
return 0
elif n == 2:
return 1
else:
lowercase_ : Dict = [0, 1]
for i in range(2 , n + 1 ):
sequence.append(sequence[i - 1] + sequence[i - 2] )
return sequence[n]
def lowercase ( __snake_case : int ):
lowercase_ : str = 0
lowercase_ : List[str] = 2
while digits < n:
index += 1
lowercase_ : Any = len(str(fibonacci(__snake_case ) ) )
return index
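# Worked example: with this implementation, fibonacci_digits_index(2) == 7,
# since fibonacci(7) == 13 is the first value it produces with two digits.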
def lowercase ( __snake_case : int = 1_0_0_0 ):
return fibonacci_digits_index(__snake_case )
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
| 33 | 0 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_rembert import RemBertTokenizer
else:
lowercase__ = None
lowercase__ = logging.get_logger(__name__)
lowercase__ = {"""vocab_file""": """sentencepiece.model""", """tokenizer_file""": """tokenizer.json"""}
lowercase__ = {
"""vocab_file""": {
"""google/rembert""": """https://huggingface.co/google/rembert/resolve/main/sentencepiece.model""",
},
"""tokenizer_file""": {
"""google/rembert""": """https://huggingface.co/google/rembert/resolve/main/tokenizer.json""",
},
}
lowercase__ = {
"""google/rembert""": 256,
}
lowercase__ = """▁"""
class __lowerCamelCase ( A__ ):
'''simple docstring'''
a_ : List[Any] = VOCAB_FILES_NAMES
a_ : Dict = PRETRAINED_VOCAB_FILES_MAP
a_ : Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a_ : List[str] = RemBertTokenizer
def __init__( self : Optional[int] , a_ : Union[str, Any]=None , a_ : Any=None , a_ : Dict=True , a_ : List[Any]=True , a_ : List[Any]=False , a_ : int="[CLS]" , a_ : Any="[SEP]" , a_ : str="<unk>" , a_ : Dict="[SEP]" , a_ : Tuple="<pad>" , a_ : Optional[Any]="[CLS]" , a_ : List[str]="[MASK]" , **a_ : Optional[Any] , ):
# Mask token behave like a normal word, i.e. include the space before it
lowerCAmelCase_ : List[str] = AddedToken(a_ , lstrip=a_ , rstrip=a_ ) if isinstance(a_ , a_ ) else mask_token
super().__init__(
a_ , tokenizer_file=a_ , do_lower_case=a_ , remove_space=a_ , keep_accents=a_ , bos_token=a_ , eos_token=a_ , unk_token=a_ , sep_token=a_ , pad_token=a_ , cls_token=a_ , mask_token=a_ , **a_ , )
lowerCAmelCase_ : str = do_lower_case
lowerCAmelCase_ : str = remove_space
lowerCAmelCase_ : Any = keep_accents
lowerCAmelCase_ : Dict = vocab_file
lowerCAmelCase_ : int = False if not self.vocab_file else True
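    # The two builders below follow the standard BERT layout:
    #   single sequence:   [CLS] X [SEP]
    #   pair of sequences: [CLS] A [SEP] B [SEP]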
def lowerCamelCase ( self : Tuple , a_ : List[int] , a_ : Optional[List[int]] = None ):
lowerCAmelCase_ : Optional[Any] = [self.sep_token_id]
lowerCAmelCase_ : Tuple = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def lowerCamelCase ( self : str , a_ : List[int] , a_ : Optional[List[int]] = None , a_ : bool = False ):
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
"You should not supply a second sequence if the provided sequence of "
"ids is already formatted with special tokens for the model." )
return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]
if token_ids_a is not None:
return [1] + ([0] * len(a_ )) + [1] + ([0] * len(a_ )) + [1]
return [1] + ([0] * len(a_ )) + [1]
def lowerCamelCase ( self : Any , a_ : List[int] , a_ : Optional[List[int]] = None ):
lowerCAmelCase_ : Dict = [self.sep_token_id]
lowerCAmelCase_ : Optional[int] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def lowerCamelCase ( self : Optional[int] , a_ : str , a_ : Optional[str] = None ):
if not os.path.isdir(a_ ):
logger.error("Vocabulary path ({}) should be a directory".format(a_ ) )
return
lowerCAmelCase_ : List[Any] = os.path.join(
a_ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(a_ ):
copyfile(self.vocab_file , a_ )
return (out_vocab_file,)
| 358 |
"""simple docstring"""
import numpy as np
import pandas as pd
from sklearn.preprocessing import Normalizer
from sklearn.svm import SVR
from statsmodels.tsa.statespace.sarimax import SARIMAX
def __lowerCamelCase ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> float:
"""simple docstring"""
lowerCAmelCase_ : Tuple = np.array([[1, item, train_mtch[i]] for i, item in enumerate(__UpperCamelCase )] )
lowerCAmelCase_ : str = np.array(__UpperCamelCase )
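    # Ordinary least squares via the normal equation: beta = (X^T X)^-1 X^T y,
    # where each design-matrix row is [1, date, matches_played].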
lowerCAmelCase_ : Optional[Any] = np.dot(np.dot(np.linalg.inv(np.dot(x.transpose() , __UpperCamelCase ) ) , x.transpose() ) , __UpperCamelCase )
    return abs(beta[0] + test_dt[0] * beta[1] + test_mtch[0] * beta[2] )
def __lowerCamelCase ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> float:
"""simple docstring"""
lowerCAmelCase_ : Union[str, Any] = (1, 2, 1)
lowerCAmelCase_ : str = (1, 1, 0, 7)
lowerCAmelCase_ : List[Any] = SARIMAX(
__UpperCamelCase , exog=__UpperCamelCase , order=__UpperCamelCase , seasonal_order=__UpperCamelCase )
lowerCAmelCase_ : Optional[int] = model.fit(disp=__UpperCamelCase , maxiter=600 , method="nm" )
lowerCAmelCase_ : Optional[Any] = model_fit.predict(1 , len(__UpperCamelCase ) , exog=[test_match] )
return result[0]
def __lowerCamelCase ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> float:
"""simple docstring"""
lowerCAmelCase_ : int = SVR(kernel="rbf" , C=1 , gamma=0.1 , epsilon=0.1 )
regressor.fit(__UpperCamelCase , __UpperCamelCase )
lowerCAmelCase_ : Dict = regressor.predict(__UpperCamelCase )
return y_pred[0]
def __lowerCamelCase ( __UpperCamelCase ) -> float:
"""simple docstring"""
train_user.sort()
lowerCAmelCase_ : Optional[Any] = np.percentile(__UpperCamelCase , 25 )
lowerCAmelCase_ : List[Any] = np.percentile(__UpperCamelCase , 75 )
    lowerCAmelCase_ : Union[str, Any] = q3 - q1  # interquartile range
    lowerCAmelCase_ : List[Any] = q1 - (iqr * 0.1)  # lower outlier limit
return low_lim
def __lowerCamelCase ( __UpperCamelCase , __UpperCamelCase ) -> bool:
"""simple docstring"""
lowerCAmelCase_ : Optional[Any] = 0
lowerCAmelCase_ : Union[str, Any] = 0
for i in list_vote:
if i > actual_result:
lowerCAmelCase_ : Tuple = not_safe + 1
else:
            if abs(abs(i ) - abs(actual_result ) ) <= 0.1:
safe += 1
else:
not_safe += 1
return safe > not_safe
if __name__ == "__main__":
# data_input_df = pd.read_csv("ex_data.csv", header=None)
lowercase__ = [[18231, 0.0, 1], [22621, 1.0, 2], [15675, 0.0, 3], [23583, 1.0, 4]]
lowercase__ = pd.DataFrame(
data_input, columns=["""total_user""", """total_even""", """days"""]
)
lowercase__ = Normalizer().fit_transform(data_input_df.values)
# split data
lowercase__ = normalize_df[:, 2].tolist()
lowercase__ = normalize_df[:, 0].tolist()
lowercase__ = normalize_df[:, 1].tolist()
# for svr (input variable = total date and total match)
lowercase__ = normalize_df[:, [1, 2]].tolist()
lowercase__ = x[: len(x) - 1]
lowercase__ = x[len(x) - 1 :]
# for linear regression & sarimax
lowercase__ = total_date[: len(total_date) - 1]
lowercase__ = total_user[: len(total_user) - 1]
lowercase__ = total_match[: len(total_match) - 1]
lowercase__ = total_date[len(total_date) - 1 :]
lowercase__ = total_user[len(total_user) - 1 :]
lowercase__ = total_match[len(total_match) - 1 :]
# voting system with forecasting
lowercase__ = [
linear_regression_prediction(
trn_date, trn_user, trn_match, tst_date, tst_match
),
sarimax_predictor(trn_user, trn_match, tst_match),
support_vector_regressor(x_train, x_test, trn_user),
]
# check the safety of today's data
lowercase__ = """""" if data_safety_checker(res_vote, tst_user) else """not """
    print(f"""Today's data is {not_str}safe.""")
| 161 | 0 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import AutoImageProcessor, SwinvaConfig, SwinvaForImageClassification
def UpperCamelCase_( snake_case : List[str] ):
'''simple docstring'''
snake_case_ = SwinvaConfig()
snake_case_ = swinva_name.split("_" )
snake_case_ = name_split[1]
if "to" in name_split[3]:
snake_case_ = int(name_split[3][-3:] )
else:
snake_case_ = int(name_split[3] )
if "to" in name_split[2]:
snake_case_ = int(name_split[2][-2:] )
else:
snake_case_ = int(name_split[2][6:] )
if model_size == "tiny":
snake_case_ = 9_6
snake_case_ = (2, 2, 6, 2)
snake_case_ = (3, 6, 1_2, 2_4)
elif model_size == "small":
snake_case_ = 9_6
snake_case_ = (2, 2, 1_8, 2)
snake_case_ = (3, 6, 1_2, 2_4)
elif model_size == "base":
snake_case_ = 1_2_8
snake_case_ = (2, 2, 1_8, 2)
snake_case_ = (4, 8, 1_6, 3_2)
else:
snake_case_ = 1_9_2
snake_case_ = (2, 2, 1_8, 2)
snake_case_ = (6, 1_2, 2_4, 4_8)
if "to" in swinva_name:
snake_case_ = (1_2, 1_2, 1_2, 6)
if ("22k" in swinva_name) and ("to" not in swinva_name):
snake_case_ = 2_1_8_4_1
snake_case_ = "huggingface/label-files"
snake_case_ = "imagenet-22k-id2label.json"
snake_case_ = json.load(open(hf_hub_download(snake_case , snake_case , repo_type="dataset" ) , "r" ) )
snake_case_ = {int(snake_case ): v for k, v in idalabel.items()}
snake_case_ = idalabel
snake_case_ = {v: k for k, v in idalabel.items()}
else:
snake_case_ = 1_0_0_0
snake_case_ = "huggingface/label-files"
snake_case_ = "imagenet-1k-id2label.json"
snake_case_ = json.load(open(hf_hub_download(snake_case , snake_case , repo_type="dataset" ) , "r" ) )
snake_case_ = {int(snake_case ): v for k, v in idalabel.items()}
snake_case_ = idalabel
snake_case_ = {v: k for k, v in idalabel.items()}
snake_case_ = img_size
snake_case_ = num_classes
snake_case_ = embed_dim
snake_case_ = depths
snake_case_ = num_heads
snake_case_ = window_size
return config
def UpperCamelCase_( snake_case : Any ):
'''simple docstring'''
if "patch_embed.proj" in name:
snake_case_ = name.replace("patch_embed.proj" , "embeddings.patch_embeddings.projection" )
if "patch_embed.norm" in name:
snake_case_ = name.replace("patch_embed.norm" , "embeddings.norm" )
if "layers" in name:
snake_case_ = "encoder." + name
if "attn.proj" in name:
snake_case_ = name.replace("attn.proj" , "attention.output.dense" )
if "attn" in name:
snake_case_ = name.replace("attn" , "attention.self" )
if "norm1" in name:
snake_case_ = name.replace("norm1" , "layernorm_before" )
if "norm2" in name:
snake_case_ = name.replace("norm2" , "layernorm_after" )
if "mlp.fc1" in name:
snake_case_ = name.replace("mlp.fc1" , "intermediate.dense" )
if "mlp.fc2" in name:
snake_case_ = name.replace("mlp.fc2" , "output.dense" )
if "q_bias" in name:
snake_case_ = name.replace("q_bias" , "query.bias" )
if "k_bias" in name:
snake_case_ = name.replace("k_bias" , "key.bias" )
if "v_bias" in name:
snake_case_ = name.replace("v_bias" , "value.bias" )
if "cpb_mlp" in name:
snake_case_ = name.replace("cpb_mlp" , "continuous_position_bias_mlp" )
if name == "norm.weight":
snake_case_ = "layernorm.weight"
if name == "norm.bias":
snake_case_ = "layernorm.bias"
if "head" in name:
snake_case_ = name.replace("head" , "classifier" )
else:
snake_case_ = "swinv2." + name
return name
def UpperCamelCase_( snake_case : int , snake_case : Tuple ):
'''simple docstring'''
for key in orig_state_dict.copy().keys():
snake_case_ = orig_state_dict.pop(snake_case )
if "mask" in key:
continue
elif "qkv" in key:
snake_case_ = key.split("." )
snake_case_ = int(key_split[1] )
snake_case_ = int(key_split[3] )
snake_case_ = model.swinva.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
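            # timm fuses the query/key/value projections into a single `qkv`
            # tensor; slice it into the three separate projections HF expects.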
if "weight" in key:
snake_case_ = val[:dim, :]
snake_case_ = val[dim : dim * 2, :]
snake_case_ = val[-dim:, :]
else:
snake_case_ = val[:dim]
                snake_case_ = val[dim : dim * 2]
snake_case_ = val[-dim:]
else:
snake_case_ = val
return orig_state_dict
def UpperCamelCase_( snake_case : Any , snake_case : List[Any] ):
'''simple docstring'''
snake_case_ = timm.create_model(snake_case , pretrained=snake_case )
timm_model.eval()
snake_case_ = get_swinva_config(snake_case )
snake_case_ = SwinvaForImageClassification(snake_case )
model.eval()
snake_case_ = convert_state_dict(timm_model.state_dict() , snake_case )
model.load_state_dict(snake_case )
snake_case_ = "http://images.cocodataset.org/val2017/000000039769.jpg"
snake_case_ = AutoImageProcessor.from_pretrained("microsoft/{}".format(swinva_name.replace("_" , "-" ) ) )
snake_case_ = Image.open(requests.get(snake_case , stream=snake_case ).raw )
snake_case_ = image_processor(images=snake_case , return_tensors="pt" )
snake_case_ = timm_model(inputs["pixel_values"] )
snake_case_ = model(**snake_case ).logits
assert torch.allclose(snake_case , snake_case , atol=1e-3 )
print(f'Saving model {swinva_name} to {pytorch_dump_folder_path}' )
model.save_pretrained(snake_case )
print(f'Saving image processor to {pytorch_dump_folder_path}' )
image_processor.save_pretrained(snake_case )
model.push_to_hub(
repo_path_or_name=Path(snake_case , snake_case ) , organization="nandwalritik" , commit_message="Add model" , )
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE : Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--swinv2_name",
default="swinv2_tiny_patch4_window8_256",
type=str,
help="Name of the Swinv2 timm model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
_SCREAMING_SNAKE_CASE : str = parser.parse_args()
convert_swinva_checkpoint(args.swinva_name, args.pytorch_dump_folder_path)
| 85 |
"""simple docstring"""
import inspect
import unittest
import warnings
from math import ceil, floor
from transformers import LevitConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
MODEL_MAPPING,
LevitForImageClassification,
LevitForImageClassificationWithTeacher,
LevitModel,
)
from transformers.models.levit.modeling_levit import LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import LevitImageProcessor
class lowerCAmelCase__ ( UpperCAmelCase__ ):
'''simple docstring'''
def _SCREAMING_SNAKE_CASE ( self : Tuple):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Optional[int] = self.config_class(**self.inputs_dict)
self.parent.assertTrue(hasattr(lowercase_ , '''hidden_sizes'''))
self.parent.assertTrue(hasattr(lowercase_ , '''num_attention_heads'''))
class lowerCAmelCase__ :
'''simple docstring'''
def __init__( self : str , lowercase_ : Union[str, Any] , lowercase_ : List[Any]=13 , lowercase_ : Dict=64 , lowercase_ : Dict=3 , lowercase_ : Optional[Any]=3 , lowercase_ : List[Any]=2 , lowercase_ : Any=1 , lowercase_ : List[Any]=16 , lowercase_ : int=[128, 256, 384] , lowercase_ : str=[4, 6, 8] , lowercase_ : Optional[Any]=[2, 3, 4] , lowercase_ : Union[str, Any]=[16, 16, 16] , lowercase_ : Optional[Any]=0 , lowercase_ : Optional[int]=[2, 2, 2] , lowercase_ : Any=[2, 2, 2] , lowercase_ : List[str]=0.02 , lowercase_ : Any=True , lowercase_ : Union[str, Any]=True , lowercase_ : Optional[int]=2 , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : List[str] = parent
SCREAMING_SNAKE_CASE_ : Any = batch_size
SCREAMING_SNAKE_CASE_ : Optional[Any] = image_size
SCREAMING_SNAKE_CASE_ : int = num_channels
SCREAMING_SNAKE_CASE_ : List[Any] = kernel_size
SCREAMING_SNAKE_CASE_ : Optional[Any] = stride
SCREAMING_SNAKE_CASE_ : List[str] = padding
SCREAMING_SNAKE_CASE_ : int = hidden_sizes
SCREAMING_SNAKE_CASE_ : Tuple = num_attention_heads
SCREAMING_SNAKE_CASE_ : int = depths
SCREAMING_SNAKE_CASE_ : Optional[Any] = key_dim
SCREAMING_SNAKE_CASE_ : Optional[Any] = drop_path_rate
SCREAMING_SNAKE_CASE_ : Tuple = patch_size
SCREAMING_SNAKE_CASE_ : Optional[Any] = attention_ratio
SCREAMING_SNAKE_CASE_ : str = mlp_ratio
SCREAMING_SNAKE_CASE_ : Union[str, Any] = initializer_range
SCREAMING_SNAKE_CASE_ : List[Any] = [
['''Subsample''', key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
['''Subsample''', key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
]
SCREAMING_SNAKE_CASE_ : Any = is_training
SCREAMING_SNAKE_CASE_ : Tuple = use_labels
SCREAMING_SNAKE_CASE_ : Union[str, Any] = num_labels
SCREAMING_SNAKE_CASE_ : List[Any] = initializer_range
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : int = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
SCREAMING_SNAKE_CASE_ : Dict = None
if self.use_labels:
SCREAMING_SNAKE_CASE_ : str = ids_tensor([self.batch_size] , self.num_labels)
SCREAMING_SNAKE_CASE_ : Optional[int] = self.get_config()
return config, pixel_values, labels
def _SCREAMING_SNAKE_CASE ( self : Optional[int]):
'''simple docstring'''
return LevitConfig(
image_size=self.image_size , num_channels=self.num_channels , kernel_size=self.kernel_size , stride=self.stride , padding=self.padding , patch_size=self.patch_size , hidden_sizes=self.hidden_sizes , num_attention_heads=self.num_attention_heads , depths=self.depths , key_dim=self.key_dim , drop_path_rate=self.drop_path_rate , mlp_ratio=self.mlp_ratio , attention_ratio=self.attention_ratio , initializer_range=self.initializer_range , down_ops=self.down_ops , )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , lowercase_ : Any , lowercase_ : int , lowercase_ : Optional[int]):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : List[Any] = LevitModel(config=lowercase_)
model.to(lowercase_)
model.eval()
SCREAMING_SNAKE_CASE_ : Union[str, Any] = model(lowercase_)
SCREAMING_SNAKE_CASE_ : Any = (self.image_size, self.image_size)
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Tuple = image_size[0], image_size[1]
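        # The four convolutional stem layers each halve the resolution via the
        # usual conv output-size formula: floor((size + 2*padding - kernel) / stride) + 1.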
for _ in range(4):
SCREAMING_SNAKE_CASE_ : List[Any] = floor(((height + 2 * self.padding - self.kernel_size) / self.stride) + 1)
SCREAMING_SNAKE_CASE_ : Dict = floor(((width + 2 * self.padding - self.kernel_size) / self.stride) + 1)
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, ceil(height / 4) * ceil(width / 4), self.hidden_sizes[-1]) , )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , lowercase_ : int , lowercase_ : Union[str, Any] , lowercase_ : Tuple):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Dict = self.num_labels
SCREAMING_SNAKE_CASE_ : Union[str, Any] = LevitForImageClassification(lowercase_)
model.to(lowercase_)
model.eval()
SCREAMING_SNAKE_CASE_ : List[Any] = model(lowercase_ , labels=lowercase_)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels))
def _SCREAMING_SNAKE_CASE ( self : Tuple):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Optional[Any] = config_and_inputs
SCREAMING_SNAKE_CASE_ : int = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class lowerCAmelCase__ ( UpperCAmelCase__ , UpperCAmelCase__ , unittest.TestCase ):
'''simple docstring'''
__UpperCamelCase = (
(LevitModel, LevitForImageClassification, LevitForImageClassificationWithTeacher)
if is_torch_available()
else ()
)
__UpperCamelCase = (
{
"feature-extraction": LevitModel,
"image-classification": (LevitForImageClassification, LevitForImageClassificationWithTeacher),
}
if is_torch_available()
else {}
)
__UpperCamelCase = False
__UpperCamelCase = False
__UpperCamelCase = False
__UpperCamelCase = False
__UpperCamelCase = False
def _SCREAMING_SNAKE_CASE ( self : List[Any]):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Union[str, Any] = LevitModelTester(self)
SCREAMING_SNAKE_CASE_ : List[Any] = ConfigTester(self , config_class=lowercase_ , has_text_modality=lowercase_ , hidden_size=37)
def _SCREAMING_SNAKE_CASE ( self : List[Any]):
'''simple docstring'''
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]):
'''simple docstring'''
return
@unittest.skip(reason='''Levit does not use inputs_embeds''')
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]):
'''simple docstring'''
pass
@unittest.skip(reason='''Levit does not support input and output embeddings''')
def _SCREAMING_SNAKE_CASE ( self : Optional[int]):
'''simple docstring'''
pass
@unittest.skip(reason='''Levit does not output attentions''')
def _SCREAMING_SNAKE_CASE ( self : Tuple):
'''simple docstring'''
pass
def _SCREAMING_SNAKE_CASE ( self : Tuple):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE_ : Any = model_class(lowercase_)
SCREAMING_SNAKE_CASE_ : Union[str, Any] = inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
SCREAMING_SNAKE_CASE_ : Dict = [*signature.parameters.keys()]
SCREAMING_SNAKE_CASE_ : Optional[Any] = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , lowercase_)
def _SCREAMING_SNAKE_CASE ( self : str):
'''simple docstring'''
def check_hidden_states_output(lowercase_ : List[str] , lowercase_ : Optional[Any] , lowercase_ : str):
SCREAMING_SNAKE_CASE_ : str = model_class(lowercase_)
model.to(lowercase_)
model.eval()
with torch.no_grad():
SCREAMING_SNAKE_CASE_ : Tuple = model(**self._prepare_for_class(lowercase_ , lowercase_))
SCREAMING_SNAKE_CASE_ : str = outputs.hidden_states
SCREAMING_SNAKE_CASE_ : Optional[int] = len(self.model_tester.depths) + 1
self.assertEqual(len(lowercase_) , lowercase_)
SCREAMING_SNAKE_CASE_ : List[Any] = (self.model_tester.image_size, self.model_tester.image_size)
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Union[str, Any] = image_size[0], image_size[1]
for _ in range(4):
SCREAMING_SNAKE_CASE_ : Optional[Any] = floor(
(
(height + 2 * self.model_tester.padding - self.model_tester.kernel_size)
/ self.model_tester.stride
)
+ 1)
SCREAMING_SNAKE_CASE_ : Optional[int] = floor(
(
(width + 2 * self.model_tester.padding - self.model_tester.kernel_size)
/ self.model_tester.stride
)
+ 1)
# verify the first hidden states (first block)
self.assertListEqual(
list(hidden_states[0].shape[-2:]) , [
height * width,
self.model_tester.hidden_sizes[0],
] , )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE_ : Optional[int] = True
check_hidden_states_output(lowercase_ , lowercase_ , lowercase_)
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
SCREAMING_SNAKE_CASE_ : Tuple = True
check_hidden_states_output(lowercase_ , lowercase_ , lowercase_)
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''')
def _SCREAMING_SNAKE_CASE ( self : int):
'''simple docstring'''
pass
def _SCREAMING_SNAKE_CASE ( self : int , lowercase_ : Optional[Any] , lowercase_ : Optional[int] , lowercase_ : Tuple=False):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : int = super()._prepare_for_class(lowercase_ , lowercase_ , return_labels=lowercase_)
if return_labels:
if model_class.__name__ == "LevitForImageClassificationWithTeacher":
del inputs_dict["labels"]
return inputs_dict
def _SCREAMING_SNAKE_CASE ( self : List[str]):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowercase_)
def _SCREAMING_SNAKE_CASE ( self : Any):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowercase_)
def _SCREAMING_SNAKE_CASE ( self : List[str]):
'''simple docstring'''
if not self.model_tester.is_training:
return
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE_ : Union[str, Any] = True
for model_class in self.all_model_classes:
# LevitForImageClassificationWithTeacher supports inference-only
if (
model_class in get_values(lowercase_)
or model_class.__name__ == "LevitForImageClassificationWithTeacher"
):
continue
SCREAMING_SNAKE_CASE_ : Union[str, Any] = model_class(lowercase_)
model.to(lowercase_)
model.train()
SCREAMING_SNAKE_CASE_ : Optional[Any] = self._prepare_for_class(lowercase_ , lowercase_ , return_labels=lowercase_)
SCREAMING_SNAKE_CASE_ : Dict = model(**lowercase_).loss
loss.backward()
def _SCREAMING_SNAKE_CASE ( self : List[Any]):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : int = self.model_tester.prepare_config_and_inputs_for_common()
if not self.model_tester.is_training:
return
SCREAMING_SNAKE_CASE_ : Union[str, Any] = False
SCREAMING_SNAKE_CASE_ : Optional[int] = True
for model_class in self.all_model_classes:
if model_class in get_values(lowercase_) or not model_class.supports_gradient_checkpointing:
continue
# LevitForImageClassificationWithTeacher supports inference-only
if model_class.__name__ == "LevitForImageClassificationWithTeacher":
continue
SCREAMING_SNAKE_CASE_ : List[str] = model_class(lowercase_)
model.gradient_checkpointing_enable()
model.to(lowercase_)
model.train()
SCREAMING_SNAKE_CASE_ : Dict = self._prepare_for_class(lowercase_ , lowercase_ , return_labels=lowercase_)
SCREAMING_SNAKE_CASE_ : List[Any] = model(**lowercase_).loss
loss.backward()
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : str = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE_ : List[Any] = [
{'''title''': '''multi_label_classification''', '''num_labels''': 2, '''dtype''': torch.float},
{'''title''': '''single_label_classification''', '''num_labels''': 1, '''dtype''': torch.long},
{'''title''': '''regression''', '''num_labels''': 1, '''dtype''': torch.float},
]
for model_class in self.all_model_classes:
if (
model_class
not in [
*get_values(lowercase_),
]
or model_class.__name__ == "LevitForImageClassificationWithTeacher"
):
continue
for problem_type in problem_types:
with self.subTest(msg=F'Testing {model_class} with {problem_type["title"]}'):
SCREAMING_SNAKE_CASE_ : Optional[int] = problem_type['''title''']
SCREAMING_SNAKE_CASE_ : Optional[int] = problem_type['''num_labels''']
SCREAMING_SNAKE_CASE_ : str = model_class(lowercase_)
model.to(lowercase_)
model.train()
SCREAMING_SNAKE_CASE_ : Union[str, Any] = self._prepare_for_class(lowercase_ , lowercase_ , return_labels=lowercase_)
if problem_type["num_labels"] > 1:
SCREAMING_SNAKE_CASE_ : str = inputs['''labels'''].unsqueeze(1).repeat(1 , problem_type['''num_labels'''])
SCREAMING_SNAKE_CASE_ : Any = inputs['''labels'''].to(problem_type['''dtype'''])
                        # This tests that we do not trigger the warning from PyTorch "Using a target size that is different
                        # to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure
                        # they have the same size.", which is a symptom that something is wrong with the regression problem.
# See https://github.com/huggingface/transformers/issues/11780
with warnings.catch_warnings(record=lowercase_) as warning_list:
SCREAMING_SNAKE_CASE_ : int = model(**lowercase_).loss
for w in warning_list:
if "Using a target size that is different to the input size" in str(w.message):
raise ValueError(
F'Something is going wrong in the regression problem: intercepted {w.message}')
loss.backward()
@slow
def _SCREAMING_SNAKE_CASE ( self : Any):
'''simple docstring'''
for model_name in LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE_ : Optional[Any] = LevitModel.from_pretrained(lowercase_)
self.assertIsNotNone(lowercase_)
def _A () -> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Any = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
class lowerCAmelCase__ ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def _SCREAMING_SNAKE_CASE ( self : Tuple):
'''simple docstring'''
return LevitImageProcessor.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0])
@slow
def _SCREAMING_SNAKE_CASE ( self : Any):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Optional[int] = LevitForImageClassificationWithTeacher.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0]).to(
lowercase_)
SCREAMING_SNAKE_CASE_ : Optional[int] = self.default_image_processor
SCREAMING_SNAKE_CASE_ : str = prepare_img()
SCREAMING_SNAKE_CASE_ : List[Any] = image_processor(images=lowercase_ , return_tensors='''pt''').to(lowercase_)
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE_ : Any = model(**lowercase_)
# verify the logits
SCREAMING_SNAKE_CASE_ : Tuple = torch.Size((1, 1000))
self.assertEqual(outputs.logits.shape , lowercase_)
SCREAMING_SNAKE_CASE_ : Optional[int] = torch.tensor([1.04_48, -0.37_45, -1.83_17]).to(lowercase_)
self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowercase_ , atol=1e-4))
| 91 | 0 |
from __future__ import annotations
from math import pow, sqrt
def SCREAMING_SNAKE_CASE ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> dict[str, float]:
if (resistance, reactance, impedance).count(0 ) != 1:
raise ValueError("One and only one argument must be 0" )
if resistance == 0:
return {"resistance": sqrt(pow(__lowerCAmelCase , 2 ) - pow(__lowerCAmelCase , 2 ) )}
elif reactance == 0:
return {"reactance": sqrt(pow(__lowerCAmelCase , 2 ) - pow(__lowerCAmelCase , 2 ) )}
elif impedance == 0:
return {"impedance": sqrt(pow(__lowerCAmelCase , 2 ) + pow(__lowerCAmelCase , 2 ) )}
else:
raise ValueError("Exactly one argument must be 0" )
if __name__ == "__main__":
import doctest
doctest.testmod() | 196 |
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPImageProcessor, CLIPProcessor
@require_vision
class __a ( unittest.TestCase ):
def __lowercase ( self : Union[str, Any] ):
'''simple docstring'''
UpperCamelCase__ : Dict = tempfile.mkdtemp()
# fmt: off
UpperCamelCase__ : List[Any] = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
# fmt: on
UpperCamelCase__ : List[Any] = dict(zip(SCREAMING_SNAKE_CASE , range(len(SCREAMING_SNAKE_CASE ) ) ) )
UpperCamelCase__ : Tuple = ["#version: 0.2", "l o", "lo w</w>", "e r</w>", ""]
UpperCamelCase__ : Tuple = {"unk_token": "<unk>"}
UpperCamelCase__ : Tuple = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
UpperCamelCase__ : Optional[int] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
fp.write(json.dumps(SCREAMING_SNAKE_CASE ) + "\n" )
with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
fp.write("\n".join(SCREAMING_SNAKE_CASE ) )
UpperCamelCase__ : List[str] = {
"do_resize": True,
"size": 20,
"do_center_crop": True,
"crop_size": 18,
"do_normalize": True,
"image_mean": [0.4_8_1_4_5_4_6_6, 0.4_5_7_8_2_7_5, 0.4_0_8_2_1_0_7_3],
"image_std": [0.2_6_8_6_2_9_5_4, 0.2_6_1_3_0_2_5_8, 0.2_7_5_7_7_7_1_1],
}
UpperCamelCase__ : int = os.path.join(self.tmpdirname , SCREAMING_SNAKE_CASE )
with open(self.image_processor_file , "w" , encoding="utf-8" ) as fp:
json.dump(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
def __lowercase ( self : List[str] , **SCREAMING_SNAKE_CASE : List[str] ):
'''simple docstring'''
return CLIPTokenizer.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE )
def __lowercase ( self : List[Any] , **SCREAMING_SNAKE_CASE : Union[str, Any] ):
'''simple docstring'''
return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE )
def __lowercase ( self : Any , **SCREAMING_SNAKE_CASE : Union[str, Any] ):
'''simple docstring'''
return CLIPImageProcessor.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE )
def __lowercase ( self : Optional[Any] ):
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
def __lowercase ( self : Any ):
'''simple docstring'''
UpperCamelCase__ : int = [np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uinta )]
UpperCamelCase__ : int = [Image.fromarray(np.moveaxis(SCREAMING_SNAKE_CASE , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def __lowercase ( self : Tuple ):
'''simple docstring'''
UpperCamelCase__ : Union[str, Any] = self.get_tokenizer()
UpperCamelCase__ : List[str] = self.get_rust_tokenizer()
UpperCamelCase__ : str = self.get_image_processor()
UpperCamelCase__ : List[str] = CLIPProcessor(tokenizer=SCREAMING_SNAKE_CASE , image_processor=SCREAMING_SNAKE_CASE )
processor_slow.save_pretrained(self.tmpdirname )
UpperCamelCase__ : List[Any] = CLIPProcessor.from_pretrained(self.tmpdirname , use_fast=SCREAMING_SNAKE_CASE )
UpperCamelCase__ : Tuple = CLIPProcessor(tokenizer=SCREAMING_SNAKE_CASE , image_processor=SCREAMING_SNAKE_CASE )
processor_fast.save_pretrained(self.tmpdirname )
UpperCamelCase__ : Any = CLIPProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , SCREAMING_SNAKE_CASE )
self.assertIsInstance(processor_fast.tokenizer , SCREAMING_SNAKE_CASE )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , SCREAMING_SNAKE_CASE )
self.assertIsInstance(processor_fast.image_processor , SCREAMING_SNAKE_CASE )
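    # Round-trip sketch illustrated above: save_pretrained writes the tokenizer
    # files and the image-processor JSON side by side in one directory, and
    # from_pretrained rebuilds either the slow or the fast tokenizer variant
    # from that same folder (the use_fast keyword selects which one).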
def __lowercase ( self : List[str] ):
'''simple docstring'''
UpperCamelCase__ : str = CLIPProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
UpperCamelCase__ : Union[str, Any] = self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)" )
UpperCamelCase__ : int = self.get_image_processor(do_normalize=SCREAMING_SNAKE_CASE , padding_value=1.0 )
UpperCamelCase__ : Tuple = CLIPProcessor.from_pretrained(
self.tmpdirname , bos_token="(BOS)" , eos_token="(EOS)" , do_normalize=SCREAMING_SNAKE_CASE , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , SCREAMING_SNAKE_CASE )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , SCREAMING_SNAKE_CASE )
def __lowercase ( self : List[Any] ):
'''simple docstring'''
UpperCamelCase__ : List[str] = self.get_image_processor()
UpperCamelCase__ : Union[str, Any] = self.get_tokenizer()
UpperCamelCase__ : Any = CLIPProcessor(tokenizer=SCREAMING_SNAKE_CASE , image_processor=SCREAMING_SNAKE_CASE )
UpperCamelCase__ : Dict = self.prepare_image_inputs()
UpperCamelCase__ : List[Any] = image_processor(SCREAMING_SNAKE_CASE , return_tensors="np" )
UpperCamelCase__ : Optional[Any] = processor(images=SCREAMING_SNAKE_CASE , return_tensors="np" )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1e-2 )
def __lowercase ( self : Union[str, Any] ):
'''simple docstring'''
UpperCamelCase__ : str = self.get_image_processor()
UpperCamelCase__ : int = self.get_tokenizer()
UpperCamelCase__ : Any = CLIPProcessor(tokenizer=SCREAMING_SNAKE_CASE , image_processor=SCREAMING_SNAKE_CASE )
UpperCamelCase__ : List[Any] = "lower newer"
UpperCamelCase__ : int = processor(text=SCREAMING_SNAKE_CASE )
UpperCamelCase__ : Tuple = tokenizer(SCREAMING_SNAKE_CASE )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def __lowercase ( self : int ):
'''simple docstring'''
UpperCamelCase__ : List[str] = self.get_image_processor()
UpperCamelCase__ : Dict = self.get_tokenizer()
UpperCamelCase__ : Optional[Any] = CLIPProcessor(tokenizer=SCREAMING_SNAKE_CASE , image_processor=SCREAMING_SNAKE_CASE )
UpperCamelCase__ : Dict = "lower newer"
UpperCamelCase__ : List[Any] = self.prepare_image_inputs()
UpperCamelCase__ : Tuple = processor(text=SCREAMING_SNAKE_CASE , images=SCREAMING_SNAKE_CASE )
self.assertListEqual(list(inputs.keys() ) , ["input_ids", "attention_mask", "pixel_values"] )
# test if it raises when no input is passed
with pytest.raises(SCREAMING_SNAKE_CASE ):
processor()
def __lowercase ( self : Optional[int] ):
'''simple docstring'''
UpperCamelCase__ : Optional[Any] = self.get_image_processor()
UpperCamelCase__ : Optional[int] = self.get_tokenizer()
UpperCamelCase__ : Optional[Any] = CLIPProcessor(tokenizer=SCREAMING_SNAKE_CASE , image_processor=SCREAMING_SNAKE_CASE )
UpperCamelCase__ : List[str] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
UpperCamelCase__ : Optional[Any] = processor.batch_decode(SCREAMING_SNAKE_CASE )
UpperCamelCase__ : List[Any] = tokenizer.batch_decode(SCREAMING_SNAKE_CASE )
self.assertListEqual(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
def __lowercase ( self : List[str] ):
'''simple docstring'''
UpperCamelCase__ : Dict = self.get_image_processor()
UpperCamelCase__ : Tuple = self.get_tokenizer()
UpperCamelCase__ : Dict = CLIPProcessor(tokenizer=SCREAMING_SNAKE_CASE , image_processor=SCREAMING_SNAKE_CASE )
UpperCamelCase__ : Dict = "lower newer"
UpperCamelCase__ : List[str] = self.prepare_image_inputs()
UpperCamelCase__ : str = processor(text=SCREAMING_SNAKE_CASE , images=SCREAMING_SNAKE_CASE )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names ) | 196 | 1 |
"""simple docstring"""
import json
import logging
import os
import sys
from time import time
from unittest.mock import patch
from transformers.testing_utils import TestCasePlus, require_torch_tpu
logging.basicConfig(level=logging.DEBUG)
lowercase__ = logging.getLogger()
def _snake_case ( lowercase__ ):
_lowerCamelCase : List[Any] = {}
_lowerCamelCase : List[Any] = os.path.join(lowercase__ , 'all_results.json' )
if os.path.exists(lowercase__ ):
with open(lowercase__ , 'r' ) as f:
_lowerCamelCase : List[Any] = json.load(lowercase__ )
else:
raise ValueError(f'''can\'t find {path}''' )
return results
lowercase__ = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
@require_torch_tpu
class lowerCAmelCase__ ( lowercase ):
'''simple docstring'''
def A_ ( self ):
import xla_spawn
_lowerCamelCase : List[Any] = self.get_auto_remove_tmp_dir()
_lowerCamelCase : List[Any] = F'''
./examples/pytorch/text-classification/run_glue.py
--num_cores=8
./examples/pytorch/text-classification/run_glue.py
--model_name_or_path distilbert-base-uncased
--output_dir {tmp_dir}
--overwrite_output_dir
--train_file ./tests/fixtures/tests_samples/MRPC/train.csv
--validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
--do_train
--do_eval
--debug tpu_metrics_debug
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--learning_rate=1e-4
--max_steps=10
--warmup_steps=2
--seed=42
--max_seq_length=128
'''.split()
with patch.object(lowercase , 'argv' , lowercase ):
_lowerCamelCase : Dict = time()
xla_spawn.main()
_lowerCamelCase : Any = time()
_lowerCamelCase : Optional[int] = get_results(lowercase )
self.assertGreaterEqual(result['eval_accuracy'] , 0.75 )
# Assert that the script takes less than 500 seconds to make sure it doesn't hang.
self.assertLess(end - start , 500 )
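        # Timing sketch: time() is sampled immediately around xla_spawn.main(),
        # so the 500 s budget covers spawning the 8 TPU-core processes plus the
        # 10 training steps configured in the argv above.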
def A_ ( self ):
import xla_spawn
        _lowerCamelCase : Tuple = """
            ./tests/test_trainer_tpu.py
            --num_cores=8

            ./tests/test_trainer_tpu.py
            """.split()
with patch.object(lowercase , 'argv' , lowercase ):
xla_spawn.main() | 96 |
"""simple docstring"""
from __future__ import annotations
import math
import numpy as np
from numpy.linalg import norm
def _snake_case ( lowercase__ , lowercase__ ):
return math.sqrt(sum(pow(a - b , 2 ) for a, b in zip(lowercase__ , lowercase__ ) ) )
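# Self-contained check of the distance formula above (hypothetical vectors):
# (0, 0) to (3, 4) is sqrt(3**2 + 4**2) = 5.0.
assert math.sqrt(sum(pow(a - b, 2) for a, b in zip((0, 0), (3, 4)))) == 5.0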
def _snake_case ( lowercase__ , lowercase__ ):
if dataset.ndim != value_array.ndim:
_lowerCamelCase : Tuple = (
'Wrong input data\'s dimensions... '
f'''dataset : {dataset.ndim}, value_array : {value_array.ndim}'''
)
raise ValueError(lowercase__ )
try:
if dataset.shape[1] != value_array.shape[1]:
_lowerCamelCase : Optional[int] = (
'Wrong input data\'s shape... '
f'''dataset : {dataset.shape[1]}, value_array : {value_array.shape[1]}'''
)
raise ValueError(lowercase__ )
except IndexError:
if dataset.ndim != value_array.ndim:
raise TypeError('Wrong shape' )
if dataset.dtype != value_array.dtype:
_lowerCamelCase : int = (
'Input data have different datatype... '
f'''dataset : {dataset.dtype}, value_array : {value_array.dtype}'''
)
raise TypeError(lowercase__ )
_lowerCamelCase : Optional[int] = []
for value in value_array:
_lowerCamelCase : Tuple = euclidean(lowercase__ , dataset[0] )
_lowerCamelCase : Union[str, Any] = dataset[0].tolist()
for dataset_value in dataset[1:]:
_lowerCamelCase : Optional[Any] = euclidean(lowercase__ , lowercase__ )
if dist > temp_dist:
_lowerCamelCase : List[Any] = temp_dist
_lowerCamelCase : List[str] = dataset_value.tolist()
answer.append([vector, dist] )
return answer
def _snake_case ( lowercase__ , lowercase__ ):
return np.dot(lowercase__ , lowercase__ ) / (norm(lowercase__ ) * norm(lowercase__ ))
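# Self-contained check of the cosine formula above: parallel vectors score
# exactly 1, since dot([1, 2], [2, 4]) = 10 and the two norms multiply to 10.
assert np.isclose(np.dot([1, 2], [2, 4]) / (norm([1, 2]) * norm([2, 4])), 1.0)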
if __name__ == "__main__":
import doctest
doctest.testmod() | 96 | 1 |
import argparse
import torch
from transformers import GPTaConfig, GPTaModel, load_tf_weights_in_gpta
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def A__ ( lowerCamelCase , lowerCamelCase , lowerCamelCase ) -> Union[str, Any]:
# Construct model
if gpta_config_file == "":
UpperCamelCase_: Union[str, Any] = GPTaConfig()
else:
UpperCamelCase_: Optional[Any] = GPTaConfig.from_json_file(lowerCamelCase )
UpperCamelCase_: List[Any] = GPTaModel(lowerCamelCase )
# Load weights from numpy
load_tf_weights_in_gpta(lowerCamelCase , lowerCamelCase , lowerCamelCase )
# Save pytorch-model
UpperCamelCase_: int = pytorch_dump_folder_path + """/""" + WEIGHTS_NAME
UpperCamelCase_: Any = pytorch_dump_folder_path + """/""" + CONFIG_NAME
print(F'''Save PyTorch model to {pytorch_weights_dump_path}''' )
torch.save(model.state_dict() , lowerCamelCase )
print(F'''Save configuration file to {pytorch_config_dump_path}''' )
with open(lowerCamelCase , """w""" , encoding="""utf-8""" ) as f:
f.write(config.to_json_string() )
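# A hypothetical invocation sketch (the script name and paths are placeholders):
#
#   python convert_gpt2_checkpoint_to_pytorch.py \
#       --gpt2_checkpoint_path /path/to/tf_checkpoint \
#       --pytorch_dump_folder_path /path/to/output_dir
#
# which loads the TF checkpoint into the configured model and writes the
# WEIGHTS_NAME / CONFIG_NAME pair (pytorch_model.bin and config.json) into the
# output folder.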
if __name__ == "__main__":
lowerCamelCase_ : int = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--gpt2_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--gpt2_config_file""",
default="""""",
type=str,
help=(
"""An optional config json file corresponding to the pre-trained OpenAI model. \n"""
"""This specifies the model architecture."""
),
)
lowerCamelCase_ : List[str] = parser.parse_args()
convert_gpta_checkpoint_to_pytorch(args.gpta_checkpoint_path, args.gpta_config_file, args.pytorch_dump_folder_path)
| 223 |
def A__ ( lowerCamelCase , lowerCamelCase ) -> float:
if mass < 0:
raise ValueError("""The mass of a body cannot be negative""" )
return 0.5 * mass * abs(lowerCamelCase ) * abs(lowerCamelCase )
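# A worked sketch (hypothetical values): E = m * v**2 / 2, and the abs() calls
# above make the sign of the velocity irrelevant.
assert 0.5 * 10 * abs(-5) * abs(-5) == 125.0  # 10 kg at +/-5 m/s -> 125.0 J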
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
| 223 | 1 |
import argparse
import os
import re
__A : int = 'src/transformers/models/auto'
# re pattern that matches mapping introductions:
# SUPER_MODEL_MAPPING_NAMES = OrderedDict or SUPER_MODEL_MAPPING = OrderedDict
__A : Optional[int] = re.compile(R'''[A-Z_]+_MAPPING(\s+|_[A-Z_]+\s+)=\s+OrderedDict''')
# re pattern that matches identifiers in mappings
__A : Optional[int] = re.compile(R'''\s*\(\s*"(\S[^"]+)"''')
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase, _UpperCAmelCase = False ) -> List[str]:
'''simple docstring'''
with open(a_, 'r', encoding='utf-8' ) as f:
lowerCAmelCase : Optional[int] = f.read()
lowerCAmelCase : List[Any] = content.split('\n' )
lowerCAmelCase : List[Any] = []
lowerCAmelCase : Optional[int] = 0
while line_idx < len(a_ ):
if _re_intro_mapping.search(lines[line_idx] ) is not None:
lowerCAmelCase : Optional[int] = len(re.search(r'^(\s*)\S', lines[line_idx] ).groups()[0] ) + 8
# Start of a new mapping!
while not lines[line_idx].startswith(' ' * indent + '(' ):
new_lines.append(lines[line_idx] )
line_idx += 1
lowerCAmelCase : Any = []
while lines[line_idx].strip() != "]":
# Blocks either fit in one line or not
if lines[line_idx].strip() == "(":
lowerCAmelCase : List[str] = line_idx
while not lines[line_idx].startswith(' ' * indent + ')' ):
line_idx += 1
blocks.append('\n'.join(lines[start_idx : line_idx + 1] ) )
else:
blocks.append(lines[line_idx] )
line_idx += 1
# Sort blocks by their identifiers
lowerCAmelCase : Optional[int] = sorted(a_, key=lambda _UpperCAmelCase : _re_identifier.search(a_ ).groups()[0] )
new_lines += blocks
else:
new_lines.append(lines[line_idx] )
line_idx += 1
if overwrite:
with open(a_, 'w', encoding='utf-8' ) as f:
f.write('\n'.join(a_ ) )
elif "\n".join(a_ ) != content:
return True
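# Quick self-contained check that the identifier regex captures the first
# quoted key of a mapping block (hypothetical input line):
assert _re_identifier.search('    ("albert", "AlbertModel"),').groups()[0] == "albert"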
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase = False ) -> List[Any]:
'''simple docstring'''
lowerCAmelCase : List[str] = [os.path.join(a_, a_ ) for f in os.listdir(a_ ) if f.endswith('.py' )]
lowerCAmelCase : Union[str, Any] = [sort_auto_mapping(a_, overwrite=a_ ) for fname in fnames]
if not overwrite and any(a_ ):
lowerCAmelCase : Tuple = [f for f, d in zip(a_, a_ ) if d]
raise ValueError(
f"The following files have auto mappings that need sorting: {', '.join(a_ )}. Run `make style` to fix"
' this.' )
if __name__ == "__main__":
__A : int = argparse.ArgumentParser()
parser.add_argument('''--check_only''', action='''store_true''', help='''Whether to only check or fix style.''')
__A : List[Any] = parser.parse_args()
sort_all_auto_mappings(not args.check_only)
| 138 |
'''simple docstring'''
class _lowerCAmelCase :
"""simple docstring"""
def __init__( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> Union[str, Any]:
A_ : Optional[Any] = name
A_ : Dict = value
A_ : Union[str, Any] = weight
def __repr__( self ) -> List[str]:
return F"{self.__class__.__name__}({self.name}, {self.value}, {self.weight})"
def UpperCAmelCase_ ( self ) -> Optional[Any]:
return self.value
def UpperCAmelCase_ ( self ) -> List[str]:
return self.name
def UpperCAmelCase_ ( self ) -> Tuple:
return self.weight
def UpperCAmelCase_ ( self ) -> Optional[int]:
return self.value / self.weight
def UpperCAmelCase ( a_ , a_ , a_ ) -> str:
"""simple docstring"""
A_ : Optional[int] = []
for i in range(len(a_ ) ):
menu.append(Things(name[i] , value[i] , weight[i] ) )
return menu
def UpperCAmelCase ( a_ , a_ , a_ ) -> List[Any]:
"""simple docstring"""
A_ : Optional[Any] = sorted(a_ , key=a_ , reverse=a_ )
A_ : str = []
A_ , A_ : Dict = 0.0, 0.0
for i in range(len(a_ ) ):
if (total_cost + items_copy[i].get_weight()) <= max_cost:
result.append(items_copy[i] )
total_cost += items_copy[i].get_weight()
total_value += items_copy[i].get_value()
return (result, total_value)
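# A worked sketch of the greedy loop above (hypothetical menu, assuming the
# sort key is the item value): given (value, weight) pairs (10, 5), (9, 1) and
# (3, 2) with max_cost = 6, the descending pass keeps (10, 5) and then (9, 1)
# for a running weight of 6, skips (3, 2), and returns total_value = 19.0.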
def UpperCAmelCase ( ) -> Tuple:
"""simple docstring"""
if __name__ == "__main__":
import doctest
doctest.testmod()
| 344 | 0 |
# Using DFS to find an Eulerian path / circuit traversal
def UpperCamelCase_( _snake_case : int , _snake_case : Tuple , _snake_case : Any , _snake_case : Optional[int]=None ):
"""simple docstring"""
__a =(path or []) + [u]
for v in graph[u]:
if visited_edge[u][v] is False:
__a , __a =True, True
__a =dfs(_snake_case , _snake_case , _snake_case , _snake_case )
return path
def UpperCamelCase_( _snake_case : int , _snake_case : Optional[Any] ):
"""simple docstring"""
__a =0
__a =-1
for i in range(_snake_case ):
if i not in graph.keys():
continue
if len(graph[i] ) % 2 == 1:
odd_degree_nodes += 1
__a =i
if odd_degree_nodes == 0:
return 1, odd_node
if odd_degree_nodes == 2:
return 2, odd_node
return 3, odd_node
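# Parity rule behind the return codes above: a connected graph has an Euler
# circuit iff every vertex has even degree (code 1), an Euler path iff exactly
# two vertices have odd degree (code 2, and the path must start at one of
# them), and neither when more odd-degree vertices exist (code 3).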
def UpperCamelCase_( _snake_case : Optional[int] , _snake_case : Tuple ):
"""simple docstring"""
__a =[[False for _ in range(max_node + 1 )] for _ in range(max_node + 1 )]
__a , __a =check_circuit_or_path(_snake_case , _snake_case )
if check == 3:
print('graph is not Eulerian' )
print('no path' )
return
__a =1
if check == 2:
__a =odd_node
        print('graph has an Euler path' )
if check == 1:
        print('graph has an Euler cycle' )
__a =dfs(_snake_case , _snake_case , _snake_case )
print(_snake_case )
def UpperCamelCase_( ):
"""simple docstring"""
__a ={1: [2, 3, 4], 2: [1, 3], 3: [1, 2], 4: [1, 5], 5: [4]}
__a ={1: [2, 3, 4, 5], 2: [1, 3], 3: [1, 2], 4: [1, 5], 5: [1, 4]}
__a ={1: [2, 3, 4], 2: [1, 3, 4], 3: [1, 2], 4: [1, 2, 5], 5: [4]}
__a ={1: [2, 3], 2: [1, 3], 3: [1, 2]}
__a ={
1: [],
2: []
        # all degrees are zero
}
__a =10
check_euler(_snake_case , _snake_case )
check_euler(_snake_case , _snake_case )
check_euler(_snake_case , _snake_case )
check_euler(_snake_case , _snake_case )
check_euler(_snake_case , _snake_case )
if __name__ == "__main__":
main()
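# Expected behaviour sketch for the triangle graph {1: [2, 3], 2: [1, 3],
# 3: [1, 2]} above: all three vertices have even degree, so the check prints
# "graph has an Euler cycle" followed by one valid traversal, e.g. [1, 2, 3, 1].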
| 361 |
import warnings
from ...utils import logging
from .image_processing_mobilevit import MobileViTImageProcessor
_lowerCAmelCase : Any = logging.get_logger(__name__)
class __magic_name__ ( lowerCAmelCase_ ):
def __init__( self , *__snake_case , **__snake_case ) -> None:
'''simple docstring'''
warnings.warn(
'The class MobileViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'
' Please use MobileViTImageProcessor instead.' , __snake_case , )
super().__init__(*__snake_case , **__snake_case )
| 308 | 0 |
"""simple docstring"""
from maths.prime_factors import prime_factors
def _snake_case ( lowercase__ : Dict ) -> int:
'''simple docstring'''
if not isinstance(lowercase__ , lowercase__ ):
lowerCAmelCase_ :Union[str, Any] = f"""Input value of [number={number}] must be an integer"""
raise TypeError(lowercase__ )
if number < 1:
raise ValueError("""Input must be a positive integer""" )
return -1 if len(prime_factors(lowercase__ ) ) % 2 else 1
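# Worked sketch: this computes the Liouville function lambda(n) = (-1)**Omega(n),
# where Omega counts prime factors with multiplicity; 12 = 2 * 2 * 3 has
# Omega = 3 (odd), so the intended result is -1, while 4 = 2 * 2 gives +1.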
if __name__ == "__main__":
import doctest
doctest.testmod()
| 84 |
'''simple docstring'''
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
BERT_INPUTS_DOCSTRING,
BERT_START_DOCSTRING,
BertEmbeddings,
BertLayer,
BertPooler,
BertPreTrainedModel,
)
def snake_case ( UpperCAmelCase )-> Dict:
"""simple docstring"""
__A = torch.exp(UpperCAmelCase )
__A = torch.sum(UpperCAmelCase , dim=1 ) # sum of exp(x_i)
__A = torch.sum(x * exp_x , dim=1 ) # sum of x_i * exp(x_i)
return torch.log(UpperCAmelCase ) - B / A
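# Derivation sketch for the closed form above: with p_i = exp(x_i) / A and
# A = sum_j exp(x_j), the Shannon entropy -sum_i p_i * log(p_i) expands to
# log(A) - (1/A) * sum_i x_i * exp(x_i), i.e. exactly log(A) - B / A.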
class UpperCamelCase__ ( nn.Module):
def __init__( self :Any , _A :int ) -> Union[str, Any]:
'''simple docstring'''
super().__init__()
__A = config.output_attentions
__A = config.output_hidden_states
__A = nn.ModuleList([BertLayer(_A ) for _ in range(config.num_hidden_layers )] )
__A = nn.ModuleList([BertHighway(_A ) for _ in range(config.num_hidden_layers )] )
__A = [-1 for _ in range(config.num_hidden_layers )]
def lowercase_ ( self :Any , _A :List[Any] ) -> Tuple:
'''simple docstring'''
if (type(_A ) is float) or (type(_A ) is int):
for i in range(len(self.early_exit_entropy ) ):
__A = x
else:
__A = x
def lowercase_ ( self :Optional[Any] , _A :List[str] ) -> Dict:
'''simple docstring'''
__A = pooler.state_dict()
for highway in self.highway:
for name, param in highway.pooler.state_dict().items():
param.copy_(loaded_model[name] )
def lowercase_ ( self :List[Any] , _A :Tuple , _A :Tuple=None , _A :int=None , _A :List[Any]=None , _A :str=None , ) -> Tuple:
'''simple docstring'''
__A = ()
__A = ()
__A = ()
for i, layer_module in enumerate(self.layer ):
if self.output_hidden_states:
__A = all_hidden_states + (hidden_states,)
__A = layer_module(
_A , _A , head_mask[i] , _A , _A )
__A = layer_outputs[0]
if self.output_attentions:
__A = all_attentions + (layer_outputs[1],)
__A = (hidden_states,)
if self.output_hidden_states:
__A = current_outputs + (all_hidden_states,)
if self.output_attentions:
__A = current_outputs + (all_attentions,)
__A = self.highway[i](_A )
# logits, pooled_output
if not self.training:
__A = highway_exit[0]
__A = entropy(_A )
__A = highway_exit + (highway_entropy,) # logits, hidden_states(?), entropy
__A = all_highway_exits + (highway_exit,)
if highway_entropy < self.early_exit_entropy[i]:
__A = (highway_logits,) + current_outputs[1:] + (all_highway_exits,)
raise HighwayException(_A , i + 1 )
else:
__A = all_highway_exits + (highway_exit,)
# Add last layer
if self.output_hidden_states:
__A = all_hidden_states + (hidden_states,)
__A = (hidden_states,)
if self.output_hidden_states:
__A = outputs + (all_hidden_states,)
if self.output_attentions:
__A = outputs + (all_attentions,)
__A = outputs + (all_highway_exits,)
return outputs # last-layer hidden state, (all hidden states), (all attentions), all highway exits
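# Early-exit mechanics in the encoder above: at inference time every layer's
# highway head classifies the running hidden states, the entropy of its logits
# is compared against that layer's threshold, and a HighwayException carries
# the early prediction out of the loop so the deeper layers are skipped.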
@add_start_docstrings(
'The Bert Model transformer with early exiting (DeeBERT). ' , SCREAMING_SNAKE_CASE , )
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE):
def __init__( self :Tuple , _A :List[str] ) -> str:
'''simple docstring'''
super().__init__(_A )
__A = config
__A = BertEmbeddings(_A )
__A = DeeBertEncoder(_A )
__A = BertPooler(_A )
self.init_weights()
def lowercase_ ( self :Union[str, Any] ) -> str:
'''simple docstring'''
self.encoder.init_highway_pooler(self.pooler )
def lowercase_ ( self :Optional[Any] ) -> Dict:
'''simple docstring'''
return self.embeddings.word_embeddings
def lowercase_ ( self :Tuple , _A :Tuple ) -> Union[str, Any]:
'''simple docstring'''
__A = value
def lowercase_ ( self :int , _A :int ) -> Tuple:
'''simple docstring'''
for layer, heads in heads_to_prune.items():
self.encoder.layer[layer].attention.prune_heads(_A )
@add_start_docstrings_to_model_forward(_A )
def lowercase_ ( self :Tuple , _A :int=None , _A :List[Any]=None , _A :Optional[int]=None , _A :Optional[int]=None , _A :Optional[int]=None , _A :Any=None , _A :List[str]=None , _A :Optional[Any]=None , ) -> Union[str, Any]:
'''simple docstring'''
if input_ids is not None and inputs_embeds is not None:
raise ValueError('You cannot specify both input_ids and inputs_embeds at the same time' )
elif input_ids is not None:
__A = input_ids.size()
elif inputs_embeds is not None:
__A = inputs_embeds.size()[:-1]
else:
raise ValueError('You have to specify either input_ids or inputs_embeds' )
__A = input_ids.device if input_ids is not None else inputs_embeds.device
if attention_mask is None:
__A = torch.ones(_A , device=_A )
if encoder_attention_mask is None:
__A = torch.ones(_A , device=_A )
if token_type_ids is None:
__A = torch.zeros(_A , dtype=torch.long , device=_A )
# We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
# ourselves in which case we just need to make it broadcastable to all heads.
__A = self.get_extended_attention_mask(_A , _A , _A )
        # If a 2D or 3D attention mask is provided for the cross-attention
        # we need to make it broadcastable to [batch_size, num_heads, seq_length, seq_length]
if encoder_attention_mask.dim() == 3:
__A = encoder_attention_mask[:, None, :, :]
if encoder_attention_mask.dim() == 2:
__A = encoder_attention_mask[:, None, None, :]
__A = encoder_extended_attention_mask.to(
dtype=next(self.parameters() ).dtype ) # fp16 compatibility
__A = (1.0 - encoder_extended_attention_mask) * -10_000.0
# Prepare head mask if needed
# 1.0 in head_mask indicate we keep the head
# attention_probs has shape bsz x n_heads x N x N
# input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
# and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
__A = self.get_head_mask(_A , self.config.num_hidden_layers )
__A = self.embeddings(
input_ids=_A , position_ids=_A , token_type_ids=_A , inputs_embeds=_A )
__A = self.encoder(
_A , attention_mask=_A , head_mask=_A , encoder_hidden_states=_A , encoder_attention_mask=_A , )
__A = encoder_outputs[0]
__A = self.pooler(_A )
__A = (
sequence_output,
pooled_output,
) + encoder_outputs[
1:
] # add hidden_states and attentions if they are here
return outputs # sequence_output, pooled_output, (hidden_states), (attentions), highway exits
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE):
def __init__( self :Optional[Any] , _A :str , _A :List[str] ) -> Optional[int]:
'''simple docstring'''
__A = message
__A = exit_layer # start from 1!
class UpperCamelCase__ ( nn.Module):
def __init__( self :Any , _A :Dict ) -> Tuple:
'''simple docstring'''
super().__init__()
__A = BertPooler(_A )
__A = nn.Dropout(config.hidden_dropout_prob )
__A = nn.Linear(config.hidden_size , config.num_labels )
def lowercase_ ( self :List[Any] , _A :Optional[Any] ) -> int:
'''simple docstring'''
__A = encoder_outputs[0]
__A = self.pooler(_A )
# "return" pooler_output
# BertModel
__A = (pooler_input, pooler_output) + encoder_outputs[1:]
# "return" bmodel_output
# Dropout and classification
__A = bmodel_output[1]
__A = self.dropout(_A )
__A = self.classifier(_A )
return logits, pooled_output
@add_start_docstrings(
'Bert Model (with early exiting - DeeBERT) with a classifier on top,\n also takes care of multi-layer training. ' , SCREAMING_SNAKE_CASE , )
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE):
def __init__( self :str , _A :Optional[Any] ) -> str:
'''simple docstring'''
super().__init__(_A )
__A = config.num_labels
__A = config.num_hidden_layers
__A = DeeBertModel(_A )
__A = nn.Dropout(config.hidden_dropout_prob )
__A = nn.Linear(config.hidden_size , self.config.num_labels )
self.init_weights()
@add_start_docstrings_to_model_forward(_A )
def lowercase_ ( self :Tuple , _A :str=None , _A :Optional[int]=None , _A :Any=None , _A :str=None , _A :int=None , _A :Tuple=None , _A :Any=None , _A :List[str]=-1 , _A :Optional[Any]=False , ) -> List[str]:
'''simple docstring'''
__A = self.num_layers
try:
__A = self.bert(
_A , attention_mask=_A , token_type_ids=_A , position_ids=_A , head_mask=_A , inputs_embeds=_A , )
# sequence_output, pooled_output, (hidden_states), (attentions), highway exits
__A = outputs[1]
__A = self.dropout(_A )
__A = self.classifier(_A )
__A = (logits,) + outputs[2:] # add hidden states and attention if they are here
except HighwayException as e:
__A = e.message
__A = e.exit_layer
__A = outputs[0]
if not self.training:
__A = entropy(_A )
__A = []
__A = []
if labels is not None:
if self.num_labels == 1:
# We are doing regression
__A = MSELoss()
__A = loss_fct(logits.view(-1 ) , labels.view(-1 ) )
else:
__A = CrossEntropyLoss()
__A = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
# work with highway exits
__A = []
for highway_exit in outputs[-1]:
__A = highway_exit[0]
if not self.training:
highway_logits_all.append(_A )
highway_entropy.append(highway_exit[2] )
if self.num_labels == 1:
# We are doing regression
__A = MSELoss()
__A = loss_fct(highway_logits.view(-1 ) , labels.view(-1 ) )
else:
__A = CrossEntropyLoss()
__A = loss_fct(highway_logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
highway_losses.append(_A )
if train_highway:
__A = (sum(highway_losses[:-1] ),) + outputs
# exclude the final highway, of course
else:
__A = (loss,) + outputs
if not self.training:
__A = outputs + ((original_entropy, highway_entropy), exit_layer)
if output_layer >= 0:
__A = (
(outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
) # use the highway of the last layer
return outputs # (loss), logits, (hidden_states), (attentions), (highway_exits)
| 161 | 0 |
"""simple docstring"""
lowerCamelCase = {
"""Pillow""": """Pillow""",
"""accelerate""": """accelerate>=0.11.0""",
"""compel""": """compel==0.1.8""",
"""black""": """black~=23.1""",
"""datasets""": """datasets""",
"""filelock""": """filelock""",
"""flax""": """flax>=0.4.1""",
"""hf-doc-builder""": """hf-doc-builder>=0.3.0""",
"""huggingface-hub""": """huggingface-hub>=0.13.2""",
"""requests-mock""": """requests-mock==1.10.0""",
"""importlib_metadata""": """importlib_metadata""",
"""invisible-watermark""": """invisible-watermark""",
"""isort""": """isort>=5.5.4""",
"""jax""": """jax>=0.2.8,!=0.3.2""",
"""jaxlib""": """jaxlib>=0.1.65""",
"""Jinja2""": """Jinja2""",
"""k-diffusion""": """k-diffusion>=0.0.12""",
"""torchsde""": """torchsde""",
"""note_seq""": """note_seq""",
"""librosa""": """librosa""",
"""numpy""": """numpy""",
"""omegaconf""": """omegaconf""",
"""parameterized""": """parameterized""",
"""protobuf""": """protobuf>=3.20.3,<4""",
"""pytest""": """pytest""",
"""pytest-timeout""": """pytest-timeout""",
"""pytest-xdist""": """pytest-xdist""",
"""ruff""": """ruff>=0.0.241""",
"""safetensors""": """safetensors""",
"""sentencepiece""": """sentencepiece>=0.1.91,!=0.1.92""",
"""scipy""": """scipy""",
"""onnx""": """onnx""",
"""regex""": """regex!=2019.12.17""",
"""requests""": """requests""",
"""tensorboard""": """tensorboard""",
"""torch""": """torch>=1.4""",
"""torchvision""": """torchvision""",
"""transformers""": """transformers>=4.25.1""",
"""urllib3""": """urllib3<=2.0.0""",
}
| 241 |
"""simple docstring"""
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
if is_torch_available():
import torch
from transformers import XLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_torch
class lowercase__ ( unittest.TestCase ):
'''simple docstring'''
@slow
def lowercase__ ( self : Tuple ) -> str:
'''simple docstring'''
UpperCAmelCase_ = XLMRobertaModel.from_pretrained("xlm-roberta-base" )
UpperCAmelCase_ = torch.tensor([[0, 581, 10269, 83, 99942, 136, 60742, 23, 70, 80583, 18276, 2]] )
# The dog is cute and lives in the garden house
UpperCAmelCase_ = torch.Size((1, 12, 768) ) # batch_size, sequence_length, embedding_vector_dim
UpperCAmelCase_ = torch.tensor(
[[-0.0101, 0.1218, -0.0803, 0.0801, 0.1327, 0.0776, -0.1215, 0.2383, 0.3338, 0.3106, 0.0300, 0.0252]] )
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base')
# xlmr.eval()
# expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
with torch.no_grad():
UpperCAmelCase_ = model(_UpperCAmelCase )["last_hidden_state"].detach()
self.assertEqual(output.shape , _UpperCAmelCase )
# compare the actual values for a slice of last dim
self.assertTrue(torch.allclose(output[:, :, -1] , _UpperCAmelCase , atol=1e-3 ) )
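    # Note on the check above: only the last embedding dimension of each of the
    # 12 tokens is compared (a [1, 12] slice) against reference values exported
    # from fairseq's xlmr.base, with atol=1e-3 absorbing framework-level drift.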
@slow
def lowercase__ ( self : List[Any] ) -> Optional[int]:
'''simple docstring'''
UpperCAmelCase_ = XLMRobertaModel.from_pretrained("xlm-roberta-large" )
UpperCAmelCase_ = torch.tensor([[0, 581, 10269, 83, 99942, 136, 60742, 23, 70, 80583, 18276, 2]] )
# The dog is cute and lives in the garden house
UpperCAmelCase_ = torch.Size((1, 12, 1024) ) # batch_size, sequence_length, embedding_vector_dim
UpperCAmelCase_ = torch.tensor(
[[-0.0699, -0.0318, 0.0705, -0.1241, 0.0999, -0.0520, 0.1004, -0.1838, -0.4704, 0.1437, 0.0821, 0.0126]] )
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.large')
# xlmr.eval()
# expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
with torch.no_grad():
UpperCAmelCase_ = model(_UpperCAmelCase )["last_hidden_state"].detach()
self.assertEqual(output.shape , _UpperCAmelCase )
# compare the actual values for a slice of last dim
self.assertTrue(torch.allclose(output[:, :, -1] , _UpperCAmelCase , atol=1e-3 ) )
| 241 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
__lowerCAmelCase = {
'''configuration_mask2former''': [
'''MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''Mask2FormerConfig''',
],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase = ['''Mask2FormerImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase = [
'''MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''Mask2FormerForUniversalSegmentation''',
'''Mask2FormerModel''',
'''Mask2FormerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_maskaformer import MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, MaskaFormerConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_maskaformer import MaskaFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_maskaformer import (
MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
MaskaFormerForUniversalSegmentation,
MaskaFormerModel,
MaskaFormerPreTrainedModel,
)
else:
import sys
__lowerCAmelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
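# Lazy-import sketch: nothing in _import_structure is imported when the package
# itself is imported; _LazyModule resolves each symbol on first attribute
# access, so e.g. reading Mask2FormerConfig never pulls in torch or PIL until a
# model or image-processor class is actually touched.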
| 196 |
__lowerCAmelCase = {
'''A''': ['''B''', '''C''', '''E'''],
'''B''': ['''A''', '''D''', '''E'''],
'''C''': ['''A''', '''F''', '''G'''],
'''D''': ['''B'''],
'''E''': ['''A''', '''B''', '''D'''],
'''F''': ['''C'''],
'''G''': ['''C'''],
}
def snake_case_ ( snake_case , snake_case , snake_case ) -> list[str]:
lowercase__: int = set()
# keep track of all the paths to be checked
lowercase__: Optional[int] = [[start]]
# return path if start is goal
if start == goal:
return [start]
# keeps looping until all possible paths have been checked
while queue:
# pop the first path from the queue
lowercase__: List[Any] = queue.pop(0 )
# get the last node from the path
lowercase__: Optional[int] = path[-1]
if node not in explored:
lowercase__: Optional[Any] = graph[node]
# go through all neighbour nodes, construct a new path and
# push it into the queue
for neighbour in neighbours:
lowercase__: Tuple = list(snake_case )
new_path.append(snake_case )
queue.append(snake_case )
# return path if neighbour is goal
if neighbour == goal:
return new_path
# mark node as explored
explored.add(snake_case )
# in case there's no path between the 2 nodes
return []
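# Why the first hit is shortest: the queue is FIFO, so every partial path of
# length k is expanded before any path of length k + 1; the first path that
# reaches `goal` therefore uses the minimum possible number of edges.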
def snake_case_ ( snake_case , snake_case , snake_case ) -> int:
if not graph or start not in graph or target not in graph:
return -1
if start == target:
return 0
lowercase__: Tuple = [start]
lowercase__: List[Any] = set(snake_case )
    # Keep track of distances from the `start` node.
lowercase__: Tuple = {start: 0, target: -1}
while queue:
lowercase__: Dict = queue.pop(0 )
if node == target:
lowercase__: str = (
dist[node] if dist[target] == -1 else min(dist[target] , dist[node] )
)
for adjacent in graph[node]:
if adjacent not in visited:
visited.add(snake_case )
queue.append(snake_case )
lowercase__: List[str] = dist[node] + 1
return dist[target]
if __name__ == "__main__":
print(bfs_shortest_path(demo_graph, '''G''', '''D''')) # returns ['G', 'C', 'A', 'B', 'D']
print(bfs_shortest_path_distance(demo_graph, '''G''', '''D''')) # returns 4
| 196 | 1 |
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
a : List[Any] = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, 'utils'))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If BertLMPredictionHead is changed in modeling_bert.py, this code needs to be manually updated.
a : int = ' def __init__(self, config):\n super().__init__()\n self.transform = BertPredictionHeadTransform(config)\n\n # The output weights are the same as the input embeddings, but there is\n # an output-only bias for each token.\n self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)\n\n self.bias = nn.Parameter(torch.zeros(config.vocab_size))\n\n # Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`\n self.decoder.bias = self.bias\n\n def forward(self, hidden_states):\n hidden_states = self.transform(hidden_states)\n hidden_states = self.decoder(hidden_states)\n return hidden_states\n'
class _a ( unittest.TestCase ):
def __snake_case (self ) -> Union[str, Any]:
UpperCAmelCase_: List[Any] = tempfile.mkdtemp()
os.makedirs(os.path.join(self.transformer_dir, """models/bert/""" ) )
UpperCAmelCase_: Dict = self.transformer_dir
shutil.copy(
os.path.join(SCREAMING_SNAKE_CASE_, """src/transformers/models/bert/modeling_bert.py""" ), os.path.join(self.transformer_dir, """models/bert/modeling_bert.py""" ), )
def __snake_case (self ) -> Union[str, Any]:
UpperCAmelCase_: Any = """src/transformers"""
shutil.rmtree(self.transformer_dir )
def __snake_case (self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_=None ) -> List[Any]:
UpperCAmelCase_: List[str] = comment + f'\nclass {class_name}(nn.Module):\n' + class_code
if overwrite_result is not None:
UpperCAmelCase_: Optional[Any] = comment + f'\nclass {class_name}(nn.Module):\n' + overwrite_result
UpperCAmelCase_: Dict = black.Mode(target_versions={black.TargetVersion.PYaa}, line_length=119 )
UpperCAmelCase_: Dict = black.format_str(SCREAMING_SNAKE_CASE_, mode=SCREAMING_SNAKE_CASE_ )
UpperCAmelCase_: str = os.path.join(self.transformer_dir, """new_code.py""" )
with open(SCREAMING_SNAKE_CASE_, """w""", newline="""\n""" ) as f:
f.write(SCREAMING_SNAKE_CASE_ )
if overwrite_result is None:
self.assertTrue(len(check_copies.is_copy_consistent(SCREAMING_SNAKE_CASE_ ) ) == 0 )
else:
check_copies.is_copy_consistent(f.name, overwrite=SCREAMING_SNAKE_CASE_ )
with open(SCREAMING_SNAKE_CASE_, """r""" ) as f:
self.assertTrue(f.read(), SCREAMING_SNAKE_CASE_ )
def __snake_case (self ) -> Optional[int]:
UpperCAmelCase_: str = check_copies.find_code_in_transformers("""models.bert.modeling_bert.BertLMPredictionHead""" )
self.assertEqual(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ )
def __snake_case (self ) -> List[str]:
# Base copy consistency
self.check_copy_consistency(
"""# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead""", """BertLMPredictionHead""", REFERENCE_CODE + """\n""", )
# With no empty line at the end
self.check_copy_consistency(
"""# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead""", """BertLMPredictionHead""", SCREAMING_SNAKE_CASE_, )
# Copy consistency with rename
self.check_copy_consistency(
"""# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel""", """TestModelLMPredictionHead""", re.sub("""Bert""", """TestModel""", SCREAMING_SNAKE_CASE_ ), )
# Copy consistency with a really long name
UpperCAmelCase_: Any = """TestModelWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason"""
self.check_copy_consistency(
f'# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->{long_class_name}', f'{long_class_name}LMPredictionHead', re.sub("""Bert""", SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ), )
# Copy consistency with overwrite
self.check_copy_consistency(
"""# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel""", """TestModelLMPredictionHead""", SCREAMING_SNAKE_CASE_, overwrite_result=re.sub("""Bert""", """TestModel""", SCREAMING_SNAKE_CASE_ ), )
def __snake_case (self ) -> Union[str, Any]:
UpperCAmelCase_: Union[str, Any] = check_copies.LOCALIZED_READMES["""README_zh-hans.md"""]
UpperCAmelCase_: Tuple = (
"""1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the"""
""" Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for"""
""" Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong"""
""" Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut.\n1."""
""" **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (from HuggingFace),"""
""" released together with the paper [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and"""
""" lighter](https://arxiv.org/abs/1910.01108) by Victor Sanh, Lysandre Debut and Thomas Wolf. The same"""
""" method has been applied to compress GPT2 into"""
""" [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into"""
""" [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),"""
""" Multilingual BERT into"""
""" [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German"""
""" version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)**"""
""" (from Google Research/Stanford University) released with the paper [ELECTRA: Pre-training text encoders"""
""" as discriminators rather than generators](https://arxiv.org/abs/2003.10555) by Kevin Clark, Minh-Thang"""
""" Luong, Quoc V. Le, Christopher D. Manning."""
)
UpperCAmelCase_: Optional[Any] = (
"""1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the"""
""" Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"""
""" Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"""
""" Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n"""
)
UpperCAmelCase_: Tuple = (
"""1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the"""
""" Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"""
""" Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"""
""" Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n1."""
""" **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (来自 HuggingFace) 伴随论文"""
""" [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and"""
""" lighter](https://arxiv.org/abs/1910.01108) 由 Victor Sanh, Lysandre Debut and Thomas Wolf 发布。 The same"""
""" method has been applied to compress GPT2 into"""
""" [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into"""
""" [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),"""
""" Multilingual BERT into"""
""" [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German"""
""" version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)** (来自"""
""" Google Research/Stanford University) 伴随论文 [ELECTRA: Pre-training text encoders as discriminators rather"""
""" than generators](https://arxiv.org/abs/2003.10555) 由 Kevin Clark, Minh-Thang Luong, Quoc V. Le,"""
""" Christopher D. Manning 发布。\n"""
)
UpperCAmelCase_ , UpperCAmelCase_: int = check_copies.convert_to_localized_md(
SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, localized_readme["""format_model_list"""] )
self.assertFalse(SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ )
UpperCAmelCase_ , UpperCAmelCase_: List[str] = check_copies.convert_to_localized_md(
SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, localized_readme["""format_model_list"""] )
        # Check that the number of models matches the count in README.md after conversion.
self.assertTrue(SCREAMING_SNAKE_CASE_ )
UpperCAmelCase_: Union[str, Any] = (
"""1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the"""
""" Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for"""
""" Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong"""
""" Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut."""
)
UpperCAmelCase_: Optional[int] = (
"""1. **[ALBERT](https://huggingface.co/transformers/main/model_doc/albert.html)** (来自 Google Research and"""
""" the Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"""
""" Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"""
""" Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n"""
)
UpperCAmelCase_: int = (
"""1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the"""
""" Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"""
""" Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"""
""" Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n"""
)
UpperCAmelCase_ , UpperCAmelCase_: List[str] = check_copies.convert_to_localized_md(
SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, localized_readme["""format_model_list"""] )
# Check if the model link is synchronized.
self.assertEqual(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ )
| 82 |
import unittest
from transformers import load_tool
from .test_tools_common import ToolTesterMixin
class _a ( unittest.TestCase , _lowerCAmelCase ):
def __snake_case (self ) -> Optional[int]:
UpperCAmelCase_: Optional[int] = load_tool("""text-classification""" )
self.tool.setup()
UpperCAmelCase_: str = load_tool("""text-classification""", remote=SCREAMING_SNAKE_CASE_ )
def __snake_case (self ) -> Optional[Any]:
UpperCAmelCase_: Any = self.tool("""That's quite cool""", ["""positive""", """negative"""] )
self.assertEqual(SCREAMING_SNAKE_CASE_, """positive""" )
def __snake_case (self ) -> Optional[int]:
UpperCAmelCase_: List[str] = self.remote_tool("""That's quite cool""", ["""positive""", """negative"""] )
self.assertEqual(SCREAMING_SNAKE_CASE_, """positive""" )
def __snake_case (self ) -> Any:
UpperCAmelCase_: Tuple = self.tool(text="""That's quite cool""", labels=["""positive""", """negative"""] )
self.assertEqual(SCREAMING_SNAKE_CASE_, """positive""" )
def __snake_case (self ) -> int:
UpperCAmelCase_: Dict = self.remote_tool(text="""That's quite cool""", labels=["""positive""", """negative"""] )
self.assertEqual(SCREAMING_SNAKE_CASE_, """positive""" )
| 82 | 1 |
'''simple docstring'''
import math
from collections.abc import Iterator
from itertools import takewhile
def UpperCAmelCase_ ( __lowerCamelCase : int ):
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
    # All primes greater than 3 are of the form 6k +/- 1
for i in range(5 ,int(math.sqrt(__lowerCamelCase ) + 1 ) ,6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
def UpperCAmelCase_ ( ):
lowercase_ :Optional[Any] = 2
while True:
if is_prime(__lowerCamelCase ):
yield num
num += 1
def UpperCAmelCase_ ( __lowerCamelCase : int = 2_00_00_00 ):
return sum(takewhile(lambda __lowerCamelCase : x < n ,prime_generator() ) )
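# Worked sketch (hypothetical call): the primes below ten are 2, 3, 5 and 7,
# so a bound of 10 is intended to yield 2 + 3 + 5 + 7 = 17; the default bound
# of 2_000_000 corresponds to Project Euler problem 10.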
if __name__ == "__main__":
print(F'''{solution() = }''')
| 223 |
'''simple docstring'''
import unittest
import numpy as np
from transformers import BertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.bert.modeling_flax_bert import (
FlaxBertForMaskedLM,
FlaxBertForMultipleChoice,
FlaxBertForNextSentencePrediction,
FlaxBertForPreTraining,
FlaxBertForQuestionAnswering,
FlaxBertForSequenceClassification,
FlaxBertForTokenClassification,
FlaxBertModel,
)
class FlaxBertModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = BertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict

    def prepare_config_and_inputs_for_decoder(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        return (
            config,
            input_ids,
            attention_mask,
            encoder_hidden_states,
            encoder_attention_mask,
        )
@require_flax
class FlaxBertModelTest(FlaxModelTesterMixin, unittest.TestCase):
    test_head_masking = True

    all_model_classes = (
(
FlaxBertModel,
FlaxBertForPreTraining,
FlaxBertForMaskedLM,
FlaxBertForMultipleChoice,
FlaxBertForQuestionAnswering,
FlaxBertForNextSentencePrediction,
FlaxBertForSequenceClassification,
FlaxBertForTokenClassification,
FlaxBertForQuestionAnswering,
)
if is_flax_available()
else ()
)
    def setUp(self):
        self.model_tester = FlaxBertModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        # Only check this for base model, not necessary for all model classes.
        # This will also help speed-up tests.
        model = FlaxBertModel.from_pretrained("bert-base-cased")
        outputs = model(np.ones((1, 1)))
        self.assertIsNotNone(outputs)
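

# For context, a minimal sketch of the forward pass the slow test exercises.
# The tokenizer step is an assumption added for illustration; the checkpoint
# name comes from the test above. Hypothetical helper, not part of the suite.
def _demo_flax_bert_forward():
    from transformers import BertTokenizerFast

    tokenizer = BertTokenizerFast.from_pretrained("bert-base-cased")
    model = FlaxBertModel.from_pretrained("bert-base-cased")
    inputs = tokenizer("Hello world", return_tensors="np")
    outputs = model(**inputs)
    print(outputs.last_hidden_state.shape)  # (1, sequence_length, 768)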
| 223 | 1 |
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_blip_2": [
        "BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "Blip2Config",
        "Blip2QFormerConfig",
        "Blip2VisionConfig",
    ],
    "processing_blip_2": ["Blip2Processor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_blip_2"] = [
        "BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Blip2Model",
        "Blip2QFormerModel",
        "Blip2PreTrainedModel",
        "Blip2ForConditionalGeneration",
        "Blip2VisionModel",
    ]

if TYPE_CHECKING:
    from .configuration_blip_2 import (
        BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Blip2Config,
        Blip2QFormerConfig,
        Blip2VisionConfig,
    )
    from .processing_blip_2 import Blip2Processor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_blip_2 import (
            BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST,
            Blip2ForConditionalGeneration,
            Blip2Model,
            Blip2PreTrainedModel,
            Blip2QFormerModel,
            Blip2VisionModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
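
# A hedged sketch of what this lazy module enables: the classes are importable
# from the top-level package without paying the torch import cost until they
# are first touched, e.g.
#
#     from transformers import Blip2Processor, Blip2ForConditionalGeneration
#
#     processor = Blip2Processor.from_pretrained("Salesforce/blip2-opt-2.7b")
#     model = Blip2ForConditionalGeneration.from_pretrained("Salesforce/blip2-opt-2.7b")
#
# (the checkpoint name is an assumption for illustration)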
| 352 |
import math
class Graph:
    def __init__(self, n=0):  # a graph with Node 0,1,...,N-1
        self.n = n
        self.w = [
            [math.inf for j in range(0, n)] for i in range(0, n)
        ]  # adjacency matrix for weight
        self.dp = [
            [math.inf for j in range(0, n)] for i in range(0, n)
        ]  # dp[i][j] stores minimum distance from i to j

    def add_edge(self, u, v, w):
        self.dp[u][v] = w

    def floyd_warshall(self):
        for k in range(0, self.n):
            for i in range(0, self.n):
                for j in range(0, self.n):
                    self.dp[i][j] = min(self.dp[i][j], self.dp[i][k] + self.dp[k][j])

    def show_min(self, u, v):
        return self.dp[u][v]
if __name__ == "__main__":
    graph = Graph(5)
    graph.add_edge(0, 2, 9)
    graph.add_edge(0, 4, 10)
    graph.add_edge(1, 3, 5)
    graph.add_edge(2, 3, 7)
    graph.add_edge(3, 0, 10)
    graph.add_edge(3, 1, 2)
    graph.add_edge(3, 2, 1)
    graph.add_edge(3, 4, 6)
    graph.add_edge(4, 1, 3)
    graph.add_edge(4, 2, 4)
    graph.add_edge(4, 3, 9)
    graph.floyd_warshall()
    print(graph.show_min(1, 4))
    print(graph.show_min(0, 3))
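    # As a sketch of what floyd_warshall() produced: dp now holds all-pairs
    # shortest distances, so the full matrix can be dumped for inspection
    # (math.inf marks unreachable pairs).
    for i in range(graph.n):
        print([graph.show_min(i, j) for j in range(graph.n)])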
| 211 | 0 |
"""simple docstring"""
import numpy as np
import torch
from torch.utils.data import Dataset, IterableDataset
from ..utils.generic import ModelOutput
class PipelineDataset(Dataset):
    def __init__(self, dataset, process, params):
        self.dataset = dataset
        self.process = process
        self.params = params

    def __len__(self):
        return len(self.dataset)

    def __getitem__(self, i):
        item = self.dataset[i]
        processed = self.process(item, **self.params)
        return processed


class PipelineIterator(IterableDataset):
    def __init__(self, loader, infer, params, loader_batch_size=None):
        self.loader = loader
        self.infer = infer
        self.params = params
        if loader_batch_size == 1:
            # Let's spare some time by deactivating altogether
            loader_batch_size = None
        self.loader_batch_size = loader_batch_size

        # Internal bookkeeping
        self._loader_batch_index = None
        self._loader_batch_data = None

    def __len__(self):
        return len(self.loader)

    def __iter__(self):
        self.iterator = iter(self.loader)
        return self

    def loader_batch_item(self):
        if isinstance(self._loader_batch_data, torch.Tensor):
            # Batch data is simple tensor, just fetch the slice
            result = self._loader_batch_data[self._loader_batch_index]
        else:
            # Batch data is assumed to be BaseModelOutput (or dict)
            loader_batched = {}
            for k, element in self._loader_batch_data.items():
                if isinstance(element, ModelOutput):
                    # Convert ModelOutput to tuple first
                    element = element.to_tuple()
                    if isinstance(element[0], torch.Tensor):
                        loader_batched[k] = tuple(el[self._loader_batch_index].unsqueeze(0) for el in element)
                    elif isinstance(element[0], np.ndarray):
                        loader_batched[k] = tuple(np.expand_dims(el[self._loader_batch_index], 0) for el in element)
                    continue
                if k in {"hidden_states", "past_key_values", "attentions"} and isinstance(element, tuple):
                    # Those are stored as lists of tensors so need specific unbatching.
                    if isinstance(element[0], torch.Tensor):
                        loader_batched[k] = tuple(el[self._loader_batch_index].unsqueeze(0) for el in element)
                    elif isinstance(element[0], np.ndarray):
                        loader_batched[k] = tuple(np.expand_dims(el[self._loader_batch_index], 0) for el in element)
                    continue
                if element is None:
                    # This can happen for optional data that get passed around
                    loader_batched[k] = None
                elif isinstance(element[self._loader_batch_index], torch.Tensor):
                    # Take correct batch data, but make it looked like batch_size=1
                    # For compatibility with other methods within transformers
                    loader_batched[k] = element[self._loader_batch_index].unsqueeze(0)
                elif isinstance(element[self._loader_batch_index], np.ndarray):
                    # Take correct batch data, but make it looked like batch_size=1
                    # For compatibility with other methods within transformers
                    loader_batched[k] = np.expand_dims(element[self._loader_batch_index], 0)
                else:
                    # This is typically a list, so no need to `unsqueeze`.
                    loader_batched[k] = element[self._loader_batch_index]
            # Recreate the element by reusing the original class to make it look
            # batch_size=1
            result = self._loader_batch_data.__class__(loader_batched)
        self._loader_batch_index += 1
        return result

    def __next__(self):
        if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
            # We are currently unrolling a batch so we just need to return
            # the current item within a batch
            return self.loader_batch_item()

        # We're out of items within a batch
        item = next(self.iterator)
        processed = self.infer(item, **self.params)
        # We now have a batch of "inferred things".
        if self.loader_batch_size is not None:
            # Try to infer the size of the batch
            if isinstance(processed, torch.Tensor):
                first_tensor = processed
            else:
                key = list(processed.keys())[0]
                first_tensor = processed[key]
            if isinstance(first_tensor, list):
                observed_batch_size = len(first_tensor)
            else:
                observed_batch_size = first_tensor.shape[0]
            if 0 < observed_batch_size < self.loader_batch_size:
                # could be last batch so we can't unroll as many
                # elements.
                self.loader_batch_size = observed_batch_size
            # Setting internal index to unwrap the batch
            self._loader_batch_data = processed
            self._loader_batch_index = 0
            return self.loader_batch_item()
        else:
            # We're not unrolling batches
            return processed


class PipelineChunkIterator(PipelineIterator):
    def __init__(self, loader, infer, params, loader_batch_size=None):
        super().__init__(loader, infer, params)

    def __iter__(self):
        self.iterator = iter(self.loader)
        self.subiterator = None
        return self

    def __next__(self):
        if self.subiterator is None:
            # Subiterator None means we haven't started a `preprocess` iterator, so start it
            self.subiterator = self.infer(next(self.iterator), **self.params)
        try:
            # Try to return next item
            processed = next(self.subiterator)
        except StopIteration:
            # When a preprocess iterator ends, we can start looking at the next item
            # ChunkIterator will keep feeding until ALL elements of iterator
            # all have created their subiterator and have been iterating against.
            #
            # Another way to look at it, is we're basically flattening lists of lists
            # into a single list, but with generators
            self.subiterator = self.infer(next(self.iterator), **self.params)
            processed = next(self.subiterator)
        return processed


class PipelinePackIterator(PipelineIterator):
    def __iter__(self):
        self.iterator = iter(self.loader)
        return self

    def __next__(self):
        is_last = False
        accumulator = []
        if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
            while self._loader_batch_index < self.loader_batch_size:
                item = self.loader_batch_item()
                is_last = item.pop("is_last")
                accumulator.append(item)
                if is_last:
                    return accumulator

        while not is_last:
            processed = self.infer(next(self.iterator), **self.params)
            if self.loader_batch_size is not None:
                if isinstance(processed, torch.Tensor):
                    first_tensor = processed
                else:
                    key = list(processed.keys())[0]
                    first_tensor = processed[key]
                if isinstance(first_tensor, list):
                    observed_batch_size = len(first_tensor)
                else:
                    observed_batch_size = first_tensor.shape[0]
                if 0 < observed_batch_size < self.loader_batch_size:
                    # could be last batch so we can't unroll as many
                    # elements.
                    self.loader_batch_size = observed_batch_size
                self._loader_batch_data = processed
                self._loader_batch_index = 0
                while self._loader_batch_index < self.loader_batch_size:
                    item = self.loader_batch_item()
                    is_last = item.pop("is_last")
                    accumulator.append(item)
                    if is_last:
                        return accumulator
            else:
                item = processed
                is_last = item.pop("is_last")
                accumulator.append(item)
        return accumulator


class KeyDataset(Dataset):
    def __init__(self, dataset, key):
        self.dataset = dataset
        self.key = key

    def __len__(self):
        return len(self.dataset)

    def __getitem__(self, i):
        return self.dataset[i][self.key]


class KeyPairDataset(Dataset):
    def __init__(self, dataset, key1, key2):
        self.dataset = dataset
        self.key1 = key1
        self.key2 = key2

    def __len__(self):
        return len(self.dataset)

    def __getitem__(self, i):
        return {"text": self.dataset[i][self.key1], "text_pair": self.dataset[i][self.key2]}
| 194 |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from argparse import ArgumentParser
from accelerate.commands.config import get_config_parser
from accelerate.commands.env import env_command_parser
from accelerate.commands.launch import launch_command_parser
from accelerate.commands.test import test_command_parser
from accelerate.commands.tpu import tpu_command_parser
def main():
    parser = ArgumentParser("Accelerate CLI tool", usage="accelerate <command> [<args>]", allow_abbrev=False)
    subparsers = parser.add_subparsers(help="accelerate command helpers")

    # Register commands
    get_config_parser(subparsers=subparsers)
    env_command_parser(subparsers=subparsers)
    launch_command_parser(subparsers=subparsers)
    tpu_command_parser(subparsers=subparsers)
    test_command_parser(subparsers=subparsers)

    # Let's go
    args = parser.parse_args()

    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)

    # Run
    args.func(args)


if __name__ == "__main__":
    main()
| 308 | 0 |
import copy
import tempfile
import unittest
from huggingface_hub import HfFolder, delete_repo
from parameterized import parameterized
from requests.exceptions import HTTPError
from transformers import AutoConfig, GenerationConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
class GenerationConfigTest(unittest.TestCase):
    @parameterized.expand([(None,), ("foo.json",)])
    def test_save_load_config(self, config_name):
        config = GenerationConfig(
            do_sample=True,
            temperature=0.7,
            length_penalty=1.0,
            bad_words_ids=[[1, 2, 3], [4, 5]],
        )
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(tmp_dir, config_name=config_name)
            loaded_config = GenerationConfig.from_pretrained(tmp_dir, config_name=config_name)

        # Checks parameters that were specified
        self.assertEqual(loaded_config.do_sample, True)
        self.assertEqual(loaded_config.temperature, 0.7)
        self.assertEqual(loaded_config.length_penalty, 1.0)
        self.assertEqual(loaded_config.bad_words_ids, [[1, 2, 3], [4, 5]])

        # Checks parameters that were not specified (defaults)
        self.assertEqual(loaded_config.top_k, 50)
        self.assertEqual(loaded_config.max_length, 20)
        self.assertEqual(loaded_config.max_time, None)

    def test_from_model_config(self):
        model_config = AutoConfig.from_pretrained("gpt2")
        generation_config_from_model = GenerationConfig.from_model_config(model_config)
        default_generation_config = GenerationConfig()

        # The generation config has loaded a few non-default parameters from the model config
        self.assertNotEqual(generation_config_from_model, default_generation_config)

        # One of those parameters is eos_token_id -- check if it matches
        self.assertNotEqual(generation_config_from_model.eos_token_id, default_generation_config.eos_token_id)
        self.assertEqual(generation_config_from_model.eos_token_id, model_config.eos_token_id)

    def test_update(self):
        generation_config = GenerationConfig()
        update_kwargs = {
            "max_new_tokens": 1024,
            "foo": "bar",
        }
        update_kwargs_copy = copy.deepcopy(update_kwargs)
        unused_kwargs = generation_config.update(**update_kwargs)

        # update_kwargs was not modified (no side effects)
        self.assertEqual(update_kwargs, update_kwargs_copy)

        # update_kwargs was used to update the config on valid attributes
        self.assertEqual(generation_config.max_new_tokens, 1024)

        # `.update()` returns a dictionary of unused kwargs
        self.assertEqual(unused_kwargs, {"foo": "bar"})

    def test_initialize_new_kwargs(self):
        generation_config = GenerationConfig()
        generation_config.foo = "bar"

        with tempfile.TemporaryDirectory("test-generation-config") as tmp_dir:
            generation_config.save_pretrained(tmp_dir)

            new_config = GenerationConfig.from_pretrained(tmp_dir)
        # update_kwargs was used to update the config on valid attributes
        self.assertEqual(new_config.foo, "bar")

        generation_config = GenerationConfig.from_model_config(new_config)
        assert not hasattr(generation_config, "foo")  # no new kwargs should be initialized if from config

    def test_kwarg_init(self):
        default_config = GenerationConfig()
        self.assertEqual(default_config.temperature, 1.0)
        self.assertEqual(default_config.do_sample, False)
        self.assertEqual(default_config.num_beams, 1)

        config = GenerationConfig(
            do_sample=True,
            temperature=0.7,
            length_penalty=1.0,
            bad_words_ids=[[1, 2, 3], [4, 5]],
        )
        self.assertEqual(config.temperature, 0.7)
        self.assertEqual(config.do_sample, True)
        self.assertEqual(config.num_beams, 1)

        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(tmp_dir)
            loaded_config = GenerationConfig.from_pretrained(tmp_dir, temperature=1.0)

        self.assertEqual(loaded_config.temperature, 1.0)
        self.assertEqual(loaded_config.do_sample, True)
        self.assertEqual(loaded_config.num_beams, 1)  # default value


@is_staging_test
class ConfigPushToHubTester(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls):
        try:
            delete_repo(token=cls._token, repo_id="test-generation-config")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="valid_org/test-generation-config-org")
        except HTTPError:
            pass

    def test_push_to_hub(self):
        config = GenerationConfig(
            do_sample=True,
            temperature=0.7,
            length_penalty=1.0,
        )
        config.push_to_hub("test-generation-config", use_auth_token=self._token)

        new_config = GenerationConfig.from_pretrained(f"{USER}/test-generation-config")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="test-generation-config")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(
                tmp_dir, repo_id="test-generation-config", push_to_hub=True, use_auth_token=self._token
            )

        new_config = GenerationConfig.from_pretrained(f"{USER}/test-generation-config")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

    def test_push_to_hub_in_organization(self):
        config = GenerationConfig(
            do_sample=True,
            temperature=0.7,
            length_penalty=1.0,
        )
        config.push_to_hub("valid_org/test-generation-config-org", use_auth_token=self._token)

        new_config = GenerationConfig.from_pretrained("valid_org/test-generation-config-org")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="valid_org/test-generation-config-org")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(
                tmp_dir, repo_id="valid_org/test-generation-config-org", push_to_hub=True, use_auth_token=self._token
            )

        new_config = GenerationConfig.from_pretrained("valid_org/test-generation-config-org")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))
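

# A quick sketch of the round-trip these tests cover; values mirror the
# fixtures above, and unspecified fields fall back to library defaults
# (e.g. top_k=50, max_length=20). Hypothetical helper, not part of the suite.
def _demo_generation_config_roundtrip():
    config = GenerationConfig(do_sample=True, temperature=0.7, length_penalty=1.0)
    with tempfile.TemporaryDirectory() as tmp_dir:
        config.save_pretrained(tmp_dir)
        loaded = GenerationConfig.from_pretrained(tmp_dir)
    assert loaded.temperature == 0.7 and loaded.top_k == 50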
| 357 |
import math
import random
from typing import Any
from .hill_climbing import SearchProblem
def simulated_annealing(
    search_prob,
    find_max: bool = True,
    max_x: float = math.inf,
    min_x: float = -math.inf,
    max_y: float = math.inf,
    min_y: float = -math.inf,
    visualization: bool = False,
    start_temperate: float = 100,
    rate_of_decrease: float = 0.01,
    threshold_temp: float = 1,
) -> Any:
    search_end = False
    current_state = search_prob
    current_temp = start_temperate
    scores = []
    iterations = 0
    best_state = None

    while not search_end:
        current_score = current_state.score()
        if best_state is None or current_score > best_state.score():
            best_state = current_state
        scores.append(current_score)
        iterations += 1
        next_state = None
        neighbors = current_state.get_neighbors()
        while (
            next_state is None and neighbors
        ):  # till we do not find a neighbor that we can move to
            index = random.randint(0, len(neighbors) - 1)  # picking a random neighbor
            picked_neighbor = neighbors.pop(index)
            change = picked_neighbor.score() - current_score
            if (
                picked_neighbor.x > max_x
                or picked_neighbor.x < min_x
                or picked_neighbor.y > max_y
                or picked_neighbor.y < min_y
            ):
                continue  # neighbor outside our bounds
            if not find_max:
                change = change * -1  # in case we are finding minimum
            if change > 0:  # improves the solution
                next_state = picked_neighbor
            else:
                probability = (math.e) ** (
                    change / current_temp
                )  # probability generation function
                if random.random() < probability:  # random number within probability
                    next_state = picked_neighbor
        current_temp = current_temp - (current_temp * rate_of_decrease)

        if current_temp < threshold_temp or next_state is None:
            # temperature below threshold, or could not find a suitable neighbor
            search_end = True
        else:
            current_state = next_state

    if visualization:
        from matplotlib import pyplot as plt

        plt.plot(range(iterations), scores)
        plt.xlabel("Iterations")
        plt.ylabel("Function values")
        plt.show()
    return best_state


if __name__ == "__main__":

    def test_f1(x, y):
        return (x**2) + (y**2)

    # starting the problem with initial coordinates (12, 47)
    prob = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_f1)
    local_min = simulated_annealing(
        prob, find_max=False, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
    )
    print(
        "The minimum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 "
        f"and 50 > y > - 5 found via hill climbing: {local_min.score()}"
    )

    # starting the problem with initial coordinates (12, 47)
    prob = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_f1)
    local_max = simulated_annealing(
        prob, find_max=True, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
    )
    print(
        "The maximum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 "
        f"and 50 > y > - 5 found via hill climbing: {local_max.score()}"
    )

    def test_f2(x, y):
        return (3 * x**2) - (6 * y)

    prob = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_f2)
    local_min = simulated_annealing(prob, find_max=False, visualization=True)
    print(
        "The minimum score for f(x, y) = 3*x^2 - 6*y found via hill climbing: "
        f"{local_min.score()}"
    )

    prob = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_f2)
    local_max = simulated_annealing(prob, find_max=True, visualization=True)
    print(
        "The maximum score for f(x, y) = 3*x^2 - 6*y found via hill climbing: "
        f"{local_max.score()}"
    )
| 301 | 0 |
"""simple docstring"""
import os
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from huggingface_hub.file_download import http_get
from requests.exceptions import HTTPError
from transformers import (
    AlbertTokenizer,
    AutoTokenizer,
    BertTokenizer,
    BertTokenizerFast,
    GPT2TokenizerFast,
    is_tokenizers_available,
)
from transformers.testing_utils import TOKEN, USER, is_staging_test, require_tokenizers
from transformers.tokenization_utils import Trie
sys.path.append(str(Path(__file__).parent.parent / """utils"""))
from test_module.custom_tokenization import CustomTokenizer # noqa E402
if is_tokenizers_available():
from test_module.custom_tokenization_fast import CustomTokenizerFast
class TokenizerUtilTester(unittest.TestCase):
    def test_cached_files_are_used_when_internet_is_down(self):
        # A mock response for an HTTP head request to emulate server down
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}

        # Download this model to make sure it's in the cache.
        _ = BertTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert")

        # Under the mock environment we get a 500 error when trying to reach the tokenizer.
        with mock.patch("requests.Session.request", return_value=response_mock) as mock_head:
            _ = BertTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert")
            # This check we did call the fake head request
            mock_head.assert_called()

    @require_tokenizers
    def test_cached_files_are_used_when_internet_is_down_missing_files(self):
        # A mock response for an HTTP head request to emulate server down
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}

        # Download this model to make sure it's in the cache.
        _ = GPT2TokenizerFast.from_pretrained("gpt2")

        # Under the mock environment we get a 500 error when trying to reach the tokenizer.
        with mock.patch("requests.Session.request", return_value=response_mock) as mock_head:
            _ = GPT2TokenizerFast.from_pretrained("gpt2")
            # This check we did call the fake head request
            mock_head.assert_called()

    def test_legacy_load_from_one_file(self):
        # This test is for deprecated behavior and can be removed in v5
        try:
            tmp_file = tempfile.mktemp()
            with open(tmp_file, "wb") as f:
                http_get("https://huggingface.co/albert-base-v1/resolve/main/spiece.model", f)

            _ = AlbertTokenizer.from_pretrained(tmp_file)
        finally:
            os.remove(tmp_file)

        # Supporting this legacy load introduced a weird bug where the tokenizer would load local files if they are in
        # the current folder and have the right name.
        if os.path.isfile("tokenizer.json"):
            # We skip the test if the user has a `tokenizer.json` in this folder to avoid deleting it.
            return
        try:
            with open("tokenizer.json", "wb") as f:
                http_get("https://huggingface.co/hf-internal-testing/tiny-random-bert/blob/main/tokenizer.json", f)
            tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
            # The tiny random BERT has a vocab size of 1024, tiny gpt2 as a vocab size of 1000
            self.assertEqual(tokenizer.vocab_size, 1000)
            # Tokenizer should depend on the remote checkpoint, not the local tokenizer.json file.
        finally:
            os.remove("tokenizer.json")

    def test_legacy_load_from_url(self):
        # This test is for deprecated behavior and can be removed in v5
        _ = AlbertTokenizer.from_pretrained("https://huggingface.co/albert-base-v1/resolve/main/spiece.model")
@is_staging_test
class TokenizerPushToHubTester(unittest.TestCase):
    vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "bla", "blou"]

    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls):
        try:
            delete_repo(token=cls._token, repo_id="test-tokenizer")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="valid_org/test-tokenizer-org")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="test-dynamic-tokenizer")
        except HTTPError:
            pass

    def test_push_to_hub(self):
        with tempfile.TemporaryDirectory() as tmp_dir:
            vocab_file = os.path.join(tmp_dir, "vocab.txt")
            with open(vocab_file, "w", encoding="utf-8") as vocab_writer:
                vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens]))
            tokenizer = BertTokenizer(vocab_file)

        tokenizer.push_to_hub("test-tokenizer", use_auth_token=self._token)
        new_tokenizer = BertTokenizer.from_pretrained(f"{USER}/test-tokenizer")
        self.assertDictEqual(new_tokenizer.vocab, tokenizer.vocab)

        # Reset repo
        delete_repo(token=self._token, repo_id="test-tokenizer")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            tokenizer.save_pretrained(tmp_dir, repo_id="test-tokenizer", push_to_hub=True, use_auth_token=self._token)

        new_tokenizer = BertTokenizer.from_pretrained(f"{USER}/test-tokenizer")
        self.assertDictEqual(new_tokenizer.vocab, tokenizer.vocab)

    def test_push_to_hub_in_organization(self):
        with tempfile.TemporaryDirectory() as tmp_dir:
            vocab_file = os.path.join(tmp_dir, "vocab.txt")
            with open(vocab_file, "w", encoding="utf-8") as vocab_writer:
                vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens]))
            tokenizer = BertTokenizer(vocab_file)

        tokenizer.push_to_hub("valid_org/test-tokenizer-org", use_auth_token=self._token)
        new_tokenizer = BertTokenizer.from_pretrained("valid_org/test-tokenizer-org")
        self.assertDictEqual(new_tokenizer.vocab, tokenizer.vocab)

        # Reset repo
        delete_repo(token=self._token, repo_id="valid_org/test-tokenizer-org")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            tokenizer.save_pretrained(
                tmp_dir, repo_id="valid_org/test-tokenizer-org", push_to_hub=True, use_auth_token=self._token
            )

        new_tokenizer = BertTokenizer.from_pretrained("valid_org/test-tokenizer-org")
        self.assertDictEqual(new_tokenizer.vocab, tokenizer.vocab)

    @require_tokenizers
    def test_push_to_hub_dynamic_tokenizer(self):
        CustomTokenizer.register_for_auto_class()
        with tempfile.TemporaryDirectory() as tmp_dir:
            vocab_file = os.path.join(tmp_dir, "vocab.txt")
            with open(vocab_file, "w", encoding="utf-8") as vocab_writer:
                vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens]))
            tokenizer = CustomTokenizer(vocab_file)

        # No fast custom tokenizer
        tokenizer.push_to_hub("test-dynamic-tokenizer", use_auth_token=self._token)

        tokenizer = AutoTokenizer.from_pretrained(f"{USER}/test-dynamic-tokenizer", trust_remote_code=True)
        # Can't make an isinstance check because the new_model.config is from the CustomTokenizer class of a dynamic module
        self.assertEqual(tokenizer.__class__.__name__, "CustomTokenizer")

        # Fast and slow custom tokenizer
        CustomTokenizerFast.register_for_auto_class()
        with tempfile.TemporaryDirectory() as tmp_dir:
            vocab_file = os.path.join(tmp_dir, "vocab.txt")
            with open(vocab_file, "w", encoding="utf-8") as vocab_writer:
                vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens]))

            bert_tokenizer = BertTokenizerFast.from_pretrained(tmp_dir)
            bert_tokenizer.save_pretrained(tmp_dir)
            tokenizer = CustomTokenizerFast.from_pretrained(tmp_dir)

        tokenizer.push_to_hub("test-dynamic-tokenizer", use_auth_token=self._token)

        tokenizer = AutoTokenizer.from_pretrained(f"{USER}/test-dynamic-tokenizer", trust_remote_code=True)
        # Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module
        self.assertEqual(tokenizer.__class__.__name__, "CustomTokenizerFast")
        tokenizer = AutoTokenizer.from_pretrained(
            f"{USER}/test-dynamic-tokenizer", use_fast=False, trust_remote_code=True
        )
        # Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module
        self.assertEqual(tokenizer.__class__.__name__, "CustomTokenizer")
class TrieTest(unittest.TestCase):
    def test_trie(self):
        trie = Trie()
        trie.add("Hello 友達")
        self.assertEqual(trie.data, {"H": {"e": {"l": {"l": {"o": {" ": {"友": {"達": {"": 1}}}}}}}}})
        trie.add("Hello")
        self.assertEqual(trie.data, {"H": {"e": {"l": {"l": {"o": {"": 1, " ": {"友": {"達": {"": 1}}}}}}}}})

    def test_trie_split(self):
        trie = Trie()
        self.assertEqual(trie.split("[CLS] This is a extra_id_100"), ["[CLS] This is a extra_id_100"])
        trie.add("[CLS]")
        trie.add("extra_id_1")
        trie.add("extra_id_100")
        self.assertEqual(trie.split("[CLS] This is a extra_id_100"), ["[CLS]", " This is a ", "extra_id_100"])

    def test_trie_single(self):
        trie = Trie()
        trie.add("A")
        self.assertEqual(trie.split("ABC"), ["A", "BC"])
        self.assertEqual(trie.split("BCA"), ["BC", "A"])

    def test_trie_final(self):
        trie = Trie()
        trie.add("TOKEN]")
        trie.add("[SPECIAL_TOKEN]")
        self.assertEqual(trie.split("This is something [SPECIAL_TOKEN]"), ["This is something ", "[SPECIAL_TOKEN]"])

    def test_trie_subtokens(self):
        trie = Trie()
        trie.add("A")
        trie.add("P")
        trie.add("[SPECIAL_TOKEN]")
        self.assertEqual(trie.split("This is something [SPECIAL_TOKEN]"), ["This is something ", "[SPECIAL_TOKEN]"])

    def test_trie_suffix_tokens(self):
        trie = Trie()
        trie.add("AB")
        trie.add("B")
        trie.add("C")
        self.assertEqual(trie.split("ABC"), ["AB", "C"])

    def test_trie_skip(self):
        trie = Trie()
        trie.add("ABC")
        trie.add("B")
        trie.add("CD")
        self.assertEqual(trie.split("ABCD"), ["ABC", "D"])

    def test_cut_text_hardening(self):
        # Even if the offsets are wrong, we necessarily output correct string
        # parts.
        trie = Trie()
        parts = trie.cut_text("ABC", [0, 0, 2, 1, 2, 3])
        self.assertEqual(parts, ["AB", "C"])
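

# As a standalone illustration of the Trie being tested (mirrors the fixtures
# above; added tokens are matched when splitting raw text). Hypothetical
# helper, not part of the suite.
def _demo_trie():
    trie = Trie()
    trie.add("[CLS]")
    trie.add("extra_id_100")
    print(trie.split("[CLS] This is a extra_id_100"))
    # ['[CLS]', ' This is a ', 'extra_id_100']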
| 241 |
"""simple docstring"""
from decimal import Decimal, getcontext
from math import ceil, factorial
def pi(precision: int) -> str:
    """Computes digits of pi via the Chudnovsky algorithm; precision is the number of digits wanted."""
    if not isinstance(precision, int):
        raise TypeError("Undefined for non-integers")
    elif precision < 1:
        raise ValueError("Undefined for non-natural numbers")

    getcontext().prec = precision
    num_iterations = ceil(precision / 14)
    constant_term = 426880 * Decimal(10005).sqrt()
    exponential_term = 1
    linear_term = 13591409
    partial_sum = Decimal(linear_term)
    for k in range(1, num_iterations):
        multinomial_term = factorial(6 * k) // (factorial(3 * k) * factorial(k) ** 3)
        linear_term += 545140134
        exponential_term *= -262537412640768000
        partial_sum += Decimal(multinomial_term * linear_term) / exponential_term
    return str(constant_term / partial_sum)[:-1]


if __name__ == "__main__":
    n = 50
    print(f"The first {n} digits of pi is: {pi(n)}")
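    # First term of the Chudnovsky series is already accurate to ~14 digits,
    # so a single iteration suffices for 10-digit precision; note the trailing
    # slice in pi() drops the final (possibly rounded) character.
    print(pi(10))  # expected: 3.14159265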
| 241 | 1 |
from __future__ import annotations
import unittest
import numpy as np
from transformers import OPTConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
    from transformers import GPT2Tokenizer, TFOPTForCausalLM, TFOPTModel
def prepare_opt_inputs_dict(config, input_ids, attention_mask=None, head_mask=None):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    return {"input_ids": input_ids, "attention_mask": attention_mask}


@require_tf
class TFOPTModelTester:
    config_cls = OPTConfig
    config_updates = {}
    hidden_act = "gelu"

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=16,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=4,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=20,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
        embed_dim=16,
        word_embed_proj_dim=16,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.embed_dim = embed_dim
        self.word_embed_proj_dim = word_embed_proj_dim
        self.is_encoder_decoder = False

    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)

        config = self.config_cls(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_id=self.eos_token_id,
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            embed_dim=self.embed_dim,
            word_embed_proj_dim=self.word_embed_proj_dim,
            is_encoder_decoder=self.is_encoder_decoder,
            **self.config_updates,
        )
        inputs_dict = prepare_opt_inputs_dict(config, input_ids)
        return config, inputs_dict

    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFOPTModel(config=config)
        input_ids = inputs_dict["input_ids"]

        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, use_cache=True)

        output, past_key_values = outputs.to_tuple()

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)

        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[0]

        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])

        # select random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]

        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_no_past_slice, output_from_past_slice, rtol=1e-3)
@require_tf
class TFOPTModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFOPTModel, TFOPTForCausalLM) if is_tf_available() else ()
    all_generative_model_classes = (TFOPTForCausalLM,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFOPTModel, "text-generation": TFOPTForCausalLM} if is_tf_available() else {}
    )
    is_encoder_decoder = False
    test_pruning = False
    test_onnx = False
    onnx_min_opset = 10

    def setUp(self):
        self.model_tester = TFOPTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=OPTConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)

    def test_resize_token_embeddings(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        def _get_word_embedding_weight(model, embedding_layer):
            if hasattr(embedding_layer, "weight"):
                return embedding_layer.weight
            else:
                # Here we build the word embeddings weights if not exists.
                # And then we retry to get the attribute once built.
                model.build()
                if hasattr(embedding_layer, "weight"):
                    return embedding_layer.weight
                else:
                    return None

        for model_class in self.all_model_classes:
            for size in [config.vocab_size - 10, config.vocab_size + 10]:
                # build the embeddings
                model = model_class(config=config)
                old_input_embeddings = _get_word_embedding_weight(model, model.get_input_embeddings())
                old_output_embeddings = _get_word_embedding_weight(model, model.get_output_embeddings())

                # reshape the embeddings
                model.resize_token_embeddings(size)
                new_input_embeddings = _get_word_embedding_weight(model, model.get_input_embeddings())
                new_output_embeddings = _get_word_embedding_weight(model, model.get_output_embeddings())

                # check that the resized embeddings size matches the desired size.
                assert_size = size if size is not None else config.vocab_size
                self.assertEqual(new_input_embeddings.shape[0], assert_size)

                # check that weights remain the same after resizing
                models_equal = True
                for p1, p2 in zip(old_input_embeddings.value(), new_input_embeddings.value()):
                    if tf.math.reduce_sum(tf.math.abs(p1 - p2)) > 0:
                        models_equal = False
                self.assertTrue(models_equal)

                if old_output_embeddings is not None and new_output_embeddings is not None:
                    self.assertEqual(new_output_embeddings.shape[0], assert_size)

                    models_equal = True
                    for p1, p2 in zip(old_output_embeddings.value(), new_output_embeddings.value()):
                        if tf.math.reduce_sum(tf.math.abs(p1 - p2)) > 0:
                            models_equal = False
                    self.assertTrue(models_equal)
def _long_tensor(tok_lst):
    return tf.constant(tok_lst, dtype=tf.int32)


@require_tf
class TFOPTHeadTests(unittest.TestCase):
    vocab_size = 99

    def _get_config_and_data(self):
        eos_column_vector = tf.ones((4, 1), dtype=tf.int32) * 2
        input_ids = tf.concat([ids_tensor((4, 6), self.vocab_size - 3) + 3, eos_column_vector], axis=1)
        batch_size = input_ids.shape[0]
        config = OPTConfig(
            vocab_size=self.vocab_size,
            hidden_size=24,
            num_hidden_layers=2,
            num_attention_heads=2,
            ffn_dim=32,
            max_position_embeddings=48,
            eos_token_id=2,
            pad_token_id=1,
            bos_token_id=0,
        )
        return config, input_ids, batch_size


@require_sentencepiece
@require_tf
class OPTModelIntegrationTests(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = TFOPTModel.from_pretrained("facebook/opt-350m")
        input_ids = _long_tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]])
        attention_mask = tf.not_equal(input_ids, model.config.pad_token_id)
        with tf.GradientTape():
            output = model(input_ids=input_ids, attention_mask=attention_mask).last_hidden_state
        expected_shape = (1, 11, 512)
        self.assertEqual(output.shape, expected_shape)
        expected_slice = tf.constant(
            [[-0.2873, -1.9218, -0.3033], [-1.2710, -0.1338, -0.1902], [0.4095, 0.1214, -1.3121]]
        )
        self.assertTrue(np.allclose(output[:, :3, :3], expected_slice, atol=4e-3))

        xla_generate = tf.function(model, jit_compile=True)
        output = xla_generate(input_ids, attention_mask)[0]
        self.assertTrue(np.allclose(output[:, :3, :3], expected_slice, atol=4e-2))


@require_tf
@slow
class TFOPTEmbeddingsTest(unittest.TestCase):
    def setUp(self):
        super().setUp()
        self.path_model = "facebook/opt-350m"

    def test_logits(self):
        model = TFOPTForCausalLM.from_pretrained(self.path_model)
        tokenizer = GPT2Tokenizer.from_pretrained(self.path_model)

        prompts = [
            "Today is a beautiful day and I want to",
            "In the city of",
            "Paris is the capital of France and",
            "Computers and mobile phones have taken",
        ]
        # verify that prompt without BOS token is identical to Metaseq -> add_special_tokens=False
        inputs = tokenizer(prompts, return_tensors="tf", padding=True, add_special_tokens=False)
        logits = tf.math.reduce_mean(model(inputs.input_ids, attention_mask=inputs.attention_mask)[0], axis=-1)
        logits_meta = tf.constant(
            [
                [1.3851, -13.8923, -10.5229, -10.7533, -0.2309, -10.2384, -0.5365, -9.0947, -5.1670],
                [-4.7073, -10.6276, -3.9415, -21.5242, -0.2822, -0.2822, -0.2822, -0.2822, -0.2822],
                [0.6247, -3.4229, -8.9179, -1.4297, -14.1650, 1.4146, -9.0218, -0.2703, -0.2703],
                [6.4783, -1.9913, -10.7926, -2.3336, 1.5092, -0.9974, -6.8213, 1.3477, 1.3477],
            ]
        )
        self.assertTrue(np.allclose(logits, logits_meta, atol=1e-4))

        xla_generate = tf.function(model, jit_compile=True)
        logits = tf.math.reduce_mean(xla_generate(inputs.input_ids, attention_mask=inputs.attention_mask)[0], axis=-1)
        self.assertTrue(np.allclose(logits, logits_meta, atol=1e-4))


@require_tf
@slow
class TFOPTGenerationTest(unittest.TestCase):
    @property
    def prompts(self):
        return [
            "Today is a beautiful day and I want",
            "In the city of",
            "Paris is the capital of France and",
            "Computers and mobile phones have taken",
        ]

    def test_generation_pre_attn_layer_norm(self):
        model_id = "facebook/opt-125m"

        EXPECTED_OUTPUTS = [
            "Today is a beautiful day and I want to",
            "In the city of New York, the city",
            "Paris is the capital of France and the capital",
            "Computers and mobile phones have taken over the",
        ]
        predicted_outputs = []
        tokenizer = GPT2Tokenizer.from_pretrained(model_id)
        model = TFOPTForCausalLM.from_pretrained(model_id)

        for prompt in self.prompts:
            input_ids = tokenizer(prompt, return_tensors="tf").input_ids
            generated_ids = model.generate(input_ids, max_length=10)
            generated_string = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
            predicted_outputs += generated_string

        self.assertListEqual(predicted_outputs, EXPECTED_OUTPUTS)

    def test_batch_generation(self):
        model_id = "facebook/opt-350m"

        tokenizer = GPT2Tokenizer.from_pretrained(model_id)
        model = TFOPTForCausalLM.from_pretrained(model_id)
        tokenizer.padding_side = "left"

        # use different length sentences to test batching
        sentences = [
            "Hello, my dog is a little",
            "Today, I",
        ]

        inputs = tokenizer(sentences, return_tensors="tf", padding=True)
        input_ids = inputs["input_ids"]

        outputs = model.generate(input_ids=input_ids, attention_mask=inputs["attention_mask"])

        inputs_non_padded = tokenizer(sentences[0], return_tensors="tf").input_ids
        output_non_padded = model.generate(input_ids=inputs_non_padded)

        num_paddings = inputs_non_padded.shape[-1] - tf.math.reduce_sum(
            tf.cast(inputs["attention_mask"][-1], tf.int64)
        )
        inputs_padded = tokenizer(sentences[1], return_tensors="tf").input_ids
        output_padded = model.generate(input_ids=inputs_padded, max_length=model.config.max_length - num_paddings)

        batch_out_sentence = tokenizer.batch_decode(outputs, skip_special_tokens=True)
        non_padded_sentence = tokenizer.decode(output_non_padded[0], skip_special_tokens=True)
        padded_sentence = tokenizer.decode(output_padded[0], skip_special_tokens=True)

        expected_output_sentence = [
            "Hello, my dog is a little bit of a dork.\nI'm a little bit",
            "Today, I was in the middle of a conversation with a friend about the",
        ]
        self.assertListEqual(expected_output_sentence, batch_out_sentence)
        self.assertListEqual(batch_out_sentence, [non_padded_sentence, padded_sentence])

    def test_generation_post_attn_layer_norm(self):
        model_id = "facebook/opt-350m"

        EXPECTED_OUTPUTS = [
            "Today is a beautiful day and I want to",
            "In the city of San Francisco, the city",
            "Paris is the capital of France and the capital",
            "Computers and mobile phones have taken over the",
        ]
        predicted_outputs = []
        tokenizer = GPT2Tokenizer.from_pretrained(model_id)
        model = TFOPTForCausalLM.from_pretrained(model_id)

        for prompt in self.prompts:
            input_ids = tokenizer(prompt, return_tensors="tf").input_ids
            generated_ids = model.generate(input_ids, max_length=10)
            generated_string = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
            predicted_outputs += generated_string

        self.assertListEqual(predicted_outputs, EXPECTED_OUTPUTS)
| 305 |
from ....utils import logging


logger = logging.get_logger(__name__)
class MMBTConfig:
    """Configuration class to store the configuration of an MMBT model; it wraps an existing text config."""

    def __init__(self, config, num_labels=None, modal_hidden_size=2048):
        self.__dict__ = config.__dict__
        self.modal_hidden_size = modal_hidden_size
        if num_labels:
            self.num_labels = num_labels
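

# A small usage sketch: the wrapper copies an existing config's attributes and
# bolts on the modal embedding size. The BertConfig here is an assumption for
# illustration; hypothetical helper, not part of the module API.
def _demo_mmbt_config():
    from transformers import BertConfig

    text_config = BertConfig()
    mmbt_config = MMBTConfig(text_config, num_labels=2, modal_hidden_size=2048)
    print(mmbt_config.hidden_size, mmbt_config.modal_hidden_size, mmbt_config.num_labels)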
| 305 | 1 |
from math import isqrt, log2


def calculate_prime_numbers(max_number: int) -> list[int]:
    """Returns the prime numbers below max_number (sieve of Eratosthenes)."""
    is_prime = [True] * max_number
    for i in range(2, isqrt(max_number - 1) + 1):
        if is_prime[i]:
            for j in range(i**2, max_number, i):
                is_prime[j] = False
    return [i for i in range(2, max_number) if is_prime[i]]


def solution(base: int = 800_800, degree: int = 800_800) -> int:
    """Counts hybrid-integers p^q * q^p (p, q distinct primes) no greater than base^degree."""
    upper_bound = degree * log2(base)
    max_prime = int(upper_bound)
    prime_numbers = calculate_prime_numbers(max_prime)

    hybrid_integers_count = 0
    left = 0
    right = len(prime_numbers) - 1
    while left < right:
        while (
            prime_numbers[right] * log2(prime_numbers[left])
            + prime_numbers[left] * log2(prime_numbers[right])
            > upper_bound
        ):
            right -= 1
        hybrid_integers_count += right - left
        left += 1

    return hybrid_integers_count
if __name__ == "__main__":
print(f"{solution() = }")
| 82 |
import argparse
import glob
import logging
import os
import sys
import time
from collections import defaultdict
from pathlib import Path
from typing import Dict, List, Tuple
import numpy as np
import pytorch_lightning as pl
import torch
from callbacks import Seq2SeqLoggingCallback, get_checkpoint_callback, get_early_stopping_callback
from torch import nn
from torch.utils.data import DataLoader
from transformers import MBartTokenizer, TaForConditionalGeneration
from transformers.models.bart.modeling_bart import shift_tokens_right
from utils import (
    ROUGE_KEYS,
    LegacySeq2SeqDataset,
    Seq2SeqDataset,
    assert_all_frozen,
    calculate_bleu,
    calculate_rouge,
    check_output_dir,
    flatten_list,
    freeze_embeds,
    freeze_params,
    get_git_info,
    label_smoothed_nll_loss,
    lmap,
    pickle_save,
    save_git_info,
    save_json,
    use_task_specific_params,
)
# need the parent dir module
sys.path.insert(2, str(Path(__file__).resolve().parents[1]))
from lightning_base import BaseTransformer, add_generic_args, generic_train # noqa
logger = logging.getLogger(__name__)


class SummarizationModule(BaseTransformer):
    mode = "summarization"
    loss_names = ["loss"]
    metric_names = ROUGE_KEYS
    default_val_metric = "rouge2"
    def __init__(self, hparams, **kwargs):
        if hparams.sortish_sampler and hparams.gpus > 1:
            hparams.replace_sampler_ddp = False
        elif hparams.max_tokens_per_batch is not None:
            if hparams.gpus > 1:
                raise NotImplementedError("Dynamic Batch size does not work for multi-gpu training")
            if hparams.sortish_sampler:
                raise ValueError("--sortish_sampler and --max_tokens_per_batch may not be used simultaneously")

        super().__init__(hparams, num_labels=None, mode=self.mode, **kwargs)
        use_task_specific_params(self.model, "summarization")
        save_git_info(self.hparams.output_dir)
        self.metrics_save_path = Path(self.output_dir) / "metrics.json"
        self.hparams_save_path = Path(self.output_dir) / "hparams.pkl"
        pickle_save(self.hparams, self.hparams_save_path)
        self.step_count = 0
        self.metrics = defaultdict(list)
        self.model_type = self.config.model_type
        self.vocab_size = self.config.tgt_vocab_size if self.model_type == "fsmt" else self.config.vocab_size

        self.dataset_kwargs: dict = {
            "data_dir": self.hparams.data_dir,
            "max_source_length": self.hparams.max_source_length,
            "prefix": self.model.config.prefix or "",
        }
        n_observations_per_split = {
            "train": self.hparams.n_train,
            "val": self.hparams.n_val,
            "test": self.hparams.n_test,
        }
        self.n_obs = {k: v if v >= 0 else None for k, v in n_observations_per_split.items()}

        self.target_lens = {
            "train": self.hparams.max_target_length,
            "val": self.hparams.val_max_target_length,
            "test": self.hparams.test_max_target_length,
        }
        assert self.target_lens["train"] <= self.target_lens["val"], f"target_lens: {self.target_lens}"
        assert self.target_lens["train"] <= self.target_lens["test"], f"target_lens: {self.target_lens}"
        if self.hparams.freeze_embeds:
            freeze_embeds(self.model)
        if self.hparams.freeze_encoder:
            freeze_params(self.model.get_encoder())
            assert_all_frozen(self.model.get_encoder())

        self.hparams.git_sha = get_git_info()["repo_sha"]
        self.num_workers = hparams.num_workers
        self.decoder_start_token_id = None  # default to config
        if self.model.config.decoder_start_token_id is None and isinstance(self.tokenizer, MBartTokenizer):
            self.decoder_start_token_id = self.tokenizer.lang_code_to_id[hparams.tgt_lang]
            self.model.config.decoder_start_token_id = self.decoder_start_token_id
        self.dataset_class = (
            Seq2SeqDataset if hasattr(self.tokenizer, "prepare_seq2seq_batch") else LegacySeq2SeqDataset
        )
        self.already_saved_batch = False
        self.eval_beams = self.model.config.num_beams if self.hparams.eval_beams is None else self.hparams.eval_beams
        if self.hparams.eval_max_gen_length is not None:
            self.eval_max_length = self.hparams.eval_max_gen_length
        else:
            self.eval_max_length = self.model.config.max_length
        self.val_metric = self.default_val_metric if self.hparams.val_metric is None else self.hparams.val_metric
    def save_readable_batch(self, batch: dict) -> dict:
        """A debugging utility: dump the first batch in human-readable and tokenized form."""
        readable_batch = {
            k: self.tokenizer.batch_decode(v.tolist()) if "mask" not in k else v.shape for k, v in batch.items()
        }
        save_json(readable_batch, Path(self.output_dir) / "text_batch.json")
        save_json({k: v.tolist() for k, v in batch.items()}, Path(self.output_dir) / "tok_batch.json")
        self.already_saved_batch = True
        return readable_batch

    def forward(self, input_ids, **kwargs):
        return self.model(input_ids, **kwargs)

    def ids_to_clean_text(self, generated_ids):
        gen_text = self.tokenizer.batch_decode(
            generated_ids, skip_special_tokens=True, clean_up_tokenization_spaces=True
        )
        return lmap(str.strip, gen_text)

    def _step(self, batch: dict) -> tuple:
        pad_token_id = self.tokenizer.pad_token_id
        src_ids, src_mask = batch["input_ids"], batch["attention_mask"]
        tgt_ids = batch["labels"]
        if isinstance(self.model, T5ForConditionalGeneration):
            decoder_input_ids = self.model._shift_right(tgt_ids)
        else:
            decoder_input_ids = shift_tokens_right(tgt_ids, pad_token_id)
        if not self.already_saved_batch:  # This would be slightly better if it only happened on rank zero
            batch["decoder_input_ids"] = decoder_input_ids
            self.save_readable_batch(batch)

        outputs = self(src_ids, attention_mask=src_mask, decoder_input_ids=decoder_input_ids, use_cache=False)
        lm_logits = outputs["logits"]
        if self.hparams.label_smoothing == 0:
            # Same behavior as modeling_bart.py, besides ignoring pad_token_id
            ce_loss_fct = nn.CrossEntropyLoss(ignore_index=pad_token_id)
            assert lm_logits.shape[-1] == self.vocab_size
            loss = ce_loss_fct(lm_logits.view(-1, lm_logits.shape[-1]), tgt_ids.view(-1))
        else:
            lprobs = nn.functional.log_softmax(lm_logits, dim=-1)
            loss, nll_loss = label_smoothed_nll_loss(
                lprobs, tgt_ids, self.hparams.label_smoothing, ignore_index=pad_token_id
            )
        return (loss,)

    @property
    def pad(self) -> int:
        return self.tokenizer.pad_token_id
    def training_step(self, batch, batch_idx) -> dict:
        loss_tensors = self._step(batch)
        logs = dict(zip(self.loss_names, loss_tensors))
        # tokens per batch
        logs["tpb"] = batch["input_ids"].ne(self.pad).sum() + batch["labels"].ne(self.pad).sum()
        logs["bs"] = batch["input_ids"].shape[0]
        logs["src_pad_tok"] = batch["input_ids"].eq(self.pad).sum()
        logs["src_pad_frac"] = batch["input_ids"].eq(self.pad).float().mean()
        # TODO(SS): make a wandb summary metric for this
        return {"loss": loss_tensors[0], "log": logs}

    def validation_step(self, batch, batch_idx) -> dict:
        return self._generative_step(batch)

    def validation_epoch_end(self, outputs, prefix="val") -> dict:
        self.step_count += 1
        losses = {k: torch.stack([x[k] for x in outputs]).mean() for k in self.loss_names}
        loss = losses["loss"]
        generative_metrics = {
            k: np.array([x[k] for x in outputs]).mean() for k in self.metric_names + ["gen_time", "gen_len"]
        }
        metric_val = (
            generative_metrics[self.val_metric] if self.val_metric in generative_metrics else losses[self.val_metric]
        )
        metric_tensor = torch.tensor(metric_val).type_as(loss)
        generative_metrics.update({k: v.item() for k, v in losses.items()})
        losses.update(generative_metrics)
        all_metrics = {f"{prefix}_avg_{k}": x for k, x in losses.items()}
        all_metrics["step_count"] = self.step_count
        self.metrics[prefix].append(all_metrics)  # callback writes this to self.metrics_save_path
        preds = flatten_list([x["preds"] for x in outputs])
        return {
            "log": all_metrics,
            "preds": preds,
            f"{prefix}_loss": loss,
            f"{prefix}_{self.val_metric}": metric_tensor,
        }

    def calc_generative_metrics(self, preds, target) -> dict:
        return calculate_rouge(preds, target)

    def _generative_step(self, batch: dict) -> dict:
        t0 = time.time()
        # parser.add_argument('--eval_max_gen_length', type=int, default=None, help='never generate more than n tokens')
        generated_ids = self.model.generate(
            batch["input_ids"],
            attention_mask=batch["attention_mask"],
            use_cache=True,
            decoder_start_token_id=self.decoder_start_token_id,
            num_beams=self.eval_beams,
            max_length=self.eval_max_length,
        )
        gen_time = (time.time() - t0) / batch["input_ids"].shape[0]
        preds = self.ids_to_clean_text(generated_ids)
        target = self.ids_to_clean_text(batch["labels"])
        loss_tensors = self._step(batch)
        base_metrics = dict(zip(self.loss_names, loss_tensors))
        gen_metrics = self.calc_generative_metrics(preds, target)
        summ_len = np.mean(lmap(len, preds))
        base_metrics.update(gen_time=gen_time, gen_len=summ_len, preds=preds, target=target, **gen_metrics)
        return base_metrics
    def test_step(self, batch, batch_idx):
        return self._generative_step(batch)

    def test_epoch_end(self, outputs):
        return self.validation_epoch_end(outputs, prefix="test")

    def get_dataset(self, type_path) -> SeqaSeqDataset:
        n_obs = self.n_obs[type_path]
        max_target_length = self.target_lens[type_path]
        dataset = self.dataset_class(
            self.tokenizer,
            type_path=type_path,
            n_obs=n_obs,
            max_target_length=max_target_length,
            **self.dataset_kwargs,
        )
        return dataset

    def get_dataloader(self, type_path: str, batch_size: int, shuffle: bool = False) -> DataLoader:
        dataset = self.get_dataset(type_path)

        if self.hparams.sortish_sampler and type_path != "test" and type_path != "val":
            sampler = dataset.make_sortish_sampler(batch_size, distributed=self.hparams.gpus > 1)
            return DataLoader(
                dataset,
                batch_size=batch_size,
                collate_fn=dataset.collate_fn,
                shuffle=False,
                num_workers=self.num_workers,
                sampler=sampler,
            )
        elif self.hparams.max_tokens_per_batch is not None and type_path != "test" and type_path != "val":
            batch_sampler = dataset.make_dynamic_sampler(
                self.hparams.max_tokens_per_batch, distributed=self.hparams.gpus > 1
            )
            return DataLoader(
                dataset,
                batch_sampler=batch_sampler,
                collate_fn=dataset.collate_fn,
                num_workers=self.num_workers,
            )
        else:
            return DataLoader(
                dataset,
                batch_size=batch_size,
                collate_fn=dataset.collate_fn,
                shuffle=shuffle,
                num_workers=self.num_workers,
                sampler=None,
            )

    def train_dataloader(self) -> DataLoader:
        dataloader = self.get_dataloader("train", batch_size=self.hparams.train_batch_size, shuffle=True)
        return dataloader

    def val_dataloader(self) -> DataLoader:
        return self.get_dataloader("val", batch_size=self.hparams.eval_batch_size)

    def test_dataloader(self) -> DataLoader:
        return self.get_dataloader("test", batch_size=self.hparams.eval_batch_size)
    @staticmethod
    def add_model_specific_args(parser, root_dir):
        BaseTransformer.add_model_specific_args(parser, root_dir)
        add_generic_args(parser, root_dir)
        parser.add_argument(
            "--max_source_length",
            default=1024,
            type=int,
            help=(
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            ),
        )
        parser.add_argument(
            "--max_target_length",
            default=56,
            type=int,
            help=(
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            ),
        )
        parser.add_argument(
            "--val_max_target_length",
            default=142,
            type=int,
            help=(
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            ),
        )
        parser.add_argument(
            "--test_max_target_length",
            default=142,
            type=int,
            help=(
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            ),
        )
        parser.add_argument("--freeze_encoder", action="store_true")
        parser.add_argument("--freeze_embeds", action="store_true")
        parser.add_argument("--sortish_sampler", action="store_true", default=False)
        parser.add_argument("--overwrite_output_dir", action="store_true", default=False)
        parser.add_argument("--max_tokens_per_batch", type=int, default=None)
        parser.add_argument("--logger_name", type=str, choices=["default", "wandb", "wandb_shared"], default="default")
        parser.add_argument("--n_train", type=int, default=-1, required=False, help="# examples. -1 means use all.")
        parser.add_argument("--n_val", type=int, default=500, required=False, help="# examples. -1 means use all.")
        parser.add_argument("--n_test", type=int, default=-1, required=False, help="# examples. -1 means use all.")
        parser.add_argument(
            "--task", type=str, default="summarization", required=False, help="# examples. -1 means use all."
        )
        parser.add_argument("--label_smoothing", type=float, default=0.0, required=False)
        parser.add_argument("--src_lang", type=str, default="", required=False)
        parser.add_argument("--tgt_lang", type=str, default="", required=False)
        parser.add_argument("--eval_beams", type=int, default=None, required=False)
        parser.add_argument(
            "--val_metric", type=str, default=None, required=False, choices=["bleu", "rouge2", "loss", None]
        )
        parser.add_argument("--eval_max_gen_length", type=int, default=None, help="never generate more than n tokens")
        parser.add_argument("--save_top_k", type=int, default=1, required=False, help="How many checkpoints to save")
        parser.add_argument(
            "--early_stopping_patience",
            type=int,
            default=-1,
            required=False,
            help=(
                "-1 means never early stop. early_stopping_patience is measured in validation checks, not epochs. So"
                " val_check_interval will effect it."
            ),
        )
        return parser
class TranslationModule(SummarizationModule):
    mode = "translation"
    loss_names = ["loss"]
    metric_names = ["bleu"]
    default_val_metric = "bleu"

    def __init__(self, hparams, **kwargs):
        super().__init__(hparams, **kwargs)
        self.dataset_kwargs["src_lang"] = hparams.src_lang
        self.dataset_kwargs["tgt_lang"] = hparams.tgt_lang

    def calc_generative_metrics(self, preds, target) -> dict:
        return calculate_bleu(preds, target)
def main(args, model=None):
    Path(args.output_dir).mkdir(exist_ok=True)
    check_output_dir(args, expected_items=3)
    if model is None:
        if "summarization" in args.task:
            model = SummarizationModule(args)
        else:
            model = TranslationModule(args)
    dataset = Path(args.data_dir).name
    if (
        args.logger_name == "default"
        or args.fast_dev_run
        or str(args.output_dir).startswith("/tmp")
        or str(args.output_dir).startswith("/var")
    ):
        logger = True  # don't pollute wandb logs unnecessarily
    elif args.logger_name == "wandb":
        from pytorch_lightning.loggers import WandbLogger

        project = os.environ.get("WANDB_PROJECT", dataset)
        logger = WandbLogger(name=model.output_dir.name, project=project)
    elif args.logger_name == "wandb_shared":
        from pytorch_lightning.loggers import WandbLogger

        logger = WandbLogger(name=model.output_dir.name, project=f"hf_{dataset}")

    if args.early_stopping_patience >= 0:
        es_callback = get_early_stopping_callback(model.val_metric, args.early_stopping_patience)
    else:
        es_callback = False

    lower_is_better = args.val_metric == "loss"
    trainer = generic_train(
        model,
        args,
        logging_callback=SeqaSeqLoggingCallback(),
        checkpoint_callback=get_checkpoint_callback(
            args.output_dir, model.val_metric, args.save_top_k, lower_is_better
        ),
        early_stopping_callback=es_callback,
        logger=logger,
    )
    pickle_save(model.hparams, model.output_dir / "hparams.pkl")
    if not args.do_predict:
        return model

    model.hparams.test_checkpoint = ""
    checkpoints = sorted(glob.glob(os.path.join(args.output_dir, "*.ckpt"), recursive=True))
    if checkpoints:
        model.hparams.test_checkpoint = checkpoints[-1]
        trainer.resume_from_checkpoint = checkpoints[-1]
    trainer.logger.log_hyperparams(model.hparams)
    # test() without a model tests using the best checkpoint automatically
    trainer.test()
    return model
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser = pl.Trainer.add_argparse_args(parser)
    parser = SummarizationModule.add_model_specific_args(parser, os.getcwd())
    args = parser.parse_args()
main(args)
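
# ---------------------------------------------------------------------------
# Editor's sketch (hedged): a minimal re-implementation of the
# `label_smoothed_nll_loss` helper that `_step` imports from `utils` above,
# adapted from the fairseq version this script historically used. The real
# helper may differ in details, so the `_sketch` suffix avoids shadowing it.
import torch


def label_smoothed_nll_loss_sketch(lprobs, target, epsilon, ignore_index=-100):
    """Mix the gold-token NLL with a uniform prior over the vocabulary."""
    if target.dim() == lprobs.dim() - 1:
        target = target.unsqueeze(-1)
    nll_loss = -lprobs.gather(dim=-1, index=target)
    smooth_loss = -lprobs.sum(dim=-1, keepdim=True)
    if ignore_index is not None:
        pad_mask = target.eq(ignore_index)
        nll_loss.masked_fill_(pad_mask, 0.0)
        smooth_loss.masked_fill_(pad_mask, 0.0)
    else:
        nll_loss = nll_loss.squeeze(-1)
        smooth_loss = smooth_loss.squeeze(-1)
    nll_loss = nll_loss.sum()
    smooth_loss = smooth_loss.sum()
    eps_i = epsilon / lprobs.size(-1)
    loss = (1.0 - epsilon) * nll_loss + eps_i * smooth_loss
    return loss, nll_loss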
| 82 | 1 |
import flax.linen as nn
import jax
import jax.numpy as jnp
class FlaxUpsample2D(nn.Module):
    out_channels: int
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        self.conv = nn.Conv(
            self.out_channels,
            kernel_size=(3, 3),
            strides=(1, 1),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

    def __call__(self, hidden_states):
        batch, height, width, channels = hidden_states.shape
        # nearest-neighbour upsampling by a factor of 2, followed by a 3x3 conv
        hidden_states = jax.image.resize(
            hidden_states,
            shape=(batch, height * 2, width * 2, channels),
            method="nearest",
        )
        hidden_states = self.conv(hidden_states)
        return hidden_states


class FlaxDownsample2D(nn.Module):
    out_channels: int
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        self.conv = nn.Conv(
            self.out_channels,
            kernel_size=(3, 3),
            strides=(2, 2),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

    def __call__(self, hidden_states):
        # pad = ((0, 0), (0, 1), (0, 1), (0, 0))  # pad height and width dim
        # hidden_states = jnp.pad(hidden_states, pad_width=pad)
        hidden_states = self.conv(hidden_states)
        return hidden_states


class FlaxResnetBlock2D(nn.Module):
    in_channels: int
    out_channels: int = None
    dropout_prob: float = 0.0
    use_nin_shortcut: bool = None
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        out_channels = self.in_channels if self.out_channels is None else self.out_channels

        self.norm1 = nn.GroupNorm(num_groups=32, epsilon=1e-5)
        self.conv1 = nn.Conv(
            out_channels,
            kernel_size=(3, 3),
            strides=(1, 1),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

        self.time_emb_proj = nn.Dense(out_channels, dtype=self.dtype)

        self.norm2 = nn.GroupNorm(num_groups=32, epsilon=1e-5)
        self.dropout = nn.Dropout(self.dropout_prob)
        self.conv2 = nn.Conv(
            out_channels,
            kernel_size=(3, 3),
            strides=(1, 1),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

        use_nin_shortcut = self.in_channels != out_channels if self.use_nin_shortcut is None else self.use_nin_shortcut

        self.conv_shortcut = None
        if use_nin_shortcut:
            self.conv_shortcut = nn.Conv(
                out_channels,
                kernel_size=(1, 1),
                strides=(1, 1),
                padding="VALID",
                dtype=self.dtype,
            )

    def __call__(self, hidden_states, temb, deterministic=True):
        residual = hidden_states
        hidden_states = self.norm1(hidden_states)
        hidden_states = nn.swish(hidden_states)
        hidden_states = self.conv1(hidden_states)

        temb = self.time_emb_proj(nn.swish(temb))
        temb = jnp.expand_dims(jnp.expand_dims(temb, 1), 1)
        hidden_states = hidden_states + temb

        hidden_states = self.norm2(hidden_states)
        hidden_states = nn.swish(hidden_states)
        hidden_states = self.dropout(hidden_states, deterministic)
        hidden_states = self.conv2(hidden_states)

        if self.conv_shortcut is not None:
            residual = self.conv_shortcut(residual)

        return hidden_states + residual
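
# ---------------------------------------------------------------------------
# Editor's sketch (hedged): exercising the resnet block above with dummy data.
# Shapes follow the NHWC convention these Flax modules use; all names below
# are illustrative and not part of the original file.
if __name__ == "__main__":
    rng = jax.random.PRNGKey(0)
    block = FlaxResnetBlock2D(in_channels=32, out_channels=64)
    hidden = jnp.ones((1, 16, 16, 32))  # (batch, height, width, channels)
    temb = jnp.ones((1, 128))           # time embedding
    params = block.init({"params": rng}, hidden, temb)
    out = block.apply(params, hidden, temb)
    print(out.shape)  # (1, 16, 16, 64): the 1x1 shortcut conv matches channels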
| 177 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DetrConfig, DetrForObjectDetection, DetrForSegmentation, DetrImageProcessor, ResNetConfig
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_detr_config(model_name):
    # initialize config
    if "resnet-50" in model_name:
        backbone_config = ResNetConfig.from_pretrained("microsoft/resnet-50")
    elif "resnet-101" in model_name:
        backbone_config = ResNetConfig.from_pretrained("microsoft/resnet-101")
    else:
        raise ValueError("Model name should include either resnet50 or resnet101")

    config = DetrConfig(use_timm_backbone=False, backbone_config=backbone_config)

    # set label attributes
    is_panoptic = "panoptic" in model_name
    if is_panoptic:
        config.num_labels = 250
    else:
        config.num_labels = 91
        repo_id = "huggingface/label-files"
        filename = "coco-detection-id2label.json"
        idalabel = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        idalabel = {int(k): v for k, v in idalabel.items()}
        config.id2label = idalabel
        config.label2id = {v: k for k, v in idalabel.items()}

    return config, is_panoptic
def create_rename_keys(config):
    # here we list all keys to be renamed (original name on the left, our name on the right)
    rename_keys = []
# stem
# fmt: off
rename_keys.append(('''backbone.0.body.conv1.weight''', '''backbone.conv_encoder.model.embedder.embedder.convolution.weight''') )
rename_keys.append(('''backbone.0.body.bn1.weight''', '''backbone.conv_encoder.model.embedder.embedder.normalization.weight''') )
rename_keys.append(('''backbone.0.body.bn1.bias''', '''backbone.conv_encoder.model.embedder.embedder.normalization.bias''') )
rename_keys.append(('''backbone.0.body.bn1.running_mean''', '''backbone.conv_encoder.model.embedder.embedder.normalization.running_mean''') )
rename_keys.append(('''backbone.0.body.bn1.running_var''', '''backbone.conv_encoder.model.embedder.embedder.normalization.running_var''') )
# stages
for stage_idx in range(len(config.backbone_config.depths ) ):
for layer_idx in range(config.backbone_config.depths[stage_idx] ):
# shortcut
if layer_idx == 0:
rename_keys.append(
(
f'backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.0.weight',
f'backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.convolution.weight',
) )
rename_keys.append(
(
f'backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.weight',
f'backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.weight',
) )
rename_keys.append(
(
f'backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.bias',
f'backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.bias',
) )
rename_keys.append(
(
f'backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.running_mean',
f'backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.running_mean',
) )
rename_keys.append(
(
f'backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.running_var',
f'backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.running_var',
) )
# 3 convs
for i in range(3 ):
rename_keys.append(
(
f'backbone.0.body.layer{stage_idx + 1}.{layer_idx}.conv{i+1}.weight',
f'backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.convolution.weight',
) )
rename_keys.append(
(
f'backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.weight',
f'backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.weight',
) )
rename_keys.append(
(
f'backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.bias',
f'backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.bias',
) )
rename_keys.append(
(
f'backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.running_mean',
f'backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.running_mean',
) )
rename_keys.append(
(
f'backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.running_var',
f'backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.running_var',
) )
# fmt: on
for i in range(config.encoder_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(
f'transformer.encoder.layers.{i}.self_attn.out_proj.weight',
f'encoder.layers.{i}.self_attn.out_proj.weight',
) )
rename_keys.append(
(f'transformer.encoder.layers.{i}.self_attn.out_proj.bias', f'encoder.layers.{i}.self_attn.out_proj.bias') )
rename_keys.append((f'transformer.encoder.layers.{i}.linear1.weight', f'encoder.layers.{i}.fc1.weight') )
rename_keys.append((f'transformer.encoder.layers.{i}.linear1.bias', f'encoder.layers.{i}.fc1.bias') )
rename_keys.append((f'transformer.encoder.layers.{i}.linear2.weight', f'encoder.layers.{i}.fc2.weight') )
rename_keys.append((f'transformer.encoder.layers.{i}.linear2.bias', f'encoder.layers.{i}.fc2.bias') )
rename_keys.append(
(f'transformer.encoder.layers.{i}.norm1.weight', f'encoder.layers.{i}.self_attn_layer_norm.weight') )
rename_keys.append(
(f'transformer.encoder.layers.{i}.norm1.bias', f'encoder.layers.{i}.self_attn_layer_norm.bias') )
rename_keys.append(
(f'transformer.encoder.layers.{i}.norm2.weight', f'encoder.layers.{i}.final_layer_norm.weight') )
rename_keys.append((f'transformer.encoder.layers.{i}.norm2.bias', f'encoder.layers.{i}.final_layer_norm.bias') )
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(
f'transformer.decoder.layers.{i}.self_attn.out_proj.weight',
f'decoder.layers.{i}.self_attn.out_proj.weight',
) )
rename_keys.append(
(f'transformer.decoder.layers.{i}.self_attn.out_proj.bias', f'decoder.layers.{i}.self_attn.out_proj.bias') )
rename_keys.append(
(
f'transformer.decoder.layers.{i}.multihead_attn.out_proj.weight',
f'decoder.layers.{i}.encoder_attn.out_proj.weight',
) )
rename_keys.append(
(
f'transformer.decoder.layers.{i}.multihead_attn.out_proj.bias',
f'decoder.layers.{i}.encoder_attn.out_proj.bias',
) )
rename_keys.append((f'transformer.decoder.layers.{i}.linear1.weight', f'decoder.layers.{i}.fc1.weight') )
rename_keys.append((f'transformer.decoder.layers.{i}.linear1.bias', f'decoder.layers.{i}.fc1.bias') )
rename_keys.append((f'transformer.decoder.layers.{i}.linear2.weight', f'decoder.layers.{i}.fc2.weight') )
rename_keys.append((f'transformer.decoder.layers.{i}.linear2.bias', f'decoder.layers.{i}.fc2.bias') )
rename_keys.append(
(f'transformer.decoder.layers.{i}.norm1.weight', f'decoder.layers.{i}.self_attn_layer_norm.weight') )
rename_keys.append(
(f'transformer.decoder.layers.{i}.norm1.bias', f'decoder.layers.{i}.self_attn_layer_norm.bias') )
rename_keys.append(
(f'transformer.decoder.layers.{i}.norm2.weight', f'decoder.layers.{i}.encoder_attn_layer_norm.weight') )
rename_keys.append(
(f'transformer.decoder.layers.{i}.norm2.bias', f'decoder.layers.{i}.encoder_attn_layer_norm.bias') )
rename_keys.append(
(f'transformer.decoder.layers.{i}.norm3.weight', f'decoder.layers.{i}.final_layer_norm.weight') )
rename_keys.append((f'transformer.decoder.layers.{i}.norm3.bias', f'decoder.layers.{i}.final_layer_norm.bias') )
# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
rename_keys.extend(
[
('''input_proj.weight''', '''input_projection.weight'''),
('''input_proj.bias''', '''input_projection.bias'''),
('''query_embed.weight''', '''query_position_embeddings.weight'''),
('''transformer.decoder.norm.weight''', '''decoder.layernorm.weight'''),
('''transformer.decoder.norm.bias''', '''decoder.layernorm.bias'''),
('''class_embed.weight''', '''class_labels_classifier.weight'''),
('''class_embed.bias''', '''class_labels_classifier.bias'''),
('''bbox_embed.layers.0.weight''', '''bbox_predictor.layers.0.weight'''),
('''bbox_embed.layers.0.bias''', '''bbox_predictor.layers.0.bias'''),
('''bbox_embed.layers.1.weight''', '''bbox_predictor.layers.1.weight'''),
('''bbox_embed.layers.1.bias''', '''bbox_predictor.layers.1.bias'''),
('''bbox_embed.layers.2.weight''', '''bbox_predictor.layers.2.weight'''),
('''bbox_embed.layers.2.bias''', '''bbox_predictor.layers.2.bias'''),
] )
return rename_keys
def rename_key(state_dict, old, new):
    val = state_dict.pop(old)
    state_dict[new] = val
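
# Note (editor): DETR's hidden size is 256, so each fused `in_proj` matrix
# popped below is split into three 256-row chunks that become the separate
# q/k/v projections expected by the Hugging Face implementation.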
def read_in_q_k_v(state_dict, is_panoptic=False):
    prefix = ""
    if is_panoptic:
        prefix = "detr."

    # first: transformer encoder
    for i in range(6):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
    # next: transformer decoder (which is a bit more complex because it also includes cross-attention)
    for i in range(6):
        # read in weights + bias of input projection layer of self-attention
        in_proj_weight = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"decoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"decoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"decoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"decoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"decoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"decoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
        # read in weights + bias of input projection layer of cross-attention
        in_proj_weight_cross_attn = state_dict.pop(
            f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight"
        )
        in_proj_bias_cross_attn = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias")
        # next, add query, keys and values (in that order) of cross-attention to the state dict
        state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.weight"] = in_proj_weight_cross_attn[:256, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.bias"] = in_proj_bias_cross_attn[:256]
        state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.weight"] = in_proj_weight_cross_attn[256:512, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.bias"] = in_proj_bias_cross_attn[256:512]
        state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.weight"] = in_proj_weight_cross_attn[-256:, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.bias"] = in_proj_bias_cross_attn[-256:]
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_detr_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
    config, is_panoptic = get_detr_config(model_name)

    # load original model from torch hub
    model_name_to_original_name = {
        "detr-resnet-50": "detr_resnet50",
        "detr-resnet-101": "detr_resnet101",
    }
    logger.info(f"Converting model {model_name}...")
    detr = torch.hub.load("facebookresearch/detr", model_name_to_original_name[model_name], pretrained=True).eval()
    state_dict = detr.state_dict()
    # rename keys
    for src, dest in create_rename_keys(config):
        if is_panoptic:
            src = "detr." + src
        rename_key(state_dict, src, dest)
    # query, key and value matrices need special treatment
    read_in_q_k_v(state_dict, is_panoptic=is_panoptic)
    # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
    prefix = "detr.model." if is_panoptic else "model."
    for key in state_dict.copy().keys():
        if is_panoptic:
            if (
                key.startswith("detr")
                and not key.startswith("class_labels_classifier")
                and not key.startswith("bbox_predictor")
            ):
                val = state_dict.pop(key)
                state_dict["detr.model" + key[4:]] = val
            elif "class_labels_classifier" in key or "bbox_predictor" in key:
                val = state_dict.pop(key)
                state_dict["detr." + key] = val
            elif key.startswith("bbox_attention") or key.startswith("mask_head"):
                continue
            else:
                val = state_dict.pop(key)
                state_dict[prefix + key] = val
        else:
            if not key.startswith("class_labels_classifier") and not key.startswith("bbox_predictor"):
                val = state_dict.pop(key)
                state_dict[prefix + key] = val

    # finally, create HuggingFace model and load state dict
    model = DetrForSegmentation(config) if is_panoptic else DetrForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()

    # verify our conversion on an image
    format = "coco_panoptic" if is_panoptic else "coco_detection"
    processor = DetrImageProcessor(format=format)
    encoding = processor(images=prepare_img(), return_tensors="pt")
    pixel_values = encoding["pixel_values"]

    original_outputs = detr(pixel_values)
    outputs = model(pixel_values)

    assert torch.allclose(outputs.logits, original_outputs["pred_logits"], atol=1e-3)
    assert torch.allclose(outputs.pred_boxes, original_outputs["pred_boxes"], atol=1e-3)
    if is_panoptic:
        assert torch.allclose(outputs.pred_masks, original_outputs["pred_masks"], atol=1e-4)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        # Save model and image processor
        logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        # Upload model and image processor to the hub
        logger.info("Uploading PyTorch model and image processor to the hub...")
        model.push_to_hub(f"nielsr/{model_name}")
        processor.push_to_hub(f"nielsr/{model_name}")
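
# ---------------------------------------------------------------------------
# Editor's sketch (hedged): a quick way to exercise a converted checkpoint.
# `post_process_object_detection` exists on recent DetrImageProcessor
# releases; the threshold and dump path below are illustrative only.
def run_converted_model_sketch(dump_path):
    processor = DetrImageProcessor.from_pretrained(dump_path)
    model = DetrForObjectDetection.from_pretrained(dump_path)
    image = prepare_img()
    inputs = processor(images=image, return_tensors="pt")
    outputs = model(**inputs)
    # the post-processor expects (height, width); PIL's size is (width, height)
    return processor.post_process_object_detection(
        outputs, threshold=0.9, target_sizes=[image.size[::-1]]
    )[0]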
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--model_name',
default='detr-resnet-50',
type=str,
choices=['detr-resnet-50', 'detr-resnet-101'],
help='Name of the DETR model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.'
)
parser.add_argument('--push_to_hub', action='store_true', help='Whether to push the model to the hub or not.')
    args = parser.parse_args()
convert_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 177 | 1 |
from typing import List
import datasets
from datasets.tasks import AudioClassification
from ..folder_based_builder import folder_based_builder
logger = datasets.utils.logging.get_logger(__name__)


class AudioFolderConfig(folder_based_builder.FolderBasedBuilderConfig):
    """Builder Config for AudioFolder."""

    drop_labels: bool = None
    drop_metadata: bool = None


class AudioFolder(folder_based_builder.FolderBasedBuilder):
    BASE_FEATURE = datasets.Audio()
    BASE_COLUMN_NAME = "audio"
    BUILDER_CONFIG_CLASS = AudioFolderConfig
    EXTENSIONS: List[str]  # definition at the bottom of the script
    CLASSIFICATION_TASK = AudioClassification(audio_column="audio", label_column="label")


AUDIO_EXTENSIONS = [
'.aiff',
'.au',
'.avr',
'.caf',
'.flac',
'.htk',
'.svx',
'.mat4',
'.mat5',
'.mpc2k',
'.ogg',
'.paf',
'.pvf',
'.raw',
'.rf64',
'.sd2',
'.sds',
'.ircam',
'.voc',
'.w64',
'.wav',
'.nist',
'.wavex',
'.wve',
'.xi',
'.mp3',
'.opus',
]
AudioFolder.EXTENSIONS = AUDIO_EXTENSIONS
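
# ---------------------------------------------------------------------------
# Editor's sketch (hedged): this builder backs `load_dataset("audiofolder", ...)`.
# A typical call, assuming a directory of class-named subfolders of audio
# files (the path is illustrative):
def load_audiofolder_sketch(data_dir):
    from datasets import load_dataset

    dataset = load_dataset("audiofolder", data_dir=data_dir)
    return dataset["train"][0]  # e.g. {"audio": {...}, "label": 0}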
| 205 |
'''simple docstring'''
from typing import List
import datasets
from datasets.tasks import AudioClassification
from ..folder_based_builder import folder_based_builder
logger = datasets.utils.logging.get_logger(__name__)


class AudioFolderConfig(folder_based_builder.FolderBasedBuilderConfig):
    """Builder Config for AudioFolder."""

    drop_labels: bool = None
    drop_metadata: bool = None


class AudioFolder(folder_based_builder.FolderBasedBuilder):
    BASE_FEATURE = datasets.Audio()
    BASE_COLUMN_NAME = "audio"
    BUILDER_CONFIG_CLASS = AudioFolderConfig
    EXTENSIONS: List[str]  # definition at the bottom of the script
    CLASSIFICATION_TASK = AudioClassification(audio_column="audio", label_column="label")


AUDIO_EXTENSIONS = [
".aiff",
".au",
".avr",
".caf",
".flac",
".htk",
".svx",
".mat4",
".mat5",
".mpc2k",
".ogg",
".paf",
".pvf",
".raw",
".rf64",
".sd2",
".sds",
".ircam",
".voc",
".w64",
".wav",
".nist",
".wavex",
".wve",
".xi",
".mp3",
".opus",
]
AudioFolder.EXTENSIONS = AUDIO_EXTENSIONS
| 211 | 0 |
def jaccard_similarity(set_a, set_b, alternative_union=False):
    if isinstance(set_a, set) and isinstance(set_b, set):
        intersection = len(set_a.intersection(set_b))
        if alternative_union:
            union = len(set_a) + len(set_b)
        else:
            union = len(set_a.union(set_b))
        return intersection / union

    if isinstance(set_a, (list, tuple)) and isinstance(set_b, (list, tuple)):
        intersection = [element for element in set_a if element in set_b]
        if alternative_union:
            union = len(set_a) + len(set_b)
            return len(intersection) / union
        else:
            union = set_a + [element for element in set_b if element not in set_a]
            return len(intersection) / len(union)
    return None


if __name__ == "__main__":
    set_a = {"a", "b", "c", "d", "e"}
    set_b = {"c", "d", "e", "f", "h", "i"}
    print(jaccard_similarity(set_a, set_b))
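    # Editor's note (hedged): with alternative_union=True the denominator is
    # len(set_a) + len(set_b) (a Sørensen-Dice-style denominator), so the value
    # is no longer the classical Jaccard index:
    assert jaccard_similarity(set_a, set_b) == 3 / 8
    assert jaccard_similarity(set_a, set_b, alternative_union=True) == 3 / 11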
| 359 |
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_trajectory_transformer": [
        "TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "TrajectoryTransformerConfig",
    ],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_trajectory_transformer"] = [
        "TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TrajectoryTransformerModel",
        "TrajectoryTransformerPreTrainedModel",
        "load_tf_weights_in_trajectory_transformer",
    ]
if TYPE_CHECKING:
from .configuration_trajectory_transformer import (
TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TrajectoryTransformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trajectory_transformer import (
TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TrajectoryTransformerModel,
TrajectoryTransformerPreTrainedModel,
load_tf_weights_in_trajectory_transformer,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
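
# Editor's note (hedged): `_LazyModule` keeps importing this package cheap by
# resolving submodules only on first attribute access. A minimal standalone
# illustration of the same idea; `_LazyModuleSketch` is not part of the real
# transformers API.
class _LazyModuleSketch:
    def __init__(self, name, import_structure):
        self._name = name
        # map each exported symbol to the submodule that defines it
        self._symbol_to_module = {
            symbol: module for module, symbols in import_structure.items() for symbol in symbols
        }

    def __getattr__(self, symbol):
        import importlib

        module = importlib.import_module(f".{self._symbol_to_module[symbol]}", self._name)
        return getattr(module, symbol)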
| 45 | 0 |
"""simple docstring"""
from __future__ import annotations
import sys
from collections import deque
from typing import Generic, TypeVar
T = TypeVar("T")


class LRUCache(Generic[T]):
    dq_store: deque[T]  # Cache store of keys
    key_reference: set[T]  # References of the keys in cache
    _MAX_CAPACITY: int = 10  # Maximum capacity of cache

    def __init__(self, n: int) -> None:
        self.dq_store = deque()
        self.key_reference = set()
        if not n:
            LRUCache._MAX_CAPACITY = sys.maxsize
        elif n < 0:
            raise ValueError("n should be an integer greater than 0.")
        else:
            LRUCache._MAX_CAPACITY = n

    def refer(self, x: T) -> None:
        if x not in self.key_reference:
            if len(self.dq_store) == LRUCache._MAX_CAPACITY:
                last_element = self.dq_store.pop()
                self.key_reference.remove(last_element)
        else:
            self.dq_store.remove(x)
        self.dq_store.appendleft(x)
        self.key_reference.add(x)

    def display(self) -> None:
        for k in self.dq_store:
            print(k)

    def __repr__(self) -> str:
        return f"LRUCache({self._MAX_CAPACITY}) => {list(self.dq_store)}"


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    lru_cache: LRUCache[str | int] = LRUCache(4)
    lru_cache.refer("A")
    lru_cache.refer(2)
    lru_cache.refer(3)
    lru_cache.refer("A")
    lru_cache.refer(4)
    lru_cache.refer(5)
    lru_cache.display()
    print(lru_cache)
    assert str(lru_cache) == "LRUCache(4) => [5, 4, 'A', 3]"
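
# Editor's sketch (hedged): `deque.remove` makes refer() O(n); an O(1) variant
# can be backed by collections.OrderedDict. `OrderedDictLRU` is an illustrative
# addition, not part of the original module.
from collections import OrderedDict


class OrderedDictLRU:
    def __init__(self, capacity: int) -> None:
        self.capacity = capacity
        self.store: OrderedDict = OrderedDict()

    def refer(self, key) -> None:
        if key in self.store:
            self.store.move_to_end(key)  # O(1) "recently used" bump
        elif len(self.store) == self.capacity:
            self.store.popitem(last=False)  # evict the least recently used key
        self.store[key] = True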
| 44 |
"""simple docstring"""
import unittest
from transformers import DebertaVaTokenizer, DebertaVaTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/spiece.model")


@require_sentencepiece
@require_tokenizers
class DebertaVaTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = DebertaVaTokenizer
    rust_tokenizer_class = DebertaVaTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
def A__ ( self ) -> Optional[Any]:
super().setUp()
# We have a SentencePiece fixture for testing
__lowerCAmelCase = DebertaVaTokenizer(snake_case_ , unk_token="""<unk>""" )
tokenizer.save_pretrained(self.tmpdirname )
def A__ ( self , snake_case_ ) -> List[Any]:
__lowerCAmelCase = """this is a test"""
__lowerCAmelCase = """this is a test"""
return input_text, output_text
def A__ ( self ) -> Optional[Any]:
__lowerCAmelCase = """<pad>"""
__lowerCAmelCase = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(snake_case_ ) , snake_case_ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(snake_case_ ) , snake_case_ )
def A__ ( self ) -> Any:
__lowerCAmelCase = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """<pad>""" )
self.assertEqual(vocab_keys[1] , """<unk>""" )
self.assertEqual(vocab_keys[-1] , """[PAD]""" )
self.assertEqual(len(snake_case_ ) , 30_001 )
def A__ ( self ) -> Optional[Any]:
self.assertEqual(self.get_tokenizer().vocab_size , 30_000 )
def A__ ( self ) -> int:
# fmt: off
__lowerCAmelCase = """ \tHeLLo!how \n Are yoU? """
__lowerCAmelCase = ["""▁hello""", """!""", """how""", """▁are""", """▁you""", """?"""]
# fmt: on
__lowerCAmelCase = DebertaVaTokenizer(snake_case_ , do_lower_case=snake_case_ )
__lowerCAmelCase = tokenizer.convert_ids_to_tokens(tokenizer.encode(snake_case_ , add_special_tokens=snake_case_ ) )
self.assertListEqual(snake_case_ , snake_case_ )
__lowerCAmelCase = DebertaVaTokenizerFast(snake_case_ , do_lower_case=snake_case_ )
__lowerCAmelCase = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(snake_case_ , add_special_tokens=snake_case_ ) )
self.assertListEqual(snake_case_ , snake_case_ )
@unittest.skip("""There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.""" )
def A__ ( self ) -> int:
pass
@unittest.skip("""There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.""" )
def A__ ( self ) -> Dict:
pass
def A__ ( self ) -> List[str]:
# fmt: off
__lowerCAmelCase = """I was born in 92000, and this is falsé."""
__lowerCAmelCase = ["""▁""", """<unk>""", """▁was""", """▁born""", """▁in""", """▁9""", """2000""", """▁""", """,""", """▁and""", """▁this""", """▁is""", """▁fal""", """s""", """<unk>""", """▁""", """.""", ]
# fmt: on
__lowerCAmelCase = DebertaVaTokenizer(snake_case_ , split_by_punct=snake_case_ )
__lowerCAmelCase = tokenizer.convert_ids_to_tokens(tokenizer.encode(snake_case_ , add_special_tokens=snake_case_ ) )
self.assertListEqual(snake_case_ , snake_case_ )
__lowerCAmelCase = DebertaVaTokenizerFast(snake_case_ , split_by_punct=snake_case_ )
__lowerCAmelCase = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(snake_case_ , add_special_tokens=snake_case_ ) )
self.assertListEqual(snake_case_ , snake_case_ )
def A__ ( self ) -> Dict:
# fmt: off
__lowerCAmelCase = """I was born in 92000, and this is falsé."""
__lowerCAmelCase = ["""▁i""", """▁was""", """▁born""", """▁in""", """▁9""", """2000""", """▁""", """,""", """▁and""", """▁this""", """▁is""", """▁fal""", """s""", """<unk>""", """▁""", """.""", ]
# fmt: on
__lowerCAmelCase = DebertaVaTokenizer(snake_case_ , do_lower_case=snake_case_ , split_by_punct=snake_case_ )
__lowerCAmelCase = tokenizer.convert_ids_to_tokens(tokenizer.encode(snake_case_ , add_special_tokens=snake_case_ ) )
self.assertListEqual(snake_case_ , snake_case_ )
__lowerCAmelCase = DebertaVaTokenizerFast(snake_case_ , do_lower_case=snake_case_ , split_by_punct=snake_case_ )
__lowerCAmelCase = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(snake_case_ , add_special_tokens=snake_case_ ) )
self.assertListEqual(snake_case_ , snake_case_ )
def A__ ( self ) -> Any:
# fmt: off
__lowerCAmelCase = """I was born in 92000, and this is falsé."""
__lowerCAmelCase = ["""▁i""", """▁was""", """▁born""", """▁in""", """▁9""", """2000""", """,""", """▁and""", """▁this""", """▁is""", """▁fal""", """s""", """<unk>""", """.""", ]
# fmt: on
__lowerCAmelCase = DebertaVaTokenizer(snake_case_ , do_lower_case=snake_case_ , split_by_punct=snake_case_ )
__lowerCAmelCase = tokenizer.convert_ids_to_tokens(tokenizer.encode(snake_case_ , add_special_tokens=snake_case_ ) )
self.assertListEqual(snake_case_ , snake_case_ )
__lowerCAmelCase = DebertaVaTokenizerFast(snake_case_ , do_lower_case=snake_case_ , split_by_punct=snake_case_ )
__lowerCAmelCase = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(snake_case_ , add_special_tokens=snake_case_ ) )
self.assertListEqual(snake_case_ , snake_case_ )
def A__ ( self ) -> Tuple:
# fmt: off
__lowerCAmelCase = """I was born in 92000, and this is falsé."""
__lowerCAmelCase = ["""▁""", """<unk>""", """▁was""", """▁born""", """▁in""", """▁9""", """2000""", """▁""", """,""", """▁and""", """▁this""", """▁is""", """▁fal""", """s""", """<unk>""", """▁""", """.""", ]
# fmt: on
__lowerCAmelCase = DebertaVaTokenizer(snake_case_ , do_lower_case=snake_case_ , split_by_punct=snake_case_ )
__lowerCAmelCase = tokenizer.convert_ids_to_tokens(tokenizer.encode(snake_case_ , add_special_tokens=snake_case_ ) )
self.assertListEqual(snake_case_ , snake_case_ )
__lowerCAmelCase = DebertaVaTokenizerFast(snake_case_ , do_lower_case=snake_case_ , split_by_punct=snake_case_ )
__lowerCAmelCase = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(snake_case_ , add_special_tokens=snake_case_ ) )
self.assertListEqual(snake_case_ , snake_case_ )
def A__ ( self ) -> Any:
# fmt: off
__lowerCAmelCase = """ \tHeLLo!how \n Are yoU? """
__lowerCAmelCase = ["""▁""", """<unk>""", """e""", """<unk>""", """o""", """!""", """how""", """▁""", """<unk>""", """re""", """▁yo""", """<unk>""", """?"""]
# fmt: on
__lowerCAmelCase = DebertaVaTokenizer(snake_case_ , do_lower_case=snake_case_ , split_by_punct=snake_case_ )
__lowerCAmelCase = tokenizer.convert_ids_to_tokens(tokenizer.encode(snake_case_ , add_special_tokens=snake_case_ ) )
self.assertListEqual(snake_case_ , snake_case_ )
__lowerCAmelCase = DebertaVaTokenizerFast(snake_case_ , do_lower_case=snake_case_ , split_by_punct=snake_case_ )
__lowerCAmelCase = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(snake_case_ , add_special_tokens=snake_case_ ) )
self.assertListEqual(snake_case_ , snake_case_ )
def A__ ( self ) -> int:
__lowerCAmelCase = self.get_tokenizer()
__lowerCAmelCase = self.get_rust_tokenizer()
__lowerCAmelCase = """I was born in 92000, and this is falsé."""
__lowerCAmelCase = tokenizer.convert_ids_to_tokens(tokenizer.encode(snake_case_ , add_special_tokens=snake_case_ ) )
__lowerCAmelCase = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(snake_case_ , add_special_tokens=snake_case_ ) )
self.assertListEqual(snake_case_ , snake_case_ )
__lowerCAmelCase = tokenizer.encode(snake_case_ , add_special_tokens=snake_case_ )
__lowerCAmelCase = rust_tokenizer.encode(snake_case_ , add_special_tokens=snake_case_ )
self.assertListEqual(snake_case_ , snake_case_ )
__lowerCAmelCase = self.get_rust_tokenizer()
__lowerCAmelCase = tokenizer.encode(snake_case_ )
__lowerCAmelCase = rust_tokenizer.encode(snake_case_ )
self.assertListEqual(snake_case_ , snake_case_ )
def A__ ( self ) -> str:
__lowerCAmelCase = """This is a test"""
__lowerCAmelCase = [13, 1, 4_398, 25, 21, 1_289]
__lowerCAmelCase = ["""▁""", """T""", """his""", """▁is""", """▁a""", """▁test"""]
__lowerCAmelCase = ["""▁""", """<unk>""", """his""", """▁is""", """▁a""", """▁test"""]
__lowerCAmelCase = DebertaVaTokenizer(snake_case_ , keep_accents=snake_case_ )
__lowerCAmelCase = DebertaVaTokenizerFast(snake_case_ , keep_accents=snake_case_ )
__lowerCAmelCase = tokenizer.encode(snake_case_ , add_special_tokens=snake_case_ )
self.assertListEqual(snake_case_ , snake_case_ )
__lowerCAmelCase = tokenizer.tokenize(snake_case_ )
self.assertListEqual(snake_case_ , snake_case_ )
__lowerCAmelCase = tokenizer.convert_ids_to_tokens(snake_case_ )
self.assertListEqual(snake_case_ , snake_case_ )
__lowerCAmelCase = rust_tokenizer.encode(snake_case_ , add_special_tokens=snake_case_ )
self.assertListEqual(snake_case_ , snake_case_ )
__lowerCAmelCase = rust_tokenizer.tokenize(snake_case_ )
self.assertListEqual(snake_case_ , snake_case_ )
__lowerCAmelCase = rust_tokenizer.convert_ids_to_tokens(snake_case_ )
self.assertListEqual(snake_case_ , snake_case_ )
# fmt: off
__lowerCAmelCase = """I was born in 92000, and this is falsé."""
__lowerCAmelCase = [13, 1, 23, 386, 19, 561, 3_050, 15, 17, 48, 25, 8_256, 18, 1, 9]
__lowerCAmelCase = ["""▁""", """I""", """▁was""", """▁born""", """▁in""", """▁9""", """2000""", """,""", """▁and""", """▁this""", """▁is""", """▁fal""", """s""", """é""", """.""", ]
__lowerCAmelCase = ["""▁""", """<unk>""", """▁was""", """▁born""", """▁in""", """▁9""", """2000""", """,""", """▁and""", """▁this""", """▁is""", """▁fal""", """s""", """<unk>""", """.""", ]
# fmt: on
__lowerCAmelCase = tokenizer.encode(snake_case_ , add_special_tokens=snake_case_ )
self.assertListEqual(snake_case_ , snake_case_ )
__lowerCAmelCase = tokenizer.tokenize(snake_case_ )
self.assertListEqual(snake_case_ , snake_case_ )
__lowerCAmelCase = tokenizer.convert_ids_to_tokens(snake_case_ )
self.assertListEqual(snake_case_ , snake_case_ )
__lowerCAmelCase = rust_tokenizer.encode(snake_case_ , add_special_tokens=snake_case_ )
self.assertListEqual(snake_case_ , snake_case_ )
__lowerCAmelCase = rust_tokenizer.tokenize(snake_case_ )
self.assertListEqual(snake_case_ , snake_case_ )
__lowerCAmelCase = rust_tokenizer.convert_ids_to_tokens(snake_case_ )
self.assertListEqual(snake_case_ , snake_case_ )
def A__ ( self ) -> Optional[int]:
__lowerCAmelCase = DebertaVaTokenizer(snake_case_ )
__lowerCAmelCase = tokenizer.encode("""sequence builders""" )
__lowerCAmelCase = tokenizer.encode("""multi-sequence build""" )
__lowerCAmelCase = tokenizer.build_inputs_with_special_tokens(snake_case_ )
__lowerCAmelCase = tokenizer.build_inputs_with_special_tokens(snake_case_ , snake_case_ )
self.assertEqual([tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] , snake_case_ )
self.assertEqual(
[tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [tokenizer.sep_token_id] , snake_case_ , )
@slow
def A__ ( self ) -> int:
# fmt: off
__lowerCAmelCase = {"""input_ids""": [[1, 39_867, 36, 19_390, 486, 27, 35_052, 81_436, 18, 60_685, 1_225, 7, 35_052, 81_436, 18, 9_367, 16_899, 18, 15_937, 53, 594, 773, 18, 16_287, 30_465, 36, 15_937, 6, 41_139, 38, 36_979, 60_763, 191, 6, 34_132, 99, 6, 50_538, 390, 43_230, 6, 34_132, 2_779, 20_850, 14, 699, 1_072, 1_194, 36, 382, 10_901, 53, 7, 699, 1_072, 2_084, 36, 20_422, 630, 53, 19, 105, 3_049, 1_896, 1_053, 16_899, 1_506, 11, 37_978, 4_243, 7, 1_237, 31_869, 200, 16_566, 654, 6, 35_052, 81_436, 7, 55_630, 13_593, 4, 2], [1, 26, 15_011, 13, 667, 8, 1_053, 18, 23_611, 1_237, 72_356, 12_820, 34, 104_134, 1_209, 35, 13_313, 6_627, 21, 202, 347, 7, 164, 2_399, 11, 46, 4_485, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 5, 1_232, 2_864, 15_785, 14_951, 105, 5, 8_581, 1_250, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """token_type_ids""": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=snake_case_ , model_name="""microsoft/deberta-v2-xlarge""" , revision="""ad6e42c1532ddf3a15c39246b63f5559d558b670""" , )
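
# ---------------------------------------------------------------------------
# Editor's sketch (hedged): the slow test above runs against the real hub
# checkpoint; a manual smoke test of the same tokenizer looks like this
# (network access and the checkpoint name are carried over from the test):
def manual_smoke_test():
    from transformers import AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained("microsoft/deberta-v2-xlarge")
    return tokenizer("I was born in 92000, and this is falsé.")["input_ids"]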
| 301 | 0 |
def nand_gate(input_1: int, input_2: int) -> int:
    """Calculate NAND of the two inputs: 0 only when both inputs are 1."""
    return int((input_1, input_2).count(0) != 0)


def test_nand_gate() -> None:
    assert nand_gate(0, 0) == 1
    assert nand_gate(0, 1) == 1
    assert nand_gate(1, 0) == 1
    assert nand_gate(1, 1) == 0
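
# Editor's sketch (hedged): NAND is functionally complete, so the remaining
# basic gates can be built from `nand_gate` alone; these helpers are
# illustrative additions, not part of the original module.
def not_gate(input_1: int) -> int:
    return nand_gate(input_1, input_1)


def and_gate(input_1: int, input_2: int) -> int:
    return not_gate(nand_gate(input_1, input_2))


def or_gate(input_1: int, input_2: int) -> int:
    return nand_gate(not_gate(input_1), not_gate(input_2))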
if __name__ == "__main__":
print(nand_gate(0, 0))
print(nand_gate(0, 1))
print(nand_gate(1, 0))
print(nand_gate(1, 1))
| 365 |
import itertools
from dataclasses import dataclass
from typing import Optional
import pandas as pd
import pyarrow as pa
import datasets
from datasets.table import table_cast
@dataclass
class PandasConfig(datasets.BuilderConfig):
    """BuilderConfig for Pandas."""

    features: Optional[datasets.Features] = None


class Pandas(datasets.ArrowBasedBuilder):
    BUILDER_CONFIG_CLASS = PandasConfig

    def _info(self):
        return datasets.DatasetInfo(features=self.config.features)

    def _split_generators(self, dl_manager):
        if not self.config.data_files:
            raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            files = data_files
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
        return splits

    def _cast_table(self, pa_table: pa.Table) -> pa.Table:
        if self.config.features is not None:
            # more expensive cast to support nested features with keys in a different order
            # allows str <-> int/float or str to Audio for example
            pa_table = table_cast(pa_table, self.config.features.arrow_schema)
        return pa_table

    def _generate_tables(self, files):
        for i, file in enumerate(itertools.chain.from_iterable(files)):
            with open(file, "rb") as f:
                pa_table = pa.Table.from_pandas(pd.read_pickle(f))
            yield i, self._cast_table(pa_table)
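
# ---------------------------------------------------------------------------
# Editor's sketch (hedged): this packaged builder backs
# `load_dataset("pandas", ...)` and expects *pickled* DataFrames, so a round
# trip looks like this (file names are illustrative):
def load_pickled_dataframe_sketch():
    pd.DataFrame({"text": ["a", "b"], "label": [0, 1]}).to_pickle("train.pkl")
    from datasets import load_dataset

    return load_dataset("pandas", data_files={"train": "train.pkl"})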
| 257 | 0 |
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import numpy as np
import torch
from datasets import load_dataset
from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor
import transformers
from transformers import (
CONFIG_MAPPING,
IMAGE_PROCESSOR_MAPPING,
MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING,
AutoConfig,
AutoImageProcessor,
AutoModelForMaskedImageModeling,
HfArgumentParser,
Trainer,
TrainingArguments,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
logger = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('4.31.0')
require_version('datasets>=1.8.0', 'To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt')
MODEL_CONFIG_CLASSES = list(MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    dataset_name: Optional[str] = field(
        default="cifar10", metadata={"help": "Name of a dataset from the datasets package"}
    )
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    image_column_name: Optional[str] = field(
        default=None,
        metadata={"help": "The column name of the images in the files. If not set, will try to use 'image' or 'img'."},
    )
    train_dir: Optional[str] = field(default=None, metadata={"help": "A folder containing the training data."})
    validation_dir: Optional[str] = field(default=None, metadata={"help": "A folder containing the validation data."})
    train_val_split: Optional[float] = field(
        default=0.15, metadata={"help": "Percent to split off of train for validation."}
    )
    mask_patch_size: int = field(default=32, metadata={"help": "The size of the square patches to use for masking."})
    mask_ratio: float = field(
        default=0.6, metadata={"help": "Percentage of patches to mask."}
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )

    def __post_init__(self):
        data_files = {}
        if self.train_dir is not None:
            data_files["train"] = self.train_dir
        if self.validation_dir is not None:
            data_files["val"] = self.validation_dir
        self.data_files = data_files if data_files else None
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/image processor we are going to pre-train.
    """

    model_name_or_path: str = field(
        default=None,
        metadata={
            "help": (
                "The model checkpoint for weights initialization. Can be a local path to a pytorch_model.bin or a "
                "checkpoint identifier on the hub. "
                "Don't set if you want to train a model from scratch."
            )
        },
    )
    model_type: Optional[str] = field(
        default=None,
        metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(MODEL_TYPES)},
    )
    config_name_or_path: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    config_overrides: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "Override some existing default config settings when a model is trained from scratch. Example: "
                "n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index"
            )
        },
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store (cache) the pretrained models/datasets downloaded from the hub"},
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    image_processor_name: str = field(default=None, metadata={"help": "Name or path of preprocessor config."})
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
    image_size: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The size (resolution) of each image. If not specified, will use `image_size` of the configuration."
            )
        },
    )
    patch_size: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The size (resolution) of each patch. If not specified, will use `patch_size` of the configuration."
            )
        },
    )
    encoder_stride: Optional[int] = field(
        default=None,
        metadata={"help": "Stride to use for the encoder."},
    )
class MaskGenerator:
    '''simple docstring'''

    def __init__(self, input_size: int = 192, mask_patch_size: int = 32, model_patch_size: int = 4, mask_ratio: float = 0.6) -> None:
        """simple docstring"""
        self.input_size = input_size
        self.mask_patch_size = mask_patch_size
        self.model_patch_size = model_patch_size
        self.mask_ratio = mask_ratio
        if self.input_size % self.mask_patch_size != 0:
            raise ValueError("Input size must be divisible by mask patch size")
        if self.mask_patch_size % self.model_patch_size != 0:
            raise ValueError("Mask patch size must be divisible by model patch size")
        self.rand_size = self.input_size // self.mask_patch_size
        self.scale = self.mask_patch_size // self.model_patch_size
        self.token_count = self.rand_size**2
        self.mask_count = int(np.ceil(self.token_count * self.mask_ratio))

    def __call__(self):
        """simple docstring"""
        mask_idx = np.random.permutation(self.token_count)[: self.mask_count]
        mask = np.zeros(self.token_count, dtype=int)
        mask[mask_idx] = 1
        mask = mask.reshape((self.rand_size, self.rand_size))
        mask = mask.repeat(self.scale, axis=0).repeat(self.scale, axis=1)
        return torch.tensor(mask.flatten())
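# A MaskGenerator() call draws a fresh random patch mask: with the defaults it
# splits a 192x192 crop into 6x6 = 36 mask patches of 32px, marks
# ceil(36 * 0.6) = 22 of them, and upsamples the grid by a factor of 8 so one
# bit covers each 4px model patch. A hypothetical sanity check (not part of the
# original script):
#     assert MaskGenerator()().numel() == (192 // 4) ** 2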
def collate_fn(examples):
    """simple docstring"""
    pixel_values = torch.stack([example["pixel_values"] for example in examples])
    mask = torch.stack([example["mask"] for example in examples])
    return {"pixel_values": pixel_values, "bool_masked_pos": mask}
def main() -> None:
    """simple docstring"""
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()
    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("run_mim", model_args, data_args)
    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )
    if training_args.should_log:
        # The default of training_args.log_level is passive, so we set log level at info here to have that default.
        transformers.utils.logging.set_verbosity_info()
    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    transformers.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()
    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, "
        f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
    logger.info(f"Training/evaluation parameters {training_args}")
    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to overcome."
            )
        elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )
    # Initialize our dataset.
    ds = load_dataset(
        data_args.dataset_name,
        data_args.dataset_config_name,
        data_files=data_args.data_files,
        cache_dir=model_args.cache_dir,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    # If we don't have a validation split, split off a percentage of train as validation.
    data_args.train_val_split = None if "validation" in ds.keys() else data_args.train_val_split
    if isinstance(data_args.train_val_split, float) and data_args.train_val_split > 0.0:
        split = ds["train"].train_test_split(data_args.train_val_split)
        ds["train"] = split["train"]
        ds["validation"] = split["test"]
    # Create config
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config_kwargs = {
        "cache_dir": model_args.cache_dir,
        "revision": model_args.model_revision,
        "use_auth_token": True if model_args.use_auth_token else None,
    }
    if model_args.config_name_or_path:
        config = AutoConfig.from_pretrained(model_args.config_name_or_path, **config_kwargs)
    elif model_args.model_name_or_path:
        config = AutoConfig.from_pretrained(model_args.model_name_or_path, **config_kwargs)
    else:
        config = CONFIG_MAPPING[model_args.model_type]()
        logger.warning("You are instantiating a new config instance from scratch.")
        if model_args.config_overrides is not None:
            logger.info(f"Overriding config: {model_args.config_overrides}")
            config.update_from_string(model_args.config_overrides)
            logger.info(f"New config: {config}")
    # make sure the decoder_type is "simmim" (only relevant for BEiT)
    if hasattr(config, "decoder_type"):
        config.decoder_type = "simmim"
    # adapt config
    model_args.image_size = model_args.image_size if model_args.image_size is not None else config.image_size
    model_args.patch_size = model_args.patch_size if model_args.patch_size is not None else config.patch_size
    model_args.encoder_stride = (
        model_args.encoder_stride if model_args.encoder_stride is not None else config.encoder_stride
    )
    config.update(
        {
            "image_size": model_args.image_size,
            "patch_size": model_args.patch_size,
            "encoder_stride": model_args.encoder_stride,
        }
    )
    # create image processor
    if model_args.image_processor_name:
        image_processor = AutoImageProcessor.from_pretrained(model_args.image_processor_name, **config_kwargs)
    elif model_args.model_name_or_path:
        image_processor = AutoImageProcessor.from_pretrained(model_args.model_name_or_path, **config_kwargs)
    else:
        IMAGE_PROCESSOR_TYPES = {
            conf.model_type: image_processor_class for conf, image_processor_class in IMAGE_PROCESSOR_MAPPING.items()
        }
        image_processor = IMAGE_PROCESSOR_TYPES[model_args.model_type]()
    # create model
    if model_args.model_name_or_path:
        model = AutoModelForMaskedImageModeling.from_pretrained(
            model_args.model_name_or_path,
            from_tf=bool(".ckpt" in model_args.model_name_or_path),
            config=config,
            cache_dir=model_args.cache_dir,
            revision=model_args.model_revision,
            use_auth_token=True if model_args.use_auth_token else None,
        )
    else:
        logger.info("Training new model from scratch")
        model = AutoModelForMaskedImageModeling.from_config(config)
    if training_args.do_train:
        column_names = ds["train"].column_names
    else:
        column_names = ds["validation"].column_names
    if data_args.image_column_name is not None:
        image_column_name = data_args.image_column_name
    elif "image" in column_names:
        image_column_name = "image"
    elif "img" in column_names:
        image_column_name = "img"
    else:
        image_column_name = column_names[0]
    # transformations as done in original SimMIM paper
    # source: https://github.com/microsoft/SimMIM/blob/main/data/data_simmim.py
    transforms = Compose(
        [
            Lambda(lambda img: img.convert("RGB") if img.mode != "RGB" else img),
            RandomResizedCrop(model_args.image_size, scale=(0.67, 1.0), ratio=(3.0 / 4.0, 4.0 / 3.0)),
            RandomHorizontalFlip(),
            ToTensor(),
            Normalize(mean=image_processor.image_mean, std=image_processor.image_std),
        ]
    )
    # create mask generator
    mask_generator = MaskGenerator(
        input_size=model_args.image_size,
        mask_patch_size=data_args.mask_patch_size,
        model_patch_size=model_args.patch_size,
        mask_ratio=data_args.mask_ratio,
    )

    def preprocess_images(examples):
        """simple docstring"""
        examples["pixel_values"] = [transforms(image) for image in examples[image_column_name]]
        examples["mask"] = [mask_generator() for i in range(len(examples[image_column_name]))]
        return examples
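    # `set_transform` below registers `preprocess_images` as an on-the-fly
    # transform, so decoding, augmentation and mask sampling happen lazily per
    # batch instead of materializing a preprocessed copy of the dataset.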
    if training_args.do_train:
        if "train" not in ds:
            raise ValueError("--do_train requires a train dataset")
        if data_args.max_train_samples is not None:
            ds["train"] = ds["train"].shuffle(seed=training_args.seed).select(range(data_args.max_train_samples))
        # Set the training transforms
        ds["train"].set_transform(preprocess_images)
    if training_args.do_eval:
        if "validation" not in ds:
            raise ValueError("--do_eval requires a validation dataset")
        if data_args.max_eval_samples is not None:
            ds["validation"] = (
                ds["validation"].shuffle(seed=training_args.seed).select(range(data_args.max_eval_samples))
            )
        # Set the validation transforms
        ds["validation"].set_transform(preprocess_images)
    # Initialize our trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=ds["train"] if training_args.do_train else None,
        eval_dataset=ds["validation"] if training_args.do_eval else None,
        tokenizer=image_processor,
        data_collator=collate_fn,
    )
    # Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()
        trainer.log_metrics("train", train_result.metrics)
        trainer.save_metrics("train", train_result.metrics)
        trainer.save_state()
    # Evaluation
    if training_args.do_eval:
        metrics = trainer.evaluate()
        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)
    # Write model card and (optionally) push to hub
    kwargs = {
        "finetuned_from": model_args.model_name_or_path,
        "tasks": "masked-image-modeling",
        "dataset": data_args.dataset_name,
        "tags": ["masked-image-modeling"],
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)
if __name__ == "__main__":
main()
| 305 |
from __future__ import annotations
from functools import lru_cache
from math import ceil
NUM_PRIMES = 100

# Sieve of Eratosthenes over the odd numbers below NUM_PRIMES.
primes = set(range(3, NUM_PRIMES, 2))
primes.add(2)
prime: int
for prime in range(3, ceil(NUM_PRIMES**0.5), 2):
    if prime not in primes:
        continue
    primes.difference_update(set(range(prime * prime, NUM_PRIMES, prime)))
@lru_cache(maxsize=100)
def partition(number_to_partition: int) -> set[int]:
    """simple docstring"""
    if number_to_partition < 0:
        return set()
    elif number_to_partition == 0:
        return {1}

    ret: set[int] = set()
    prime: int
    sub: int

    for prime in primes:
        if prime > number_to_partition:
            continue
        for sub in partition(number_to_partition - prime):
            ret.add(sub * prime)
    return ret
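# Example (from the Project Euler 77 statement): 10 has exactly five prime
# partitions (7+3, 5+5, 5+3+2, 3+3+2+2, 2+2+2+2+2), so len(partition(10)) == 5.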
def solution(number_unique_partitions: int = 5000) -> int | None:
    """simple docstring"""
    for number_to_partition in range(1, NUM_PRIMES):
        if len(partition(number_to_partition)) > number_unique_partitions:
            return number_to_partition
    return None
if __name__ == "__main__":
print(F'{solution() = }')
| 305 | 1 |
"""simple docstring"""
from __future__ import annotations
import math
def ucal(u: float, p: int) -> float:
    """simple docstring"""
    temp = u
    for i in range(1, p):
        temp = temp * (u - i)
    return temp
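# ucal(u, p) accumulates the product u(u - 1)(u - 2)...(u - p + 1); divided by
# p! below, it gives the coefficient of the p-th forward difference in
# Newton's formula  f(x) ~ y0 + sum_p ucal(u, p) * (delta^p y0) / p!.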
def main() -> None:
    """simple docstring"""
    n = int(input("enter the numbers of values: "))
    y: list[list[float]] = []
    for _ in range(n):
        y.append([])
    for i in range(n):
        for j in range(n):
            y[i].append(0)  # placeholder, overwritten below
    print("enter the values of parameters in a list: ")
    x = list(map(int, input().split()))
    print("enter the values of corresponding parameters: ")
    for i in range(n):
        y[i][0] = float(input())
    value = int(input("enter the value to interpolate: "))
    u = (value - x[0]) / (x[1] - x[0])
    # for calculating forward difference table
    for i in range(1, n):
        for j in range(n - i):
            y[j][i] = y[j + 1][i - 1] - y[j][i - 1]
    summ = y[0][0]
    for i in range(1, n):
        summ += (ucal(u, i) * y[0][i]) / math.factorial(i)
    print(f"the value at {value} is {summ}")
if __name__ == "__main__":
main() | 303 |
"""simple docstring"""
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_multi_gpu
from accelerate.utils import patch_environment
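# These tests re-launch helper scripts bundled with accelerate.test_utils via
# torchrun, one process per visible GPU, and fail if any subprocess errors out.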
class MultiGPUTester(unittest.TestCase):
    def setUp(self):
        mod_file = inspect.getfile(accelerate.test_utils)
        self.test_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "test_script.py"])
        self.data_loop_file_path = os.path.sep.join(
            mod_file.split(os.path.sep)[:-1] + ["scripts", "test_distributed_data_loop.py"])
        self.operation_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "test_ops.py"])

    @require_multi_gpu
    def test_multi_gpu(self):
        print(f"Found {torch.cuda.device_count()} devices.")
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.test_file_path]
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())

    @require_multi_gpu
    def test_multi_gpu_ops(self):
        print(f"Found {torch.cuda.device_count()} devices.")
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.operation_file_path]
        print(f"Command: {cmd}")
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())

    @require_multi_gpu
    def test_pad_across_processes(self):
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", inspect.getfile(self.__class__)]
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())

    @require_multi_gpu
    def test_distributed_data_loop(self):
        print(f"Found {torch.cuda.device_count()} devices, using 2 devices only")
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.data_loop_file_path]
        with patch_environment(omp_num_threads=1, cuda_visible_devices="0,1"):
            execute_subprocess_async(cmd, env=os.environ.copy())
if __name__ == "__main__":
    accelerator = Accelerator()
    shape = (accelerator.state.process_index + 2, 10)
    tensor = torch.randint(0, 10, shape).to(accelerator.device)

    error_msg = ""

    tensor1 = accelerator.pad_across_processes(tensor)
    if tensor1.shape[0] != accelerator.state.num_processes + 1:
        error_msg += f"Found shape {tensor1.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
    if not torch.equal(tensor1[: accelerator.state.process_index + 2], tensor):
        error_msg += "Tensors have different values."
    if not torch.all(tensor1[accelerator.state.process_index + 2 :] == 0):
        error_msg += "Padding was not done with the right value (0)."

    tensor2 = accelerator.pad_across_processes(tensor, pad_first=True)
    if tensor2.shape[0] != accelerator.state.num_processes + 1:
        error_msg += f"Found shape {tensor2.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
    index = accelerator.state.num_processes - accelerator.state.process_index - 1
    if not torch.equal(tensor2[index:], tensor):
        error_msg += "Tensors have different values."
    if not torch.all(tensor2[:index] == 0):
        error_msg += "Padding was not done with the right value (0)."

    # Raise error at the end to make sure we don't stop at the first failure.
    if len(error_msg) > 0:
        raise ValueError(error_msg) | 303 | 1 |
"""simple docstring"""
import os
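# Project Euler 11: scan every horizontal, vertical and diagonal run of four
# adjacent numbers in the 20x20 grid from grid.txt and keep the largest product.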
def solution() -> int:
    """simple docstring"""
    with open(os.path.dirname(__file__) + "/grid.txt") as f:
        l = []  # noqa: E741
        for _ in range(20):
            l.append([int(x) for x in f.readline().split()])

    maximum = 0

    # right
    for i in range(20):
        for j in range(17):
            temp = l[i][j] * l[i][j + 1] * l[i][j + 2] * l[i][j + 3]
            if temp > maximum:
                maximum = temp

    # down
    for i in range(17):
        for j in range(20):
            temp = l[i][j] * l[i + 1][j] * l[i + 2][j] * l[i + 3][j]
            if temp > maximum:
                maximum = temp

    # diagonal 1
    for i in range(17):
        for j in range(17):
            temp = l[i][j] * l[i + 1][j + 1] * l[i + 2][j + 2] * l[i + 3][j + 3]
            if temp > maximum:
                maximum = temp

    # diagonal 2
    for i in range(17):
        for j in range(3, 20):
            temp = l[i][j] * l[i + 1][j - 1] * l[i + 2][j - 2] * l[i + 3][j - 3]
            if temp > maximum:
                maximum = temp
    return maximum
if __name__ == "__main__":
print(solution())
| 177 | """simple docstring"""
def min_path_sum(grid: list) -> int:
    """simple docstring"""
    if not grid or not grid[0]:
        raise TypeError("The grid does not contain the appropriate information")

    for cell_n in range(1, len(grid[0])):
        grid[0][cell_n] += grid[0][cell_n - 1]
    row_above = grid[0]

    for row_n in range(1, len(grid)):
        current_row = grid[row_n]
        grid[row_n] = fill_row(current_row, row_above)
        row_above = grid[row_n]

    return grid[-1][-1]


def fill_row(current_row: list, row_above: list) -> list:
    """simple docstring"""
    current_row[0] += row_above[0]
    for cell_n in range(1, len(current_row)):
        current_row[cell_n] += min(current_row[cell_n - 1], row_above[cell_n])
    return current_row
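# Hypothetical usage (not part of the original file):
#     min_path_sum([[1, 3, 1], [1, 5, 1], [4, 2, 1]]) == 7
# via the right/down path 1 -> 3 -> 1 -> 1 -> 1.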
if __name__ == "__main__":
import doctest
doctest.testmod()
| 177 | 1 |
"""simple docstring"""
def solution(length: int = 50) -> int:
    """simple docstring"""
    ways_number = [1] * (length + 1)
    for row_length in range(3, length + 1):
        for block_length in range(3, row_length + 1):
            for block_start in range(row_length - block_length):
                ways_number[row_length] += ways_number[
                    row_length - block_start - block_length - 1
                ]
            ways_number[row_length] += 1
    return ways_number[length]
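# Project Euler 114: rows are tiled with red blocks of length >= 3 separated by
# at least one grey square; the problem statement gives 17 arrangements for a
# seven-unit row, so solution(7) is expected to return 17.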
if __name__ == "__main__":
print(f'''{solution() = }''')
| 38 |
"""simple docstring"""
import argparse
import json
import os
from collections import OrderedDict
import torch
from transformers import LukeConfig, LukeForMaskedLM, MLukeTokenizer, XLMRobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def convert_luke_checkpoint(checkpoint_path, metadata_path, entity_vocab_path, pytorch_dump_folder_path, model_size):
    # Load configuration defined in the metadata file
    with open(metadata_path) as metadata_file:
        metadata = json.load(metadata_file)
    config = LukeConfig(use_entity_aware_attention=True, **metadata["model_config"])
    # Load in the weights from the checkpoint_path
    state_dict = torch.load(checkpoint_path, map_location="cpu")["module"]
    # Load the entity vocab file
    entity_vocab = load_original_entity_vocab(entity_vocab_path)
    # add an entry for [MASK2]
    entity_vocab["[MASK2]"] = max(entity_vocab.values()) + 1
    config.entity_vocab_size += 1
    tokenizer = XLMRobertaTokenizer.from_pretrained(metadata["model_config"]["bert_model_name"])
    # Add special tokens to the token vocabulary for downstream tasks
    entity_token_1 = AddedToken("<ent>", lstrip=False, rstrip=False)
    entity_token_2 = AddedToken("<ent2>", lstrip=False, rstrip=False)
    tokenizer.add_special_tokens({"additional_special_tokens": [entity_token_1, entity_token_2]})
    config.vocab_size += 2
    print(f"Saving tokenizer to {pytorch_dump_folder_path}")
    tokenizer.save_pretrained(pytorch_dump_folder_path)
    with open(os.path.join(pytorch_dump_folder_path, "tokenizer_config.json"), "r") as f:
        tokenizer_config = json.load(f)
    tokenizer_config["tokenizer_class"] = "MLukeTokenizer"
    with open(os.path.join(pytorch_dump_folder_path, "tokenizer_config.json"), "w") as f:
        json.dump(tokenizer_config, f)
    with open(os.path.join(pytorch_dump_folder_path, MLukeTokenizer.vocab_files_names["entity_vocab_file"]), "w") as f:
        json.dump(entity_vocab, f)
    tokenizer = MLukeTokenizer.from_pretrained(pytorch_dump_folder_path)
    # Initialize the embeddings of the special tokens
    ent_init_index = tokenizer.convert_tokens_to_ids(["@"])[0]
    ent2_init_index = tokenizer.convert_tokens_to_ids(["#"])[0]
    word_emb = state_dict["embeddings.word_embeddings.weight"]
    ent_emb = word_emb[ent_init_index].unsqueeze(0)
    ent2_emb = word_emb[ent2_init_index].unsqueeze(0)
    state_dict["embeddings.word_embeddings.weight"] = torch.cat([word_emb, ent_emb, ent2_emb])
    # add special tokens for 'entity_predictions.bias'
    for bias_name in ["lm_head.decoder.bias", "lm_head.bias"]:
        decoder_bias = state_dict[bias_name]
        ent_decoder_bias = decoder_bias[ent_init_index].unsqueeze(0)
        ent2_decoder_bias = decoder_bias[ent2_init_index].unsqueeze(0)
        state_dict[bias_name] = torch.cat([decoder_bias, ent_decoder_bias, ent2_decoder_bias])
    # Initialize the query layers of the entity-aware self-attention mechanism
    for layer_index in range(config.num_hidden_layers):
        for matrix_name in ["query.weight", "query.bias"]:
            prefix = f"encoder.layer.{layer_index}.attention.self."
            state_dict[prefix + "w2e_" + matrix_name] = state_dict[prefix + matrix_name]
            state_dict[prefix + "e2w_" + matrix_name] = state_dict[prefix + matrix_name]
            state_dict[prefix + "e2e_" + matrix_name] = state_dict[prefix + matrix_name]
    # Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
    entity_emb = state_dict["entity_embeddings.entity_embeddings.weight"]
    entity_mask_emb = entity_emb[entity_vocab["[MASK]"]].unsqueeze(0)
    state_dict["entity_embeddings.entity_embeddings.weight"] = torch.cat([entity_emb, entity_mask_emb])
    # add [MASK2] for 'entity_predictions.bias'
    entity_prediction_bias = state_dict["entity_predictions.bias"]
    entity_mask_bias = entity_prediction_bias[entity_vocab["[MASK]"]].unsqueeze(0)
    state_dict["entity_predictions.bias"] = torch.cat([entity_prediction_bias, entity_mask_bias])
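    # At this point the checkpoint has been widened: new word embeddings for
    # <ent>/<ent2>, per-layer copies of the query projections for the
    # word-to-entity/entity-to-word/entity-to-entity attention streams, and a
    # [MASK2] row in both entity tables.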
    model = LukeForMaskedLM(config=config).eval()
    state_dict.pop("entity_predictions.decoder.weight")
    state_dict.pop("lm_head.decoder.weight")
    state_dict.pop("lm_head.decoder.bias")
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if not (key.startswith("lm_head") or key.startswith("entity_predictions")):
            new_state_dict["luke." + key] = state_dict[key]
        else:
            new_state_dict[key] = state_dict[key]
    missing_keys, unexpected_keys = model.load_state_dict(new_state_dict, strict=False)
    if set(unexpected_keys) != {"luke.embeddings.position_ids"}:
        raise ValueError(f"Unexpected unexpected_keys: {unexpected_keys}")
    if set(missing_keys) != {
        "lm_head.decoder.weight",
        "lm_head.decoder.bias",
        "entity_predictions.decoder.weight",
    }:
        raise ValueError(f"Unexpected missing_keys: {missing_keys}")
    model.tie_weights()
    assert (model.luke.embeddings.word_embeddings.weight == model.lm_head.decoder.weight).all()
    assert (model.luke.entity_embeddings.entity_embeddings.weight == model.entity_predictions.decoder.weight).all()
    # Check outputs
    tokenizer = MLukeTokenizer.from_pretrained(pytorch_dump_folder_path, task="entity_classification")
    text = "ISO 639-3 uses the code fas for the dialects spoken across Iran and アフガニスタン (Afghanistan)."
    span = (0, 9)
    encoding = tokenizer(text, entity_spans=[span], return_tensors="pt")
    outputs = model(**encoding)
    # Verify word hidden states
    if model_size == "large":
        raise NotImplementedError
    else:  # base
        expected_shape = torch.Size((1, 33, 768))
        expected_slice = torch.tensor(
            [[0.0892, 0.0596, -0.2819], [0.0134, 0.1199, 0.0573], [-0.0169, 0.0927, 0.0644]]
        )
    if not (outputs.last_hidden_state.shape == expected_shape):
        raise ValueError(
            f"Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}"
        )
    if not torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4):
        raise ValueError
    # Verify entity hidden states
    if model_size == "large":
        raise NotImplementedError
    else:  # base
        expected_shape = torch.Size((1, 1, 768))
        expected_slice = torch.tensor([[-0.1482, 0.0609, 0.0322]])
    if not (outputs.entity_last_hidden_state.shape == expected_shape):
        raise ValueError(
            f"Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is"
            f" {expected_shape}"
        )
    if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3], expected_slice, atol=1e-4):
        raise ValueError
    # Verify masked word/entity prediction
    tokenizer = MLukeTokenizer.from_pretrained(pytorch_dump_folder_path)
    text = "Tokyo is the capital of <mask>."
    span = (24, 30)
    encoding = tokenizer(text, entity_spans=[span], return_tensors="pt")
    outputs = model(**encoding)
    input_ids = encoding["input_ids"][0].tolist()
    mask_position_id = input_ids.index(tokenizer.convert_tokens_to_ids("<mask>"))
    predicted_id = outputs.logits[0][mask_position_id].argmax(dim=-1)
    assert "Japan" == tokenizer.decode(predicted_id)
    predicted_entity_id = outputs.entity_logits[0][0].argmax().item()
    multilingual_predicted_entities = [
        entity for entity, entity_id in tokenizer.entity_vocab.items() if entity_id == predicted_entity_id
    ]
    assert [e for e in multilingual_predicted_entities if e.startswith("en:")][0] == "en:Japan"
    # Finally, save our PyTorch model and tokenizer
    print("Saving PyTorch model to {}".format(pytorch_dump_folder_path))
    model.save_pretrained(pytorch_dump_folder_path)
def load_original_entity_vocab(entity_vocab_path):
    SPECIAL_TOKENS = ["[MASK]", "[PAD]", "[UNK]"]
    data = [json.loads(line) for line in open(entity_vocab_path)]
    new_mapping = {}
    for entry in data:
        entity_id = entry["id"]
        for entity_name, language in entry["entities"]:
            if entity_name in SPECIAL_TOKENS:
                new_mapping[entity_name] = entity_id
                break
            new_mapping[f"{language}:{entity_name}"] = entity_id
    return new_mapping
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''--checkpoint_path''', type=str, help='''Path to a pytorch_model.bin file.''')
parser.add_argument(
'''--metadata_path''', default=None, type=str, help='''Path to a metadata.json file, defining the configuration.'''
)
parser.add_argument(
'''--entity_vocab_path''',
default=None,
type=str,
help='''Path to an entity_vocab.tsv file, containing the entity vocabulary.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to where to dump the output PyTorch model.'''
)
parser.add_argument(
'''--model_size''', default='''base''', type=str, choices=['''base''', '''large'''], help='''Size of the model to be converted.'''
)
    args = parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
)
| 38 | 1 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_dpt import DPTImageProcessor
logger = logging.get_logger(__name__)
class DPTFeatureExtractor(DPTImageProcessor):
    def __init__(self, *args, **kwargs):
        """simple docstring"""
        warnings.warn(
            "The class DPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use DPTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 75 |
"""simple docstring"""
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_multi_gpu
from accelerate.utils import patch_environment
class MultiGPUTester(unittest.TestCase):
    '''simple docstring'''

    def setUp(self):
        mod_file = inspect.getfile(accelerate.test_utils)
        self.test_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "test_script.py"])
        self.data_loop_file_path = os.path.sep.join(
            mod_file.split(os.path.sep)[:-1] + ["scripts", "test_distributed_data_loop.py"])
        self.operation_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "test_ops.py"])

    @require_multi_gpu
    def test_multi_gpu(self):
        print(f"Found {torch.cuda.device_count()} devices.")
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.test_file_path]
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())

    @require_multi_gpu
    def test_multi_gpu_ops(self):
        print(f"Found {torch.cuda.device_count()} devices.")
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.operation_file_path]
        print(f"Command: {cmd}")
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())

    @require_multi_gpu
    def test_pad_across_processes(self):
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", inspect.getfile(self.__class__)]
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())

    @require_multi_gpu
    def test_distributed_data_loop(self):
        print(f"Found {torch.cuda.device_count()} devices, using 2 devices only")
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.data_loop_file_path]
        with patch_environment(omp_num_threads=1, cuda_visible_devices="0,1"):
            execute_subprocess_async(cmd, env=os.environ.copy())
if __name__ == "__main__":
    accelerator = Accelerator()
    shape = (accelerator.state.process_index + 2, 10)
    tensor = torch.randint(0, 10, shape).to(accelerator.device)

    error_msg = ""

    tensor1 = accelerator.pad_across_processes(tensor)
    if tensor1.shape[0] != accelerator.state.num_processes + 1:
        error_msg += f"Found shape {tensor1.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
    if not torch.equal(tensor1[: accelerator.state.process_index + 2], tensor):
        error_msg += "Tensors have different values."
    if not torch.all(tensor1[accelerator.state.process_index + 2 :] == 0):
        error_msg += "Padding was not done with the right value (0)."

    tensor2 = accelerator.pad_across_processes(tensor, pad_first=True)
    if tensor2.shape[0] != accelerator.state.num_processes + 1:
        error_msg += f"Found shape {tensor2.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
    index = accelerator.state.num_processes - accelerator.state.process_index - 1
    if not torch.equal(tensor2[index:], tensor):
        error_msg += "Tensors have different values."
    if not torch.all(tensor2[:index] == 0):
        error_msg += "Padding was not done with the right value (0)."

    # Raise error at the end to make sure we don't stop at the first failure.
    if len(error_msg) > 0:
        raise ValueError(error_msg)
| 45 | 0 |
# Copyright 2021 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from packaging import version
from .. import __version__
from .constants import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD
from .doc import (
add_code_sample_docstrings,
add_end_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
copy_func,
replace_return_docstrings,
)
from .generic import (
ContextManagers,
ExplicitEnum,
ModelOutput,
PaddingStrategy,
TensorType,
add_model_info_to_auto_map,
cached_property,
can_return_loss,
expand_dims,
find_labels,
flatten_dict,
infer_framework,
is_jax_tensor,
is_numpy_array,
is_tensor,
is_tf_symbolic_tensor,
is_tf_tensor,
is_torch_device,
is_torch_dtype,
is_torch_tensor,
reshape,
squeeze,
strtobool,
tensor_size,
to_numpy,
to_py_obj,
transpose,
working_or_temp_dir,
)
from .hub import (
CLOUDFRONT_DISTRIB_PREFIX,
DISABLE_TELEMETRY,
HF_MODULES_CACHE,
HUGGINGFACE_CO_PREFIX,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
PYTORCH_PRETRAINED_BERT_CACHE,
PYTORCH_TRANSFORMERS_CACHE,
S3_BUCKET_PREFIX,
TRANSFORMERS_CACHE,
TRANSFORMERS_DYNAMIC_MODULE_NAME,
EntryNotFoundError,
PushToHubMixin,
RepositoryNotFoundError,
RevisionNotFoundError,
cached_file,
default_cache_path,
define_sagemaker_information,
download_url,
extract_commit_hash,
get_cached_models,
get_file_from_repo,
get_full_repo_name,
has_file,
http_user_agent,
is_offline_mode,
is_remote_url,
move_cache,
send_example_telemetry,
try_to_load_from_cache,
)
from .import_utils import (
ENV_VARS_TRUE_AND_AUTO_VALUES,
ENV_VARS_TRUE_VALUES,
TORCH_FX_REQUIRED_VERSION,
USE_JAX,
USE_TF,
USE_TORCH,
DummyObject,
OptionalDependencyNotAvailable,
_LazyModule,
ccl_version,
direct_transformers_import,
get_torch_version,
is_accelerate_available,
is_apex_available,
is_bitsandbytes_available,
    is_bs4_available,
is_coloredlogs_available,
is_cython_available,
is_datasets_available,
is_decord_available,
    is_detectron2_available,
is_faiss_available,
is_flax_available,
is_ftfy_available,
is_in_notebook,
is_ipex_available,
is_jieba_available,
is_jumanpp_available,
is_kenlm_available,
is_keras_nlp_available,
is_librosa_available,
is_natten_available,
is_ninja_available,
is_onnx_available,
is_openai_available,
is_optimum_available,
is_pandas_available,
is_peft_available,
is_phonemizer_available,
is_protobuf_available,
is_psutil_available,
    is_py3nvml_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytest_available,
is_pytorch_quantization_available,
is_rjieba_available,
is_sacremoses_available,
is_safetensors_available,
is_sagemaker_dp_enabled,
is_sagemaker_mp_enabled,
is_scipy_available,
is_sentencepiece_available,
is_seqio_available,
is_sklearn_available,
is_soundfile_availble,
is_spacy_available,
is_speech_available,
is_sudachi_available,
is_tensorflow_probability_available,
is_tensorflow_text_available,
    is_tf2onnx_available,
is_tf_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
    is_torch_bf16_available,
    is_torch_bf16_cpu_available,
    is_torch_bf16_gpu_available,
is_torch_compile_available,
is_torch_cuda_available,
is_torch_fx_available,
is_torch_fx_proxy,
is_torch_mps_available,
is_torch_neuroncore_available,
is_torch_tensorrt_fx_available,
    is_torch_tf32_available,
is_torch_tpu_available,
is_torchaudio_available,
is_torchdistx_available,
is_torchdynamo_available,
is_torchvision_available,
is_training_run_on_sagemaker,
is_vision_available,
requires_backends,
torch_only_method,
)
lowercase : Dict = """pytorch_model.bin"""
lowercase : str = """pytorch_model.bin.index.json"""
lowercase : Any = """adapter_config.json"""
lowercase : List[str] = """adapter_model.bin"""
lowercase : List[Any] = """adapter_model.safetensors"""
lowercase : Optional[Any] = """tf_model.h5"""
lowercase : Optional[Any] = """tf_model.h5.index.json"""
lowercase : Optional[Any] = """model.ckpt"""
lowercase : Optional[Any] = """flax_model.msgpack"""
lowercase : Optional[int] = """flax_model.msgpack.index.json"""
lowercase : Tuple = """model.safetensors"""
lowercase : Union[str, Any] = """model.safetensors.index.json"""
lowercase : Dict = """config.json"""
lowercase : Optional[Any] = """preprocessor_config.json"""
lowercase : Optional[int] = FEATURE_EXTRACTOR_NAME
lowercase : Union[str, Any] = """generation_config.json"""
lowercase : str = """modelcard.json"""
lowercase : str = """▁"""
lowercase : List[str] = SENTENCEPIECE_UNDERLINE # Kept for backward compatibility
lowercase : List[Any] = [
[[0, 1, 0, 1], [1, 0, 0, 1]]
] * 2 # Needs to have 0s and 1s only since XLM uses it for langs too.
lowercase : Dict = [[7, 6, 0, 0, 1], [1, 2, 3, 0, 0], [0, 0, 0, 4, 5]]
lowercase : List[str] = [[1, 1, 1, 1, 1], [1, 1, 1, 0, 0], [0, 0, 0, 1, 1]]
def check_min_version(min_version):
    if version.parse(__version__) < version.parse(min_version):
        if "dev" in min_version:
            error_message = (
                "This example requires a source install from HuggingFace Transformers (see "
                "`https://huggingface.co/docs/transformers/installation#install-from-source`),"
            )
        else:
            error_message = f"This example requires a minimum version of {min_version},"
        error_message += f" but the version found is {__version__}.\n"
        raise ImportError(
            error_message
            + "Check out https://github.com/huggingface/transformers/tree/main/examples#important-note for the examples corresponding to other "
            "versions of HuggingFace Transformers."
        )
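# Example scripts call this guard at import time, e.g. check_min_version("4.31.0")
# in the run_mim example above, so a stale install fails fast with a clear error.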
| 225 |
from __future__ import annotations
import collections
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import is_tf_available, is_vision_available
from ...test_modeling_tf_common import floats_tensor, ids_tensor, random_attention_mask
from ..bert.test_modeling_tf_bert import TFBertModelTester
from ..clip.test_modeling_tf_clip import TFCLIPVisionModelTester
from ..deit.test_modeling_tf_deit import TFDeiTModelTester
from ..roberta.test_modeling_tf_roberta import TFRobertaModelTester
from ..vit.test_modeling_tf_vit import TFViTModelTester
if is_tf_available():
from transformers import (
TFBertModel,
TFCLIPVisionModel,
TFDeiTModel,
TFRobertaModel,
TFVisionTextDualEncoderModel,
TFViTModel,
VisionTextDualEncoderConfig,
)
if is_vision_available():
from PIL import Image
from transformers import VisionTextDualEncoderProcessor
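# The mixin below holds the shared checks; each concrete test class only
# supplies its vision/text backbones and matching dummy inputs.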
def to_2tuple(x):
    if isinstance(x, collections.abc.Iterable):
        return x
    return (x, x)
@require_tf
class TFVisionTextDualEncoderMixin:
    """simple docstring"""

    def get_vision_text_model(self, vision_config, text_config):
        pass

    def prepare_config_and_inputs(self):
        pass

    def get_pretrained_model_and_inputs(self):
        pass

    def check_model_from_pretrained_configs(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
    ):
        config = VisionTextDualEncoderConfig.from_vision_text_configs(vision_config, text_config)
        model = TFVisionTextDualEncoderModel(config)
        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)
        self.assertEqual(output["text_embeds"].shape, (input_ids.shape[0], config.projection_dim))
        self.assertEqual(output["image_embeds"].shape, (pixel_values.shape[0], config.projection_dim))

    def check_vision_text_dual_encoder_model(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
    ):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        model = TFVisionTextDualEncoderModel(vision_model=vision_model, text_model=text_model)
        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)
        self.assertEqual(output["text_embeds"].shape, (input_ids.shape[0], model.config.projection_dim))
        self.assertEqual(output["image_embeds"].shape, (pixel_values.shape[0], model.config.projection_dim))

    def check_vision_text_dual_encoder_from_pretrained(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
    ):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        kwargs = {"vision_model": vision_model, "text_model": text_model}
        model = TFVisionTextDualEncoderModel.from_vision_text_pretrained(**kwargs)
        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)
        self.assertEqual(output["text_embeds"].shape, (input_ids.shape[0], model.config.projection_dim))
        self.assertEqual(output["image_embeds"].shape, (pixel_values.shape[0], model.config.projection_dim))

    def check_save_load(self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        model = TFVisionTextDualEncoderModel(vision_model=vision_model, text_model=text_model)
        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)
        out_1 = output[0].numpy()
        with tempfile.TemporaryDirectory() as tmpdirname:
            model.save_pretrained(tmpdirname)
            model = TFVisionTextDualEncoderModel.from_pretrained(tmpdirname)
            after_output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)
            out_2 = after_output[0].numpy()
            max_diff = np.amax(np.abs(out_2 - out_1))
            self.assertLessEqual(max_diff, 1e-5)

    def check_vision_text_output_attention(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
    ):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        model = TFVisionTextDualEncoderModel(vision_model=vision_model, text_model=text_model)
        output = model(
            input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask, output_attentions=True
        )
        vision_attentions = output.vision_model_output.attentions
        self.assertEqual(len(vision_attentions), vision_config.num_hidden_layers)
        # in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
        image_size = to_2tuple(vision_model.config.image_size)
        patch_size = to_2tuple(vision_model.config.patch_size)
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        seq_len = num_patches + 1
        self.assertEqual(vision_attentions[0].shape[-3:], (vision_config.num_attention_heads, seq_len, seq_len))

        text_attentions = output.text_model_output.attentions
        self.assertEqual(len(text_attentions), text_config.num_hidden_layers)
        self.assertEqual(
            text_attentions[0].shape[-3:],
            (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]),
        )

    def assert_almost_equals(self, a, b, tol):
        diff = np.abs(a - b).max()
        self.assertLessEqual(diff, tol, f"Difference between torch and flax is {diff} (>= {tol}).")

    def test_vision_text_dual_encoder_model(self):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_vision_text_dual_encoder_model(**inputs_dict)

    def test_model_from_pretrained_configs(self):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_model_from_pretrained_configs(**inputs_dict)

    def test_vision_text_dual_encoder_from_pretrained(self):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_vision_text_dual_encoder_from_pretrained(**inputs_dict)

    def test_save_load(self):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_save_load(**inputs_dict)

    def test_vision_text_output_attention(self):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_vision_text_output_attention(**inputs_dict)

    @slow
    def test_real_model_save_load_from_pretrained(self):
        model_2, inputs = self.get_pretrained_model_and_inputs()
        outputs = model_2(**inputs)
        out_2 = outputs[0].numpy()
        with tempfile.TemporaryDirectory() as tmp_dirname:
            model_2.save_pretrained(tmp_dirname)
            model_1 = TFVisionTextDualEncoderModel.from_pretrained(tmp_dirname)
            after_outputs = model_1(**inputs)
            out_1 = after_outputs[0].numpy()
            max_diff = np.amax(np.abs(out_1 - out_2))
            self.assertLessEqual(max_diff, 1e-5)
@require_tf
class TFViTBertModelTest(TFVisionTextDualEncoderMixin, unittest.TestCase):
    """simple docstring"""

    def get_pretrained_model_and_inputs(self):
        model = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
            "hf-internal-testing/tiny-random-vit", "hf-internal-testing/tiny-random-bert"
        )
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.vision_model.config.num_channels,
                model.vision_model.config.image_size,
                model.vision_model.config.image_size,
            ]
        )
        input_ids = ids_tensor([batch_size, 4], model.text_model.config.vocab_size)
        attention_mask = random_attention_mask([batch_size, 4])
        inputs = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}
        return model, inputs

    def get_vision_text_model(self, vision_config, text_config):
        vision_model = TFViTModel(vision_config, name="vision_model")
        text_model = TFBertModel(text_config, name="text_model")
        return vision_model, text_model

    def prepare_config_and_inputs(self):
        vit_model_tester = TFViTModelTester(self)
        bert_model_tester = TFBertModelTester(self)
        vision_config_and_inputs = vit_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()

        vision_config, pixel_values, _ = vision_config_and_inputs

        (
            text_config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = text_config_and_inputs

        return {
            "text_config": text_config,
            "vision_config": vision_config,
            "pixel_values": pixel_values,
            "attention_mask": input_mask,
            "input_ids": input_ids,
            "text_token_type_ids": token_type_ids,
            "text_sequence_labels": sequence_labels,
            "text_token_labels": token_labels,
            "text_choice_labels": choice_labels,
        }
@require_tf
class TFDeiTRobertaModelTest(TFVisionTextDualEncoderMixin, unittest.TestCase):
    """simple docstring"""

    def get_pretrained_model_and_inputs(self):
        model = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
            "Rocketknight1/tiny-random-deit-tf", "hf-internal-testing/tiny-random-roberta"
        )
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.vision_model.config.num_channels,
                model.vision_model.config.image_size,
                model.vision_model.config.image_size,
            ]
        )
        input_ids = ids_tensor([batch_size, 4], model.text_model.config.vocab_size)
        attention_mask = random_attention_mask([batch_size, 4])
        inputs = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}
        return model, inputs

    def check_vision_text_output_attention(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
    ):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        model = TFVisionTextDualEncoderModel(vision_model=vision_model, text_model=text_model)
        output = model(
            input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask, output_attentions=True
        )
        vision_attentions = output.vision_model_output.attentions
        self.assertEqual(len(vision_attentions), vision_config.num_hidden_layers)
        # in DEiT, the seq_len equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
        image_size = to_2tuple(vision_model.config.image_size)
        patch_size = to_2tuple(vision_model.config.patch_size)
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        seq_len = num_patches + 2
        self.assertEqual(vision_attentions[0].shape[-3:], (vision_config.num_attention_heads, seq_len, seq_len))

        text_attentions = output.text_model_output.attentions
        self.assertEqual(len(text_attentions), text_config.num_hidden_layers)
        self.assertEqual(
            text_attentions[0].shape[-3:],
            (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]),
        )

    def get_vision_text_model(self, vision_config, text_config):
        vision_model = TFDeiTModel(vision_config, name="vision_model")
        text_model = TFRobertaModel(text_config, name="text_model")
        return vision_model, text_model

    def prepare_config_and_inputs(self):
        vit_model_tester = TFDeiTModelTester(self)
        bert_model_tester = TFRobertaModelTester(self)
        vision_config_and_inputs = vit_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()

        vision_config, pixel_values, _ = vision_config_and_inputs

        (
            text_config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = text_config_and_inputs

        return {
            "text_config": text_config,
            "vision_config": vision_config,
            "pixel_values": pixel_values,
            "attention_mask": input_mask,
            "input_ids": input_ids,
            "text_token_type_ids": token_type_ids,
            "text_sequence_labels": sequence_labels,
            "text_token_labels": token_labels,
            "text_choice_labels": choice_labels,
        }
@require_tf
class TFCLIPVisionBertModelTest(TFVisionTextDualEncoderMixin, unittest.TestCase):
    """simple docstring"""

    def get_pretrained_model_and_inputs(self):
        model = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
            "Rocketknight1/tiny-random-clip-tf", "hf-internal-testing/tiny-random-bert"
        )
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.vision_model.config.num_channels,
                model.vision_model.config.image_size,
                model.vision_model.config.image_size,
            ]
        )
        input_ids = ids_tensor([batch_size, 4], model.text_model.config.vocab_size)
        attention_mask = random_attention_mask([batch_size, 4])
        inputs = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}
        return model, inputs

    def get_vision_text_model(self, vision_config, text_config):
        vision_model = TFCLIPVisionModel(vision_config, name="vision_model")
        text_model = TFBertModel(text_config, name="text_model")
        return vision_model, text_model

    def prepare_config_and_inputs(self):
        clip_model_tester = TFCLIPVisionModelTester(self)
        bert_model_tester = TFBertModelTester(self)
        vision_config_and_inputs = clip_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()

        vision_config, pixel_values = vision_config_and_inputs

        (
            text_config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = text_config_and_inputs

        return {
            "text_config": text_config,
            "vision_config": vision_config,
            "pixel_values": pixel_values,
            "attention_mask": input_mask,
            "input_ids": input_ids,
            "text_token_type_ids": token_type_ids,
            "text_sequence_labels": sequence_labels,
            "text_token_labels": token_labels,
            "text_choice_labels": choice_labels,
        }
@require_vision
@require_tf
class TFVisionTextDualEncoderIntegrationTest(unittest.TestCase):
    """simple docstring"""

    @slow
    def test_inference(self):
        model = TFVisionTextDualEncoderModel.from_pretrained(
            "clip-italian/clip-italian", logit_scale_init_value=1.0, from_pt=True
        )
        processor = VisionTextDualEncoderProcessor.from_pretrained("clip-italian/clip-italian")
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        inputs = processor(
            text=["una foto di un gatto", "una foto di un cane"], images=image, padding=True, return_tensors="np"
        )
        outputs = model(**inputs)
        # verify the logits
        self.assertEqual(outputs.logits_per_image.shape, (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]))
        self.assertEqual(
            outputs.logits_per_text.shape,
            (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]),
        )
        expected_logits = np.array([[1.2284727, 0.3104122]])
        self.assertTrue(np.allclose(outputs.logits_per_image.numpy(), expected_logits, atol=1e-3))
| 225 | 1 |
import warnings
from typing import Dict
import numpy as np
from ..utils import ExplicitEnum, add_end_docstrings, is_tf_available, is_torch_available
from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
def sigmoid(_outputs):
    return 1.0 / (1.0 + np.exp(-_outputs))


def softmax(_outputs):
    maxes = np.max(_outputs, axis=-1, keepdims=True)
    shifted_exp = np.exp(_outputs - maxes)
    return shifted_exp / shifted_exp.sum(axis=-1, keepdims=True)
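# Subtracting the row-wise max before exponentiating keeps np.exp from
# overflowing on large logits; the normalized softmax output is unchanged.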
class ClassificationFunction(ExplicitEnum):
    SIGMOID = "sigmoid"
    SOFTMAX = "softmax"
    NONE = "none"
@add_end_docstrings(
UpperCamelCase_ , R"""
return_all_scores (`bool`, *optional*, defaults to `False`):
Whether to return all prediction scores or just the one of the predicted class.
function_to_apply (`str`, *optional*, defaults to `\"default\"`):
The function to apply to the model outputs in order to retrieve the scores. Accepts four different values:
- `\"default\"`: if the model has a single label, will apply the sigmoid function on the output. If the model
has several labels, will apply the softmax function on the output.
- `\"sigmoid\"`: Applies the sigmoid function on the output.
- `\"softmax\"`: Applies the softmax function on the output.
- `\"none\"`: Does not apply any function on the output.
""" , )
class TextClassificationPipeline(Pipeline):
    return_all_scores = False
    function_to_apply = ClassificationFunction.NONE

    def __init__(self, **kwargs):
        """simple docstring"""
        super().__init__(**kwargs)
        self.check_model_type(
            TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
            if self.framework == "tf"
            else MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
        )

    def _sanitize_parameters(self, return_all_scores=None, function_to_apply=None, top_k="", **tokenizer_kwargs):
        """simple docstring"""
        preprocess_params = tokenizer_kwargs
        postprocess_params = {}
        if hasattr(self.model.config, "return_all_scores") and return_all_scores is None:
            return_all_scores = self.model.config.return_all_scores
        if isinstance(top_k, int) or top_k is None:
            postprocess_params["top_k"] = top_k
            postprocess_params["_legacy"] = False
        elif return_all_scores is not None:
            warnings.warn(
                "`return_all_scores` is now deprecated, if want a similar functionality use `top_k=None` instead of"
                " `return_all_scores=True` or `top_k=1` instead of `return_all_scores=False`.",
                UserWarning,
            )
            if return_all_scores:
                postprocess_params["top_k"] = None
            else:
                postprocess_params["top_k"] = 1
        if isinstance(function_to_apply, str):
            function_to_apply = ClassificationFunction[function_to_apply.upper()]
        if function_to_apply is not None:
            postprocess_params["function_to_apply"] = function_to_apply
        return preprocess_params, {}, postprocess_params

    def __call__(self, *args, **kwargs):
        """simple docstring"""
        result = super().__call__(*args, **kwargs)
        # TODO try and retrieve it in a nicer way from _sanitize_parameters.
        _legacy = "top_k" not in kwargs
        if isinstance(args[0], str) and _legacy:
            # This pipeline is odd, and return a list when single item is run
            return [result]
        else:
            return result
def __SCREAMING_SNAKE_CASE( self , _A , **_A ):
"""simple docstring"""
__lowerCAmelCase = self.framework
if isinstance(_A , _A ):
return self.tokenizer(**_A , return_tensors=_A , **_A )
elif isinstance(_A , _A ) and len(_A ) == 1 and isinstance(inputs[0] , _A ) and len(inputs[0] ) == 2:
# It used to be valid to use a list of list of list for text pairs, keeping this path for BC
return self.tokenizer(
text=inputs[0][0] , text_pair=inputs[0][1] , return_tensors=_A , **_A )
elif isinstance(_A , _A ):
# This is likely an invalid usage of the pipeline attempting to pass text pairs.
raise ValueError(
"The pipeline received invalid inputs, if you are trying to send text pairs, you can try to send a"
" dictionary `{\"text\": \"My text\", \"text_pair\": \"My pair\"}` in order to send a text pair." )
return self.tokenizer(_A , return_tensors=_A , **_A )
def __SCREAMING_SNAKE_CASE( self , _A ):
"""simple docstring"""
return self.model(**_A )
def __SCREAMING_SNAKE_CASE( self , _A , _A=None , _A=1 , _A=True ):
"""simple docstring"""
if function_to_apply is None:
if self.model.config.problem_type == "multi_label_classification" or self.model.config.num_labels == 1:
__lowerCAmelCase = ClassificationFunction.SIGMOID
elif self.model.config.problem_type == "single_label_classification" or self.model.config.num_labels > 1:
__lowerCAmelCase = ClassificationFunction.SOFTMAX
elif hasattr(self.model.config , "function_to_apply" ) and function_to_apply is None:
__lowerCAmelCase = self.model.config.function_to_apply
else:
__lowerCAmelCase = ClassificationFunction.NONE
__lowerCAmelCase = model_outputs["logits"][0]
__lowerCAmelCase = outputs.numpy()
if function_to_apply == ClassificationFunction.SIGMOID:
__lowerCAmelCase = sigmoid(_A )
elif function_to_apply == ClassificationFunction.SOFTMAX:
__lowerCAmelCase = softmax(_A )
elif function_to_apply == ClassificationFunction.NONE:
__lowerCAmelCase = outputs
else:
raise ValueError(f"""Unrecognized `function_to_apply` argument: {function_to_apply}""" )
if top_k == 1 and _legacy:
return {"label": self.model.config.idalabel[scores.argmax().item()], "score": scores.max().item()}
__lowerCAmelCase = [
{"label": self.model.config.idalabel[i], "score": score.item()} for i, score in enumerate(_A )
]
if not _legacy:
            dict_scores.sort(key=lambda x : x["score"] , reverse=_A )
if top_k is not None:
__lowerCAmelCase = dict_scores[:top_k]
return dict_scores
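# Note: the postprocess step above calls `sigmoid` and `softmax` helpers that
# are not defined in this snippet. A minimal, self-contained numpy sketch of
# the conventional numerically stable definitions (an illustrative assumption,
# not necessarily the exact library code):
import numpy as np

def sigmoid(outputs):
    # element-wise logistic function
    return 1.0 / (1.0 + np.exp(-outputs))

def softmax(outputs):
    # subtract the row max before exponentiating for numerical stability
    maxes = np.max(outputs, axis=-1, keepdims=True)
    shifted_exp = np.exp(outputs - maxes)
    return shifted_exp / shifted_exp.sum(axis=-1, keepdims=True)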
| 92 |
def solution(limit=28123):
    # Project Euler 23: sum of all positive integers which cannot be written as
    # the sum of two abundant numbers; every integer above 28123 can be, so
    # that bound is the default.
    sum_divs = [1] * (limit + 1)  # proper-divisor sums; 1 divides everything

    for i in range(2, int(limit**0.5) + 1):
        sum_divs[i * i] += i
        for k in range(i + 1, limit // i + 1):
            sum_divs[k * i] += k + i

    abundants = set()
    res = 0

    for n in range(1, limit + 1):
        if sum_divs[n] > n:
            abundants.add(n)

        if not any((n - a in abundants) for a in abundants):
            res += n

    return res


if __name__ == "__main__":
    print(solution())
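# Sanity check for the divisor-sum sieve above (illustrative): 12 is the
# smallest abundant number, since 1 + 2 + 3 + 4 + 6 = 16 > 12, while a perfect
# number such as 28 is not abundant.
def proper_divisor_sum(n: int) -> int:
    return sum(d for d in range(1, n) if n % d == 0)

assert proper_divisor_sum(12) == 16  # abundant
assert proper_divisor_sum(28) == 28  # perfect, not abundant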
| 257 | 0 |
from __future__ import annotations


class BoyerMooreSearch:
    """String search using the Boyer-Moore bad-character heuristic."""

    def __init__(self, text: str, pattern: str) -> None:
        self.text, self.pattern = text, pattern
        self.textLen, self.patLen = len(text), len(pattern)

    def match_in_pattern(self, char: str) -> int:
        # index of the last occurrence of `char` in the pattern, -1 if absent
        for i in range(self.patLen - 1, -1, -1):
            if char == self.pattern[i]:
                return i
        return -1

    def mismatch_in_text(self, current_pos: int) -> int:
        # rightmost text index that mismatches the pattern for the window
        # starting at current_pos, -1 if the whole window matches
        for i in range(self.patLen - 1, -1, -1):
            if self.pattern[i] != self.text[current_pos + i]:
                return current_pos + i
        return -1

    def bad_character_heuristic(self) -> list[int]:
        # searches pattern in text and returns index positions
        positions = []
        for i in range(self.textLen - self.patLen + 1):
            mismatch_index = self.mismatch_in_text(i)
            if mismatch_index == -1:
                positions.append(i)
            else:
                match_index = self.match_in_pattern(self.text[mismatch_index])
                i = (
                    mismatch_index - match_index
                )  # shifting index lgtm [py/multiple-definition]
        return positions


text = "ABAABA"
pattern = "AB"
bms = BoyerMooreSearch(text, pattern)
positions = bms.bad_character_heuristic()

if len(positions) == 0:
    print("No match found")
else:
    print("Pattern found in following positions: ")
    print(positions)
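# Worked example (illustrative): for text "ABAABA" and pattern "AB", the
# windows starting at indices 0 and 3 match, so the demo above prints [0, 3].
# On a mismatch, `match_in_pattern` finds the last occurrence of the offending
# text character in the pattern, and `mismatch_index - match_index` is the
# bad-character shift that classic Boyer-Moore would use to advance the window.
assert BoyerMooreSearch("ABAABA", "AB").bad_character_heuristic() == [0, 3]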
| 134 |
def greatest_common_divisor(x, y):
    # Euclid's algorithm
    return x if y == 0 else greatest_common_divisor(y, x % y)


def lcm(x, y):
    return (x * y) // greatest_common_divisor(x, y)


def solution(n=20):
    # Project Euler 5: smallest positive number evenly divisible by all of 1..n
    g = 1
    for i in range(1, n + 1):
        g = lcm(g, i)
    return g


if __name__ == "__main__":
    print(f'''{solution() = }''')
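# Quick check (illustrative): the smallest number evenly divisible by every
# integer from 1 to 10 is 2520, the worked example in Project Euler problem 5.
assert solution(10) == 2520
assert greatest_common_divisor(48, 18) == 6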
| 134 | 1 |
import gc
import unittest
from parameterized import parameterized
from diffusers import FlaxUNetaDConditionModel
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import load_hf_numpy, require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
@slow
@require_flax
class __UpperCamelCase ( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase__ ( self : Tuple , _A : Any , _A : Optional[int] ):
"""simple docstring"""
return F'''gaussian_noise_s={seed}_shape={"_".join([str(_A ) for s in shape] )}.npy'''
def UpperCAmelCase__ ( self : Tuple ):
"""simple docstring"""
super().tearDown()
gc.collect()
def UpperCAmelCase__ ( self : Optional[int] , _A : Optional[int]=0 , _A : Union[str, Any]=(4, 4, 64, 64) , _A : List[str]=False ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Dict = jnp.bfloataa if fpaa else jnp.floataa
__SCREAMING_SNAKE_CASE : int = jnp.array(load_hf_numpy(self.get_file_format(_A , _A ) ) , dtype=_A )
return image
def UpperCAmelCase__ ( self : List[Any] , _A : Dict=False , _A : List[str]="CompVis/stable-diffusion-v1-4" ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Optional[Any] = jnp.bfloataa if fpaa else jnp.floataa
__SCREAMING_SNAKE_CASE : Tuple = '''bf16''' if fpaa else None
__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : str = FlaxUNetaDConditionModel.from_pretrained(
_A , subfolder='''unet''' , dtype=_A , revision=_A )
return model, params
def UpperCAmelCase__ ( self : Any , _A : Any=0 , _A : List[Any]=(4, 77, 768) , _A : Tuple=False ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Dict = jnp.bfloataa if fpaa else jnp.floataa
__SCREAMING_SNAKE_CASE : Optional[Any] = jnp.array(load_hf_numpy(self.get_file_format(_A , _A ) ) , dtype=_A )
return hidden_states
@parameterized.expand(
[
# fmt: off
[83, 4, [-0.23_23, -0.13_04, 0.08_13, -0.30_93, -0.09_19, -0.15_71, -0.11_25, -0.58_06]],
[17, 0.55, [-0.08_31, -0.24_43, 0.09_01, -0.09_19, 0.33_96, 0.01_03, -0.37_43, 0.07_01]],
[8, 0.89, [-0.48_63, 0.08_59, 0.08_75, -0.16_58, 0.91_99, -0.01_14, 0.48_39, 0.46_39]],
[3, 1000, [-0.56_49, 0.24_02, -0.55_18, 0.12_48, 1.13_28, -0.24_43, -0.03_25, -1.00_78]],
# fmt: on
] )
def UpperCAmelCase__ ( self : Union[str, Any] , _A : int , _A : Dict , _A : Optional[Any] ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Optional[int] = self.get_unet_model(model_id='''CompVis/stable-diffusion-v1-4''' , fpaa=_A )
__SCREAMING_SNAKE_CASE : int = self.get_latents(_A , fpaa=_A )
__SCREAMING_SNAKE_CASE : str = self.get_encoder_hidden_states(_A , fpaa=_A )
__SCREAMING_SNAKE_CASE : Optional[int] = model.apply(
{'''params''': params} , _A , jnp.array(_A , dtype=jnp.intaa ) , encoder_hidden_states=_A , ).sample
assert sample.shape == latents.shape
__SCREAMING_SNAKE_CASE : Optional[Any] = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten()) ) , dtype=jnp.floataa )
__SCREAMING_SNAKE_CASE : Optional[Any] = jnp.array(_A , dtype=jnp.floataa )
        # Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, on the same hardware
assert jnp.allclose(_A , _A , atol=1e-2 )
@parameterized.expand(
[
# fmt: off
[83, 4, [0.15_14, 0.08_07, 0.16_24, 0.10_16, -0.18_96, 0.02_63, 0.06_77, 0.23_10]],
[17, 0.55, [0.11_64, -0.02_16, 0.01_70, 0.15_89, -0.31_20, 0.10_05, -0.05_81, -0.14_58]],
[8, 0.89, [-0.17_58, -0.01_69, 0.10_04, -0.14_11, 0.13_12, 0.11_03, -0.19_96, 0.21_39]],
[3, 1000, [0.12_14, 0.03_52, -0.07_31, -0.15_62, -0.09_94, -0.09_06, -0.23_40, -0.05_39]],
# fmt: on
] )
def UpperCAmelCase__ ( self : List[Any] , _A : Union[str, Any] , _A : List[Any] , _A : Any ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : List[str] = self.get_unet_model(model_id='''stabilityai/stable-diffusion-2''' , fpaa=_A )
__SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_latents(_A , shape=(4, 4, 96, 96) , fpaa=_A )
__SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_encoder_hidden_states(_A , shape=(4, 77, 1024) , fpaa=_A )
__SCREAMING_SNAKE_CASE : Dict = model.apply(
{'''params''': params} , _A , jnp.array(_A , dtype=jnp.intaa ) , encoder_hidden_states=_A , ).sample
assert sample.shape == latents.shape
__SCREAMING_SNAKE_CASE : int = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten()) ) , dtype=jnp.floataa )
__SCREAMING_SNAKE_CASE : List[Any] = jnp.array(_A , dtype=jnp.floataa )
# Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, on the same hardware
assert jnp.allclose(_A , _A , atol=1e-2 )
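# The assertions above compare a small deterministic slice of the UNet output
# against hard-coded reference values. A standalone sketch of that pattern
# (illustrative; `assert_slice_close` is a hypothetical helper, not part of
# the test class):
import jax
import jax.numpy as jnp

def assert_slice_close(sample, expected, atol=1e-2):
    output_slice = jnp.asarray(jax.device_get(sample[-1, -2:, -2:, :2].flatten()), dtype=jnp.float32)
    expected_slice = jnp.array(expected, dtype=jnp.float32)
    assert jnp.allclose(output_slice, expected_slice, atol=atol)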
| 303 |
import unittest
import numpy as np
import torch
from torch import nn
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModelWithProjection,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import KandinskyVaaPriorPipeline, PriorTransformer, UnCLIPScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import enable_full_determinism, skip_mps
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class __UpperCamelCase ( lowerCAmelCase__ , unittest.TestCase ):
"""simple docstring"""
lowerCAmelCase_ = KandinskyVaaPriorPipeline
lowerCAmelCase_ = ['''prompt''']
lowerCAmelCase_ = ['''prompt''', '''negative_prompt''']
lowerCAmelCase_ = [
'''num_images_per_prompt''',
'''generator''',
'''num_inference_steps''',
'''latents''',
'''negative_prompt''',
'''guidance_scale''',
'''output_type''',
'''return_dict''',
]
lowerCAmelCase_ = False
@property
def UpperCAmelCase__ ( self : int ):
"""simple docstring"""
return 32
@property
def UpperCAmelCase__ ( self : List[str] ):
"""simple docstring"""
return 32
@property
def UpperCAmelCase__ ( self : Dict ):
"""simple docstring"""
return self.time_input_dim
@property
def UpperCAmelCase__ ( self : Union[str, Any] ):
"""simple docstring"""
return self.time_input_dim * 4
@property
def UpperCAmelCase__ ( self : List[Any] ):
"""simple docstring"""
return 100
@property
def UpperCAmelCase__ ( self : str ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : List[Any] = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
return tokenizer
@property
def UpperCAmelCase__ ( self : List[Any] ):
"""simple docstring"""
torch.manual_seed(0 )
__SCREAMING_SNAKE_CASE : List[Any] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
return CLIPTextModelWithProjection(_A )
@property
def UpperCAmelCase__ ( self : str ):
"""simple docstring"""
torch.manual_seed(0 )
__SCREAMING_SNAKE_CASE : Dict = {
'''num_attention_heads''': 2,
'''attention_head_dim''': 12,
'''embedding_dim''': self.text_embedder_hidden_size,
'''num_layers''': 1,
}
__SCREAMING_SNAKE_CASE : Optional[Any] = PriorTransformer(**_A )
        # clip_std and clip_mean are initialized to 0, so PriorTransformer.post_process_latents would always return 0 - set clip_std to 1 so it won't return 0
__SCREAMING_SNAKE_CASE : List[Any] = nn.Parameter(torch.ones(model.clip_std.shape ) )
return model
@property
def UpperCAmelCase__ ( self : Union[str, Any] ):
"""simple docstring"""
torch.manual_seed(0 )
__SCREAMING_SNAKE_CASE : int = CLIPVisionConfig(
hidden_size=self.text_embedder_hidden_size , image_size=224 , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_channels=3 , num_hidden_layers=5 , patch_size=14 , )
__SCREAMING_SNAKE_CASE : str = CLIPVisionModelWithProjection(_A )
return model
@property
def UpperCAmelCase__ ( self : Optional[int] ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Dict = CLIPImageProcessor(
crop_size=224 , do_center_crop=_A , do_normalize=_A , do_resize=_A , image_mean=[0.48_14_54_66, 0.4_57_82_75, 0.40_82_10_73] , image_std=[0.26_86_29_54, 0.26_13_02_58, 0.27_57_77_11] , resample=3 , size=224 , )
return image_processor
def UpperCAmelCase__ ( self : Tuple ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Optional[Any] = self.dummy_prior
__SCREAMING_SNAKE_CASE : str = self.dummy_image_encoder
__SCREAMING_SNAKE_CASE : Optional[int] = self.dummy_text_encoder
__SCREAMING_SNAKE_CASE : List[Any] = self.dummy_tokenizer
__SCREAMING_SNAKE_CASE : Union[str, Any] = self.dummy_image_processor
__SCREAMING_SNAKE_CASE : str = UnCLIPScheduler(
variance_type='''fixed_small_log''' , prediction_type='''sample''' , num_train_timesteps=1000 , clip_sample=_A , clip_sample_range=10.0 , )
__SCREAMING_SNAKE_CASE : int = {
'''prior''': prior,
'''image_encoder''': image_encoder,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''scheduler''': scheduler,
'''image_processor''': image_processor,
}
return components
def UpperCAmelCase__ ( self : Union[str, Any] , _A : int , _A : Dict=0 ):
"""simple docstring"""
if str(_A ).startswith('''mps''' ):
__SCREAMING_SNAKE_CASE : Tuple = torch.manual_seed(_A )
else:
__SCREAMING_SNAKE_CASE : str = torch.Generator(device=_A ).manual_seed(_A )
__SCREAMING_SNAKE_CASE : List[str] = {
'''prompt''': '''horse''',
'''generator''': generator,
'''guidance_scale''': 4.0,
'''num_inference_steps''': 2,
'''output_type''': '''np''',
}
return inputs
def UpperCAmelCase__ ( self : Any ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : str = '''cpu'''
__SCREAMING_SNAKE_CASE : Any = self.get_dummy_components()
__SCREAMING_SNAKE_CASE : Any = self.pipeline_class(**_A )
__SCREAMING_SNAKE_CASE : List[Any] = pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
__SCREAMING_SNAKE_CASE : int = pipe(**self.get_dummy_inputs(_A ) )
__SCREAMING_SNAKE_CASE : Tuple = output.image_embeds
__SCREAMING_SNAKE_CASE : Optional[Any] = pipe(
**self.get_dummy_inputs(_A ) , return_dict=_A , )[0]
__SCREAMING_SNAKE_CASE : Tuple = image[0, -10:]
__SCREAMING_SNAKE_CASE : List[Any] = image_from_tuple[0, -10:]
assert image.shape == (1, 32)
__SCREAMING_SNAKE_CASE : List[str] = np.array(
[-0.05_32, 1.71_20, 0.36_56, -1.08_52, -0.89_46, -1.17_56, 0.43_48, 0.24_82, 0.51_46, -0.11_56] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
@skip_mps
def UpperCAmelCase__ ( self : List[str] ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Optional[int] = torch_device == '''cpu'''
__SCREAMING_SNAKE_CASE : Any = True
__SCREAMING_SNAKE_CASE : int = False
self._test_inference_batch_single_identical(
test_max_difference=_A , relax_max_difference=_A , test_mean_pixel_difference=_A , )
@skip_mps
def UpperCAmelCase__ ( self : Optional[int] ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Dict = torch_device == '''cpu'''
__SCREAMING_SNAKE_CASE : List[Any] = False
self._test_attention_slicing_forward_pass(
test_max_difference=_A , test_mean_pixel_difference=_A , )
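# The tests above build a per-device seeded generator so pipeline outputs are
# reproducible. A minimal sketch of the pattern used in `get_dummy_inputs`
# (illustrative; `make_generator` is a hypothetical helper name):
import torch

def make_generator(device, seed: int = 0) -> torch.Generator:
    if str(device).startswith("mps"):
        # the MPS backend does not take a device-local generator; seed globally
        return torch.manual_seed(seed)
    return torch.Generator(device=device).manual_seed(seed)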
| 303 | 1 |
"""simple docstring"""
from typing import List
import jiwer
import jiwer.transforms as tr
from packaging import version
import datasets
from datasets.config import PY_VERSION
if PY_VERSION < version.parse("""3.8"""):
import importlib_metadata
else:
import importlib.metadata as importlib_metadata
A_ : int =''''''
if version.parse(importlib_metadata.version("""jiwer""")) < version.parse("""2.3.0"""):
class __a ( tr.AbstractTransform ):
def __init__( self , a__ = " " ):
_lowerCamelCase = sentence_delimiter
def snake_case_ ( self , a__ ):
return list(lowercase_ )
def snake_case_ ( self , a__ ):
_lowerCamelCase = []
for sent_idx, sentence in enumerate(lowercase_ ):
chars.extend(self.process_string(lowercase_ ) )
if self.sentence_delimiter is not None and self.sentence_delimiter != "" and sent_idx < len(lowercase_ ) - 1:
chars.append(self.sentence_delimiter )
return chars
A_ : Union[str, Any] =tr.Compose(
[tr.RemoveMultipleSpaces(), tr.Strip(), SentencesToListOfCharacters(SENTENCE_DELIMITER)]
)
else:
A_ : Optional[Any] =tr.Compose(
[
tr.RemoveMultipleSpaces(),
tr.Strip(),
tr.ReduceToSingleSentence(SENTENCE_DELIMITER),
tr.ReduceToListOfListOfChars(),
]
)
A_ : List[str] ='''\
@inproceedings{inproceedings,
author = {Morris, Andrew and Maier, Viktoria and Green, Phil},
year = {2004},
month = {01},
pages = {},
title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}
}
'''
A_ : Dict ='''\
Character error rate (CER) is a common metric of the performance of an automatic speech recognition system.
CER is similar to Word Error Rate (WER), but operates on characters instead of words. Please refer to the docs of WER for further information.
Character error rate can be computed as:
CER = (S + D + I) / N = (S + D + I) / (S + D + C)
where
S is the number of substitutions,
D is the number of deletions,
I is the number of insertions,
C is the number of correct characters,
N is the number of characters in the reference (N=S+D+C).
CER's output is not always a number between 0 and 1, in particular when there is a high number of insertions. This value is often associated with the percentage of characters that were incorrectly predicted. The lower the value, the better the
performance of the ASR system with a CER of 0 being a perfect score.
'''
A_ : str ='''
Computes CER score of transcribed segments against references.
Args:
references: list of references for each speech input.
    predictions: list of transcriptions to score.
    concatenate_texts: Whether or not to concatenate sentences before evaluation; set to True for a more accurate result.
Returns:
(float): the character error rate
Examples:
>>> predictions = ["this is the prediction", "there is an other sample"]
>>> references = ["this is the reference", "there is another one"]
>>> cer = datasets.load_metric("cer")
>>> cer_score = cer.compute(predictions=predictions, references=references)
>>> print(cer_score)
0.34146341463414637
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __a ( datasets.Metric ):
def snake_case_ ( self ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('string' , id='sequence' ),
'references': datasets.Value('string' , id='sequence' ),
} ) , codebase_urls=['https://github.com/jitsi/jiwer/'] , reference_urls=[
'https://en.wikipedia.org/wiki/Word_error_rate',
'https://sites.google.com/site/textdigitisation/qualitymeasures/computingerrorrates',
] , )
def snake_case_ ( self , a__ , a__ , a__=False ):
if concatenate_texts:
return jiwer.compute_measures(
lowercase_ , lowercase_ , truth_transform=lowercase_ , hypothesis_transform=lowercase_ , )["wer"]
_lowerCamelCase = 0
_lowerCamelCase = 0
for prediction, reference in zip(lowercase_ , lowercase_ ):
_lowerCamelCase = jiwer.compute_measures(
lowercase_ , lowercase_ , truth_transform=lowercase_ , hypothesis_transform=lowercase_ , )
incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
total += measures["substitutions"] + measures["deletions"] + measures["hits"]
return incorrect / total
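# Minimal illustration of the CER formula documented above, computed with a
# hand-rolled character-level Levenshtein distance (an illustrative sketch;
# the metric itself delegates to `jiwer`):
def char_error_rate(reference: str, hypothesis: str) -> float:
    m, n = len(reference), len(hypothesis)
    dp = list(range(n + 1))  # edit distances against the empty reference prefix
    for i in range(1, m + 1):
        prev, dp[0] = dp[0], i
        for j in range(1, n + 1):
            cur = dp[j]
            cost = reference[i - 1] != hypothesis[j - 1]
            dp[j] = min(dp[j] + 1, dp[j - 1] + 1, prev + cost)  # deletion, insertion, substitution
            prev = cur
    return dp[n] / m  # (S + D + I) / N

assert char_error_rate("abc", "abc") == 0.0
assert char_error_rate("abc", "axc") == 1 / 3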
| 358 |
"""simple docstring"""
from dataclasses import dataclass
from typing import Dict, Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .attention_processor import AttentionProcessor, AttnProcessor
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, DiagonalGaussianDistribution, Encoder
@dataclass
class __a ( lowerCAmelCase__ ):
SCREAMING_SNAKE_CASE__ : "DiagonalGaussianDistribution"
class __a ( lowerCAmelCase__ , lowerCAmelCase__ ):
SCREAMING_SNAKE_CASE__ : Any = True
@register_to_config
def __init__( self , a__ = 3 , a__ = 3 , a__ = ("DownEncoderBlock2D",) , a__ = ("UpDecoderBlock2D",) , a__ = (64,) , a__ = 1 , a__ = "silu" , a__ = 4 , a__ = 32 , a__ = 32 , a__ = 0.18215 , ):
super().__init__()
# pass init params to Encoder
_lowerCamelCase = Encoder(
in_channels=a__ , out_channels=a__ , down_block_types=a__ , block_out_channels=a__ , layers_per_block=a__ , act_fn=a__ , norm_num_groups=a__ , double_z=a__ , )
# pass init params to Decoder
_lowerCamelCase = Decoder(
in_channels=a__ , out_channels=a__ , up_block_types=a__ , block_out_channels=a__ , layers_per_block=a__ , norm_num_groups=a__ , act_fn=a__ , )
_lowerCamelCase = nn.Convad(2 * latent_channels , 2 * latent_channels , 1 )
_lowerCamelCase = nn.Convad(a__ , a__ , 1 )
_lowerCamelCase = False
_lowerCamelCase = False
# only relevant if vae tiling is enabled
_lowerCamelCase = self.config.sample_size
_lowerCamelCase = (
self.config.sample_size[0]
if isinstance(self.config.sample_size , (list, tuple) )
else self.config.sample_size
)
_lowerCamelCase = int(sample_size / (2 ** (len(self.config.block_out_channels ) - 1)) )
_lowerCamelCase = 0.25
def snake_case_ ( self , a__ , a__=False ):
if isinstance(a__ , (Encoder, Decoder) ):
_lowerCamelCase = value
def snake_case_ ( self , a__ = True ):
_lowerCamelCase = use_tiling
def snake_case_ ( self ):
self.enable_tiling(a__ )
def snake_case_ ( self ):
_lowerCamelCase = True
def snake_case_ ( self ):
_lowerCamelCase = False
@property
# Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
def snake_case_ ( self ):
_lowerCamelCase = {}
def fn_recursive_add_processors(a__ , a__ , a__ ):
if hasattr(a__ , 'set_processor' ):
_lowerCamelCase = module.processor
for sub_name, child in module.named_children():
fn_recursive_add_processors(F'{name}.{sub_name}' , a__ , a__ )
return processors
for name, module in self.named_children():
fn_recursive_add_processors(a__ , a__ , a__ )
return processors
def snake_case_ ( self , a__ ):
_lowerCamelCase = len(self.attn_processors.keys() )
if isinstance(a__ , a__ ) and len(a__ ) != count:
raise ValueError(
F'A dict of processors was passed, but the number of processors {len(a__ )} does not match the'
F' number of attention layers: {count}. Please make sure to pass {count} processor classes.' )
def fn_recursive_attn_processor(a__ , a__ , a__ ):
if hasattr(a__ , 'set_processor' ):
if not isinstance(a__ , a__ ):
module.set_processor(a__ )
else:
module.set_processor(processor.pop(F'{name}.processor' ) )
for sub_name, child in module.named_children():
fn_recursive_attn_processor(F'{name}.{sub_name}' , a__ , a__ )
for name, module in self.named_children():
fn_recursive_attn_processor(a__ , a__ , a__ )
def snake_case_ ( self ):
self.set_attn_processor(AttnProcessor() )
@apply_forward_hook
def snake_case_ ( self , a__ , a__ = True ):
if self.use_tiling and (x.shape[-1] > self.tile_sample_min_size or x.shape[-2] > self.tile_sample_min_size):
return self.tiled_encode(a__ , return_dict=a__ )
if self.use_slicing and x.shape[0] > 1:
_lowerCamelCase = [self.encoder(a__ ) for x_slice in x.split(1 )]
_lowerCamelCase = torch.cat(a__ )
else:
_lowerCamelCase = self.encoder(a__ )
_lowerCamelCase = self.quant_conv(a__ )
_lowerCamelCase = DiagonalGaussianDistribution(a__ )
if not return_dict:
return (posterior,)
return AutoencoderKLOutput(latent_dist=a__ )
def snake_case_ ( self , a__ , a__ = True ):
if self.use_tiling and (z.shape[-1] > self.tile_latent_min_size or z.shape[-2] > self.tile_latent_min_size):
return self.tiled_decode(a__ , return_dict=a__ )
_lowerCamelCase = self.post_quant_conv(a__ )
_lowerCamelCase = self.decoder(a__ )
if not return_dict:
return (dec,)
return DecoderOutput(sample=a__ )
@apply_forward_hook
def snake_case_ ( self , a__ , a__ = True ):
if self.use_slicing and z.shape[0] > 1:
_lowerCamelCase = [self._decode(a__ ).sample for z_slice in z.split(1 )]
_lowerCamelCase = torch.cat(a__ )
else:
_lowerCamelCase = self._decode(a__ ).sample
if not return_dict:
return (decoded,)
return DecoderOutput(sample=a__ )
def snake_case_ ( self , a__ , a__ , a__ ):
_lowerCamelCase = min(a.shape[2] , b.shape[2] , a__ )
for y in range(a__ ):
_lowerCamelCase = a[:, :, -blend_extent + y, :] * (1 - y / blend_extent) + b[:, :, y, :] * (y / blend_extent)
return b
def snake_case_ ( self , a__ , a__ , a__ ):
_lowerCamelCase = min(a.shape[3] , b.shape[3] , a__ )
for x in range(a__ ):
_lowerCamelCase = a[:, :, :, -blend_extent + x] * (1 - x / blend_extent) + b[:, :, :, x] * (x / blend_extent)
return b
def snake_case_ ( self , a__ , a__ = True ):
_lowerCamelCase = int(self.tile_sample_min_size * (1 - self.tile_overlap_factor) )
_lowerCamelCase = int(self.tile_latent_min_size * self.tile_overlap_factor )
_lowerCamelCase = self.tile_latent_min_size - blend_extent
# Split the image into 512x512 tiles and encode them separately.
_lowerCamelCase = []
for i in range(0 , x.shape[2] , a__ ):
_lowerCamelCase = []
for j in range(0 , x.shape[3] , a__ ):
_lowerCamelCase = x[:, :, i : i + self.tile_sample_min_size, j : j + self.tile_sample_min_size]
_lowerCamelCase = self.encoder(a__ )
_lowerCamelCase = self.quant_conv(a__ )
row.append(a__ )
rows.append(a__ )
_lowerCamelCase = []
for i, row in enumerate(a__ ):
_lowerCamelCase = []
for j, tile in enumerate(a__ ):
# blend the above tile and the left tile
# to the current tile and add the current tile to the result row
if i > 0:
_lowerCamelCase = self.blend_v(rows[i - 1][j] , a__ , a__ )
if j > 0:
_lowerCamelCase = self.blend_h(row[j - 1] , a__ , a__ )
result_row.append(tile[:, :, :row_limit, :row_limit] )
result_rows.append(torch.cat(a__ , dim=3 ) )
_lowerCamelCase = torch.cat(a__ , dim=2 )
_lowerCamelCase = DiagonalGaussianDistribution(a__ )
if not return_dict:
return (posterior,)
return AutoencoderKLOutput(latent_dist=a__ )
def snake_case_ ( self , a__ , a__ = True ):
_lowerCamelCase = int(self.tile_latent_min_size * (1 - self.tile_overlap_factor) )
_lowerCamelCase = int(self.tile_sample_min_size * self.tile_overlap_factor )
_lowerCamelCase = self.tile_sample_min_size - blend_extent
# Split z into overlapping 64x64 tiles and decode them separately.
# The tiles have an overlap to avoid seams between tiles.
_lowerCamelCase = []
for i in range(0 , z.shape[2] , a__ ):
_lowerCamelCase = []
for j in range(0 , z.shape[3] , a__ ):
_lowerCamelCase = z[:, :, i : i + self.tile_latent_min_size, j : j + self.tile_latent_min_size]
_lowerCamelCase = self.post_quant_conv(a__ )
_lowerCamelCase = self.decoder(a__ )
row.append(a__ )
rows.append(a__ )
_lowerCamelCase = []
for i, row in enumerate(a__ ):
_lowerCamelCase = []
for j, tile in enumerate(a__ ):
# blend the above tile and the left tile
# to the current tile and add the current tile to the result row
if i > 0:
_lowerCamelCase = self.blend_v(rows[i - 1][j] , a__ , a__ )
if j > 0:
_lowerCamelCase = self.blend_h(row[j - 1] , a__ , a__ )
result_row.append(tile[:, :, :row_limit, :row_limit] )
result_rows.append(torch.cat(a__ , dim=3 ) )
_lowerCamelCase = torch.cat(a__ , dim=2 )
if not return_dict:
return (dec,)
return DecoderOutput(sample=a__ )
def snake_case_ ( self , a__ , a__ = False , a__ = True , a__ = None , ):
_lowerCamelCase = sample
_lowerCamelCase = self.encode(a__ ).latent_dist
if sample_posterior:
_lowerCamelCase = posterior.sample(generator=a__ )
else:
_lowerCamelCase = posterior.mode()
_lowerCamelCase = self.decode(a__ ).sample
if not return_dict:
return (dec,)
return DecoderOutput(sample=a__ )
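# `blend_v` / `blend_h` above linearly cross-fade the overlapping strip
# between neighbouring tiles so the tiled decode has no visible seams. A 1-D
# sketch of the same weighting (illustrative; `blend_1d` is a hypothetical
# helper, not library code):
import torch

def blend_1d(a: torch.Tensor, b: torch.Tensor, blend_extent: int) -> torch.Tensor:
    blend_extent = min(a.shape[0], b.shape[0], blend_extent)
    for x in range(blend_extent):
        # weight shifts from tile `a` to tile `b` across the overlap
        b[x] = a[-blend_extent + x] * (1 - x / blend_extent) + b[x] * (x / blend_extent)
    return b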
| 80 | 0 |
from typing import List, Optional, Union
import numpy as np
import PIL.Image
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
PILImageResampling,
get_image_size,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
UpperCAmelCase_ : List[Any] = logging.get_logger(__name__)
class _SCREAMING_SNAKE_CASE ( _a ):
snake_case__ : str = ["""pixel_values"""]
def __init__( self : str , __lowerCamelCase : bool = True , __lowerCamelCase : int = 32 , __lowerCamelCase : List[str]=PILImageResampling.BILINEAR , __lowerCamelCase : bool = True , **__lowerCamelCase : Optional[Any] , ):
UpperCamelCase :List[str] = do_resize
UpperCamelCase :Tuple = do_rescale
UpperCamelCase :str = size_divisor
UpperCamelCase :Any = resample
super().__init__(**__lowerCamelCase )
def _A ( self : Optional[Any] , __lowerCamelCase : np.ndarray , __lowerCamelCase : int , __lowerCamelCase : str , __lowerCamelCase : Optional[ChannelDimension] = None , **__lowerCamelCase : List[Any] ):
UpperCamelCase , UpperCamelCase :Tuple = get_image_size(__lowerCamelCase )
# Rounds the height and width down to the closest multiple of size_divisor
UpperCamelCase :Union[str, Any] = height // size_divisor * size_divisor
UpperCamelCase :int = width // size_divisor * size_divisor
UpperCamelCase :List[str] = resize(__lowerCamelCase , (new_h, new_w) , resample=__lowerCamelCase , data_format=__lowerCamelCase , **__lowerCamelCase )
return image
def _A ( self : str , __lowerCamelCase : np.ndarray , __lowerCamelCase : float , __lowerCamelCase : Optional[ChannelDimension] = None , **__lowerCamelCase : List[str] ):
return rescale(image=__lowerCamelCase , scale=__lowerCamelCase , data_format=__lowerCamelCase , **__lowerCamelCase )
def _A ( self : Dict , __lowerCamelCase : Union["PIL.Image.Image", TensorType, List["PIL.Image.Image"], List[TensorType]] , __lowerCamelCase : Optional[bool] = None , __lowerCamelCase : Optional[int] = None , __lowerCamelCase : Optional[Any]=None , __lowerCamelCase : Optional[bool] = None , __lowerCamelCase : Optional[Union[TensorType, str]] = None , __lowerCamelCase : ChannelDimension = ChannelDimension.FIRST , **__lowerCamelCase : Optional[int] , ):
UpperCamelCase :int = do_resize if do_resize is not None else self.do_resize
UpperCamelCase :Optional[Any] = do_rescale if do_rescale is not None else self.do_rescale
UpperCamelCase :Union[str, Any] = size_divisor if size_divisor is not None else self.size_divisor
UpperCamelCase :Optional[int] = resample if resample is not None else self.resample
if do_resize and size_divisor is None:
raise ValueError("""size_divisor is required for resizing""" )
UpperCamelCase :str = make_list_of_images(__lowerCamelCase )
if not valid_images(__lowerCamelCase ):
raise ValueError("""Invalid image(s)""" )
# All transformations expect numpy arrays.
UpperCamelCase :Dict = [to_numpy_array(__lowerCamelCase ) for img in images]
if do_resize:
UpperCamelCase :Any = [self.resize(__lowerCamelCase , size_divisor=__lowerCamelCase , resample=__lowerCamelCase ) for image in images]
if do_rescale:
UpperCamelCase :Dict = [self.rescale(__lowerCamelCase , scale=1 / 255 ) for image in images]
UpperCamelCase :Tuple = [to_channel_dimension_format(__lowerCamelCase , __lowerCamelCase ) for image in images]
UpperCamelCase :List[str] = {"""pixel_values""": images}
return BatchFeature(data=__lowerCamelCase , tensor_type=__lowerCamelCase )
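# The resize step above snaps height and width down to the nearest multiple of
# `size_divisor`, as some backbones require. A standalone sketch of that
# rounding (illustrative):
def round_down_to_multiple(value: int, divisor: int) -> int:
    return value // divisor * divisor

assert round_down_to_multiple(518, 32) == 512
assert round_down_to_multiple(512, 32) == 512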
| 38 |
import html
from ...feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from ...utils import is_bsa_available, logging, requires_backends
if is_bsa_available():
import bsa
from bsa import BeautifulSoup
UpperCAmelCase_ : Any = logging.get_logger(__name__)
class _SCREAMING_SNAKE_CASE ( _a ):
def __init__( self : Optional[int] , **__lowerCamelCase : Optional[int] ):
requires_backends(self , ["""bs4"""] )
super().__init__(**__lowerCamelCase )
def _A ( self : List[str] , __lowerCamelCase : Any ):
UpperCamelCase :Optional[int] = []
UpperCamelCase :List[str] = []
UpperCamelCase :Union[str, Any] = element if element.name else element.parent
for parent in child.parents: # type: bs4.element.Tag
UpperCamelCase :Optional[Any] = parent.find_all(child.name , recursive=__lowerCamelCase )
xpath_tags.append(child.name )
xpath_subscripts.append(
0 if 1 == len(__lowerCamelCase ) else next(i for i, s in enumerate(__lowerCamelCase , 1 ) if s is child ) )
UpperCamelCase :Any = parent
xpath_tags.reverse()
xpath_subscripts.reverse()
return xpath_tags, xpath_subscripts
def _A ( self : Any , __lowerCamelCase : Tuple ):
UpperCamelCase :Any = BeautifulSoup(__lowerCamelCase , """html.parser""" )
UpperCamelCase :Union[str, Any] = []
UpperCamelCase :Tuple = []
UpperCamelCase :Tuple = []
for element in html_code.descendants:
if type(__lowerCamelCase ) == bsa.element.NavigableString:
if type(element.parent ) != bsa.element.Tag:
continue
UpperCamelCase :Any = html.unescape(__lowerCamelCase ).strip()
if not text_in_this_tag:
continue
all_doc_strings.append(__lowerCamelCase )
UpperCamelCase , UpperCamelCase :Optional[Any] = self.xpath_soup(__lowerCamelCase )
stringaxtag_seq.append(__lowerCamelCase )
stringaxsubs_seq.append(__lowerCamelCase )
if len(__lowerCamelCase ) != len(__lowerCamelCase ):
raise ValueError("""Number of doc strings and xtags does not correspond""" )
if len(__lowerCamelCase ) != len(__lowerCamelCase ):
raise ValueError("""Number of doc strings and xsubs does not correspond""" )
return all_doc_strings, stringaxtag_seq, stringaxsubs_seq
def _A ( self : int , __lowerCamelCase : List[Any] , __lowerCamelCase : List[str] ):
UpperCamelCase :Tuple = """"""
for tagname, subs in zip(__lowerCamelCase , __lowerCamelCase ):
xpath += F"""/{tagname}"""
if subs != 0:
xpath += F"""[{subs}]"""
return xpath
def __call__( self : Any , __lowerCamelCase : Dict ):
UpperCamelCase :Any = False
        # Check that strings have a valid type
if isinstance(__lowerCamelCase , __lowerCamelCase ):
UpperCamelCase :List[Any] = True
elif isinstance(__lowerCamelCase , (list, tuple) ):
if len(__lowerCamelCase ) == 0 or isinstance(html_strings[0] , __lowerCamelCase ):
UpperCamelCase :Any = True
if not valid_strings:
raise ValueError(
"""HTML strings must of type `str`, `List[str]` (batch of examples), """
F"""but is of type {type(__lowerCamelCase )}.""" )
UpperCamelCase :str = bool(isinstance(__lowerCamelCase , (list, tuple) ) and (isinstance(html_strings[0] , __lowerCamelCase )) )
if not is_batched:
UpperCamelCase :Any = [html_strings]
# Get nodes + xpaths
UpperCamelCase :Union[str, Any] = []
UpperCamelCase :str = []
for html_string in html_strings:
UpperCamelCase , UpperCamelCase , UpperCamelCase :int = self.get_three_from_single(__lowerCamelCase )
nodes.append(__lowerCamelCase )
UpperCamelCase :int = []
for node, tag_list, sub_list in zip(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ):
UpperCamelCase :str = self.construct_xpath(__lowerCamelCase , __lowerCamelCase )
xpath_strings.append(__lowerCamelCase )
xpaths.append(__lowerCamelCase )
# return as Dict
UpperCamelCase :Optional[int] = {"""nodes""": nodes, """xpaths""": xpaths}
UpperCamelCase :Any = BatchFeature(data=__lowerCamelCase , tensor_type=__lowerCamelCase )
return encoded_inputs
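# `construct_xpath` above joins tag names with 1-based sibling subscripts,
# omitting the subscript when it is 0. A standalone sketch (illustrative;
# `build_xpath` is a hypothetical name):
def build_xpath(tags, subs):
    xpath = ""
    for tagname, sub in zip(tags, subs):
        xpath += f"/{tagname}"
        if sub != 0:
            xpath += f"[{sub}]"
    return xpath

assert build_xpath(["html", "body", "div"], [0, 0, 2]) == "/html/body/div[2]"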
| 38 | 1 |
from __future__ import annotations
def ohms_law(voltage: float, current: float, resistance: float) -> dict[str, float]:
    # Apply Ohm's law (V = I * R) to solve for whichever quantity is passed as 0.
    if (voltage, current, resistance).count(0) != 1:
        raise ValueError("""One and only one argument must be 0""")
    if resistance < 0:
        raise ValueError("""Resistance cannot be negative""")
    if voltage == 0:
        return {"voltage": float(current * resistance)}
    elif current == 0:
        return {"current": voltage / resistance}
    elif resistance == 0:
        return {"resistance": voltage / current}
    else:
        raise ValueError("""Exactly one argument must be 0""")
if __name__ == "__main__":
import doctest
doctest.testmod()
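# Example usage (illustrative): pass 0 for exactly the quantity to solve for.
assert ohms_law(voltage=10, current=5, resistance=0) == {"resistance": 2.0}
assert ohms_law(voltage=0, current=2, resistance=3) == {"voltage": 6.0}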
| 293 |
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartaaTokenizer, MBartaaTokenizerFast, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
)
from ...test_tokenization_common import TokenizerTesterMixin
__UpperCAmelCase : str = get_tests_dir("fixtures/test_sentencepiece.model")
if is_torch_available():
from transformers.models.mbart.modeling_mbart import shift_tokens_right
__UpperCAmelCase : Any = 250_004
__UpperCAmelCase : List[str] = 250_020
@require_sentencepiece
@require_tokenizers
class __snake_case ( __lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = MBartaaTokenizer
lowerCAmelCase__ = MBartaaTokenizerFast
lowerCAmelCase__ = True
lowerCAmelCase__ = True
def UpperCAmelCase__ ( self : Tuple ):
super().setUp()
# We have a SentencePiece fixture for testing
__snake_case: Optional[int] = MBartaaTokenizer(A , src_lang="""en_XX""" , tgt_lang="""ro_RO""" , keep_accents=A )
tokenizer.save_pretrained(self.tmpdirname )
def UpperCAmelCase__ ( self : Union[str, Any] ):
__snake_case: Any = """<s>"""
__snake_case: Tuple = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(A ) , A )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(A ) , A )
def UpperCAmelCase__ ( self : Any ):
__snake_case: Dict = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """<s>""" )
self.assertEqual(vocab_keys[1] , """<pad>""" )
self.assertEqual(vocab_keys[-1] , """<mask>""" )
self.assertEqual(len(A ) , 1_054 )
def UpperCAmelCase__ ( self : Any ):
self.assertEqual(self.get_tokenizer().vocab_size , 1_054 )
def UpperCAmelCase__ ( self : Union[str, Any] ):
__snake_case: Dict = MBartaaTokenizer(A , src_lang="""en_XX""" , tgt_lang="""ro_RO""" , keep_accents=A )
__snake_case: int = tokenizer.tokenize("""This is a test""" )
self.assertListEqual(A , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(A ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
__snake_case: Union[str, Any] = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
A , [SPIECE_UNDERLINE + """I""", SPIECE_UNDERLINE + """was""", SPIECE_UNDERLINE + """b""", """or""", """n""", SPIECE_UNDERLINE + """in""", SPIECE_UNDERLINE + """""", """9""", """2""", """0""", """0""", """0""", """,""", SPIECE_UNDERLINE + """and""", SPIECE_UNDERLINE + """this""", SPIECE_UNDERLINE + """is""", SPIECE_UNDERLINE + """f""", """al""", """s""", """é""", """."""] , )
__snake_case: List[Any] = tokenizer.convert_tokens_to_ids(A )
self.assertListEqual(
A , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
__snake_case: int = tokenizer.convert_ids_to_tokens(A )
self.assertListEqual(
A , [SPIECE_UNDERLINE + """I""", SPIECE_UNDERLINE + """was""", SPIECE_UNDERLINE + """b""", """or""", """n""", SPIECE_UNDERLINE + """in""", SPIECE_UNDERLINE + """""", """<unk>""", """2""", """0""", """0""", """0""", """,""", SPIECE_UNDERLINE + """and""", SPIECE_UNDERLINE + """this""", SPIECE_UNDERLINE + """is""", SPIECE_UNDERLINE + """f""", """al""", """s""", """<unk>""", """."""] , )
@slow
def UpperCAmelCase__ ( self : Optional[int] ):
# fmt: off
__snake_case: List[str] = {"""input_ids""": [[250_004, 11_062, 82_772, 7, 15, 82_772, 538, 51_529, 237, 17_198, 1_290, 206, 9, 215_175, 1_314, 136, 17_198, 1_290, 206, 9, 56_359, 42, 122_009, 9, 16_466, 16, 87_344, 4_537, 9, 4_717, 78_381, 6, 159_958, 7, 15, 24_480, 618, 4, 527, 22_693, 5_428, 4, 2_777, 24_480, 9_874, 4, 43_523, 594, 4, 803, 18_392, 33_189, 18, 4, 43_523, 24_447, 12_399, 100, 24_955, 83_658, 9_626, 144_057, 15, 839, 22_335, 16, 136, 24_955, 83_658, 83_479, 15, 39_102, 724, 16, 678, 645, 2_789, 1_328, 4_589, 42, 122_009, 115_774, 23, 805, 1_328, 46_876, 7, 136, 53_894, 1_940, 42_227, 41_159, 17_721, 823, 425, 4, 27_512, 98_722, 206, 136, 5_531, 4_970, 919, 17_336, 5, 2], [250_004, 20_080, 618, 83, 82_775, 47, 479, 9, 1_517, 73, 53_894, 333, 80_581, 110_117, 18_811, 5_256, 1_295, 51, 152_526, 297, 7_986, 390, 124_416, 538, 35_431, 214, 98, 15_044, 25_737, 136, 7_108, 43_701, 23, 756, 135_355, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [250_004, 581, 63_773, 119_455, 6, 147_797, 88_203, 7, 645, 70, 21, 3_285, 10_269, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=A , model_name="""facebook/mbart-large-50""" , revision="""d3913889c59cd5c9e456b269c376325eabad57e2""" , )
def UpperCAmelCase__ ( self : Union[str, Any] ):
if not self.test_slow_tokenizer:
# as we don't have a slow version, we can't compare the outputs between slow and fast versions
return
__snake_case: Any = (self.rust_tokenizer_class, """hf-internal-testing/tiny-random-mbart50""", {})
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
__snake_case: Optional[int] = self.rust_tokenizer_class.from_pretrained(A , **A )
__snake_case: Union[str, Any] = self.tokenizer_class.from_pretrained(A , **A )
__snake_case: List[str] = tempfile.mkdtemp()
__snake_case: Tuple = tokenizer_r.save_pretrained(A )
__snake_case: Optional[int] = tokenizer_p.save_pretrained(A )
                # Checks it saves with the same files + the tokenizer.json file for the fast one
self.assertTrue(any("""tokenizer.json""" in f for f in tokenizer_r_files ) )
__snake_case: Dict = tuple(f for f in tokenizer_r_files if """tokenizer.json""" not in f )
self.assertSequenceEqual(A , A )
# Checks everything loads correctly in the same way
__snake_case: Tuple = tokenizer_r.from_pretrained(A )
__snake_case: Optional[Any] = tokenizer_p.from_pretrained(A )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(A , A ) )
# self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
# self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
shutil.rmtree(A )
# Save tokenizer rust, legacy_format=True
__snake_case: Tuple = tempfile.mkdtemp()
__snake_case: Any = tokenizer_r.save_pretrained(A , legacy_format=A )
__snake_case: List[str] = tokenizer_p.save_pretrained(A )
                # Checks it saves with the same files
self.assertSequenceEqual(A , A )
# Checks everything loads correctly in the same way
__snake_case: List[Any] = tokenizer_r.from_pretrained(A )
__snake_case: Dict = tokenizer_p.from_pretrained(A )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(A , A ) )
shutil.rmtree(A )
# Save tokenizer rust, legacy_format=False
__snake_case: List[str] = tempfile.mkdtemp()
__snake_case: Any = tokenizer_r.save_pretrained(A , legacy_format=A )
__snake_case: Dict = tokenizer_p.save_pretrained(A )
# Checks it saved the tokenizer.json file
self.assertTrue(any("""tokenizer.json""" in f for f in tokenizer_r_files ) )
# Checks everything loads correctly in the same way
__snake_case: Any = tokenizer_r.from_pretrained(A )
__snake_case: Any = tokenizer_p.from_pretrained(A )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(A , A ) )
shutil.rmtree(A )
@require_torch
@require_sentencepiece
@require_tokenizers
class __snake_case ( unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = """facebook/mbart-large-50-one-to-many-mmt"""
lowerCAmelCase__ = [
""" UN Chief Says There Is No Military Solution in Syria""",
""" Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for Syria is that \"there is no military solution\" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.""",
]
lowerCAmelCase__ = [
"""Şeful ONU declară că nu există o soluţie militară în Siria""",
"""Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei"""
""" pentru Siria este că \"nu există o soluţie militară\" la conflictul de aproape cinci ani şi că noi arme nu vor"""
""" face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.""",
]
lowerCAmelCase__ = [EN_CODE, 82_74, 12_78_73, 2_59_16, 7, 86_22, 20_71, 4_38, 6_74_85, 53, 18_78_95, 23, 5_17_12, 2]
@classmethod
def UpperCAmelCase__ ( cls : int ):
__snake_case: MBartaaTokenizer = MBartaaTokenizer.from_pretrained(
cls.checkpoint_name , src_lang="""en_XX""" , tgt_lang="""ro_RO""" )
__snake_case: str = 1
return cls
def UpperCAmelCase__ ( self : Any ):
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""ar_AR"""] , 250_001 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""en_EN"""] , 250_004 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""ro_RO"""] , 250_020 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""mr_IN"""] , 250_038 )
def UpperCAmelCase__ ( self : Optional[int] ):
__snake_case: List[str] = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens , A )
def UpperCAmelCase__ ( self : Union[str, Any] ):
self.assertIn(A , self.tokenizer.all_special_ids )
__snake_case: Dict = [RO_CODE, 884, 9_019, 96, 9, 916, 86_792, 36, 18_743, 15_596, 5, 2]
__snake_case: str = self.tokenizer.decode(A , skip_special_tokens=A )
__snake_case: Union[str, Any] = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=A )
self.assertEqual(A , A )
self.assertNotIn(self.tokenizer.eos_token , A )
def UpperCAmelCase__ ( self : Dict ):
__snake_case: List[str] = ["""this is gunna be a long sentence """ * 20]
assert isinstance(src_text[0] , A )
__snake_case: Union[str, Any] = 10
__snake_case: List[Any] = self.tokenizer(A , max_length=A , truncation=A ).input_ids[0]
self.assertEqual(ids[0] , A )
self.assertEqual(ids[-1] , 2 )
self.assertEqual(len(A ) , A )
def UpperCAmelCase__ ( self : Tuple ):
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["""<mask>""", """ar_AR"""] ) , [250_053, 250_001] )
def UpperCAmelCase__ ( self : List[Any] ):
__snake_case: List[Any] = tempfile.mkdtemp()
__snake_case: Any = self.tokenizer.fairseq_tokens_to_ids
self.tokenizer.save_pretrained(A )
__snake_case: Union[str, Any] = MBartaaTokenizer.from_pretrained(A )
self.assertDictEqual(new_tok.fairseq_tokens_to_ids , A )
@require_torch
def UpperCAmelCase__ ( self : Optional[int] ):
__snake_case: List[str] = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=A , return_tensors="""pt""" )
__snake_case: List[Any] = shift_tokens_right(batch["""labels"""] , self.tokenizer.pad_token_id )
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
assert batch.input_ids[1][0] == EN_CODE
assert batch.input_ids[1][-1] == 2
assert batch.labels[1][0] == RO_CODE
assert batch.labels[1][-1] == 2
assert batch.decoder_input_ids[1][:2].tolist() == [2, RO_CODE]
@require_torch
def UpperCAmelCase__ ( self : Optional[int] ):
__snake_case: int = self.tokenizer(
self.src_text , text_target=self.tgt_text , padding=A , truncation=A , max_length=len(self.expected_src_tokens ) , return_tensors="""pt""" , )
__snake_case: Optional[Any] = shift_tokens_right(batch["""labels"""] , self.tokenizer.pad_token_id )
self.assertIsInstance(A , A )
self.assertEqual((2, 14) , batch.input_ids.shape )
self.assertEqual((2, 14) , batch.attention_mask.shape )
__snake_case: List[str] = batch.input_ids.tolist()[0]
self.assertListEqual(self.expected_src_tokens , A )
self.assertEqual(2 , batch.decoder_input_ids[0, 0] ) # decoder_start_token_id
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens , [EN_CODE] )
self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
def UpperCAmelCase__ ( self : str ):
__snake_case: List[Any] = self.tokenizer(self.src_text , padding=A , truncation=A , max_length=3 , return_tensors="""pt""" )
__snake_case: Union[str, Any] = self.tokenizer(
text_target=self.tgt_text , padding=A , truncation=A , max_length=10 , return_tensors="""pt""" )
__snake_case: Dict = targets["""input_ids"""]
__snake_case: Any = shift_tokens_right(A , self.tokenizer.pad_token_id )
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.decoder_input_ids.shape[1] , 10 )
@require_torch
def UpperCAmelCase__ ( self : Union[str, Any] ):
__snake_case: int = self.tokenizer._build_translation_inputs(
"""A test""" , return_tensors="""pt""" , src_lang="""en_XX""" , tgt_lang="""ar_AR""" )
self.assertEqual(
nested_simplify(A ) , {
# en_XX, A, test, EOS
"""input_ids""": [[250_004, 62, 3_034, 2]],
"""attention_mask""": [[1, 1, 1, 1]],
# ar_AR
"""forced_bos_token_id""": 250_001,
} , )
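# `shift_tokens_right` used in the batch tests above rotates the last non-pad
# token (the EOS / language code) to the front to build decoder inputs. A
# sketch of the MBart-style shift (an assumption based on its use here, not
# necessarily the exact library code):
import torch

def shift_tokens_right_sketch(input_ids: torch.Tensor, pad_token_id: int) -> torch.Tensor:
    prev_output_tokens = input_ids.clone()
    # -100 is the ignore index in labels; replace it with real padding first
    prev_output_tokens.masked_fill_(prev_output_tokens == -100, pad_token_id)
    # position of the last non-pad token in each row
    index_of_eos = (prev_output_tokens.ne(pad_token_id).sum(dim=1) - 1).unsqueeze(-1)
    decoder_start_tokens = prev_output_tokens.gather(1, index_of_eos).squeeze()
    prev_output_tokens[:, 1:] = prev_output_tokens[:, :-1].clone()
    prev_output_tokens[:, 0] = decoder_start_tokens
    return prev_output_tokens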
| 293 | 1 |
from __future__ import annotations

from collections.abc import Callable


def trapezoidal_area(
    fnc: Callable[[int | float], int | float],
    x_start: int | float,
    x_end: int | float,
    steps: int = 100,
) -> float:
    x1 = x_start
    fx1 = fnc(x_start)
    area = 0.0
    for _ in range(steps):
        # Approximates small segments of the curve as linear and solves
        # for the trapezoidal area
        x2 = (x_end - x_start) / steps + x1
        fx2 = fnc(x2)
        area += abs(fx2 + fx1) * (x2 - x1) / 2
        # Increment step
        x1 = x2
        fx1 = fx2
    return area
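# Sanity check (illustrative): the trapezoid rule is exact for linear
# functions, so the area under f(x) = x from 0 to 2 should be 2.0 for any
# step count.
assert abs(trapezoidal_area(lambda x: x, 0, 2, 4) - 2.0) < 1e-9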
if __name__ == "__main__":
    def f(x: float) -> float:
        return x**3 + x**2

    print('f(x) = x^3 + x^2')
    print('The area between the curve, x = -5, x = 5 and the x axis is:')
    i = 10
    while i <= 100_000:
        print(f'''with {i} steps: {trapezoidal_area(f, -5, 5, i)}''')
        i *= 10
| 225 |
import unittest
from parameterized import parameterized
from transformers import LlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaTokenizer
class lowerCamelCase_ :
'''simple docstring'''
def __init__( self : Optional[Any] , _lowerCAmelCase : str , _lowerCAmelCase : Tuple=13 , _lowerCAmelCase : List[str]=7 , _lowerCAmelCase : Dict=True , _lowerCAmelCase : Dict=True , _lowerCAmelCase : Optional[int]=False , _lowerCAmelCase : Any=True , _lowerCAmelCase : str=99 , _lowerCAmelCase : List[str]=32 , _lowerCAmelCase : Tuple=5 , _lowerCAmelCase : List[str]=4 , _lowerCAmelCase : str=37 , _lowerCAmelCase : Any="gelu" , _lowerCAmelCase : str=0.1 , _lowerCAmelCase : Union[str, Any]=0.1 , _lowerCAmelCase : Optional[int]=512 , _lowerCAmelCase : Union[str, Any]=16 , _lowerCAmelCase : Tuple=2 , _lowerCAmelCase : Tuple=0.02 , _lowerCAmelCase : Tuple=3 , _lowerCAmelCase : Dict=4 , _lowerCAmelCase : Union[str, Any]=None , ):
SCREAMING_SNAKE_CASE_ = parent
SCREAMING_SNAKE_CASE_ = batch_size
SCREAMING_SNAKE_CASE_ = seq_length
SCREAMING_SNAKE_CASE_ = is_training
SCREAMING_SNAKE_CASE_ = use_input_mask
SCREAMING_SNAKE_CASE_ = use_token_type_ids
SCREAMING_SNAKE_CASE_ = use_labels
SCREAMING_SNAKE_CASE_ = vocab_size
SCREAMING_SNAKE_CASE_ = hidden_size
SCREAMING_SNAKE_CASE_ = num_hidden_layers
SCREAMING_SNAKE_CASE_ = num_attention_heads
SCREAMING_SNAKE_CASE_ = intermediate_size
SCREAMING_SNAKE_CASE_ = hidden_act
SCREAMING_SNAKE_CASE_ = hidden_dropout_prob
SCREAMING_SNAKE_CASE_ = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE_ = max_position_embeddings
SCREAMING_SNAKE_CASE_ = type_vocab_size
SCREAMING_SNAKE_CASE_ = type_sequence_label_size
SCREAMING_SNAKE_CASE_ = initializer_range
SCREAMING_SNAKE_CASE_ = num_labels
SCREAMING_SNAKE_CASE_ = num_choices
SCREAMING_SNAKE_CASE_ = scope
def lowerCAmelCase_ ( self : List[str] ):
SCREAMING_SNAKE_CASE_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
SCREAMING_SNAKE_CASE_ = None
if self.use_input_mask:
SCREAMING_SNAKE_CASE_ = random_attention_mask([self.batch_size, self.seq_length] )
SCREAMING_SNAKE_CASE_ = None
if self.use_token_type_ids:
SCREAMING_SNAKE_CASE_ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
SCREAMING_SNAKE_CASE_ = None
SCREAMING_SNAKE_CASE_ = None
SCREAMING_SNAKE_CASE_ = None
if self.use_labels:
SCREAMING_SNAKE_CASE_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
SCREAMING_SNAKE_CASE_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
SCREAMING_SNAKE_CASE_ = ids_tensor([self.batch_size] , self.num_choices )
SCREAMING_SNAKE_CASE_ = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowerCAmelCase_ ( self : Optional[int] ):
return LlamaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_lowerCAmelCase , initializer_range=self.initializer_range , )
def lowerCAmelCase_ ( self : Optional[Any] , _lowerCAmelCase : Tuple , _lowerCAmelCase : List[Any] , _lowerCAmelCase : List[Any] , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : str , _lowerCAmelCase : str , _lowerCAmelCase : Optional[Any] ):
SCREAMING_SNAKE_CASE_ = LlamaModel(config=_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
SCREAMING_SNAKE_CASE_ = model(_lowerCAmelCase , attention_mask=_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = model(_lowerCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowerCAmelCase_ ( self : Any , _lowerCAmelCase : Dict , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : Any , _lowerCAmelCase : Dict , _lowerCAmelCase : Any , _lowerCAmelCase : Tuple , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : List[str] , ):
SCREAMING_SNAKE_CASE_ = True
SCREAMING_SNAKE_CASE_ = LlamaModel(_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
SCREAMING_SNAKE_CASE_ = model(
_lowerCAmelCase , attention_mask=_lowerCAmelCase , encoder_hidden_states=_lowerCAmelCase , encoder_attention_mask=_lowerCAmelCase , )
SCREAMING_SNAKE_CASE_ = model(
_lowerCAmelCase , attention_mask=_lowerCAmelCase , encoder_hidden_states=_lowerCAmelCase , )
SCREAMING_SNAKE_CASE_ = model(_lowerCAmelCase , attention_mask=_lowerCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowerCAmelCase_ ( self : int , _lowerCAmelCase : str , _lowerCAmelCase : str , _lowerCAmelCase : List[str] , _lowerCAmelCase : Any , _lowerCAmelCase : str , _lowerCAmelCase : List[Any] , _lowerCAmelCase : Any , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : str , ):
SCREAMING_SNAKE_CASE_ = LlamaForCausalLM(config=_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
SCREAMING_SNAKE_CASE_ = model(_lowerCAmelCase , attention_mask=_lowerCAmelCase , labels=_lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowerCAmelCase_ ( self : List[str] , _lowerCAmelCase : List[str] , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : List[Any] , _lowerCAmelCase : Dict , _lowerCAmelCase : List[Any] , _lowerCAmelCase : List[Any] , _lowerCAmelCase : Any , _lowerCAmelCase : List[str] , _lowerCAmelCase : List[str] , ):
SCREAMING_SNAKE_CASE_ = True
SCREAMING_SNAKE_CASE_ = True
SCREAMING_SNAKE_CASE_ = LlamaForCausalLM(config=_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
# first forward pass
SCREAMING_SNAKE_CASE_ = model(
_lowerCAmelCase , attention_mask=_lowerCAmelCase , encoder_hidden_states=_lowerCAmelCase , encoder_attention_mask=_lowerCAmelCase , use_cache=_lowerCAmelCase , )
SCREAMING_SNAKE_CASE_ = outputs.past_key_values
        # create hypothetical multiple next tokens and extend them to next_input_ids
SCREAMING_SNAKE_CASE_ = ids_tensor((self.batch_size, 3) , config.vocab_size )
SCREAMING_SNAKE_CASE_ = ids_tensor((self.batch_size, 3) , vocab_size=2 )
        # append the new tokens to next input_ids and the attention mask
SCREAMING_SNAKE_CASE_ = torch.cat([input_ids, next_tokens] , dim=-1 )
SCREAMING_SNAKE_CASE_ = torch.cat([input_mask, next_mask] , dim=-1 )
SCREAMING_SNAKE_CASE_ = model(
_lowerCAmelCase , attention_mask=_lowerCAmelCase , encoder_hidden_states=_lowerCAmelCase , encoder_attention_mask=_lowerCAmelCase , output_hidden_states=_lowerCAmelCase , )['hidden_states'][0]
SCREAMING_SNAKE_CASE_ = model(
_lowerCAmelCase , attention_mask=_lowerCAmelCase , encoder_hidden_states=_lowerCAmelCase , encoder_attention_mask=_lowerCAmelCase , past_key_values=_lowerCAmelCase , output_hidden_states=_lowerCAmelCase , )['hidden_states'][0]
# select random slice
SCREAMING_SNAKE_CASE_ = ids_tensor((1,) , output_from_past.shape[-1] ).item()
SCREAMING_SNAKE_CASE_ = output_from_no_past[:, -3:, random_slice_idx].detach()
SCREAMING_SNAKE_CASE_ = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(_lowerCAmelCase , _lowerCAmelCase , atol=1E-3 ) )
def lowerCAmelCase_ ( self : List[str] ):
SCREAMING_SNAKE_CASE_ = self.prepare_config_and_inputs()
(
(
SCREAMING_SNAKE_CASE_
) , (
SCREAMING_SNAKE_CASE_
) , (
SCREAMING_SNAKE_CASE_
) , (
SCREAMING_SNAKE_CASE_
) , (
SCREAMING_SNAKE_CASE_
) , (
SCREAMING_SNAKE_CASE_
) , (
SCREAMING_SNAKE_CASE_
) ,
) = config_and_inputs
SCREAMING_SNAKE_CASE_ = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class LlamaModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
'''simple docstring'''
    all_model_classes = (LlamaModel, LlamaForCausalLM, LlamaForSequenceClassification) if is_torch_available() else ()
    all_generative_model_classes = (LlamaForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": LlamaModel,
            "text-classification": LlamaForSequenceClassification,
            "text-generation": LlamaForCausalLM,
            "zero-shot": LlamaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_headmasking = False
    test_pruning = False
    def setUp(self):
        self.model_tester = LlamaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LlamaConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_llama_sequence_classification_model(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = LlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    def test_llama_sequence_classification_model_for_single_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "single_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = LlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    def test_llama_sequence_classification_model_for_multi_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "multi_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor(
            [self.model_tester.batch_size, config.num_labels], self.model_tester.type_sequence_label_size
        ).to(torch.float)
        model = LlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    @unittest.skip("LLaMA buffers include complex numbers, which breaks this test")
    def test_save_load_fast_init_from_base(self):
        pass
    @parameterized.expand([("linear",), ("dynamic",)])
    def test_model_rope_scaling(self, scaling_type):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        short_input = ids_tensor([1, 10], config.vocab_size)
        long_input = ids_tensor([1, int(config.max_position_embeddings * 1.5)], config.vocab_size)

        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        original_model = LlamaModel(config)
        original_model.to(torch_device)
        original_model.eval()
        original_short_output = original_model(short_input).last_hidden_state
        original_long_output = original_model(long_input).last_hidden_state

        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        config.rope_scaling = {"type": scaling_type, "factor": 10.0}
        scaled_model = LlamaModel(config)
        scaled_model.to(torch_device)
        scaled_model.eval()
        scaled_short_output = scaled_model(short_input).last_hidden_state
        scaled_long_output = scaled_model(long_input).last_hidden_state

        # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
        # maximum sequence length, so the outputs for the short input should match.
        if scaling_type == "dynamic":
            self.assertTrue(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))
        else:
            self.assertFalse(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))

        # The output should be different for long inputs
        self.assertFalse(torch.allclose(original_long_output, scaled_long_output, atol=1e-5))
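# Hedged sketch (standalone, hypothetical helper -- not the Hugging Face
# implementation): linear RoPE scaling, the behaviour exercised by the
# {"type": "linear", "factor": 10.0} config above, divides position ids by
# `factor` before the rotary angles are computed.
def _linear_scaled_rope_angles(seq_len, dim, factor, base=10000.0):
    # inverse frequencies for each pair of hidden dimensions
    inv_freq = 1.0 / (base ** (torch.arange(0, dim, 2).float() / dim))
    positions = torch.arange(seq_len).float() / factor  # the linear-scaling step
    # angle matrix of shape (seq_len, dim // 2)
    return torch.outer(positions, inv_freq)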
@require_torch
class LlamaIntegrationTest(unittest.TestCase):
'''simple docstring'''
    @unittest.skip("Logits are not exactly the same, once we fix the instabilities somehow, will update!")
    @slow
    def test_model_7b_logits(self):
        input_ids = [1, 306, 4_658, 278, 6_593, 310, 2_834, 338]
        model = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-7b-hf", device_map="auto")
        out = model(torch.tensor([input_ids])).logits
        # Expected mean on dim = -1
        EXPECTED_MEAN = torch.tensor([[-6.6550, -4.1227, -4.9859, -3.2406, 0.8262, -3.0033, 1.2964, -3.3699]])
        torch.testing.assert_close(out.mean(-1), EXPECTED_MEAN, atol=1e-2, rtol=1e-2)
        # slicing logits[0, 0, 0:30]
        # fmt: off
        EXPECTED_SLICE = torch.tensor([-12.8281, -7.4453, -0.4639, -8.0625, -7.2500, -8.0000, -6.4883, -7.7695, -7.8438, -7.0312, -6.2188, -7.1328, -1.8496, 1.9961, -8.6250, -6.7227, -12.8281, -6.9492, -7.0742, -7.7852, -7.5820, -7.9062, -6.9375, -7.9805, -8.3438, -8.1562, -8.0469, -7.6250, -7.7422, -7.3398,])
        # fmt: on
        torch.testing.assert_close(out[0, 0, :30], EXPECTED_SLICE, atol=1e-5, rtol=1e-5)
    @unittest.skip("Logits are not exactly the same, once we fix the instabilities somehow, will update!")
    @slow
    def test_model_13b_logits(self):
        input_ids = [1, 306, 4_658, 278, 6_593, 310, 2_834, 338]
        model = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-13b-hf", device_map="auto")
        out = model(torch.tensor([input_ids])).logits
        # Expected mean on dim = -1
        EXPECTED_MEAN = torch.tensor([[-2.0622, -1.2794, -1.1638, -0.9788, -1.4603, -1.0238, -1.7893, -1.4411]])
        torch.testing.assert_close(out.mean(-1), EXPECTED_MEAN, atol=1e-2, rtol=1e-2)
        # slicing logits[0, 0, 0:30]
        # fmt: off
        EXPECTED_SLICE = torch.tensor([-8.1406, -8.0547, 2.7461, -1.2344, -0.1448, -1.8262, -1.0020, -1.8154, -1.6895, -1.8516, -2.3574, -0.9277, 3.7598, 6.5742, -1.2998, -0.1177, -8.1406, -2.9688, -2.9199, -3.1699, -3.5254, -2.3555, -2.7988, -3.4141, -2.8262, -4.5195, -3.3379, -3.3164, -2.7832, -3.0273])
        # fmt: on
        torch.testing.assert_close(out[0, 0, :30], EXPECTED_SLICE, atol=1e-5, rtol=1e-5)
    @unittest.skip("Logits are not exactly the same, once we fix the instabilities somehow, will update!")
    @slow
    def test_model_13bf_logits(self):
        input_ids = [1, 306, 4_658, 278, 6_593, 310, 2_834, 338]
        model = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-13b-chat-hf", device_map="auto")
        out = model(torch.tensor([input_ids])).logits
        # Expected mean on dim = -1
        EXPECTED_MEAN = torch.tensor([[-0.8562, -1.8520, -0.7551, -0.4162, -1.5161, -1.2038, -2.4823, -2.3254]])
        torch.testing.assert_close(out.mean(-1), EXPECTED_MEAN, atol=1e-2, rtol=1e-2)
        # slicing logits[0, 0, 0:30]
        # fmt: off
        EXPECTED_SLICE = torch.tensor([-2.2227, 4.8828, 0.9023, -0.4578, -0.7871, -0.1033, -0.6221, -0.5786, -0.7803, -1.0674, -1.2920, -0.1570, 0.8008, 2.0723, -0.9497, 0.2771, -2.2227, -0.7612, -1.4346, -1.2061, -1.6426, -0.3000, -0.7139, -1.1934, -1.8691, -1.6973, -1.5947, -1.2705, -0.3523, -0.5513])
        # fmt: on
        torch.testing.assert_close(out[0, 0, :30], EXPECTED_SLICE, atol=1e-5, rtol=1e-5)
    @unittest.skip(
        "Logits are not exactly the same, once we fix the instabilities somehow, will update! Also it is gonna be a `too_slow` test"
    )
    @slow
    def test_model_70b_logits(self):
        input_ids = [1, 306, 4_658, 278, 6_593, 310, 2_834, 338]
        model = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-70b-hf", device_map="auto")
        out = model(torch.tensor([input_ids])).logits
        EXPECTED_MEAN = torch.tensor(
            [[-4.2327, -3.3360, -4.6665, -4.7631, -1.8180, -3.4170, -1.4211, -3.1810]], dtype=torch.float32
        )
        torch.testing.assert_close(out.mean(-1), EXPECTED_MEAN, atol=1e-2, rtol=1e-2)
        # fmt: off
        EXPECTED_SLICE = torch.tensor([-9.4922, -3.9551, 1.7998, -5.6758, -5.1055, -5.8984, -4.8320, -6.8086, -6.5391, -5.6172, -5.5820, -5.5352, 1.7881, 3.6289, -6.5117, -3.4785, -9.5000, -6.0352, -6.8125, -6.0195, -6.6836, -5.4727, -6.2812, -6.0391, -7.3398, -7.4297, -7.4844, -6.5820, -5.8789, -5.5312])
        # fmt: on
        torch.testing.assert_close(out[0, 0, :30], EXPECTED_SLICE, atol=1e-5, rtol=1e-5)
    @unittest.skip("Model is currently gated")
    @slow
    def test_model_13b_greedy_generation(self):
        EXPECTED_TEXT_COMPLETION = 'Simply put, the theory of relativity states that 1) the laws of physics are the same everywhere in the universe and 2) the passage of time and the length of objects can vary depending on the observer\'s frame of reference.\n\nThe first part of the theory, that the laws of physics are the same everywhere, is known as the "princi'
        prompt = "Simply put, the theory of relativity states that "
        tokenizer = LlamaTokenizer.from_pretrained("meta-llama/Llama-2-13b-chat-hf")
        input_ids = tokenizer.encode(prompt, return_tensors="pt")
        model = LlamaForCausalLM.from_pretrained(
            "meta-llama/Llama-2-13b-chat-hf", device_map="sequential", use_safetensors=False)
        # greedy generation outputs
        generated_ids = model.generate(input_ids, max_new_tokens=64, top_p=None, temperature=1, do_sample=False)
        text = tokenizer.decode(generated_ids[0], skip_special_tokens=True)
        self.assertEqual(EXPECTED_TEXT_COMPLETION, text) | 225 | 1 |
from __future__ import annotations
from typing import TypedDict
class BWTTransformDict(TypedDict):
    bwt_string: str
    idx_original_string: int
def all_rotations(s: str) -> list[str]:
    """Returns the list of all cyclic rotations of the given string."""
    if not isinstance(s, str):
        raise TypeError("The parameter s type must be str.")
    return [s[i:] + s[:i] for i in range(len(s))]
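# Quick check (uses the function above): every cyclic shift appears exactly once.
assert all_rotations("abc") == ["abc", "bca", "cab"]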
def bwt_transform(s: str) -> BWTTransformDict:
    """Computes the Burrows-Wheeler transform of the given string."""
    if not isinstance(s, str):
        raise TypeError("The parameter s type must be str.")
    if not s:
        raise ValueError("The parameter s must not be empty.")

    rotations = all_rotations(s)
    rotations.sort()  # sort the list of rotations in alphabetical order
    # make a string composed of the last char of each rotation
    response: BWTTransformDict = {
        "bwt_string": "".join([word[-1] for word in rotations]),
        "idx_original_string": rotations.index(s),
    }
    return response
def reverse_bwt(bwt_string: str, idx_original_string: int) -> str:
    """Reverses the Burrows-Wheeler transform and returns the original string."""
    if not isinstance(bwt_string, str):
        raise TypeError("The parameter bwt_string type must be str.")
    if not bwt_string:
        raise ValueError("The parameter bwt_string must not be empty.")
    try:
        idx_original_string = int(idx_original_string)
    except ValueError:
        raise TypeError(
            "The parameter idx_original_string type must be int or castable"
            " to int."
        )
    if idx_original_string < 0:
        raise ValueError("The parameter idx_original_string must not be lower than 0.")
    if idx_original_string >= len(bwt_string):
        raise ValueError(
            "The parameter idx_original_string must be lower than" " len(bwt_string)."
        )

    ordered_rotations = [""] * len(bwt_string)
    for _ in range(len(bwt_string)):
        for i in range(len(bwt_string)):
            ordered_rotations[i] = bwt_string[i] + ordered_rotations[i]
        ordered_rotations.sort()
    return ordered_rotations[idx_original_string]
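# Round-trip sanity check (uses the two functions above), with the classic
# "^BANANA" example: the transform is "BNN^AAA" at index 6 and inverts cleanly.
assert bwt_transform("^BANANA") == {"bwt_string": "BNN^AAA", "idx_original_string": 6}
assert reverse_bwt("BNN^AAA", 6) == "^BANANA"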
if __name__ == "__main__":
    entry_msg = "Provide a string that I will generate its BWT transform: "
    s = input(entry_msg).strip()
    result = bwt_transform(s)
    print(
        f"Burrows Wheeler transform for string '{s}' results "
        f"in '{result['bwt_string']}'"
    )
    original_string = reverse_bwt(result["bwt_string"], result["idx_original_string"])
    print(
        f"Reversing Burrows Wheeler transform for entry '{result['bwt_string']}' "
        f"we get original string '{original_string}'"
    )
| 200 |
import unittest
from transformers import XLMConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMWithLMHeadModel,
)
from transformers.models.xlm.modeling_xlm import XLM_PRETRAINED_MODEL_ARCHIVE_LIST
class XLMModelTester:
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_lengths=True, use_token_type_ids=True, use_labels=True, gelu_activation=True, sinusoidal_embeddings=False, causal=False, asm=False, n_langs=2, vocab_size=99, n_special=0, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_sequence_label_size=2, initializer_range=0.02, num_labels=2, num_choices=4, summary_type="last", use_proj=True, scope=None, bos_token_id=0, ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_lengths = use_input_lengths
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.gelu_activation = gelu_activation
        self.sinusoidal_embeddings = sinusoidal_embeddings
        self.causal = causal
        self.asm = asm
        self.n_langs = n_langs
        self.vocab_size = vocab_size
        self.n_special = n_special
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.summary_type = summary_type
        self.use_proj = use_proj
        self.scope = scope
        self.bos_token_id = bos_token_id
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = random_attention_mask([self.batch_size, self.seq_length])

        input_lengths = None
        if self.use_input_lengths:
            input_lengths = (
                ids_tensor([self.batch_size], vocab_size=2) + self.seq_length - 2
            )  # small variation of seq_length

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.n_langs)

        sequence_labels = None
        token_labels = None
        is_impossible_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            is_impossible_labels = ids_tensor([self.batch_size], 2).float()
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
    def get_config(self):
return XLMConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , num_labels=self.num_labels , bos_token_id=self.bos_token_id , )
    def create_and_check_xlm_model(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ):
        model = XLMModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, lengths=input_lengths, langs=token_type_ids)
        result = model(input_ids, langs=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_xlm_lm_head(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ):
        model = XLMWithLMHeadModel(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_xlm_simple_qa(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ):
        model = XLMForQuestionAnsweringSimple(config)
        model.to(torch_device)
        model.eval()
        outputs = model(input_ids)
        outputs = model(input_ids, start_positions=sequence_labels, end_positions=sequence_labels)
        result = outputs
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_xlm_qa(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ):
        model = XLMForQuestionAnswering(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids)
        result_with_labels = model(
            input_ids, start_positions=sequence_labels, end_positions=sequence_labels, cls_index=sequence_labels, is_impossible=is_impossible_labels, p_mask=input_mask, )
        result_with_labels = model(
            input_ids, start_positions=sequence_labels, end_positions=sequence_labels, cls_index=sequence_labels, is_impossible=is_impossible_labels, )
        (total_loss,) = result_with_labels.to_tuple()
        result_with_labels = model(input_ids, start_positions=sequence_labels, end_positions=sequence_labels)
        (total_loss,) = result_with_labels.to_tuple()
        self.parent.assertEqual(result_with_labels.loss.shape, ())
        self.parent.assertEqual(result.start_top_log_probs.shape, (self.batch_size, model.config.start_n_top))
        self.parent.assertEqual(result.start_top_index.shape, (self.batch_size, model.config.start_n_top))
        self.parent.assertEqual(
            result.end_top_log_probs.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top))
        self.parent.assertEqual(
            result.end_top_index.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top))
        self.parent.assertEqual(result.cls_logits.shape, (self.batch_size,))

    def create_and_check_xlm_sequence_classif(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ):
        model = XLMForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids)
        result = model(input_ids, labels=sequence_labels)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def create_and_check_xlm_token_classif(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ):
        config.num_labels = self.num_labels
        model = XLMForTokenClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_xlm_for_multiple_choice(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ):
        config.num_choices = self.num_choices
        model = XLMForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids, attention_mask=multiple_choice_input_mask, token_type_ids=multiple_choice_token_type_ids, labels=choice_labels, )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "lengths": input_lengths}
        return config, inputs_dict
@require_torch
class XLMModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
(
XLMModel,
XLMWithLMHeadModel,
XLMForQuestionAnswering,
XLMForSequenceClassification,
XLMForQuestionAnsweringSimple,
XLMForTokenClassification,
XLMForMultipleChoice,
)
if is_torch_available()
else ()
)
    all_generative_model_classes = (
        (XLMWithLMHeadModel,) if is_torch_available() else ()
    )  # TODO (PVP): Check other models whether language generation is also applicable
    pipeline_model_mapping = (
{
'''feature-extraction''': XLMModel,
'''fill-mask''': XLMWithLMHeadModel,
'''question-answering''': XLMForQuestionAnsweringSimple,
'''text-classification''': XLMForSequenceClassification,
'''text-generation''': XLMWithLMHeadModel,
'''token-classification''': XLMForTokenClassification,
'''zero-shot''': XLMForSequenceClassification,
}
if is_torch_available()
else {}
)
    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if (
            pipeline_test_casse_name == "QAPipelineTests"
            and tokenizer_name is not None
            and not tokenizer_name.endswith("Fast")
        ):
            # `QAPipelineTests` fails for a few models when the slower tokenizer are used.
            # (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
            # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
            return True

        return False

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
        if return_labels:
            if model_class.__name__ == "XLMForQuestionAnswering":
                inputs_dict["start_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
                inputs_dict["end_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict
    def setUp(self):
        self.model_tester = XLMModelTester(self)
        self.config_tester = ConfigTester(self, config_class=XLMConfig, emb_dim=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_xlm_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_model(*config_and_inputs)

    def test_xlm_lm_head(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_lm_head(*config_and_inputs)

    def test_xlm_simple_qa(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_simple_qa(*config_and_inputs)

    def test_xlm_qa(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_qa(*config_and_inputs)

    def test_xlm_sequence_classif(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_sequence_classif(*config_and_inputs)

    def test_xlm_token_classif(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_token_classif(*config_and_inputs)

    def test_xlm_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_for_multiple_choice(*config_and_inputs)
    def _check_attentions_for_generate(
        self, batch_size, attentions, min_length, max_length, config, use_cache=False, num_beam_groups=1
    ):
        self.assertIsInstance(attentions, tuple)
        self.assertListEqual(
            [isinstance(iter_attentions, tuple) for iter_attentions in attentions], [True] * len(attentions))
        self.assertEqual(len(attentions), (max_length - min_length) * num_beam_groups)

        for idx, iter_attentions in enumerate(attentions):
            # adds PAD dummy token
            tgt_len = min_length + idx + 1
            src_len = min_length + idx + 1

            expected_shape = (
                batch_size * num_beam_groups,
                config.num_attention_heads,
                tgt_len,
                src_len,
            )
            # check attn size
            self.assertListEqual(
                [layer_attention.shape for layer_attention in iter_attentions], [expected_shape] * len(iter_attentions))

    def _check_hidden_states_for_generate(
        self, batch_size, hidden_states, min_length, max_length, config, use_cache=False, num_beam_groups=1
    ):
        self.assertIsInstance(hidden_states, tuple)
        self.assertListEqual(
            [isinstance(iter_hidden_states, tuple) for iter_hidden_states in hidden_states], [True] * len(hidden_states), )
        self.assertEqual(len(hidden_states), (max_length - min_length) * num_beam_groups)

        for idx, iter_hidden_states in enumerate(hidden_states):
            # adds PAD dummy token
            seq_len = min_length + idx + 1
            expected_shape = (batch_size * num_beam_groups, seq_len, config.hidden_size)
            # check hidden size
            self.assertListEqual(
                [layer_hidden_states.shape for layer_hidden_states in iter_hidden_states], [expected_shape] * len(iter_hidden_states), )
        pass
@slow
    def test_model_from_pretrained(self):
        for model_name in XLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = XLMModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
class XLMModelLanguageGenerationTest(unittest.TestCase):
    @slow
    def test_lm_generate_xlm_mlm_en_2048(self):
        model = XLMWithLMHeadModel.from_pretrained("xlm-mlm-en-2048")
        model.to(torch_device)
        input_ids = torch.tensor([[14, 447]], dtype=torch.long, device=torch_device)  # the president
        expected_output_ids = [
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
] # the president the president the president the president the president the president the president the president the president the president
# TODO(PVP): this and other input_ids I tried for generation give pretty bad results. Not sure why. Model might just not be made for auto-regressive inference
        output_ids = model.generate(input_ids, do_sample=False)
        self.assertListEqual(output_ids[0].cpu().numpy().tolist(), expected_output_ids)
| 200 | 1 |
'''simple docstring'''
import os
from typing import List, Optional, Union
from ...image_processing_utils import BatchFeature
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
from ..auto import AutoTokenizer
class InstructBlipProcessor(ProcessorMixin):
    """Wraps a BLIP image processor, a tokenizer and a Q-Former tokenizer into a single processor."""
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "BlipImageProcessor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, image_processor, tokenizer, qformer_tokenizer):
        super().__init__(image_processor, tokenizer)
        # add QFormer tokenizer
        self.qformer_tokenizer = qformer_tokenizer
    def __call__(self, images: ImageInput = None, text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None, add_special_tokens: bool = True, padding: Union[bool, str, PaddingStrategy] = False, truncation: Union[bool, str, TruncationStrategy] = None, max_length: Optional[int] = None, stride: int = 0, pad_to_multiple_of: Optional[int] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_token_type_ids: bool = False, return_length: bool = False, verbose: bool = True, return_tensors: Optional[Union[str, TensorType]] = None, **kwargs, ) -> BatchFeature:
        if images is None and text is None:
            raise ValueError("You have to specify at least images or text.")

        encoding = BatchFeature()

        if text is not None:
            text_encoding = self.tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs, )
            encoding.update(text_encoding)
            qformer_text_encoding = self.qformer_tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs, )
            encoding["qformer_input_ids"] = qformer_text_encoding.pop("input_ids")
            encoding["qformer_attention_mask"] = qformer_text_encoding.pop("attention_mask")

        if images is not None:
            image_encoding = self.image_processor(images, return_tensors=return_tensors)
            encoding.update(image_encoding)

        return encoding
    def batch_decode(self, *args, **kwargs):
        """Forwards all arguments to the tokenizer's `batch_decode`."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forwards all arguments to the tokenizer's `decode`."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    # Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
    # overwrite to save the Q-Former tokenizer in a separate folder
    def save_pretrained(self, save_directory, **kwargs):
        if os.path.isfile(save_directory):
            raise ValueError(f"Provided path ({save_directory}) should be a directory, not a file")
        os.makedirs(save_directory, exist_ok=True)
        qformer_tokenizer_path = os.path.join(save_directory, "qformer_tokenizer")
        self.qformer_tokenizer.save_pretrained(qformer_tokenizer_path)
        return super().save_pretrained(save_directory, **kwargs)

    # overwrite to load the Q-Former tokenizer from a separate folder
    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        qformer_tokenizer = AutoTokenizer.from_pretrained(pretrained_model_name_or_path, subfolder="qformer_tokenizer")
        args = cls._get_arguments_from_pretrained(pretrained_model_name_or_path, **kwargs)
        args.append(qformer_tokenizer)
        return cls(*args)
| 134 |
'''simple docstring'''
def largest_square_area_in_matrix_top_down_approach(rows: int, cols: int, mat: list[list[int]]) -> int:
    """Recursively computes the side length of the largest all-ones square in mat."""

    def update_area_of_max_square(row: int, col: int) -> int:
        # BASE CASE
        if row >= rows or col >= cols:
            return 0

        right = update_area_of_max_square(row, col + 1)
        diagonal = update_area_of_max_square(row + 1, col + 1)
        down = update_area_of_max_square(row + 1, col)

        if mat[row][col]:
            sub_problem_sol = 1 + min([right, diagonal, down])
            largest_square_area[0] = max(largest_square_area[0], sub_problem_sol)
            return sub_problem_sol
        else:
            return 0

    largest_square_area = [0]
    update_area_of_max_square(0, 0)
    return largest_square_area[0]


def largest_square_area_in_matrix_top_down_approach_with_dp(rows: int, cols: int, mat: list[list[int]]) -> int:
    """Memoized variant of the top-down approach."""

    def update_area_of_max_square_using_dp_array(row: int, col: int, dp_array: list[list[int]]) -> int:
        if row >= rows or col >= cols:
            return 0
        if dp_array[row][col] != -1:
            return dp_array[row][col]

        right = update_area_of_max_square_using_dp_array(row, col + 1, dp_array)
        diagonal = update_area_of_max_square_using_dp_array(row + 1, col + 1, dp_array)
        down = update_area_of_max_square_using_dp_array(row + 1, col, dp_array)

        if mat[row][col]:
            sub_problem_sol = 1 + min([right, diagonal, down])
            largest_square_area[0] = max(largest_square_area[0], sub_problem_sol)
            dp_array[row][col] = sub_problem_sol
            return sub_problem_sol
        else:
            return 0

    largest_square_area = [0]
    dp_array = [[-1] * cols for _ in range(rows)]
    update_area_of_max_square_using_dp_array(0, 0, dp_array)
    return largest_square_area[0]
def largest_square_area_in_matrix_bottom_up(rows: int, cols: int, mat: list[list[int]]) -> int:
    """Bottom-up DP over a (rows + 1) x (cols + 1) table."""
    dp_array = [[0] * (cols + 1) for _ in range(rows + 1)]
    largest_square_area = 0
    for row in range(rows - 1, -1, -1):
        for col in range(cols - 1, -1, -1):
            right = dp_array[row][col + 1]
            diagonal = dp_array[row + 1][col + 1]
            down = dp_array[row + 1][col]

            if mat[row][col] == 1:
                dp_array[row][col] = 1 + min(right, diagonal, down)
                largest_square_area = max(dp_array[row][col], largest_square_area)
            else:
                dp_array[row][col] = 0
    return largest_square_area


def largest_square_area_in_matrix_bottom_up_space_optimization(rows: int, cols: int, mat: list[list[int]]) -> int:
    """Bottom-up DP keeping only two rows of the table."""
    current_row = [0] * (cols + 1)
    next_row = [0] * (cols + 1)
    largest_square_area = 0
    for row in range(rows - 1, -1, -1):
        for col in range(cols - 1, -1, -1):
            right = current_row[col + 1]
            diagonal = next_row[col + 1]
            down = next_row[col]

            if mat[row][col] == 1:
                current_row[col] = 1 + min(right, diagonal, down)
                largest_square_area = max(current_row[col], largest_square_area)
            else:
                current_row[col] = 0
        next_row = current_row[:]  # copy, so later writes to current_row don't alias
    return largest_square_area
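# Small worked example (uses the four functions above): a 2x2 all-ones matrix
# contains a square of side 2, while a diagonal matrix only allows side 1.
assert largest_square_area_in_matrix_top_down_approach(2, 2, [[1, 1], [1, 1]]) == 2
assert largest_square_area_in_matrix_top_down_approach_with_dp(2, 2, [[1, 1], [1, 1]]) == 2
assert largest_square_area_in_matrix_bottom_up(2, 2, [[1, 1], [1, 1]]) == 2
assert largest_square_area_in_matrix_bottom_up_space_optimization(2, 2, [[1, 0], [0, 1]]) == 1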
if __name__ == "__main__":
import doctest
doctest.testmod()
print(largest_square_area_in_matrix_bottom_up(2, 2, [[1, 1], [1, 1]]))
| 134 | 1 |
import gzip
import hashlib
import json
import multiprocessing
import os
import re
import shutil
import time
from pathlib import Path
import numpy as np
from arguments import PreprocessingArguments
from datasets import load_dataset
from minhash_deduplication import deduplicate_dataset
from transformers import AutoTokenizer, HfArgumentParser
PATTERN = re.compile(r"\s+")


def get_hash(example):
    """Get hash of content field."""
    return {"hash": hashlib.md5(re.sub(PATTERN, "", example["content"]).encode("utf-8")).hexdigest()}


def line_stats(example):
    """Calculate line statistics of content field."""
    line_lengths = [len(line) for line in example["content"].splitlines()]
    return {"line_mean": np.mean(line_lengths), "line_max": max(line_lengths)}


def alpha_stats(example):
    """Calculate fraction of alphanumeric characters of content field."""
    alpha_frac = np.mean([c.isalnum() for c in example["content"]])
    return {"alpha_frac": alpha_frac}


def check_uniques(example, uniques):
    """Check if current hash is still in set of unique hashes and remove if true."""
    if example["hash"] in uniques:
        uniques.remove(example["hash"])
        return True
    else:
        return False


def is_autogenerated(example, scan_width=5):
    """Check if file is autogenerated by looking at the first few lines."""
    keywords = ["auto-generated", "autogenerated", "automatically generated"]
    lines = example["content"].splitlines()
    for _, line in zip(range(scan_width), lines):
        for keyword in keywords:
            if keyword in line.lower():
                return {"autogenerated": True}
    else:
        return {"autogenerated": False}
def is_config_or_test(example, scan_width=5, coeff=0.05):
    """Check if file is a configuration or test file: first by keywords in the opening
    lines, then by how often "config"/"test" appear relative to the number of lines."""
    keywords = ["unit tests", "test file", "configuration file"]
    lines = example["content"].splitlines()
    count_config = 0
    count_test = 0
    # first test
    for _, line in zip(range(scan_width), lines):
        for keyword in keywords:
            if keyword in line.lower():
                return {"config_or_test": True}
    # second test
    nlines = example["content"].count("\n")
    threshold = int(coeff * nlines)
    for line in lines:
        count_config += line.lower().count("config")
        count_test += line.lower().count("test")
        if count_config > threshold or count_test > threshold:
            return {"config_or_test": True}
    return {"config_or_test": False}


def has_no_keywords(example):
    """Check if a python file has none of the keywords for: function, class, for loop, while loop."""
    keywords = ["def ", "class ", "for ", "while "]
    lines = example["content"].splitlines()
    for line in lines:
        for keyword in keywords:
            if keyword in line.lower():
                return {"has_no_keywords": False}
    return {"has_no_keywords": True}


def has_few_assignments(example, minimum=4):
    """Check if file uses symbol '=' less than `minimum` times."""
    lines = example["content"].splitlines()
    counter = 0
    for line in lines:
        counter += line.lower().count("=")
        if counter > minimum:
            return {"has_few_assignments": False}
    return {"has_few_assignments": True}


def char_token_ratio(example):
    """Compute character/token ratio of the file with the tokenizer."""
    input_ids = tokenizer(example["content"], truncation=False)["input_ids"]
    ratio = len(example["content"]) / len(input_ids)
    return {"ratio": ratio}
def preprocess(example):
    """Chain all preprocessing steps into one function to not fill cache."""
    results = dict()
    results.update(get_hash(example))
    results.update(line_stats(example))
    results.update(alpha_stats(example))
    results.update(char_token_ratio(example))
    results.update(is_autogenerated(example))
    results.update(is_config_or_test(example))
    results.update(has_no_keywords(example))
    results.update(has_few_assignments(example))
    return results


def filter(example, uniques, args):
    """Filter dataset with heuristics. Config/test and keyword-free files are removed
    only with a given probability."""
    if not check_uniques(example, uniques):
        return False
    elif example["autogenerated"]:
        return False
    elif example["line_max"] > args.line_max:
        return False
    elif example["line_mean"] > args.line_mean:
        return False
    elif example["alpha_frac"] < args.alpha_frac:
        return False
    elif example["ratio"] < args.min_token_ratio:
        return False
    elif example["config_or_test"] and np.random.rand() <= args.filter_proba:
        return False
    elif example["has_no_keywords"] and np.random.rand() <= args.filter_proba:
        return False
    elif example["has_few_assignments"]:
        return False
    else:
        return True


def compress_file(file_path):
    """Compress a file with g-zip."""
    with open(file_path, "rb") as f_in:
        with gzip.open(str(file_path) + ".gz", "wb", compresslevel=6) as f_out:
            shutil.copyfileobj(f_in, f_out)
    os.unlink(file_path)
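# Hedged, toy illustration (not part of the original pipeline): the heuristics
# above operate on plain dicts with a "content" field.
_toy_example = {"content": "def add(a, b):\n    return a + b\n"}
assert is_autogenerated(_toy_example) == {"autogenerated": False}
assert has_no_keywords(_toy_example) == {"has_no_keywords": False}  # contains "def "
assert has_few_assignments(_toy_example) == {"has_few_assignments": True}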
# Settings
parser = HfArgumentParser(PreprocessingArguments)
args = parser.parse_args()
if args.num_workers is None:
    args.num_workers = multiprocessing.cpu_count()
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_dir)

# Load dataset
t_start = time.time()
ds = load_dataset(args.dataset_name, split="train")
print(f"Time to load dataset: {time.time()-t_start:.2f}")

# Run preprocessing
t_start = time.time()
ds = ds.map(preprocess, num_proc=args.num_workers)
print(f"Time to preprocess dataset: {time.time()-t_start:.2f}")

# Deduplicate hashes
uniques = set(ds.unique("hash"))
frac = len(uniques) / len(ds)
print(f"Fraction of duplicates: {1-frac:.2%}")

# Deduplicate data and apply heuristics
t_start = time.time()
ds_filter = ds.filter(filter, fn_kwargs={"uniques": uniques, "args": args})
print(f"Time to filter dataset: {time.time()-t_start:.2f}")
print(f"Size of filtered dataset: {len(ds_filter)}")

# Deduplicate with minhash and jaccard similarity
if args.near_deduplication:
    t_start = time.time()
    ds_filter, duplicate_clusters = deduplicate_dataset(ds_filter, args.jaccard_threshold)
    print(f"Time to deduplicate dataset: {time.time()-t_start:.2f}")
    print(f"Size of deduplicate dataset: {len(ds_filter)}")

# Save data in batches of samples_per_file
output_dir = Path(args.output_dir)
output_dir.mkdir(exist_ok=True)

# save duplicate_clusters in the output_dir as artifacts
# not sure it is the right place the save it
if args.near_deduplication:
    with open(output_dir / "duplicate_clusters.json", "w") as f:
        json.dump(duplicate_clusters, f)

data_dir = output_dir / "data"
data_dir.mkdir(exist_ok=True)

t_start = time.time()
for file_number, index in enumerate(range(0, len(ds_filter), args.samples_per_file)):
    file_path = str(data_dir / f"file-{file_number+1:012}.json")
    end_index = min(len(ds_filter), index + args.samples_per_file)
    ds_filter.select(list(range(index, end_index))).to_json(file_path)
    compress_file(file_path)
print(f"Time to save dataset: {time.time()-t_start:.2f}")
| 152 |
import collections
import json
import os
import re
from typing import TYPE_CHECKING, List, Optional, Tuple
import numpy as np
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "emoji_file": "emoji.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "abeja/gpt-neox-japanese-2.7b": "https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/vocab.txt",
    },
    "emoji_file": {
        "abeja/gpt-neox-japanese-2.7b": "https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/emoji.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "abeja/gpt-neox-japanese-2.7b": 2048,
}
def load_vocab_and_emoji(vocab_file, emoji_file):
    """Loads a vocabulary file and emoji file into a dictionary."""
    with open(emoji_file, "r", encoding="utf-8") as f:
        emoji = json.loads(f.read())

    vocab = collections.OrderedDict()
    raw_vocab = collections.OrderedDict()
    ids_to_tokens = collections.OrderedDict()
    with open(vocab_file, "r", encoding="utf-8") as f:
        token = f.readlines()
    token = [[t.rstrip("\n")] if (t == "," or "," not in t) else t.rstrip("\n").split(",") for t in token]
    for idx, b in enumerate(token):
        ids_to_tokens[idx] = b
        raw_vocab[",".join(b)] = idx
        for wd in b:
            vocab[wd] = idx
    return vocab, raw_vocab, ids_to_tokens, emoji
class GPTNeoXJapaneseTokenizer(PreTrainedTokenizer):
    """Tokenizer for GPT-NeoX-Japanese, with byte-level and emoji fallbacks."""
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(self, vocab_file, emoji_file, unk_token="<|endoftext|>", pad_token="<|endoftext|>", bos_token="<|startoftext|>", eos_token="<|endoftext|>", do_clean_text=False, **kwargs, ):
        super().__init__(
            unk_token=unk_token, pad_token=pad_token, bos_token=bos_token, eos_token=eos_token, do_clean_text=do_clean_text, **kwargs, )
        if not os.path.isfile(vocab_file):
            raise ValueError(
                f"""Can't find a vocabulary file at path '{vocab_file}'. To load the vocabulary from a Google pretrained"""
                " model use `tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`")
        if not os.path.isfile(emoji_file):
            raise ValueError(
                f"""Can't find an emoji file at path '{emoji_file}'. To load the emoji information from a Google"""
                " pretrained model use `tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`")
        self.do_clean_text = do_clean_text
        self.vocab, self.raw_vocab, self.ids_to_tokens, self.emoji = load_vocab_and_emoji(vocab_file, emoji_file)
        self.subword_tokenizer = SubWordJapaneseTokenizer(
            vocab=self.vocab, ids_to_tokens=self.ids_to_tokens, emoji=self.emoji)
    @property
    def vocab_size(self):
        # self.vocab contains support for character fluctuation unique to Japanese, and has a large number of vocab
        return len(self.raw_vocab)

    def get_vocab(self):
        return dict(self.raw_vocab, **self.added_tokens_encoder)

    def _tokenize(self, text):
        return self.subword_tokenizer.tokenize(text, clean=self.do_clean_text)

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        return self.vocab.get(token, self.vocab.get(self.unk_token))

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        return self.subword_tokenizer.convert_id_to_token(index)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (string) to a single string."""
        out_string = "".join(tokens).strip()
        return out_string

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text, add_special_tokens=False) + [self.eos_token_id])

        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
    def save_vocabulary(self, save_directory, filename_prefix = None) -> Tuple[str]:
        index = 0
        if os.path.isdir(save_directory):
            vocab_file = os.path.join(
                save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
            emoji_file = os.path.join(
                save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["emoji_file"])
        else:
            vocab_file = (
                (filename_prefix + "-" if filename_prefix else "") + save_directory + VOCAB_FILES_NAMES["vocab_file"]
            )
            emoji_file = (
                (filename_prefix + "-" if filename_prefix else "") + save_directory + VOCAB_FILES_NAMES["emoji_file"]
            )
        with open(vocab_file, "w", encoding="utf-8") as writer:
            for token_index, token in self.ids_to_tokens.items():
                if index != token_index:
                    logger.warning(
                        f"""Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."""
                        " Please check that the vocabulary is not corrupted!")
                    index = token_index
                writer.write(",".join(token) + "\n")
                index += 1
        with open(emoji_file, "w", encoding="utf-8") as writer:
            json.dump(self.emoji, writer)
        return vocab_file, emoji_file
class SubWordJapaneseTokenizer(object):
    """Greedy longest-match subword tokenizer with byte-level and emoji fallbacks."""

    def __init__(self, vocab, ids_to_tokens, emoji):
        self.vocab = vocab  # same as swe
        self.ids_to_tokens = ids_to_tokens  # same as bpe
        self.emoji = emoji
        self.maxlen = np.max([len(w) for w in self.vocab.keys()])
        self.content_repatter1 = re.compile(R'''(https?|ftp)(:\/\/[-_\.!~*\'()a-zA-Z0-9;\/?:\@&=\+$,%#]+)''')
        self.content_repatter2 = re.compile(R'''[A-Za-z0-9\._+]*@[\-_0-9A-Za-z]+(\.[A-Za-z]+)*''')
        self.content_repatter3 = re.compile(R'''[\(]{0,1}[0-9]{2,4}[\)\-\(]{0,1}[0-9]{2,4}[\)\-]{0,1}[0-9]{3,4}''')
        self.content_repatter4 = re.compile(
            R'''([12]\d{3}[/\-年])*(0?[1-9]|1[0-2])[/\-月]((0?[1-9]|[12][0-9]|3[01])日?)*(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*''')
        self.content_repatter5 = re.compile(
            R'''(明治|大正|昭和|平成|令和|㍾|㍽|㍼|㍻|\u32ff)\d{1,2}年(0?[1-9]|1[0-2])月(0?[1-9]|[12][0-9]|3[01])日(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*''')
        self.content_repatter6 = re.compile(
            R'''((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*億)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*万)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*千)*(0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*(千円|万円|千万円|円|千ドル|万ドル|千万ドル|ドル|千ユーロ|万ユーロ|千万ユーロ|ユーロ)+(\(税込\)|\(税抜\)|\+tax)*''')
        keisen = '''─━│┃┄┅┆┇┈┉┊┋┌┍┎┏┐┑┒┓└┕┖┗┘┙┚┛├┝┞┟┠┡┢┣┤┥┦┧┨┩┪┫┬┭┮┯┰┱┲┳┴┵┶┷┸┹┺┻┼┽┾┿╀╁╂╃╄╅╆╇╈╉╊╋╌╍╎╏═║╒╓╔╕╖╗╘╙╚╛╜╝╞╟╠╡╢╣╤╥╦╧╨╩╪╫╬╭╮╯╰╱╲╳╴╵╶╷╸╹╺╻╼╽╾╿'''
        blocks = '''▀▁▂▃▄▅▆▇█▉▊▋▌▍▎▏▐░▒▓▔▕▖▗▘▙▚▛▜▝▞▟'''
        self.content_trans1 = str.maketrans({k: "<BLOCK>" for k in keisen + blocks})

    def __len__(self):
        return len(self.ids_to_tokens)
    def clean_text(self, content):
        content = self.content_repatter1.sub("<URL>", content)
        content = self.content_repatter2.sub("<EMAIL>", content)
        content = self.content_repatter3.sub("<TEL>", content)
        content = self.content_repatter4.sub("<DATE>", content)
        content = self.content_repatter5.sub("<DATE>", content)
        content = self.content_repatter6.sub("<PRICE>", content)
        content = content.translate(self.content_trans1)
        while "<BLOCK><BLOCK>" in content:
            content = content.replace("<BLOCK><BLOCK>", "<BLOCK>")
        return content
    def tokenize(self, text, clean=False):
        text = text.replace(" ", "<SP>")
        text = text.replace("　", "<SP>")
        text = text.replace("\r\n", "<BR>")
        text = text.replace("\n", "<BR>")
        text = text.replace("\r", "<BR>")
        text = text.replace("\t", "<TAB>")
        text = text.replace("—", "ー")
        text = text.replace("−", "ー")
        for k, v in self.emoji["emoji"].items():
            if k in text:
                text = text.replace(k, v)
        if clean:
            text = self.clean_text(text)

        def check_simbol(x):
            e = x.encode()
            if len(x) == 1 and len(e) == 2:
                c = (int(e[0]) << 8) + int(e[1])
                if (
                    (c >= 0xC2A1 and c <= 0xC2BF)
                    or (c >= 0xC780 and c <= 0xC783)
                    or (c >= 0xCAB9 and c <= 0xCBBF)
                    or (c >= 0xCC80 and c <= 0xCDA2)
                ):
                    return True
            return False

        def checku2e(x):
            e = x.encode()
            if len(x) == 1 and len(e) == 3:
                c = (int(e[0]) << 16) + (int(e[1]) << 8) + int(e[2])
                if c >= 0xE28080 and c <= 0xE2B07F:
                    return True
            return False

        pos = 0
        result = []
        while pos < len(text):
            end = min(len(text), pos + self.maxlen + 1) if text[pos] == "<" else pos + 3
            candidates = []  # (token_id, token, pos)
            for e in range(end, pos, -1):
                wd = text[pos:e]
                if wd in self.vocab:
                    if wd[0] == "<" and len(wd) > 2:
                        candidates = [(self.vocab[wd], wd, e)]
                        break
                    else:
                        candidates.append((self.vocab[wd], wd, e))
            if len(candidates) > 0:
                # the smallest token_id is adopted
                _, wd, e = sorted(candidates, key=lambda x: x[0])[0]
                result.append(wd)
                pos = e
            else:
                end = pos + 1
                wd = text[pos:end]
                if check_simbol(wd):
                    result.append("<KIGOU>")
                elif checku2e(wd):
                    result.append("<U2000U2BFF>")
                else:
                    for i in wd.encode("utf-8"):
                        result.append("<|byte%d|>" % i)
                pos = end
        return result
def snake_case_ ( self , _snake_case , _snake_case="\n" ) -> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase = []
UpperCAmelCase = []
UpperCAmelCase = self.ids_to_tokens[index][0]
if word[:6] == "<|byte" and word[-2:] == "|>":
byte_tokens.append(int(word[6:-2] ) )
else:
if len(_snake_case ) > 0:
words.append(bytearray(_snake_case ).decode('''utf-8''' , errors='''replace''' ) )
UpperCAmelCase = []
if word[:7] == "<|emoji" and word[-2:] == "|>":
words.append(self.emoji['''emoji_inv'''][word] )
elif word == "<SP>":
words.append(''' ''' )
elif word == "<BR>":
words.append(_snake_case )
elif word == "<TAB>":
words.append('''\t''' )
elif word == "<BLOCK>":
words.append('''▀''' )
elif word == "<KIGOU>":
words.append('''ǀ''' )
elif word == "<U2000U2BFF>":
words.append('''‖''' )
else:
words.append(_snake_case )
if len(_snake_case ) > 0:
words.append(bytearray(_snake_case ).decode('''utf-8''' , errors='''replace''' ) )
UpperCAmelCase = ''''''.join(_snake_case )
return text
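
# Hedged illustration (standalone helper with hypothetical data): the
# "<|byte%d|>" fallback used above can round-trip arbitrary UTF-8, e.g. a kanji.
def _byte_fallback_round_trip(char: str) -> str:
    tokens = ["<|byte%d|>" % b for b in char.encode("utf-8")]
    # strip the "<|byte" prefix and "|>" suffix, then decode the raw bytes
    return bytearray(int(t[6:-2]) for t in tokens).decode("utf-8")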
| 152 | 1 |
"""simple docstring"""
import os
# Precomputes a list of the 100 first triangular numbers
TRIANGULAR_NUMBERS = [int(0.5 * n * (n + 1)) for n in range(1, 101)]
def solution():
    """
    Finds the amount of triangular words in the words file.
    """
    script_dir = os.path.dirname(os.path.realpath(__file__))
    wordlist_path = os.path.join(script_dir, "words.txt")

    words = ""
    with open(wordlist_path) as f:
        words = f.readline()

    words = [word.strip('"') for word in words.strip("\r\n").split(",")]
    words = [
        word
        for word in [sum(ord(x) - 64 for x in word) for word in words]
        if word in TRIANGULAR_NUMBERS
    ]
    return len(words)
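# Worked example (uses TRIANGULAR_NUMBERS above): "SKY" -> 19 + 11 + 25 = 55,
# the 10th triangular number, so "SKY" counts as a triangle word.
assert sum(ord(ch) - 64 for ch in "SKY") in TRIANGULAR_NUMBERS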
if __name__ == "__main__":
print(solution())
| 72 |
'''simple docstring'''
from __future__ import annotations
def ohms_law(voltage: float, current: float, resistance: float) -> dict[str, float]:
    """Applies Ohm's law (V = I * R) to two given values and solves for the zero one."""
if (voltage, current, resistance).count(0 ) != 1:
raise ValueError("One and only one argument must be 0" )
if resistance < 0:
raise ValueError("Resistance cannot be negative" )
if voltage == 0:
return {"voltage": float(current * resistance )}
elif current == 0:
return {"current": voltage / resistance}
elif resistance == 0:
return {"resistance": voltage / current}
else:
raise ValueError("Exactly one argument must be 0" )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 80 | 0 |
"""simple docstring"""
from __future__ import annotations
import math
def prime_sieve(num: int) -> list[int]:
    """
    Returns a list with all prime numbers up to num using the sieve of Eratosthenes.
    """
    if num <= 0:
        msg = f"""{num}: Invalid input, please enter a positive integer."""
        raise ValueError(msg)

    sieve = [True] * (num + 1)
    prime = []
    start = 2
    end = int(math.sqrt(num))

    while start <= end:
        # If start is a prime
        if sieve[start] is True:
            prime.append(start)

            # Set multiples of start be False
            for i in range(start * start, num + 1, start):
                if sieve[i] is True:
                    sieve[i] = False
        start += 1

    for j in range(end + 1, num + 1):
        if sieve[j] is True:
            prime.append(j)

    return prime
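# Quick sanity check (uses the function above): primes up to 10.
assert prime_sieve(10) == [2, 3, 5, 7]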
if __name__ == "__main__":
print(prime_sieve(int(input('Enter a positive integer: ').strip())))
| 205 |
"""simple docstring"""
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import numpy as np
import pytest
from datasets.arrow_dataset import Dataset
from datasets.search import ElasticSearchIndex, FaissIndex, MissingIndex
from .utils import require_elasticsearch, require_faiss
pytestmark = pytest.mark.integration
@require_faiss
class IndexableDatasetTest(TestCase):
    def _create_dummy_dataset(self) -> Dataset:
        dset = Dataset.from_dict({"filename": ["my_name-train" + "_" + str(x) for x in np.arange(30).tolist()]})
        return dset
    def test_add_faiss_index(self):
        import faiss

        dset: Dataset = self._create_dummy_dataset()
        dset = dset.map(
            lambda ex, i: {"vecs": i * np.ones(5, dtype=np.float32)}, with_indices=True, keep_in_memory=True
        )
        dset = dset.add_faiss_index("vecs", batch_size=100, metric_type=faiss.METRIC_INNER_PRODUCT)
        scores, examples = dset.get_nearest_examples("vecs", np.ones(5, dtype=np.float32))
        self.assertEqual(examples["filename"][0], "my_name-train_29")
        dset.drop_index("vecs")
    def test_add_faiss_index_from_external_arrays(self):
        import faiss

        dset: Dataset = self._create_dummy_dataset()
        dset.add_faiss_index_from_external_arrays(
            external_arrays=np.ones((30, 5)) * np.arange(30).reshape(-1, 1),
            index_name="vecs",
            batch_size=100,
            metric_type=faiss.METRIC_INNER_PRODUCT,
        )
        scores, examples = dset.get_nearest_examples("vecs", np.ones(5, dtype=np.float32))
        self.assertEqual(examples["filename"][0], "my_name-train_29")
    def test_serialization(self):
        import faiss

        dset: Dataset = self._create_dummy_dataset()
        dset.add_faiss_index_from_external_arrays(
            external_arrays=np.ones((30, 5)) * np.arange(30).reshape(-1, 1),
            index_name="vecs",
            metric_type=faiss.METRIC_INNER_PRODUCT,
        )

        # Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
        # ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
        # see https://bugs.python.org/issue14243 and
        # https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
        with tempfile.NamedTemporaryFile(delete=False) as tmp_file:
            dset.save_faiss_index("vecs", tmp_file.name)
            dset.load_faiss_index("vecs2", tmp_file.name)
        os.unlink(tmp_file.name)

        scores, examples = dset.get_nearest_examples("vecs2", np.ones(5, dtype=np.float32))
        self.assertEqual(examples["filename"][0], "my_name-train_29")
    def test_drop_index(self):
        dset: Dataset = self._create_dummy_dataset()
        dset.add_faiss_index_from_external_arrays(
            external_arrays=np.ones((30, 5)) * np.arange(30).reshape(-1, 1), index_name="vecs"
        )
        dset.drop_index("vecs")
        self.assertRaises(MissingIndex, partial(dset.get_nearest_examples, "vecs2", np.ones(5, dtype=np.float32)))
    def test_add_elasticsearch_index(self):
        from elasticsearch import Elasticsearch

        dset: Dataset = self._create_dummy_dataset()
        with patch("elasticsearch.Elasticsearch.search") as mocked_search, patch(
            "elasticsearch.client.IndicesClient.create"
        ) as mocked_index_create, patch("elasticsearch.helpers.streaming_bulk") as mocked_bulk:
            mocked_index_create.return_value = {"acknowledged": True}
            mocked_bulk.return_value = [(True, None)] * 30
            mocked_search.return_value = {"hits": {"hits": [{"_score": 1, "_id": 29}]}}
            es_client = Elasticsearch()

            dset.add_elasticsearch_index("filename", es_client=es_client)
            scores, examples = dset.get_nearest_examples("filename", "my_name-train_29")
            self.assertEqual(examples["filename"][0], "my_name-train_29")
@require_faiss
class FaissIndexTest(TestCase):
    def test_flat_ip(self):
        import faiss

        index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT)

        # add vectors
        index.add_vectors(np.eye(5, dtype=np.float32))
        self.assertIsNotNone(index.faiss_index)
        self.assertEqual(index.faiss_index.ntotal, 5)
        index.add_vectors(np.zeros((5, 5), dtype=np.float32))
        self.assertEqual(index.faiss_index.ntotal, 10)

        # single query
        query = np.zeros(5, dtype=np.float32)
        query[1] = 1
        scores, indices = index.search(query)
        self.assertRaises(ValueError, index.search, query.reshape(-1, 1))
        self.assertGreater(scores[0], 0)
        self.assertEqual(indices[0], 1)

        # batched queries
        queries = np.eye(5, dtype=np.float32)[::-1]
        total_scores, total_indices = index.search_batch(queries)
        self.assertRaises(ValueError, index.search_batch, queries[0])
        best_scores = [scores[0] for scores in total_scores]
        best_indices = [indices[0] for indices in total_indices]
        self.assertGreater(np.min(best_scores), 0)
        self.assertListEqual([4, 3, 2, 1, 0], best_indices)
    def test_factory(self):
        import faiss

        index = FaissIndex(string_factory="Flat")
        index.add_vectors(np.eye(5, dtype=np.float32))
        self.assertIsInstance(index.faiss_index, faiss.IndexFlat)
        index = FaissIndex(string_factory="LSH")
        index.add_vectors(np.eye(5, dtype=np.float32))
        self.assertIsInstance(index.faiss_index, faiss.IndexLSH)
        with self.assertRaises(ValueError):
            _ = FaissIndex(string_factory="Flat", custom_index=faiss.IndexFlat(5))
    def test_custom(self):
        import faiss

        custom_index = faiss.IndexFlat(5)
        index = FaissIndex(custom_index=custom_index)
        index.add_vectors(np.eye(5, dtype=np.float32))
        self.assertIsInstance(index.faiss_index, faiss.IndexFlat)

    def test_serialization(self):
        import faiss

        index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT)
        index.add_vectors(np.eye(5, dtype=np.float32))

        # Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
        # ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
        # see https://bugs.python.org/issue14243 and
        # https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
        with tempfile.NamedTemporaryFile(delete=False) as tmp_file:
            index.save(tmp_file.name)
            index = FaissIndex.load(tmp_file.name)
        os.unlink(tmp_file.name)

        query = np.zeros(5, dtype=np.float32)
        query[1] = 1
        scores, indices = index.search(query)
        self.assertGreater(scores[0], 0)
        self.assertEqual(indices[0], 1)
@require_faiss
def test_serialization_fs(mockfs):
    import faiss

    index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT)
    index.add_vectors(np.eye(5, dtype=np.float32))

    index_name = "index.faiss"
    path = f"mock://{index_name}"
    index.save(path, storage_options=mockfs.storage_options)
    index = FaissIndex.load(path, storage_options=mockfs.storage_options)

    query = np.zeros(5, dtype=np.float32)
    query[1] = 1
    scores, indices = index.search(query)
    assert scores[0] > 0
    assert indices[0] == 1
@require_elasticsearch
class ElasticSearchIndexTest(TestCase):
    def test_elasticsearch(self):
        from elasticsearch import Elasticsearch

        with patch("elasticsearch.Elasticsearch.search") as mocked_search, patch(
            "elasticsearch.client.IndicesClient.create"
        ) as mocked_index_create, patch("elasticsearch.helpers.streaming_bulk") as mocked_bulk:
            es_client = Elasticsearch()
            mocked_index_create.return_value = {"acknowledged": True}
            index = ElasticSearchIndex(es_client=es_client)
            mocked_bulk.return_value = [(True, None)] * 3
            index.add_documents(["foo", "bar", "foobar"])

            # single query
            query = "foo"
            mocked_search.return_value = {"hits": {"hits": [{"_score": 1, "_id": 0}]}}
            scores, indices = index.search(query)
            self.assertEqual(scores[0], 1)
            self.assertEqual(indices[0], 0)

            # single query with timeout
            query = "foo"
            mocked_search.return_value = {"hits": {"hits": [{"_score": 1, "_id": 0}]}}
            scores, indices = index.search(query, request_timeout=30)
            self.assertEqual(scores[0], 1)
            self.assertEqual(indices[0], 0)

            # batched queries
            queries = ["foo", "bar", "foobar"]
            mocked_search.return_value = {"hits": {"hits": [{"_score": 1, "_id": 1}]}}
            total_scores, total_indices = index.search_batch(queries)
            best_scores = [scores[0] for scores in total_scores]
            best_indices = [indices[0] for indices in total_indices]
            self.assertGreater(np.min(best_scores), 0)
            self.assertListEqual([1, 1, 1], best_indices)

            # batched queries with timeout
            queries = ["foo", "bar", "foobar"]
            mocked_search.return_value = {"hits": {"hits": [{"_score": 1, "_id": 1}]}}
            total_scores, total_indices = index.search_batch(queries, request_timeout=30)
            best_scores = [scores[0] for scores in total_scores]
            best_indices = [indices[0] for indices in total_indices]
            self.assertGreater(np.min(best_scores), 0)
            self.assertListEqual([1, 1, 1], best_indices)
| 205 | 1 |
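For readers who want the Faiss search flow outside the test harness, a minimal end-to-end sketch based on the calls exercised above (a sketch only, assuming `datasets`, `faiss`, and `numpy` are installed; the vectors are illustrative):

import faiss
import numpy as np

from datasets.search import FaissIndex

# Build an inner-product index over five one-hot vectors.
index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT)
index.add_vectors(np.eye(5, dtype=np.float32))

# Query with a one-hot vector; the best match is the identical row.
query = np.zeros(5, dtype=np.float32)
query[2] = 1
scores, indices = index.search(query)
print(indices[0])  # expected: 2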