code | code_codestyle | style_context | style_context_codestyle | label
---|---|---|---|---
stringlengths 86..54.5k | int64 0..371 | stringlengths 87..49.2k | int64 0..349 | int64 0..1
"""simple docstring"""
import logging
import os
from logging import (
CRITICAL, # NOQA
DEBUG, # NOQA
ERROR, # NOQA
FATAL, # NOQA
INFO, # NOQA
NOTSET, # NOQA
WARN, # NOQA
WARNING, # NOQA
)
from typing import Optional
from tqdm import auto as tqdm_lib
_a = {
'debug': logging.DEBUG,
'info': logging.INFO,
'warning': logging.WARNING,
'error': logging.ERROR,
'critical': logging.CRITICAL,
}
_a = logging.WARNING
def _A ( ) -> int:
'''simple docstring'''
__lowercase = os.getenv("DATASETS_VERBOSITY", UpperCamelCase_)
if env_level_str:
if env_level_str in log_levels:
return log_levels[env_level_str]
else:
logging.getLogger().warning(
F"""Unknown option DATASETS_VERBOSITY={env_level_str}, """
F"""has to be one of: { ", ".join(log_levels.keys()) }""")
return _default_log_level
def _A ( ) -> str:
'''simple docstring'''
return __name__.split(".")[0]
def _A ( ) -> logging.Logger:
'''simple docstring'''
return logging.getLogger(_get_library_name())
def _A ( ) -> None:
'''simple docstring'''
__lowercase = _get_library_root_logger()
library_root_logger.setLevel(_get_default_logging_level())
def _A ( ) -> None:
'''simple docstring'''
__lowercase = _get_library_root_logger()
library_root_logger.setLevel(logging.NOTSET)
def _A ( UpperCamelCase_ : Optional[str] = None) -> logging.Logger:
'''simple docstring'''
if name is None:
__lowercase = _get_library_name()
return logging.getLogger(UpperCamelCase_)
def _A ( ) -> int:
'''simple docstring'''
return _get_library_root_logger().getEffectiveLevel()
def _A ( UpperCamelCase_ : int) -> None:
'''simple docstring'''
_get_library_root_logger().setLevel(UpperCamelCase_)
def _A ( ) -> Any:
'''simple docstring'''
return set_verbosity(UpperCamelCase_)
def _A ( ) -> Optional[int]:
'''simple docstring'''
return set_verbosity(UpperCamelCase_)
def _A ( ) -> Optional[Any]:
'''simple docstring'''
return set_verbosity(UpperCamelCase_)
def _A ( ) -> str:
'''simple docstring'''
return set_verbosity(UpperCamelCase_)
def _A ( ) -> None:
'''simple docstring'''
__lowercase = False
def _A ( ) -> None:
'''simple docstring'''
__lowercase = True
# Configure the library root logger at the module level (singleton-like)
_configure_library_root_logger()
class _lowerCAmelCase :
"""simple docstring"""
def __init__( self : List[Any], *UpperCAmelCase__ : Optional[Any], **UpperCAmelCase__ : List[str] ): # pylint: disable=unused-argument
__lowercase = args[0] if args else None
def __iter__( self : Optional[int] ):
return iter(self._iterator )
def __getattr__( self : Tuple, UpperCAmelCase__ : Any ):
def empty_fn(*UpperCAmelCase__ : List[str], **UpperCAmelCase__ : str ): # pylint: disable=unused-argument
return
return empty_fn
def __enter__( self : Tuple ):
return self
def __exit__( self : str, UpperCAmelCase__ : Any, UpperCAmelCase__ : Tuple, UpperCAmelCase__ : Optional[int] ):
return
_a = True
class _lowerCAmelCase :
"""simple docstring"""
def __call__( self : int, *UpperCAmelCase__ : Dict, UpperCAmelCase__ : Optional[int]=False, **UpperCAmelCase__ : Union[str, Any] ):
if _tqdm_active and not disable:
return tqdm_lib.tqdm(*UpperCAmelCase__, **UpperCAmelCase__ )
else:
return EmptyTqdm(*UpperCAmelCase__, **UpperCAmelCase__ )
def _lowercase ( self : List[Any], *UpperCAmelCase__ : List[Any], **UpperCAmelCase__ : Optional[int] ):
__lowercase = None
if _tqdm_active:
return tqdm_lib.tqdm.set_lock(*UpperCAmelCase__, **UpperCAmelCase__ )
def _lowercase ( self : str ):
if _tqdm_active:
return tqdm_lib.tqdm.get_lock()
_a = _tqdm_cls()
def _A ( ) -> bool:
'''simple docstring'''
global _tqdm_active
return bool(_tqdm_active)
def _A ( ) -> Optional[int]:
'''simple docstring'''
global _tqdm_active
__lowercase = True
def _A ( ) -> List[Any]:
'''simple docstring'''
global _tqdm_active
__lowercase = False
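# --- Usage sketch (added for illustration; not part of the original module) ---
if __name__ == "__main__":
    set_verbosity_debug()                      # root library logger now emits DEBUG
    logger = get_logger("demo")
    logger.debug("debug messages are now visible")

    disable_progress_bar()                     # tqdm(...) below now returns EmptyTqdm
    for _ in tqdm(range(3)):                   # iterates silently, no bar drawn
        pass
    enable_progress_bar()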
| 17 |
"""simple docstring"""
from google.protobuf import descriptor as _descriptor
from google.protobuf import descriptor_pool as _descriptor_pool
from google.protobuf import symbol_database as _symbol_database
from google.protobuf.internal import builder as _builder
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(
b'\n\x19sentencepiece_model.proto\x12\rsentencepiece"\x80\x0c\n\x0bTrainerSpec\x12\r\n\x05input\x18\x01 \x03(\t\x12\x14\n\x0cinput_format\x18\x07 \x01(\t\x12\x14\n\x0cmodel_prefix\x18\x02 \x01(\t\x12\x41\n\nmodel_type\x18\x03 \x01(\x0e\x32$.sentencepiece.TrainerSpec.ModelType:\x07UNIGRAM\x12\x18\n\nvocab_size\x18\x04 \x01(\x05:\x04\x38\x30\x30\x30\x12\x17\n\x0f\x61\x63\x63\x65pt_language\x18\x05 \x03(\t\x12 \n\x15self_test_sample_size\x18\x06 \x01(\x05:\x01\x30\x12*\n\x1b\x65nable_differential_privacy\x18\x32 \x01(\x08:\x05\x66\x61lse\x12+\n differential_privacy_noise_level\x18\x33 \x01(\x02:\x01\x30\x12\x32\n\'differential_privacy_clipping_threshold\x18\x34 \x01(\x04:\x01\x30\x12"\n\x12\x63haracter_coverage\x18\n \x01(\x02:\x06\x30.9995\x12\x1e\n\x13input_sentence_size\x18\x0b \x01(\x04:\x01\x30\x12$\n\x16shuffle_input_sentence\x18\x13 \x01(\x08:\x04true\x12 \n\x14mining_sentence_size\x18\x0c \x01(\x05\x42\x02\x18\x01\x12"\n\x16training_sentence_size\x18\r \x01(\x05\x42\x02\x18\x01\x12(\n\x17seed_sentencepiece_size\x18\x0e \x01(\x05:\x07\x31\x30\x30\x30\x30\x30\x30\x12\x1e\n\x10shrinking_factor\x18\x0f \x01(\x02:\x04\x30.75\x12!\n\x13max_sentence_length\x18\x12 \x01(\x05:\x04\x34\x31\x39\x32\x12\x17\n\x0bnum_threads\x18\x10 \x01(\x05:\x02\x31\x36\x12\x1d\n\x12num_sub_iterations\x18\x11 \x01(\x05:\x01\x32\x12$\n\x18max_sentencepiece_length\x18\x14 \x01(\x05:\x02\x31\x36\x12%\n\x17split_by_unicode_script\x18\x15 \x01(\x08:\x04true\x12\x1d\n\x0fsplit_by_number\x18\x17 \x01(\x08:\x04true\x12!\n\x13split_by_whitespace\x18\x16 \x01(\x08:\x04true\x12)\n\x1atreat_whitespace_as_suffix\x18\x18 \x01(\x08:\x05\x66\x61lse\x12+\n\x1c\x61llow_whitespace_only_pieces\x18\x1a \x01(\x08:\x05\x66\x61lse\x12\x1b\n\x0csplit_digits\x18\x19 \x01(\x08:\x05\x66\x61lse\x12#\n\x19pretokenization_delimiter\x18\x35 \x01(\t:\x00\x12\x17\n\x0f\x63ontrol_symbols\x18\x1e \x03(\t\x12\x1c\n\x14user_defined_symbols\x18\x1f \x03(\t\x12\x16\n\x0erequired_chars\x18$ \x01(\t\x12\x1c\n\rbyte_fallback\x18# \x01(\x08:\x05\x66\x61lse\x12+\n\x1dvocabulary_output_piece_score\x18 \x01(\x08:\x04true\x12\x1e\n\x10hard_vocab_limit\x18! \x01(\x08:\x04true\x12\x1c\n\ruse_all_vocab\x18" \x01(\x08:\x05\x66\x61lse\x12\x11\n\x06unk_id\x18( \x01(\x05:\x01\x30\x12\x11\n\x06\x62os_id\x18) \x01(\x05:\x01\x31\x12\x11\n\x06\x65os_id\x18* \x01(\x05:\x01\x32\x12\x12\n\x06pad_id\x18+ \x01(\x05:\x02-1\x12\x18\n\tunk_piece\x18- \x01(\t:\x05<unk>\x12\x16\n\tbos_piece\x18. 
\x01(\t:\x03<s>\x12\x17\n\teos_piece\x18/ \x01(\t:\x04</s>\x12\x18\n\tpad_piece\x18\x30 \x01(\t:\x05<pad>\x12\x1a\n\x0bunk_surface\x18, \x01(\t:\x05 \xe2\x81\x87 \x12+\n\x1ctrain_extremely_large_corpus\x18\x31 \x01(\x08:\x05\x66\x61lse"5\n\tModelType\x12\x0b\n\x07UNIGRAM\x10\x01\x12\x07\n\x03\x42PE\x10\x02\x12\x08\n\x04WORD\x10\x03\x12\x08\n\x04\x43HAR\x10\x04*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02"\xd1\x01\n\x0eNormalizerSpec\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x1c\n\x14precompiled_charsmap\x18\x02 \x01(\x0c\x12\x1e\n\x10\x61\x64\x64_dummy_prefix\x18\x03 \x01(\x08:\x04true\x12&\n\x18remove_extra_whitespaces\x18\x04 \x01(\x08:\x04true\x12 \n\x12\x65scape_whitespaces\x18\x05 \x01(\x08:\x04true\x12\x1e\n\x16normalization_rule_tsv\x18\x06 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02"y\n\x0cSelfTestData\x12\x33\n\x07samples\x18\x01 \x03(\x0b\x32".sentencepiece.SelfTestData.Sample\x1a)\n\x06Sample\x12\r\n\x05input\x18\x01 \x01(\t\x12\x10\n\x08\x65xpected\x18\x02 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02"\xfe\x03\n\nModelProto\x12\x37\n\x06pieces\x18\x01 \x03(\x0b\x32\'.sentencepiece.ModelProto.SentencePiece\x12\x30\n\x0ctrainer_spec\x18\x02 \x01(\x0b\x32\x1a.sentencepiece.TrainerSpec\x12\x36\n\x0fnormalizer_spec\x18\x03 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x12\x33\n\x0eself_test_data\x18\x04 \x01(\x0b\x32\x1b.sentencepiece.SelfTestData\x12\x38\n\x11\x64\x65normalizer_spec\x18\x05 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x1a\xd2\x01\n\rSentencePiece\x12\r\n\x05piece\x18\x01 \x01(\t\x12\r\n\x05score\x18\x02 \x01(\x02\x12\x42\n\x04type\x18\x03 \x01(\x0e\x32,.sentencepiece.ModelProto.SentencePiece.Type:\x06NORMAL"T\n\x04Type\x12\n\n\x06NORMAL\x10\x01\x12\x0b\n\x07UNKNOWN\x10\x02\x12\x0b\n\x07\x43ONTROL\x10\x03\x12\x10\n\x0cUSER_DEFINED\x10\x04\x12\x08\n\x04\x42YTE\x10\x06\x12\n\n\x06UNUSED\x10\x05*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\x42\x02H\x03'
)
_globals = globals()
_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals)
_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'sentencepiece_model_pb2', _globals)
if _descriptor._USE_C_DESCRIPTORS is False:
    DESCRIPTOR._options = None
    DESCRIPTOR._serialized_options = b'H\003'
# (generated by protobuf compiler, but `_TRAINERSPEC` is not defined)
# _TRAINERSPEC.fields_by_name["mining_sentence_size"]._options = None
# _TRAINERSPEC.fields_by_name["mining_sentence_size"]._serialized_options = b"\030\001"
# _TRAINERSPEC.fields_by_name["training_sentence_size"]._options = None
# _TRAINERSPEC.fields_by_name["training_sentence_size"]._serialized_options = b"\030\001"
    _globals['_TRAINERSPEC']._serialized_start = 45
    _globals['_TRAINERSPEC']._serialized_end = 1581
    _globals['_TRAINERSPEC_MODELTYPE']._serialized_start = 1517
    _globals['_TRAINERSPEC_MODELTYPE']._serialized_end = 1570
    _globals['_NORMALIZERSPEC']._serialized_start = 1584
    _globals['_NORMALIZERSPEC']._serialized_end = 1793
    _globals['_SELFTESTDATA']._serialized_start = 1795
    _globals['_SELFTESTDATA']._serialized_end = 1916
    _globals['_SELFTESTDATA_SAMPLE']._serialized_start = 1864
    _globals['_SELFTESTDATA_SAMPLE']._serialized_end = 1905
    _globals['_MODELPROTO']._serialized_start = 1919
    _globals['_MODELPROTO']._serialized_end = 2429
    _globals['_MODELPROTO_SENTENCEPIECE']._serialized_start = 2208
    _globals['_MODELPROTO_SENTENCEPIECE']._serialized_end = 2418
    _globals['_MODELPROTO_SENTENCEPIECE_TYPE']._serialized_start = 2323
    _globals['_MODELPROTO_SENTENCEPIECE_TYPE']._serialized_end = 2407
# @@protoc_insertion_point(module_scope)
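# Usage sketch (not generated code): `ModelProto` is injected into this module's
# globals by `_builder.BuildTopDescriptorsAndMessages` above; the file path below
# is hypothetical.
if __name__ == "__main__":
    model = ModelProto()
    with open("spiece.model", "rb") as f:  # any serialized SentencePiece model
        model.ParseFromString(f.read())
    print(model.trainer_spec.model_type, model.trainer_spec.vocab_size, len(model.pieces))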
| 17 | 1 |
import gc
import unittest
from diffusers import FlaxDPMSolverMultistepScheduler, FlaxStableDiffusionPipeline
from diffusers.utils import is_flax_available, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class FlaxStableDiffusion2PipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up after each test
        super().tearDown()
        gc.collect()

    def test_stable_diffusion_flax(self):
        sd_pipe, params = FlaxStableDiffusionPipeline.from_pretrained(
            "stabilityai/stable-diffusion-2", revision="bf16", dtype=jnp.bfloat16,
        )

        prompt = "A painting of a squirrel eating a burger"
        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = sd_pipe.prepare_inputs(prompt)

        params = replicate(params)
        prompt_ids = shard(prompt_ids)

        prng_seed = jax.random.PRNGKey(0)
        prng_seed = jax.random.split(prng_seed, jax.device_count())

        images = sd_pipe(prompt_ids, params, prng_seed, num_inference_steps=25, jit=True)[0]
        assert images.shape == (jax.device_count(), 1, 768, 768, 3)

        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
        image_slice = images[0, 253:256, 253:256, -1]

        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array([0.4238, 0.4414, 0.4395, 0.4453, 0.4629, 0.4590, 0.4531, 0.45508, 0.4512])
        print(f"output_slice: {output_slice}")
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2

    def test_stable_diffusion_dpm_flax(self):
        model_id = "stabilityai/stable-diffusion-2"
        scheduler, scheduler_params = FlaxDPMSolverMultistepScheduler.from_pretrained(model_id, subfolder="scheduler")
        sd_pipe, params = FlaxStableDiffusionPipeline.from_pretrained(
            model_id, scheduler=scheduler, revision="bf16", dtype=jnp.bfloat16,
        )
        params["scheduler"] = scheduler_params

        prompt = "A painting of a squirrel eating a burger"
        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = sd_pipe.prepare_inputs(prompt)

        params = replicate(params)
        prompt_ids = shard(prompt_ids)

        prng_seed = jax.random.PRNGKey(0)
        prng_seed = jax.random.split(prng_seed, jax.device_count())

        images = sd_pipe(prompt_ids, params, prng_seed, num_inference_steps=25, jit=True)[0]
        assert images.shape == (jax.device_count(), 1, 768, 768, 3)

        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
        image_slice = images[0, 253:256, 253:256, -1]

        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array([0.4336, 0.42969, 0.4453, 0.4199, 0.4297, 0.4531, 0.4434, 0.4434, 0.4297])
        print(f"output_slice: {output_slice}")
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2
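# A self-contained sketch (illustrative, not part of the test file) of the
# replicate/shard/pmap pattern both tests above rely on: `replicate` copies the
# params to every device, `shard` adds a leading device axis to the batch, and a
# per-device PRNG key drives sampling on each replica.
def _pmap_sharding_demo():
    params = replicate({"w": jnp.ones((4,))})                     # leading axis: device_count
    batch = shard(jnp.zeros((jax.device_count() * 2, 4)))         # (n_devices, 2, 4)
    rng = jax.random.split(jax.random.PRNGKey(0), jax.device_count())

    @jax.pmap
    def apply(p, x, key):
        return x * p["w"] + jax.random.normal(key, x.shape)

    return apply(params, batch, rng).shape                        # (n_devices, 2, 4)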
| 365 | from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_herbert import HerbertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "allegro/herbert-base-cased": "https://huggingface.co/allegro/herbert-base-cased/resolve/main/vocab.json"
    },
    "merges_file": {
        "allegro/herbert-base-cased": "https://huggingface.co/allegro/herbert-base-cased/resolve/main/merges.txt"
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"allegro/herbert-base-cased": 514}
PRETRAINED_INIT_CONFIGURATION = {}


class HerbertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = HerbertTokenizer

    def __init__(
        self, vocab_file=None, merges_file=None, tokenizer_file=None, cls_token="<s>", unk_token="<unk>",
        pad_token="<pad>", mask_token="<mask>", sep_token="</s>", **kwargs,
    ):
        super().__init__(
            vocab_file, merges_file, tokenizer_file=tokenizer_file, cls_token=cls_token, unk_token=unk_token,
            pad_token=pad_token, mask_token=mask_token, sep_token=sep_token, **kwargs,
        )

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
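# Usage sketch (illustrative): round-trip through the fast tokenizer defined above.
if __name__ == "__main__":
    tokenizer = HerbertTokenizerFast.from_pretrained("allegro/herbert-base-cased")
    encoded = tokenizer("Zażółć gęślą jaźń.")
    # leading CLS / trailing SEP come from build_inputs_with_special_tokens above
    print(encoded.input_ids[0] == tokenizer.cls_token_id, encoded.input_ids[-1] == tokenizer.sep_token_id)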
| 206 | 0 |
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MobileNetV1ImageProcessor
class MobileNetV1ImageProcessingTester(unittest.TestCase):
    def __init__(
        self, parent, batch_size=7, num_channels=3, image_size=18, min_resolution=30, max_resolution=400,
        do_resize=True, size=None, do_center_crop=True, crop_size=None,
    ):
        size = size if size is not None else {"shortest_edge": 20}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_center_crop": self.do_center_crop,
            "crop_size": self.crop_size,
        }
@require_torch
@require_vision
class MobileNetV1ImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = MobileNetV1ImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = MobileNetV1ImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "crop_size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 20})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})

    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        ) | 206
'''simple docstring'''
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class BlipProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "BlipImageProcessor"
    tokenizer_class = ("BertTokenizer", "BertTokenizerFast")

    def __init__(self, image_processor, tokenizer):
        tokenizer.return_token_type_ids = False
        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(
        self, images: ImageInput = None, text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        add_special_tokens: bool = True, padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None, max_length: Optional[int] = None, stride: int = 0,
        pad_to_multiple_of: Optional[int] = None, return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False, return_token_type_ids: bool = False, return_length: bool = False,
        verbose: bool = True, return_tensors: Optional[Union[str, TensorType]] = None, **kwargs,
    ) -> BatchEncoding:
        if images is None and text is None:
            raise ValueError("You have to specify either images or text.")

        # Get only text
        if images is None:
            self.current_processor = self.tokenizer
            text_encoding = self.tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs,
            )
            return text_encoding

        # add pixel_values
        encoding_image_processor = self.image_processor(images, return_tensors=return_tensors)

        if text is not None:
            text_encoding = self.tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs,
            )
        else:
            text_encoding = None

        if text_encoding is not None:
            encoding_image_processor.update(text_encoding)

        return encoding_image_processor

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names)) | 206 | 1 |
"""simple docstring"""
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_tf
if is_tf_available():
import tensorflow as tf
from tensorflow.python.eager import context
from tensorflow.python.framework import ops
from transformers import GradientAccumulator, create_optimizer
@require_tf
class GradientAccumulatorTest(unittest.TestCase):
    def assertListAlmostEqual(self, list1, list2, tol):
        self.assertEqual(len(list1), len(list2))
        for a, b in zip(list1, list2):
            self.assertAlmostEqual(a, b, delta=tol)

    def testGradientAccumulator(self):
        accumulator = GradientAccumulator()
        accumulator([tf.constant([1.0, 2.0])])
        accumulator([tf.constant([-2.0, 1.0])])
        accumulator([tf.constant([-1.0, 2.0])])
        with self.assertRaises(ValueError):
            accumulator([tf.constant([1.0, 1.0]), tf.constant([2.0, 2.0])])
        self.assertEqual(accumulator.step, 3)
        self.assertEqual(len(accumulator.gradients), 1)
        self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist(), [-2.0, 5.0], tol=1e-2)
        accumulator.reset()
        self.assertEqual(accumulator.step, 0)
        self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist(), [0.0, 0.0], tol=1e-2)

    def testGradientAccumulatorDistributionStrategy(self):
        context._context = None
        ops.enable_eager_execution_internal()
        physical_devices = tf.config.list_physical_devices("CPU")
        if len(physical_devices) == 1:
            tf.config.set_logical_device_configuration(
                physical_devices[0], [tf.config.LogicalDeviceConfiguration(), tf.config.LogicalDeviceConfiguration()]
            )
        devices = tf.config.list_logical_devices(device_type="CPU")
        strategy = tf.distribute.MirroredStrategy(devices=devices[:2])

        with strategy.scope():
            accumulator = GradientAccumulator()
            variable = tf.Variable([4.0, 3.0])
            optimizer, _ = create_optimizer(5e-5, 10, 5)
            gradient_placeholder = tf.Variable([0.0, 0.0], trainable=False)

        def accumulate_on_replica(gradient):
            accumulator([gradient])

        def apply_on_replica():
            optimizer.apply_gradients(list(zip(accumulator.gradients, [variable])))

        @tf.function
        def accumulate(grad1, grad2):
            with strategy.scope():
                local_variables = strategy.experimental_local_results(gradient_placeholder)
                local_variables[0].assign(grad1)
                local_variables[1].assign(grad2)
                strategy.run(accumulate_on_replica, args=(gradient_placeholder,))

        @tf.function
        def apply_grad():
            with strategy.scope():
                strategy.run(apply_on_replica)

        def _check_local_values(grad1, grad2):
            values = strategy.experimental_local_results(accumulator._gradients[0])
            self.assertListAlmostEqual(values[0].value(), grad1, tol=1e-2)
            self.assertListAlmostEqual(values[1].value(), grad2, tol=1e-2)

        accumulate([1.0, 2.0], [-1.0, 1.0])
        accumulate([3.0, -1.0], [-1.0, -1.0])
        accumulate([-2.0, 2.0], [3.0, -2.0])
        self.assertEqual(accumulator.step, 3)
        _check_local_values([2.0, 3.0], [1.0, -2.0])

        apply_grad()
        self.assertListAlmostEqual(variable.value(), [4.0, 3.0], tol=1e-2)

        accumulator.reset()
        self.assertEqual(accumulator.step, 0)
        _check_local_values([0.0, 0.0], [0.0, 0.0])
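# A minimal sketch of the idea GradientAccumulator implements, written with plain
# tf.Variables rather than the transformers class (helper name is hypothetical):
# sum per-step gradients into buffers and apply them once every `accum_steps`.
def _manual_accumulation_demo(model_vars, grads_per_step, optimizer, accum_steps=3):
    buffers = [tf.Variable(tf.zeros_like(v), trainable=False) for v in model_vars]
    for step, grads in enumerate(grads_per_step, start=1):
        for buf, g in zip(buffers, grads):
            buf.assign_add(g)                      # accumulate this step's gradient
        if step % accum_steps == 0:
            optimizer.apply_gradients(zip([b.value() for b in buffers], model_vars))
            for buf in buffers:
                buf.assign(tf.zeros_like(buf))     # reset for the next accumulation window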
| 302 |
"""simple docstring"""
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNet2DModel
from ...schedulers import ScoreSdeVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class ScoreSdeVePipeline(DiffusionPipeline):
    unet: UNet2DModel
    scheduler: ScoreSdeVeScheduler

    def __init__(self, unet: UNet2DModel, scheduler: ScoreSdeVeScheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self, batch_size: int = 1, num_inference_steps: int = 2000,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        output_type: Optional[str] = "pil", return_dict: bool = True, **kwargs,
    ) -> Union[ImagePipelineOutput, Tuple]:
        img_size = self.unet.config.sample_size
        shape = (batch_size, 3, img_size, img_size)
        model = self.unet

        sample = randn_tensor(shape, generator=generator) * self.scheduler.init_noise_sigma
        sample = sample.to(self.device)

        self.scheduler.set_timesteps(num_inference_steps)
        self.scheduler.set_sigmas(num_inference_steps)

        for i, t in enumerate(self.progress_bar(self.scheduler.timesteps)):
            sigma_t = self.scheduler.sigmas[i] * torch.ones(shape[0], device=self.device)

            # correction step
            for _ in range(self.scheduler.config.correct_steps):
                model_output = self.unet(sample, sigma_t).sample
                sample = self.scheduler.step_correct(model_output, sample, generator=generator).prev_sample

            # prediction step
            model_output = model(sample, sigma_t).sample
            output = self.scheduler.step_pred(model_output, t, sample, generator=generator)
            sample, sample_mean = output.prev_sample, output.prev_sample_mean

        sample = sample_mean.clamp(0, 1)
        sample = sample.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            sample = self.numpy_to_pil(sample)

        if not return_dict:
            return (sample,)

        return ImagePipelineOutput(images=sample)
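# Usage sketch (illustrative; "google/ncsnpp-church-256" is a public SDE-VE checkpoint).
if __name__ == "__main__":
    pipe = ScoreSdeVePipeline.from_pretrained("google/ncsnpp-church-256")
    image = pipe(num_inference_steps=10).images[0]  # PIL image by default
    image.save("sde_ve_sample.png")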
| 302 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
__a : Union[str, Any] = {"""configuration_yolos""": ["""YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP""", """YolosConfig""", """YolosOnnxConfig"""]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a : List[Any] = ["""YolosFeatureExtractor"""]
__a : Optional[int] = ["""YolosImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_yolos"] = [
"""YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""YolosForObjectDetection""",
"""YolosModel""",
"""YolosPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_yolos import YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP, YolosConfig, YolosOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_yolos import YolosFeatureExtractor
from .image_processing_yolos import YolosImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_yolos import (
YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST,
YolosForObjectDetection,
YolosModel,
YolosPreTrainedModel,
)
else:
import sys
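    # Illustration (comment only): with the registration below, importing this
    # package stays cheap; e.g. `from transformers.models import yolos` pulls in
    # no torch code until an attribute such as `yolos.YolosForObjectDetection` is
    # touched, at which point _LazyModule imports `modeling_yolos` on demand.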
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) | 210 |
import argparse
import requests
import torch
from PIL import Image
from transformers import ViTMAEConfig, ViTMAEForPreTraining, ViTMAEImageProcessor
def rename_key(name):
    if "cls_token" in name:
        name = name.replace("cls_token", "vit.embeddings.cls_token")
    if "mask_token" in name:
        name = name.replace("mask_token", "decoder.mask_token")
    if "decoder_pos_embed" in name:
        name = name.replace("decoder_pos_embed", "decoder.decoder_pos_embed")
    if "pos_embed" in name and "decoder" not in name:
        name = name.replace("pos_embed", "vit.embeddings.position_embeddings")
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "vit.embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "vit.embeddings.norm")
    if "decoder_blocks" in name:
        name = name.replace("decoder_blocks", "decoder.decoder_layers")
    if "blocks" in name:
        name = name.replace("blocks", "vit.encoder.layer")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "decoder_embed" in name:
        name = name.replace("decoder_embed", "decoder.decoder_embed")
    if "decoder_norm" in name:
        name = name.replace("decoder_norm", "decoder.decoder_norm")
    if "decoder_pred" in name:
        name = name.replace("decoder_pred", "decoder.decoder_pred")
    if "norm.weight" in name and "decoder" not in name:
        name = name.replace("norm.weight", "vit.layernorm.weight")
    if "norm.bias" in name and "decoder" not in name:
        name = name.replace("norm.bias", "vit.layernorm.bias")
    return name
def convert_state_dict(orig_state_dict, config):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)
        if "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[1])
            if "decoder_blocks" in key:
                dim = config.decoder_hidden_size
                prefix = "decoder.decoder_layers."
            else:
                dim = config.hidden_size
                prefix = "vit.encoder.layer."
            # split the fused qkv projection into separate query/key/value weights
            if "weight" in key:
                orig_state_dict[f"{prefix}{layer_num}.attention.attention.query.weight"] = val[:dim, :]
                orig_state_dict[f"{prefix}{layer_num}.attention.attention.key.weight"] = val[dim : dim * 2, :]
                orig_state_dict[f"{prefix}{layer_num}.attention.attention.value.weight"] = val[-dim:, :]
            elif "bias" in key:
                orig_state_dict[f"{prefix}{layer_num}.attention.attention.query.bias"] = val[:dim]
                orig_state_dict[f"{prefix}{layer_num}.attention.attention.key.bias"] = val[dim : dim * 2]
                orig_state_dict[f"{prefix}{layer_num}.attention.attention.value.bias"] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val
    return orig_state_dict
def convert_vit_mae_checkpoint(checkpoint_url, pytorch_dump_folder_path):
    config = ViTMAEConfig()
    if "large" in checkpoint_url:
        config.hidden_size = 1_024
        config.intermediate_size = 4_096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
    elif "huge" in checkpoint_url:
        config.patch_size = 14
        config.hidden_size = 1_280
        config.intermediate_size = 5_120
        config.num_hidden_layers = 32
        config.num_attention_heads = 16

    model = ViTMAEForPreTraining(config)

    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["model"]
    image_processor = ViTMAEImageProcessor(size=config.image_size)
    new_state_dict = convert_state_dict(state_dict, config)
    model.load_state_dict(new_state_dict)
    model.eval()

    url = "https://user-images.githubusercontent.com/11435359/147738734-196fd92f-9260-48d5-ba7e-bf103d29364d.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    image_processor = ViTMAEImageProcessor(size=config.image_size)
    inputs = image_processor(images=image, return_tensors="pt")

    # forward pass
    torch.manual_seed(2)
    outputs = model(**inputs)
    logits = outputs.logits

    if "large" in checkpoint_url:
        expected_slice = torch.tensor(
            [[-0.7309, -0.7128, -1.0169], [-1.0161, -0.9058, -1.1878], [-1.0478, -0.9411, -1.1911]])
    elif "huge" in checkpoint_url:
        expected_slice = torch.tensor(
            [[-1.1599, -0.9199, -1.2221], [-1.1952, -0.9269, -1.2307], [-1.2143, -0.9337, -1.2262]])
    else:
        expected_slice = torch.tensor(
            [[-0.9192, -0.8481, -1.1259], [-1.1349, -1.0034, -1.2599], [-1.1757, -1.0429, -1.2726]])

    # verify logits
    assert torch.allclose(logits[0, :3, :3], expected_slice, atol=1e-4)

    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--checkpoint_url""",
default="""https://dl.fbaipublicfiles.com/mae/visualize/mae_visualize_vit_base.pth""",
type=str,
help="""URL of the checkpoint you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
    args = parser.parse_args()
convert_vit_mae_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
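# Example invocation (illustrative; the large-checkpoint URL and output folder are assumptions):
#
#   python convert_vit_mae_to_pytorch.py \
#       --checkpoint_url https://dl.fbaipublicfiles.com/mae/visualize/mae_visualize_vit_large.pth \
#       --pytorch_dump_folder_path ./vit-mae-large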
| 20 | 0 |
from collections.abc import Iterator, MutableMapping
from dataclasses import dataclass
from typing import Generic, TypeVar
snake_case = TypeVar("""KEY""")
snake_case = TypeVar("""VAL""")
@dataclass(frozen=lowerCAmelCase , slots=lowerCAmelCase )
class SCREAMING_SNAKE_CASE ( Generic[KEY, VAL] ):
'''simple docstring'''
UpperCamelCase_ : KEY
UpperCamelCase_ : VAL
class SCREAMING_SNAKE_CASE ( _Item ):
'''simple docstring'''
def __init__( self : Optional[int] ):
super().__init__(UpperCAmelCase_ , UpperCAmelCase_ )
def __bool__( self : List[str] ):
return False
snake_case = _DeletedItem()
class SCREAMING_SNAKE_CASE ( MutableMapping[KEY, VAL] ):
'''simple docstring'''
def __init__( self : List[str] , UpperCAmelCase_ : int = 8 , UpperCAmelCase_ : float = 0.75 ):
SCREAMING_SNAKE_CASE : Optional[Any] = initial_block_size
SCREAMING_SNAKE_CASE : list[_Item | None] = [None] * initial_block_size
assert 0.0 < capacity_factor < 1.0
SCREAMING_SNAKE_CASE : str = capacity_factor
SCREAMING_SNAKE_CASE : Optional[Any] = 0
def _A ( self : Union[str, Any] , UpperCAmelCase_ : KEY ):
return hash(UpperCAmelCase_ ) % len(self._buckets )
def _A ( self : Optional[Any] , UpperCAmelCase_ : int ):
return (ind + 1) % len(self._buckets )
def _A ( self : Dict , UpperCAmelCase_ : int , UpperCAmelCase_ : KEY , UpperCAmelCase_ : VAL ):
SCREAMING_SNAKE_CASE : Optional[int] = self._buckets[ind]
if not stored:
SCREAMING_SNAKE_CASE : Tuple = _Item(UpperCAmelCase_ , UpperCAmelCase_ )
self._len += 1
return True
elif stored.key == key:
SCREAMING_SNAKE_CASE : Tuple = _Item(UpperCAmelCase_ , UpperCAmelCase_ )
return True
else:
return False
def _A ( self : Optional[Any] ):
SCREAMING_SNAKE_CASE : Any = len(self._buckets ) * self._capacity_factor
return len(self ) >= int(UpperCAmelCase_ )
def _A ( self : Any ):
if len(self._buckets ) <= self._initial_block_size:
return False
SCREAMING_SNAKE_CASE : Union[str, Any] = len(self._buckets ) * self._capacity_factor / 2
return len(self ) < limit
def _A ( self : Union[str, Any] , UpperCAmelCase_ : int ):
SCREAMING_SNAKE_CASE : Optional[Any] = self._buckets
SCREAMING_SNAKE_CASE : List[Any] = [None] * new_size
SCREAMING_SNAKE_CASE : str = 0
for item in old_buckets:
if item:
self._add_item(item.key , item.val )
def _A ( self : Any ):
self._resize(len(self._buckets ) * 2 )
def _A ( self : Tuple ):
self._resize(len(self._buckets ) // 2 )
def _A ( self : Tuple , UpperCAmelCase_ : KEY ):
SCREAMING_SNAKE_CASE : Optional[Any] = self._get_bucket_index(UpperCAmelCase_ )
for _ in range(len(self._buckets ) ):
yield ind
SCREAMING_SNAKE_CASE : Optional[int] = self._get_next_ind(UpperCAmelCase_ )
def _A ( self : Union[str, Any] , UpperCAmelCase_ : KEY , UpperCAmelCase_ : VAL ):
for ind in self._iterate_buckets(UpperCAmelCase_ ):
if self._try_set(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ):
break
def __setitem__( self : Optional[int] , UpperCAmelCase_ : KEY , UpperCAmelCase_ : VAL ):
if self._is_full():
self._size_up()
self._add_item(UpperCAmelCase_ , UpperCAmelCase_ )
def __delitem__( self : Union[str, Any] , UpperCAmelCase_ : KEY ):
for ind in self._iterate_buckets(UpperCAmelCase_ ):
SCREAMING_SNAKE_CASE : Optional[int] = self._buckets[ind]
if item is None:
raise KeyError(UpperCAmelCase_ )
if item is _deleted:
continue
if item.key == key:
SCREAMING_SNAKE_CASE : int = _deleted
self._len -= 1
break
if self._is_sparse():
self._size_down()
def __getitem__( self : Optional[int] , UpperCAmelCase_ : KEY ):
for ind in self._iterate_buckets(UpperCAmelCase_ ):
SCREAMING_SNAKE_CASE : Union[str, Any] = self._buckets[ind]
if item is None:
break
if item is _deleted:
continue
if item.key == key:
return item.val
raise KeyError(UpperCAmelCase_ )
def __len__( self : Optional[int] ):
return self._len
def __iter__( self : Union[str, Any] ):
yield from (item.key for item in self._buckets if item)
def __repr__( self : List[Any] ):
SCREAMING_SNAKE_CASE : Union[str, Any] = " ,".join(
f'''{item.key}: {item.val}''' for item in self._buckets if item )
return f'''HashMap({val_string})'''
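# Usage sketch for the open-addressing map above.
if __name__ == "__main__":
    hm = HashMap(initial_block_size=4)
    for i in range(10):
        hm[f"key{i}"] = i           # triggers _size_up() as the load factor passes 0.75
    del hm["key3"]                  # tombstoned with _deleted, not removed in place
    assert "key3" not in hm and hm["key4"] == 4 and len(hm) == 9
    print(hm)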
| 319 |
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from transformers import (
VisualBertConfig,
VisualBertForMultipleChoice,
VisualBertForPreTraining,
VisualBertForQuestionAnswering,
VisualBertForVisualReasoning,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

rename_keys_prefix = [
("""bert.bert""", """visual_bert"""),
("""bert.cls""", """cls"""),
("""bert.classifier""", """cls"""),
("""token_type_embeddings_visual""", """visual_token_type_embeddings"""),
("""position_embeddings_visual""", """visual_position_embeddings"""),
("""projection""", """visual_projection"""),
]
ACCEPTABLE_CHECKPOINTS = [
"""nlvr2_coco_pre_trained.th""",
"""nlvr2_fine_tuned.th""",
"""nlvr2_pre_trained.th""",
"""vcr_coco_pre_train.th""",
"""vcr_fine_tune.th""",
"""vcr_pre_train.th""",
"""vqa_coco_pre_trained.th""",
"""vqa_fine_tuned.th""",
"""vqa_pre_trained.th""",
]
def load_state_dict(checkpoint_path):
    sd = torch.load(checkpoint_path, map_location="cpu")
    return sd


def get_new_dict(d, config, rename_keys_prefix=rename_keys_prefix):
    new_d = OrderedDict()
    new_d["visual_bert.embeddings.position_ids"] = torch.arange(config.max_position_embeddings).expand((1, -1))
    # detector_d = OrderedDict()
    for key in d:
        if "detector" in key:
            # detector_d[key.replace('detector.','')] = d[key]
            continue
        new_key = key
        for name_pair in rename_keys_prefix:
            new_key = new_key.replace(name_pair[0], name_pair[1])
        new_d[new_key] = d[key]
        if key == "bert.cls.predictions.decoder.weight":
            # Old bert code didn't have `decoder.bias`, but was added separately
            new_d["cls.predictions.decoder.bias"] = new_d["cls.predictions.bias"]
    return new_d
@torch.no_grad()
def convert_visual_bert_checkpoint(checkpoint_path, pytorch_dump_folder_path):
    """Copy/paste/tweak the original checkpoint's weights into the VisualBERT structure."""
    assert (
        checkpoint_path.split("/")[-1] in ACCEPTABLE_CHECKPOINTS
    ), f'''The checkpoint provided must be in {ACCEPTABLE_CHECKPOINTS}.'''

    # Get Config
    if "pre" in checkpoint_path:
        model_type = "pretraining"
        if "vcr" in checkpoint_path:
            config_params = {"visual_embedding_dim": 512}
        elif "vqa_advanced" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048}
        elif "vqa" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048}
        elif "nlvr" in checkpoint_path:
            config_params = {"visual_embedding_dim": 1024}
        else:
            raise NotImplementedError(f'''No implementation found for `{checkpoint_path}`.''')
    else:
        if "vcr" in checkpoint_path:
            config_params = {"visual_embedding_dim": 512}
            model_type = "multichoice"
        elif "vqa_advanced" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048}
            model_type = "vqa_advanced"
        elif "vqa" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048, "num_labels": 3129}
            model_type = "vqa"
        elif "nlvr" in checkpoint_path:
            config_params = {
                "visual_embedding_dim": 1024,
                "num_labels": 2,
            }
            model_type = "nlvr"

    config = VisualBertConfig(**config_params)

    # Load State Dict
    state_dict = load_state_dict(checkpoint_path)
    new_state_dict = get_new_dict(state_dict, config)

    if model_type == "pretraining":
        model = VisualBertForPreTraining(config)
    elif model_type == "vqa":
        model = VisualBertForQuestionAnswering(config)
    elif model_type == "nlvr":
        model = VisualBertForVisualReasoning(config)
    elif model_type == "multichoice":
        model = VisualBertForMultipleChoice(config)

    model.load_state_dict(new_state_dict)
    # Save Checkpoints
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument("""orig_checkpoint_path""", type=str, help="""A path to .th on local filesystem.""")
parser.add_argument("""pytorch_dump_folder_path""", type=str, help="""Path to the output PyTorch model.""")
    args = parser.parse_args()
convert_visual_bert_checkpoint(args.orig_checkpoint_path, args.pytorch_dump_folder_path)
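# Example invocation (illustrative; the checkpoint file must be one of ACCEPTABLE_CHECKPOINTS):
#
#   python convert_visual_bert_original_pytorch_checkpoint_to_pytorch.py \
#       vqa_coco_pre_trained.th ./visualbert-vqa-coco-pre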
| 319 | 1 |
"""simple docstring"""
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import torch
from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available
@dataclass
class TextToVideoSDPipelineOutput(BaseOutput):
    """Output class for text-to-video pipelines; `frames` holds the decoded video frames."""

    frames: Union[List[np.ndarray], torch.FloatTensor]
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipeline_text_to_video_synth import TextToVideoSDPipeline
from .pipeline_text_to_video_synth_imgaimg import VideoToVideoSDPipeline # noqa: F401
from .pipeline_text_to_video_zero import TextToVideoZeroPipeline
| 91 | '''simple docstring'''
import argparse
import io
import requests
import torch
from omegaconf import OmegaConf
from diffusers import AutoencoderKL
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import (
assign_to_checkpoint,
conv_attn_to_linear,
create_vae_diffusers_config,
renew_vae_attention_paths,
renew_vae_resnet_paths,
)
def custom_convert_ldm_vae_checkpoint(checkpoint, config):
    vae_state_dict = checkpoint
    new_checkpoint = {}
    new_checkpoint["encoder.conv_in.weight"] = vae_state_dict["encoder.conv_in.weight"]
    new_checkpoint["encoder.conv_in.bias"] = vae_state_dict["encoder.conv_in.bias"]
    new_checkpoint["encoder.conv_out.weight"] = vae_state_dict["encoder.conv_out.weight"]
    new_checkpoint["encoder.conv_out.bias"] = vae_state_dict["encoder.conv_out.bias"]
    new_checkpoint["encoder.conv_norm_out.weight"] = vae_state_dict["encoder.norm_out.weight"]
    new_checkpoint["encoder.conv_norm_out.bias"] = vae_state_dict["encoder.norm_out.bias"]
    new_checkpoint["decoder.conv_in.weight"] = vae_state_dict["decoder.conv_in.weight"]
    new_checkpoint["decoder.conv_in.bias"] = vae_state_dict["decoder.conv_in.bias"]
    new_checkpoint["decoder.conv_out.weight"] = vae_state_dict["decoder.conv_out.weight"]
    new_checkpoint["decoder.conv_out.bias"] = vae_state_dict["decoder.conv_out.bias"]
    new_checkpoint["decoder.conv_norm_out.weight"] = vae_state_dict["decoder.norm_out.weight"]
    new_checkpoint["decoder.conv_norm_out.bias"] = vae_state_dict["decoder.norm_out.bias"]
    new_checkpoint["quant_conv.weight"] = vae_state_dict["quant_conv.weight"]
    new_checkpoint["quant_conv.bias"] = vae_state_dict["quant_conv.bias"]
    new_checkpoint["post_quant_conv.weight"] = vae_state_dict["post_quant_conv.weight"]
    new_checkpoint["post_quant_conv.bias"] = vae_state_dict["post_quant_conv.bias"]

    # Retrieves the keys for the encoder down blocks only
    num_down_blocks = len({".".join(layer.split(".")[:3]) for layer in vae_state_dict if "encoder.down" in layer})
    down_blocks = {
        layer_id: [key for key in vae_state_dict if f"""down.{layer_id}""" in key] for layer_id in range(num_down_blocks)
    }

    # Retrieves the keys for the decoder up blocks only
    num_up_blocks = len({".".join(layer.split(".")[:3]) for layer in vae_state_dict if "decoder.up" in layer})
    up_blocks = {
        layer_id: [key for key in vae_state_dict if f"""up.{layer_id}""" in key] for layer_id in range(num_up_blocks)
    }

    for i in range(num_down_blocks):
        resnets = [key for key in down_blocks[i] if f"""down.{i}""" in key and f"""down.{i}.downsample""" not in key]
        if f"""encoder.down.{i}.downsample.conv.weight""" in vae_state_dict:
            new_checkpoint[f"""encoder.down_blocks.{i}.downsamplers.0.conv.weight"""] = vae_state_dict.pop(
                f"""encoder.down.{i}.downsample.conv.weight""")
            new_checkpoint[f"""encoder.down_blocks.{i}.downsamplers.0.conv.bias"""] = vae_state_dict.pop(
                f"""encoder.down.{i}.downsample.conv.bias""")
        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"old": f"""down.{i}.block""", "new": f"""down_blocks.{i}.resnets"""}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)

    mid_resnets = [key for key in vae_state_dict if "encoder.mid.block" in key]
    num_mid_res_blocks = 2
    for i in range(1, num_mid_res_blocks + 1):
        resnets = [key for key in mid_resnets if f"""encoder.mid.block_{i}""" in key]
        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"old": f"""mid.block_{i}""", "new": f"""mid_block.resnets.{i - 1}"""}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)

    mid_attentions = [key for key in vae_state_dict if "encoder.mid.attn" in key]
    paths = renew_vae_attention_paths(mid_attentions)
    meta_path = {"old": "mid.attn_1", "new": "mid_block.attentions.0"}
    assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)
    conv_attn_to_linear(new_checkpoint)

    for i in range(num_up_blocks):
        block_id = num_up_blocks - 1 - i
        resnets = [
            key for key in up_blocks[block_id] if f"""up.{block_id}""" in key and f"""up.{block_id}.upsample""" not in key
        ]
        if f"""decoder.up.{block_id}.upsample.conv.weight""" in vae_state_dict:
            new_checkpoint[f"""decoder.up_blocks.{i}.upsamplers.0.conv.weight"""] = vae_state_dict[
                f"""decoder.up.{block_id}.upsample.conv.weight"""
            ]
            new_checkpoint[f"""decoder.up_blocks.{i}.upsamplers.0.conv.bias"""] = vae_state_dict[
                f"""decoder.up.{block_id}.upsample.conv.bias"""
            ]
        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"old": f"""up.{block_id}.block""", "new": f"""up_blocks.{i}.resnets"""}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)

    mid_resnets = [key for key in vae_state_dict if "decoder.mid.block" in key]
    num_mid_res_blocks = 2
    for i in range(1, num_mid_res_blocks + 1):
        resnets = [key for key in mid_resnets if f"""decoder.mid.block_{i}""" in key]
        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"old": f"""mid.block_{i}""", "new": f"""mid_block.resnets.{i - 1}"""}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)

    mid_attentions = [key for key in vae_state_dict if "decoder.mid.attn" in key]
    paths = renew_vae_attention_paths(mid_attentions)
    meta_path = {"old": "mid.attn_1", "new": "mid_block.attentions.0"}
    assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)
    conv_attn_to_linear(new_checkpoint)
    return new_checkpoint
def vae_pt_to_vae_diffuser(checkpoint_path: str, output_path: str):
    # Only supports the v1 inference config
    r = requests.get(
        "https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml")
    io_obj = io.BytesIO(r.content)
    original_config = OmegaConf.load(io_obj)
    image_size = 512
    device = "cuda" if torch.cuda.is_available() else "cpu"
    if checkpoint_path.endswith("safetensors"):
        from safetensors import safe_open

        checkpoint = {}
        with safe_open(checkpoint_path, framework="pt", device="cpu") as f:
            for key in f.keys():
                checkpoint[key] = f.get_tensor(key)
    else:
        checkpoint = torch.load(checkpoint_path, map_location=device)["state_dict"]

    # Convert the VAE model.
    vae_config = create_vae_diffusers_config(original_config, image_size=image_size)
    converted_vae_checkpoint = custom_convert_ldm_vae_checkpoint(checkpoint, vae_config)

    vae = AutoencoderKL(**vae_config)
    vae.load_state_dict(converted_vae_checkpoint)
    vae.save_pretrained(output_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--vae_pt_path', default=None, type=str, required=True, help='Path to the VAE.pt to convert.')
parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the VAE.pt to convert.')
    args = parser.parse_args()
vae_pt_to_vae_diffuser(args.vae_pt_path, args.dump_path)
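# Programmatic usage sketch (paths are hypothetical):
#
#   vae_pt_to_vae_diffuser("checkpoints/vae.pt", "converted/vae")
#   vae = AutoencoderKL.from_pretrained("converted/vae")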
| 1 | 0 |
"""simple docstring"""
import tempfile
import unittest
from make_student import create_student_by_copying_alternating_layers
from transformers import AutoConfig
from transformers.file_utils import cached_property
from transformers.testing_utils import require_torch
TINY_BART = "sshleifer/bart-tiny-random"
TINY_T5 = "patrickvonplaten/t5-tiny-random"


@require_torch
class MakeStudentTester(unittest.TestCase):
    @cached_property
    def teacher_config(self):
        return AutoConfig.from_pretrained(TINY_BART)

    def test_valid_t5(self):
        student, *_ = create_student_by_copying_alternating_layers(TINY_T5, tempfile.mkdtemp(), e=1, d=1)
        self.assertEqual(student.config.num_hidden_layers, 1)

    def test_asymmetric_t5(self):
        student, *_ = create_student_by_copying_alternating_layers(TINY_T5, tempfile.mkdtemp(), e=1, d=None)

    def test_same_decoder_small_encoder(self):
        student, *_ = create_student_by_copying_alternating_layers(TINY_BART, tempfile.mkdtemp(), e=1, d=None)
        self.assertEqual(student.config.encoder_layers, 1)
        self.assertEqual(student.config.decoder_layers, self.teacher_config.encoder_layers)

    def test_small_enc_small_dec(self):
        student, *_ = create_student_by_copying_alternating_layers(TINY_BART, tempfile.mkdtemp(), e=1, d=1)
        self.assertEqual(student.config.encoder_layers, 1)
        self.assertEqual(student.config.decoder_layers, 1)

    def test_raises_assert(self):
        with self.assertRaises(AssertionError):
            create_student_by_copying_alternating_layers(TINY_BART, tempfile.mkdtemp(), e=None, d=None)
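# Usage sketch (illustrative): shrink a teacher to a 1-encoder/1-decoder student.
if __name__ == "__main__":
    student, *_ = create_student_by_copying_alternating_layers(
        "sshleifer/bart-tiny-random", tempfile.mkdtemp(), e=1, d=1
    )
    print(student.config.encoder_layers, student.config.decoder_layers)  # 1 1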
| 132 |
"""simple docstring"""
import os
import re
import shutil
from argparse import ArgumentParser, Namespace
from datasets.commands import BaseDatasetsCLICommand
from datasets.utils.logging import get_logger
HIGHLIGHT_MESSAGE_PRE = '''<<<<<<< This should probably be modified because it mentions: '''
HIGHLIGHT_MESSAGE_POST = '''=======
>>>>>>>
'''
TO_HIGHLIGHT = [
'''TextEncoderConfig''',
'''ByteTextEncoder''',
'''SubwordTextEncoder''',
'''encoder_config''',
'''maybe_build_from_corpus''',
'''manual_dir''',
]
TO_CONVERT = [
# (pattern, replacement)
# Order is important here for some replacements
(r'''tfds\.core''', r'''datasets'''),
(r'''tf\.io\.gfile\.GFile''', r'''open'''),
(r'''tf\.([\w\d]+)''', r'''datasets.Value(\'\1\')'''),
(r'''tfds\.features\.Text\(\)''', r'''datasets.Value(\'string\')'''),
(r'''tfds\.features\.Text\(''', r'''datasets.Value(\'string\'),'''),
(r'''features\s*=\s*tfds.features.FeaturesDict\(''', r'''features=datasets.Features('''),
(r'''tfds\.features\.FeaturesDict\(''', r'''dict('''),
(r'''The TensorFlow Datasets Authors''', r'''The TensorFlow Datasets Authors and the HuggingFace Datasets Authors'''),
(r'''tfds\.''', r'''datasets.'''),
(r'''dl_manager\.manual_dir''', r'''self.config.data_dir'''),
(r'''self\.builder_config''', r'''self.config'''),
]
def convert_command_factory(args: Namespace):
    """Factory function used to instantiate the command from the parsed CLI arguments."""
    return ConvertCommand(args.tfds_path, args.datasets_directory)
class ConvertCommand(BaseDatasetsCLICommand):
    """CLI command that converts a TensorFlow Datasets script into a HuggingFace Datasets script."""
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        train_parser = parser.add_parser(
            "convert", help="Convert a TensorFlow Datasets dataset to a HuggingFace Datasets dataset.",
        )
        train_parser.add_argument(
            "--tfds_path", type=str, required=True,
            help="Path to a TensorFlow Datasets folder to convert or a single tfds file to convert.",
        )
        train_parser.add_argument(
            "--datasets_directory", type=str, required=True, help="Path to the HuggingFace Datasets folder."
        )
        train_parser.set_defaults(func=convert_command_factory)
def __init__( self: List[str], a_: str, a_: str, *a_: str ):
'''simple docstring'''
_snake_case : Optional[Any] = get_logger("""datasets-cli/converting""" )
_snake_case : Any = tfds_path
_snake_case : Optional[Any] = datasets_directory
    def run(self):
        if os.path.isdir(self._tfds_path):
            abs_tfds_path = os.path.abspath(self._tfds_path)
        elif os.path.isfile(self._tfds_path):
            abs_tfds_path = os.path.dirname(self._tfds_path)
        else:
            raise ValueError("--tfds_path is neither a directory nor a file. Please check path.")

        abs_datasets_path = os.path.abspath(self._datasets_directory)

        self._logger.info(f"Converting datasets from {abs_tfds_path} to {abs_datasets_path}")

        utils_files = []
        with_manual_update = []
        imports_to_builder_map = {}

        if os.path.isdir(self._tfds_path):
            file_names = os.listdir(abs_tfds_path)
        else:
            file_names = [os.path.basename(self._tfds_path)]

        for f_name in file_names:
            self._logger.info(f"Looking at file {f_name}")
            input_file = os.path.join(abs_tfds_path, f_name)
            output_file = os.path.join(abs_datasets_path, f_name)

            if not os.path.isfile(input_file) or "__init__" in f_name or "_test" in f_name or ".py" not in f_name:
                self._logger.info("Skipping file")
                continue

            with open(input_file, encoding="utf-8") as f:
                lines = f.readlines()

            out_lines = []
            is_builder = False
            needs_manual_update = False
            tfds_imports = []
            for line in lines:
                out_line = line

                # Convert imports
                if "import tensorflow.compat.v2 as tf" in out_line:
                    continue
                elif "@tfds.core" in out_line:
                    continue
                elif "builder=self" in out_line:
                    continue
                elif "import tensorflow_datasets.public_api as tfds" in out_line:
                    out_line = "import datasets\n"
                elif "import tensorflow" in out_line:
                    # order is important here
                    out_line = ""
                    continue
                elif "from absl import logging" in out_line:
                    out_line = "from datasets import logging\n"
                elif "getLogger" in out_line:
                    out_line = out_line.replace("getLogger", "get_logger")
                elif any(expression in out_line for expression in TO_HIGHLIGHT):
                    needs_manual_update = True
                    to_remove = list(filter(lambda e: e in out_line, TO_HIGHLIGHT))
                    out_lines.append(HIGHLIGHT_MESSAGE_PRE + str(to_remove) + "\n")
                    out_lines.append(out_line)
                    out_lines.append(HIGHLIGHT_MESSAGE_POST)
                    continue
                else:
                    for pattern, replacement in TO_CONVERT:
                        out_line = re.sub(pattern, replacement, out_line)

                # Take care of saving utilities (to later move them together with main script)
                if "tensorflow_datasets" in out_line:
                    match = re.match(r"from\stensorflow_datasets.*import\s([^\.\r\n]+)", out_line)
                    tfds_imports.extend(imp.strip() for imp in match.group(1).split(","))
                    out_line = "from . import " + match.group(1)

                # Check we have not forget anything
                if "tf." in out_line or "tfds." in out_line or "tensorflow_datasets" in out_line:
                    raise ValueError(f"Error converting {out_line.strip()}")

                if "GeneratorBasedBuilder" in out_line or "BeamBasedBuilder" in out_line:
                    is_builder = True
                out_lines.append(out_line)

            if is_builder or "wmt" in f_name:
                # We create a new directory for each dataset
                dir_name = f_name.replace(".py", "")
                output_dir = os.path.join(abs_datasets_path, dir_name)
                output_file = os.path.join(output_dir, f_name)
                os.makedirs(output_dir, exist_ok=True)
                self._logger.info(f"Adding directory {output_dir}")
                imports_to_builder_map.update({imp: output_dir for imp in tfds_imports})
            else:
                # Utilities will be moved at the end
                utils_files.append(output_file)

            if needs_manual_update:
                with_manual_update.append(output_file)

            with open(output_file, "w", encoding="utf-8") as f:
                f.writelines(out_lines)
            self._logger.info(f"Converted in {output_file}")

        for utils_file in utils_files:
            try:
                f_name = os.path.basename(utils_file)
                dest_folder = imports_to_builder_map[f_name.replace(".py", "")]
                self._logger.info(f"Moving {dest_folder} to {utils_file}")
                shutil.copy(utils_file, dest_folder)
            except KeyError:
                self._logger.error(f"Cannot find destination folder for {utils_file}. Please copy manually.")

        if with_manual_update:
            for file_path in with_manual_update:
                self._logger.warning(
                    f"You need to manually update file {file_path} to remove configurations using 'TextEncoderConfig'."
                )
| 132 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_xlm_roberta_xl''': [
'''XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''XLMRobertaXLConfig''',
'''XLMRobertaXLOnnxConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xlm_roberta_xl"] = [
'''XLM_ROBERTA_XL_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''XLMRobertaXLForCausalLM''',
'''XLMRobertaXLForMaskedLM''',
'''XLMRobertaXLForMultipleChoice''',
'''XLMRobertaXLForQuestionAnswering''',
'''XLMRobertaXLForSequenceClassification''',
'''XLMRobertaXLForTokenClassification''',
'''XLMRobertaXLModel''',
'''XLMRobertaXLPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_xlm_roberta_xl import (
XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLMRobertaXLConfig,
XLMRobertaXLOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm_roberta_xl import (
XLM_ROBERTA_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMRobertaXLForCausalLM,
XLMRobertaXLForMaskedLM,
XLMRobertaXLForMultipleChoice,
XLMRobertaXLForQuestionAnswering,
XLMRobertaXLForSequenceClassification,
XLMRobertaXLForTokenClassification,
XLMRobertaXLModel,
XLMRobertaXLPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
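# With the lazy module in place, importing this package stays cheap: a submodule is only
# materialized on first attribute access, e.g. (illustrative)
#   from transformers.models.xlm_roberta_xl import XLMRobertaXLModel  # triggers the real import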
| 54 |
from __future__ import annotations
import math
def prime_sieve(num: int) -> list[int]:
    """Sieve of Eratosthenes: return every prime up to and including num."""
    if num <= 0:
        msg = f"{num}: Invalid input, please enter a positive integer."
        raise ValueError(msg)

    sieve = [True] * (num + 1)
    prime = []
    start = 2
    end = int(math.sqrt(num))

    while start <= end:
        # If start is a prime
        if sieve[start] is True:
            prime.append(start)

            # Set multiples of start be False
            for i in range(start * start, num + 1, start):
                if sieve[i] is True:
                    sieve[i] = False
        start += 1

    for j in range(end + 1, num + 1):
        if sieve[j] is True:
            prime.append(j)

    return prime
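# Worked example: for num = 30 the outer loop runs start = 2..5 (floor(sqrt(30)) = 5),
# crossing off composite multiples, and the survivors are collected:
#   prime_sieve(30) == [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]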
if __name__ == "__main__":
print(prime_sieve(int(input('''Enter a positive integer: ''').strip()))) | 206 | 0 |
import argparse
import json
import os
import sys
import tempfile
import unittest
from argparse import Namespace
from dataclasses import dataclass, field
from enum import Enum
from pathlib import Path
from typing import List, Literal, Optional
import yaml
from transformers import HfArgumentParser, TrainingArguments
from transformers.hf_argparser import make_choice_type_function, string_to_bool
# Since Python 3.10, we can use the builtin `|` operator for Union types
# See PEP 604: https://peps.python.org/pep-0604
is_python_no_less_than_3_10 = sys.version_info >= (3, 10)


def list_field(default=None, metadata=None):
    return field(default_factory=lambda: default, metadata=metadata)
@dataclass
class BasicExample:
    foo: int
    bar: float
    baz: str
    flag: bool


@dataclass
class WithDefaultExample:
    foo: int = 42
    baz: str = field(default="toto", metadata={"help": "help message"})


@dataclass
class WithDefaultBoolExample:
    foo: bool = False
    baz: bool = True
    opt: Optional[bool] = None


class BasicEnum(Enum):
    titi = "titi"
    toto = "toto"


class MixedTypeEnum(Enum):
    titi = "titi"
    toto = "toto"
    fourtytwo = 42


@dataclass
class EnumExample:
    foo: BasicEnum = "toto"

    def __post_init__(self):
        self.foo = BasicEnum(self.foo)


@dataclass
class MixedTypeEnumExample:
    foo: MixedTypeEnum = "toto"

    def __post_init__(self):
        self.foo = MixedTypeEnum(self.foo)


@dataclass
class OptionalExample:
    foo: Optional[int] = None
    bar: Optional[float] = field(default=None, metadata={"help": "help message"})
    baz: Optional[str] = None
    ces: Optional[List[str]] = list_field(default=[])
    des: Optional[List[int]] = list_field(default=[])


@dataclass
class ListExample:
    foo_int: List[int] = list_field(default=[])
    bar_int: List[int] = list_field(default=[1, 2, 3])
    foo_str: List[str] = list_field(default=["Hallo", "Bonjour", "Hello"])
    foo_float: List[float] = list_field(default=[0.1, 0.2, 0.3])


@dataclass
class RequiredExample:
    required_list: List[int] = field()
    required_str: str = field()
    required_enum: BasicEnum = field()

    def __post_init__(self):
        self.required_enum = BasicEnum(self.required_enum)


@dataclass
class StringLiteralAnnotationExample:
    foo: int
    required_enum: "BasicEnum" = field()
    opt: "Optional[bool]" = None
    baz: "str" = field(default="toto", metadata={"help": "help message"})
    foo_str: "List[str]" = list_field(default=["Hallo", "Bonjour", "Hello"])


if is_python_no_less_than_3_10:

    @dataclass
    class WithDefaultBoolExamplePep604:
        foo: bool = False
        baz: bool = True
        opt: bool | None = None

    @dataclass
    class OptionalExamplePep604:
        foo: int | None = None
        bar: float | None = field(default=None, metadata={"help": "help message"})
        baz: str | None = None
        ces: list[str] | None = list_field(default=[])
        des: list[int] | None = list_field(default=[])
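# Quick usage sketch for the API under test (illustrative, standard HfArgumentParser calls):
#   parser = HfArgumentParser(BasicExample)
#   (example,) = parser.parse_args_into_dataclasses(["--foo", "1", "--bar", "0.5", "--baz", "x", "--flag", "true"])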
class HfArgumentParserTest(unittest.TestCase):
    def argparsersEqual(self, a: argparse.ArgumentParser, b: argparse.ArgumentParser):
        """
        Small helper to check pseudo-equality of parsed arguments on `ArgumentParser` instances.
        """
        self.assertEqual(len(a._actions), len(b._actions))
        for x, y in zip(a._actions, b._actions):
            xx = {k: v for k, v in vars(x).items() if k != "container"}
            yy = {k: v for k, v in vars(y).items() if k != "container"}

            # Choices with mixed type have custom function as "type"
            # So we need to compare results directly for equality
            if xx.get("choices", None) and yy.get("choices", None):
                for expected_choice in yy["choices"] + xx["choices"]:
                    self.assertEqual(xx["type"](expected_choice), yy["type"](expected_choice))
                del xx["type"], yy["type"]

            self.assertEqual(xx, yy)

    def test_basic(self):
        parser = HfArgumentParser(BasicExample)

        expected = argparse.ArgumentParser()
        expected.add_argument("--foo", type=int, required=True)
        expected.add_argument("--bar", type=float, required=True)
        expected.add_argument("--baz", type=str, required=True)
        expected.add_argument("--flag", type=string_to_bool, default=False, const=True, nargs="?")
        self.argparsersEqual(parser, expected)

        args = ["--foo", "1", "--baz", "quux", "--bar", "0.5"]
        (example,) = parser.parse_args_into_dataclasses(args, look_for_args_file=False)
        self.assertFalse(example.flag)

    def test_with_default(self):
        parser = HfArgumentParser(WithDefaultExample)

        expected = argparse.ArgumentParser()
        expected.add_argument("--foo", default=42, type=int)
        expected.add_argument("--baz", default="toto", type=str, help="help message")
        self.argparsersEqual(parser, expected)

    def test_with_default_bool(self):
        expected = argparse.ArgumentParser()
        expected.add_argument("--foo", type=string_to_bool, default=False, const=True, nargs="?")
        expected.add_argument("--baz", type=string_to_bool, default=True, const=True, nargs="?")
        # A boolean no_* argument always has to come after its "default: True" regular counter-part
        # and its default must be set to False
        expected.add_argument("--no_baz", action="store_false", default=False, dest="baz")
        expected.add_argument("--opt", type=string_to_bool, default=None)

        dataclass_types = [WithDefaultBoolExample]
        if is_python_no_less_than_3_10:
            dataclass_types.append(WithDefaultBoolExamplePep604)

        for dataclass_type in dataclass_types:
            parser = HfArgumentParser(dataclass_type)
            self.argparsersEqual(parser, expected)

            args = parser.parse_args([])
            self.assertEqual(args, Namespace(foo=False, baz=True, opt=None))

            args = parser.parse_args(["--foo", "--no_baz"])
            self.assertEqual(args, Namespace(foo=True, baz=False, opt=None))

            args = parser.parse_args(["--foo", "--baz"])
            self.assertEqual(args, Namespace(foo=True, baz=True, opt=None))

            args = parser.parse_args(["--foo", "True", "--baz", "True", "--opt", "True"])
            self.assertEqual(args, Namespace(foo=True, baz=True, opt=True))

            args = parser.parse_args(["--foo", "False", "--baz", "False", "--opt", "False"])
            self.assertEqual(args, Namespace(foo=False, baz=False, opt=False))

    def test_with_enum(self):
        parser = HfArgumentParser(MixedTypeEnumExample)

        expected = argparse.ArgumentParser()
        expected.add_argument(
            "--foo",
            default="toto",
            choices=["titi", "toto", 42],
            type=make_choice_type_function(["titi", "toto", 42]),
        )
        self.argparsersEqual(parser, expected)

        args = parser.parse_args([])
        self.assertEqual(args.foo, "toto")
        enum_ex = parser.parse_args_into_dataclasses([])[0]
        self.assertEqual(enum_ex.foo, MixedTypeEnum.toto)

        args = parser.parse_args(["--foo", "titi"])
        self.assertEqual(args.foo, "titi")
        enum_ex = parser.parse_args_into_dataclasses(["--foo", "titi"])[0]
        self.assertEqual(enum_ex.foo, MixedTypeEnum.titi)

        args = parser.parse_args(["--foo", "42"])
        self.assertEqual(args.foo, 42)
        enum_ex = parser.parse_args_into_dataclasses(["--foo", "42"])[0]
        self.assertEqual(enum_ex.foo, MixedTypeEnum.fourtytwo)

    def test_with_literal(self):
        @dataclass
        class LiteralExample:
            foo: Literal["titi", "toto", 42] = "toto"

        parser = HfArgumentParser(LiteralExample)

        expected = argparse.ArgumentParser()
        expected.add_argument(
            "--foo",
            default="toto",
            choices=("titi", "toto", 42),
            type=make_choice_type_function(["titi", "toto", 42]),
        )
        self.argparsersEqual(parser, expected)

        args = parser.parse_args([])
        self.assertEqual(args.foo, "toto")

        args = parser.parse_args(["--foo", "titi"])
        self.assertEqual(args.foo, "titi")

        args = parser.parse_args(["--foo", "42"])
        self.assertEqual(args.foo, 42)

    def test_with_list(self):
        parser = HfArgumentParser(ListExample)

        expected = argparse.ArgumentParser()
        expected.add_argument("--foo_int", nargs="+", default=[], type=int)
        expected.add_argument("--bar_int", nargs="+", default=[1, 2, 3], type=int)
        expected.add_argument("--foo_str", nargs="+", default=["Hallo", "Bonjour", "Hello"], type=str)
        expected.add_argument("--foo_float", nargs="+", default=[0.1, 0.2, 0.3], type=float)
        self.argparsersEqual(parser, expected)

        args = parser.parse_args([])
        self.assertEqual(
            args,
            Namespace(foo_int=[], bar_int=[1, 2, 3], foo_str=["Hallo", "Bonjour", "Hello"], foo_float=[0.1, 0.2, 0.3]),
        )

        args = parser.parse_args("--foo_int 1 --bar_int 2 3 --foo_str a b c --foo_float 0.1 0.7".split())
        self.assertEqual(args, Namespace(foo_int=[1], bar_int=[2, 3], foo_str=["a", "b", "c"], foo_float=[0.1, 0.7]))

    def test_with_optional(self):
        expected = argparse.ArgumentParser()
        expected.add_argument("--foo", default=None, type=int)
        expected.add_argument("--bar", default=None, type=float, help="help message")
        expected.add_argument("--baz", default=None, type=str)
        expected.add_argument("--ces", nargs="+", default=[], type=str)
        expected.add_argument("--des", nargs="+", default=[], type=int)

        dataclass_types = [OptionalExample]
        if is_python_no_less_than_3_10:
            dataclass_types.append(OptionalExamplePep604)

        for dataclass_type in dataclass_types:
            parser = HfArgumentParser(dataclass_type)
            self.argparsersEqual(parser, expected)

            args = parser.parse_args([])
            self.assertEqual(args, Namespace(foo=None, bar=None, baz=None, ces=[], des=[]))

            args = parser.parse_args("--foo 12 --bar 3.14 --baz 42 --ces a b c --des 1 2 3".split())
            self.assertEqual(args, Namespace(foo=12, bar=3.14, baz="42", ces=["a", "b", "c"], des=[1, 2, 3]))

    def test_with_required(self):
        parser = HfArgumentParser(RequiredExample)

        expected = argparse.ArgumentParser()
        expected.add_argument("--required_list", nargs="+", type=int, required=True)
        expected.add_argument("--required_str", type=str, required=True)
        expected.add_argument(
            "--required_enum",
            type=make_choice_type_function(["titi", "toto"]),
            choices=["titi", "toto"],
            required=True,
        )
        self.argparsersEqual(parser, expected)

    def test_with_string_literal_annotation(self):
        parser = HfArgumentParser(StringLiteralAnnotationExample)

        expected = argparse.ArgumentParser()
        expected.add_argument("--foo", type=int, required=True)
        expected.add_argument(
            "--required_enum",
            type=make_choice_type_function(["titi", "toto"]),
            choices=["titi", "toto"],
            required=True,
        )
        expected.add_argument("--opt", type=string_to_bool, default=None)
        expected.add_argument("--baz", default="toto", type=str, help="help message")
        expected.add_argument("--foo_str", nargs="+", default=["Hallo", "Bonjour", "Hello"], type=str)
        self.argparsersEqual(parser, expected)

    def test_parse_dict(self):
        parser = HfArgumentParser(BasicExample)

        args_dict = {
            "foo": 12,
            "bar": 3.14,
            "baz": "42",
            "flag": True,
        }

        parsed_args = parser.parse_dict(args_dict)[0]
        args = BasicExample(**args_dict)
        self.assertEqual(parsed_args, args)

    def test_parse_dict_extra_key(self):
        parser = HfArgumentParser(BasicExample)

        args_dict = {
            "foo": 12,
            "bar": 3.14,
            "baz": "42",
            "flag": True,
            "extra": 42,
        }

        self.assertRaises(ValueError, parser.parse_dict, args_dict, allow_extra_keys=False)

    def test_parse_json(self):
        parser = HfArgumentParser(BasicExample)

        args_dict_for_json = {
            "foo": 12,
            "bar": 3.14,
            "baz": "42",
            "flag": True,
        }
        with tempfile.TemporaryDirectory() as tmp_dir:
            temp_local_path = os.path.join(tmp_dir, "temp_json")
            os.mkdir(temp_local_path)
            with open(temp_local_path + ".json", "w+") as f:
                json.dump(args_dict_for_json, f)
            # JSON is a subset of YAML, so parse_yaml_file can load it as well
            parsed_args = parser.parse_yaml_file(Path(temp_local_path + ".json"))[0]

        args = BasicExample(**args_dict_for_json)
        self.assertEqual(parsed_args, args)

    def test_parse_yaml(self):
        parser = HfArgumentParser(BasicExample)

        args_dict_for_yaml = {
            "foo": 12,
            "bar": 3.14,
            "baz": "42",
            "flag": True,
        }
        with tempfile.TemporaryDirectory() as tmp_dir:
            temp_local_path = os.path.join(tmp_dir, "temp_yaml")
            os.mkdir(temp_local_path)
            with open(temp_local_path + ".yaml", "w+") as f:
                yaml.dump(args_dict_for_yaml, f)
            parsed_args = parser.parse_yaml_file(Path(temp_local_path + ".yaml"))[0]
        args = BasicExample(**args_dict_for_yaml)
        self.assertEqual(parsed_args, args)

    def test_integration_training_args(self):
        parser = HfArgumentParser(TrainingArguments)
        self.assertIsNotNone(parser)
| 354 |
import qiskit
def half_adder(bit0: int, bit1: int) -> qiskit.result.counts.Counts:
    """Build and simulate a quantum half adder for the two input bits."""
    # Use Aer's simulator
    simulator = qiskit.Aer.get_backend("aer_simulator")

    qc_ha = qiskit.QuantumCircuit(4, 2)
    # encode inputs in qubits 0 and 1
    if bit0 == 1:
        qc_ha.x(0)
    if bit1 == 1:
        qc_ha.x(1)
    qc_ha.barrier()
    # use cnots to write XOR of the inputs on qubit2
    qc_ha.cx(0, 2)
    qc_ha.cx(1, 2)
    # use ccx / toffoli gate to write AND of the inputs on qubit3
    qc_ha.ccx(0, 1, 3)
    qc_ha.barrier()
    # extract outputs
    qc_ha.measure(2, 0)  # extract XOR value
    qc_ha.measure(3, 1)  # extract AND value
    # Execute the circuit on the qasm simulator
    job = qiskit.execute(qc_ha, simulator, shots=1000)
    # Return the histogram data of the results of the experiment
    return job.result().get_counts(qc_ha)


if __name__ == "__main__":
    counts = half_adder(1, 1)
    print(f"Half Adder Output Qubit Counts: {counts}")
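# Truth table realized by the circuit (sum = XOR on classical bit 0, carry = AND on bit 1):
#   (0,0) -> "00"; (0,1) and (1,0) -> "01"; (1,1) -> "10"
# so half_adder(1, 1) should report the bitstring "10" for all 1000 shots.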
| 315 | 0 |
from statistics import mean
import numpy as np
def calculate_turn_around_time(
    process_name: list, arrival_time: list, burst_time: list, no_of_process: int
) -> list:
    """Calculate the turn-around time of each process under HRRN scheduling."""
    current_time = 0
    # Number of processes finished
    finished_process_count = 0
    # Displays the finished process.
    # If it is 0, the performance is completed if it is 1, before the performance.
    finished_process = [0] * no_of_process
    # List to include calculation results
    turn_around_time = [0] * no_of_process

    # Sort by arrival time.
    burst_time = [burst_time[i] for i in np.argsort(arrival_time)]
    process_name = [process_name[i] for i in np.argsort(arrival_time)]
    arrival_time.sort()

    while no_of_process > finished_process_count:
        i = 0
        while finished_process[i] == 1:
            i += 1
        if current_time < arrival_time[i]:
            current_time = arrival_time[i]

        response_ratio = 0
        # Index showing the location of the process being performed
        loc = 0
        # Saves the current response ratio.
        temp = 0
        for i in range(0, no_of_process):
            if finished_process[i] == 0 and arrival_time[i] <= current_time:
                temp = (burst_time[i] + (current_time - arrival_time[i])) / burst_time[i]
            if response_ratio < temp:
                response_ratio = temp
                loc = i

        # Calculate the turn around time
        turn_around_time[loc] = current_time + burst_time[loc] - arrival_time[loc]
        current_time += burst_time[loc]
        # Indicates that the process has been performed.
        finished_process[loc] = 1
        # Increase finished_process_count by 1
        finished_process_count += 1

    return turn_around_time


def calculate_waiting_time(
    process_name: list, turn_around_time: list, burst_time: list, no_of_process: int
) -> list:
    """Calculate the waiting time of each process: turn-around time minus burst time."""
    waiting_time = [0] * no_of_process
    for i in range(0, no_of_process):
        waiting_time[i] = turn_around_time[i] - burst_time[i]
    return waiting_time


if __name__ == "__main__":
    no_of_process = 5
    process_name = ["A", "B", "C", "D", "E"]
    arrival_time = [1, 2, 3, 4, 5]
    burst_time = [1, 2, 3, 4, 5]

    turn_around_time = calculate_turn_around_time(
        process_name, arrival_time, burst_time, no_of_process
    )
    waiting_time = calculate_waiting_time(
        process_name, turn_around_time, burst_time, no_of_process
    )
print("""Process name \tArrival time \tBurst time \tTurn around time \tWaiting time""")
for i in range(0, no_of_process):
print(
F"""{process_name[i]}\t\t{arrival_time[i]}\t\t{burst_time[i]}\t\t"""
F"""{turn_around_time[i]}\t\t\t{waiting_time[i]}"""
)
print(F"""average waiting time : {mean(waiting_time):.5f}""")
print(F"""average turn around time : {mean(turn_around_time):.5f}""")
| 302 |
from __future__ import annotations
lowerCamelCase__ = """#"""
class SCREAMING_SNAKE_CASE :
def __init__( self : Optional[Any] ):
'''simple docstring'''
__a = {}
def UpperCamelCase_ ( self : Optional[Any] , __lowercase : str ):
'''simple docstring'''
__a = self._trie
for char in text:
if char not in trie:
__a = {}
__a = trie[char]
__a = True
def UpperCamelCase_ ( self : Tuple , __lowercase : str ):
'''simple docstring'''
__a = self._trie
for char in prefix:
if char in trie:
__a = trie[char]
else:
return []
return self._elements(__lowercase )
def UpperCamelCase_ ( self : Optional[int] , __lowercase : dict ):
'''simple docstring'''
__a = []
for c, v in d.items():
__a = [""" """] if c == END else [(c + s) for s in self._elements(__lowercase )]
result.extend(__lowercase )
return tuple(__lowercase )
lowerCamelCase__ = Trie()
lowerCamelCase__ = ("""depart""", """detergent""", """daring""", """dog""", """deer""", """deal""")
for word in words:
trie.insert_word(word)
def lowerCAmelCase__ ( _SCREAMING_SNAKE_CASE : str ):
"""simple docstring"""
__a = trie.find_word(_SCREAMING_SNAKE_CASE )
return tuple(string + word for word in suffixes )
def lowerCAmelCase__ ( ):
"""simple docstring"""
print(autocomplete_using_trie("""de""" ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
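# For the words inserted above, autocomplete_using_trie("de") yields
# ('depart ', 'detergent ', 'deer ', 'deal ') - each END node contributes the trailing space.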
| 302 | 1 |
from __future__ import annotations
def depth_first_search(graph: dict, start: str) -> set[str]:
    """Depth First Search on a graph represented as an adjacency dict."""
    explored, stack = set(start), [start]
    while stack:
        v = stack.pop()
        explored.add(v)
        # Differences from BFS:
        # 1) pop last element instead of first one
        # 2) add adjacent elements to stack without exploring them
        for adj in reversed(graph[v]):
            if adj not in explored:
                stack.append(adj)
    return explored
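# Trace on the graph G below from "A": neighbours are pushed in reverse, so "B" is expanded
# before "C" and "D"; every vertex is reachable, so the traversal returns all of A..G.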
G = {
"A": ["B", "C", "D"],
"B": ["A", "D", "E"],
"C": ["A", "F"],
"D": ["B", "D"],
"E": ["B", "F"],
"F": ["C", "E", "G"],
"G": ["F"],
}
if __name__ == "__main__":
import doctest
doctest.testmod()
print(depth_first_search(G, "A"))
| 41 |
import argparse
from pathlib import Path
from typing import Dict, OrderedDict, Tuple
import torch
from audiocraft.models import MusicGen
from transformers import (
AutoFeatureExtractor,
AutoTokenizer,
EncodecModel,
MusicgenDecoderConfig,
MusicgenForConditionalGeneration,
MusicgenProcessor,
TaEncoderModel,
)
from transformers.models.musicgen.modeling_musicgen import MusicgenForCausalLM
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

EXPECTED_MISSING_KEYS = ["model.decoder.embed_positions.weights"]
def rename_keys(name: str) -> str:
    if "emb" in name:
        name = name.replace("emb", "model.decoder.embed_tokens")
    if "transformer" in name:
        name = name.replace("transformer", "model.decoder")
    if "cross_attention" in name:
        name = name.replace("cross_attention", "encoder_attn")
    if "linear1" in name:
        name = name.replace("linear1", "fc1")
    if "linear2" in name:
        name = name.replace("linear2", "fc2")
    if "norm1" in name:
        name = name.replace("norm1", "self_attn_layer_norm")
    if "norm_cross" in name:
        name = name.replace("norm_cross", "encoder_attn_layer_norm")
    if "norm2" in name:
        name = name.replace("norm2", "final_layer_norm")
    if "out_norm" in name:
        name = name.replace("out_norm", "model.decoder.layer_norm")
    if "linears" in name:
        name = name.replace("linears", "lm_heads")
    if "condition_provider.conditioners.description.output_proj" in name:
        name = name.replace("condition_provider.conditioners.description.output_proj", "enc_to_dec_proj")
    return name


def rename_state_dict(state_dict: OrderedDict, hidden_size: int) -> Tuple[Dict, Dict]:
    keys = list(state_dict.keys())
    enc_dec_proj_state_dict = {}
    for key in keys:
        val = state_dict.pop(key)
        key = rename_keys(key)
        if "in_proj_weight" in key:
            # split fused qkv proj
            state_dict[key.replace("in_proj_weight", "q_proj.weight")] = val[:hidden_size, :]
            state_dict[key.replace("in_proj_weight", "k_proj.weight")] = val[hidden_size : 2 * hidden_size, :]
            state_dict[key.replace("in_proj_weight", "v_proj.weight")] = val[-hidden_size:, :]
        elif "enc_to_dec_proj" in key:
            enc_dec_proj_state_dict[key[len("enc_to_dec_proj.") :]] = val
        else:
            state_dict[key] = val
    return state_dict, enc_dec_proj_state_dict


def decoder_config_from_checkpoint(checkpoint: str) -> MusicgenDecoderConfig:
    if checkpoint == "small":
        # default config values
        hidden_size = 1024
        num_hidden_layers = 24
        num_attention_heads = 16
    elif checkpoint == "medium":
        hidden_size = 1536
        num_hidden_layers = 48
        num_attention_heads = 24
    elif checkpoint == "large":
        hidden_size = 2048
        num_hidden_layers = 48
        num_attention_heads = 32
    else:
        raise ValueError(f"Checkpoint should be one of `['small', 'medium', 'large']`, got {checkpoint}.")
    config = MusicgenDecoderConfig(
        hidden_size=hidden_size,
        ffn_dim=hidden_size * 4,
        num_hidden_layers=num_hidden_layers,
        num_attention_heads=num_attention_heads,
    )
    return config


@torch.no_grad()
def convert_musicgen_checkpoint(checkpoint, pytorch_dump_folder=None, repo_id=None, device="cpu"):
    fairseq_model = MusicGen.get_pretrained(checkpoint, device=device)
    decoder_config = decoder_config_from_checkpoint(checkpoint)

    decoder_state_dict = fairseq_model.lm.state_dict()
    decoder_state_dict, enc_dec_proj_state_dict = rename_state_dict(
        decoder_state_dict, hidden_size=decoder_config.hidden_size
    )

    text_encoder = TaEncoderModel.from_pretrained("t5-base")
    audio_encoder = EncodecModel.from_pretrained("facebook/encodec_32khz")
    decoder = MusicgenForCausalLM(decoder_config).eval()

    # load all decoder weights - expect that we'll be missing embeddings and enc-dec projection
    missing_keys, unexpected_keys = decoder.load_state_dict(decoder_state_dict, strict=False)

    for key in missing_keys.copy():
        if key.startswith(("text_encoder", "audio_encoder")) or key in EXPECTED_MISSING_KEYS:
            missing_keys.remove(key)

    if len(missing_keys) > 0:
        raise ValueError(f"Missing key(s) in state_dict: {missing_keys}")

    if len(unexpected_keys) > 0:
        raise ValueError(f"Unexpected key(s) in state_dict: {unexpected_keys}")

    # init the composite model
    model = MusicgenForConditionalGeneration(text_encoder=text_encoder, audio_encoder=audio_encoder, decoder=decoder)

    # load the pre-trained enc-dec projection (from the decoder state dict)
    model.enc_to_dec_proj.load_state_dict(enc_dec_proj_state_dict)

    # check we can do a forward pass
    input_ids = torch.arange(0, 8, dtype=torch.long).reshape(2, -1)
    decoder_input_ids = input_ids.reshape(2 * 4, -1)

    with torch.no_grad():
        logits = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids).logits

    if logits.shape != (8, 1, 2048):
        raise ValueError("Incorrect shape for logits")

    # now construct the processor
    tokenizer = AutoTokenizer.from_pretrained("t5-base")
    feature_extractor = AutoFeatureExtractor.from_pretrained("facebook/encodec_32khz", padding_side="left")

    processor = MusicgenProcessor(feature_extractor=feature_extractor, tokenizer=tokenizer)

    # set the appropriate bos/pad token ids
    model.generation_config.decoder_start_token_id = 2048
    model.generation_config.pad_token_id = 2048

    # set other default generation config params
    model.generation_config.max_length = int(30 * audio_encoder.config.frame_rate)
    model.generation_config.do_sample = True
    model.generation_config.guidance_scale = 3.0

    if pytorch_dump_folder is not None:
        Path(pytorch_dump_folder).mkdir(exist_ok=True)
        logger.info(f"Saving model {checkpoint} to {pytorch_dump_folder}")
        model.save_pretrained(pytorch_dump_folder)
        processor.save_pretrained(pytorch_dump_folder)

    if repo_id:
        logger.info(f"Pushing model {checkpoint} to {repo_id}")
        model.push_to_hub(repo_id)
        processor.push_to_hub(repo_id)
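# Example invocation (hedged: script file name is hypothetical; `--push_to_hub` takes the target repo id):
#   python convert_musicgen.py --checkpoint small --pytorch_dump_folder ./musicgen-small --device cpu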
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--checkpoint",
default="small",
type=str,
help="Checkpoint size of the MusicGen model you'd like to convert. Can be one of: `['small', 'medium', 'large']`.",
)
parser.add_argument(
"--pytorch_dump_folder",
required=True,
default=None,
type=str,
help="Path to the output PyTorch model directory.",
)
parser.add_argument(
"--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
)
parser.add_argument(
"--device", default="cpu", type=str, help="Torch device to run the conversion, either cpu or cuda."
)
    args = parser.parse_args()
convert_musicgen_checkpoint(args.checkpoint, args.pytorch_dump_folder, args.push_to_hub)
| 41 | 1 |
import pyarrow.parquet as pq
import pytest
from datasets import Audio, Dataset, DatasetDict, Features, NamedSplit, Sequence, Value, config
from datasets.features.image import Image
from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter, get_writer_batch_size
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def _check_parquet_dataset(dataset, expected_features):
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_1", "col_2", "col_3"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype


@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_dataset_from_parquet_keep_in_memory(keep_in_memory, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = ParquetDatasetReader(parquet_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_parquet_dataset(dataset, expected_features)


@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_1": "string", "col_2": "int64", "col_3": "float64"},
        {"col_1": "string", "col_2": "string", "col_3": "string"},
        {"col_1": "int32", "col_2": "int32", "col_3": "int32"},
        {"col_1": "float32", "col_2": "float32", "col_3": "float32"},
    ],
)
def test_dataset_from_parquet_features(features, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = ParquetDatasetReader(parquet_path, features=features, cache_dir=cache_dir).read()
    _check_parquet_dataset(dataset, expected_features)


@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_dataset_from_parquet_split(split, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = ParquetDatasetReader(parquet_path, cache_dir=cache_dir, split=split).read()
    _check_parquet_dataset(dataset, expected_features)
    assert dataset.split == split if split else "train"


@pytest.mark.parametrize("path_type", [str, list])
def test_dataset_from_parquet_path_type(path_type, parquet_path, tmp_path):
    if issubclass(path_type, str):
        path = parquet_path
    elif issubclass(path_type, list):
        path = [parquet_path]
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = ParquetDatasetReader(path, cache_dir=cache_dir).read()
    _check_parquet_dataset(dataset, expected_features)


def _check_parquet_datasetdict(dataset_dict, expected_features, splits=("train",)):
    assert isinstance(dataset_dict, DatasetDict)
    for split in splits:
        dataset = dataset_dict[split]
        assert dataset.num_rows == 4
        assert dataset.num_columns == 3
        assert dataset.column_names == ["col_1", "col_2", "col_3"]
        for feature, expected_dtype in expected_features.items():
            assert dataset.features[feature].dtype == expected_dtype


@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_parquet_datasetdict_reader_keep_in_memory(keep_in_memory, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = ParquetDatasetReader(
            {"train": parquet_path}, cache_dir=cache_dir, keep_in_memory=keep_in_memory
        ).read()
    _check_parquet_datasetdict(dataset, expected_features)


@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_1": "string", "col_2": "int64", "col_3": "float64"},
        {"col_1": "string", "col_2": "string", "col_3": "string"},
        {"col_1": "int32", "col_2": "int32", "col_3": "int32"},
        {"col_1": "float32", "col_2": "float32", "col_3": "float32"},
    ],
)
def test_parquet_datasetdict_reader_features(features, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = ParquetDatasetReader({"train": parquet_path}, features=features, cache_dir=cache_dir).read()
    _check_parquet_datasetdict(dataset, expected_features)


@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_parquet_datasetdict_reader_split(split, parquet_path, tmp_path):
    if split:
        path = {split: parquet_path}
    else:
        split = "train"
        path = {"train": parquet_path, "test": parquet_path}
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = ParquetDatasetReader(path, cache_dir=cache_dir).read()
    _check_parquet_datasetdict(dataset, expected_features, splits=list(path.keys()))
    assert all(dataset[split].split == split for split in path.keys())


def test_parquet_write(dataset, tmp_path):
    writer = ParquetDatasetWriter(dataset, tmp_path / "foo.parquet")
    assert writer.write() > 0
    pf = pq.ParquetFile(tmp_path / "foo.parquet")
    output_table = pf.read()
    assert dataset.data.table == output_table


def test_dataset_to_parquet_keeps_features(shared_datadir, tmp_path):
    image_path = str(shared_datadir / "test_image_rgb.jpg")
    data = {"image": [image_path]}
    features = Features({"image": Image()})
    dataset = Dataset.from_dict(data, features=features)
    writer = ParquetDatasetWriter(dataset, tmp_path / "foo.parquet")
    assert writer.write() > 0

    reloaded_dataset = Dataset.from_parquet(str(tmp_path / "foo.parquet"))
    assert dataset.features == reloaded_dataset.features

    reloaded_iterable_dataset = ParquetDatasetReader(str(tmp_path / "foo.parquet"), streaming=True).read()
    assert dataset.features == reloaded_iterable_dataset.features


@pytest.mark.parametrize(
    "feature, expected",
    [
        (Features({"foo": Value("int32")}), None),
        (Features({"image": Image(), "foo": Value("int32")}), config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS),
        (Features({"nested": Sequence(Audio())}), config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS),
    ],
)
def test_get_writer_batch_size(feature, expected):
    assert get_writer_batch_size(feature) == expected
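# Round-trip sketch with the reader/writer pair under test (hypothetical file name):
#   ParquetDatasetWriter(my_dataset, "out.parquet").write()
#   reloaded = ParquetDatasetReader("out.parquet").read()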
| 319 |
import json
import logging
import math
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from datasets import Dataset, load_dataset
import transformers
from transformers import (
CONFIG_MAPPING,
MODEL_FOR_MASKED_LM_MAPPING,
AutoConfig,
AutoModelForMaskedLM,
AutoTokenizer,
DataCollatorForWholeWordMask,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint, is_main_process
logger = logging.getLogger(__name__)
MODEL_CONFIG_CLASSES = list(MODEL_FOR_MASKED_LM_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune, or train from scratch.
    """

    model_name_or_path: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "The model checkpoint for weights initialization.Don't set if you want to train a model from scratch."
            )
        },
    )
    model_type: Optional[str] = field(
        default=None,
        metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(MODEL_TYPES)},
    )
    config_overrides: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "Override some existing default config settings when a model is trained from scratch. Example: "
                "n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index"
            )
        },
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    use_fast_tokenizer: bool = field(
        default=True,
        metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."},
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )

    def __post_init__(self):
        if self.config_overrides is not None and (self.config_name is not None or self.model_name_or_path is not None):
            raise ValueError(
                "--config_overrides can't be used in combination with --config_name or --model_name_or_path"
            )
@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    dataset_name: Optional[str] = field(
        default=None, metadata={"help": "The name of the dataset to use (via the datasets library)."}
    )
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    train_file: Optional[str] = field(default=None, metadata={"help": "The input training data file (a text file)."})
    validation_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."},
    )
    train_ref_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input train ref data file for whole word masking in Chinese."},
    )
    validation_ref_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input validation ref data file for whole word masking in Chinese."},
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
    validation_split_percentage: Optional[int] = field(
        default=5,
        metadata={
            "help": "The percentage of the train set used as validation set in case there's no validation split"
        },
    )
    max_seq_length: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated. Default to the max input length of the model."
            )
        },
    )
    preprocessing_num_workers: Optional[int] = field(
        default=None,
        metadata={"help": "The number of processes to use for the preprocessing."},
    )
    mlm_probability: float = field(
        default=0.15, metadata={"help": "Ratio of tokens to mask for masked language modeling loss"}
    )
    pad_to_max_length: bool = field(
        default=False,
        metadata={
            "help": (
                "Whether to pad all samples to `max_seq_length`. "
                "If False, will pad the samples dynamically when batching to the maximum length in the batch."
            )
        },
    )

    def __post_init__(self):
        if self.train_file is not None:
            extension = self.train_file.split(".")[-1]
            assert extension in ["csv", "json", "txt"], "`train_file` should be a csv, a json or a txt file."
        if self.validation_file is not None:
            extension = self.validation_file.split(".")[-1]
            assert extension in ["csv", "json", "txt"], "`validation_file` should be a csv, a json or a txt file."
def add_chinese_references(dataset, ref_file):
    with open(ref_file, "r", encoding="utf-8") as f:
        refs = [json.loads(line) for line in f.read().splitlines() if (len(line) > 0 and not line.isspace())]
    assert len(dataset) == len(refs)

    dataset_dict = {c: dataset[c] for c in dataset.column_names}
    dataset_dict["chinese_ref"] = refs
    return Dataset.from_dict(dataset_dict)
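# The ref file carries, one JSON list per line, the sub-token positions that belong to the same
# Chinese word; DataCollatorForWholeWordMask uses the resulting "chinese_ref" column to mask
# whole words instead of individual word pieces.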
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.

    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to overcome."
            )
        elif last_checkpoint is not None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )
    logger.setLevel(logging.INFO if is_main_process(training_args.local_rank) else logging.WARN)

    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank):
        transformers.utils.logging.set_verbosity_info()
        transformers.utils.logging.enable_default_handler()
        transformers.utils.logging.enable_explicit_format()
    logger.info("Training/evaluation parameters %s", training_args)

    # Set seed before initializing model.
    set_seed(training_args.seed)
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
#
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
    if data_args.dataset_name is not None:
        # Downloading and loading a dataset from the hub.
        datasets = load_dataset(data_args.dataset_name, data_args.dataset_config_name)
        if "validation" not in datasets.keys():
            datasets["validation"] = load_dataset(
                data_args.dataset_name,
                data_args.dataset_config_name,
                split=f"train[:{data_args.validation_split_percentage}%]",
            )
            datasets["train"] = load_dataset(
                data_args.dataset_name,
                data_args.dataset_config_name,
                split=f"train[{data_args.validation_split_percentage}%:]",
            )
    else:
        data_files = {}
        if data_args.train_file is not None:
            data_files["train"] = data_args.train_file
        if data_args.validation_file is not None:
            data_files["validation"] = data_args.validation_file
        extension = data_args.train_file.split(".")[-1]
        if extension == "txt":
            extension = "text"
        datasets = load_dataset(extension, data_files=data_files)
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config_kwargs = {
        "cache_dir": model_args.cache_dir,
        "revision": model_args.model_revision,
        "use_auth_token": True if model_args.use_auth_token else None,
    }
    if model_args.config_name:
        config = AutoConfig.from_pretrained(model_args.config_name, **config_kwargs)
    elif model_args.model_name_or_path:
        config = AutoConfig.from_pretrained(model_args.model_name_or_path, **config_kwargs)
    else:
        config = CONFIG_MAPPING[model_args.model_type]()
        logger.warning("You are instantiating a new config instance from scratch.")
        if model_args.config_overrides is not None:
            logger.info(f"Overriding config: {model_args.config_overrides}")
            config.update_from_string(model_args.config_overrides)
            logger.info(f"New config: {config}")

    tokenizer_kwargs = {
        "cache_dir": model_args.cache_dir,
        "use_fast": model_args.use_fast_tokenizer,
        "revision": model_args.model_revision,
        "use_auth_token": True if model_args.use_auth_token else None,
    }
    if model_args.tokenizer_name:
        tokenizer = AutoTokenizer.from_pretrained(model_args.tokenizer_name, **tokenizer_kwargs)
    elif model_args.model_name_or_path:
        tokenizer = AutoTokenizer.from_pretrained(model_args.model_name_or_path, **tokenizer_kwargs)
    else:
        raise ValueError(
            "You are instantiating a new tokenizer from scratch. This is not supported by this script."
            "You can do it from another script, save it, and load it from here, using --tokenizer_name."
        )

    if model_args.model_name_or_path:
        model = AutoModelForMaskedLM.from_pretrained(
            model_args.model_name_or_path,
            from_tf=bool(".ckpt" in model_args.model_name_or_path),
            config=config,
            cache_dir=model_args.cache_dir,
            revision=model_args.model_revision,
            use_auth_token=True if model_args.use_auth_token else None,
        )
    else:
        logger.info("Training new model from scratch")
        model = AutoModelForMaskedLM.from_config(config)

    model.resize_token_embeddings(len(tokenizer))
# Preprocessing the datasets.
# First we tokenize all the texts.
    if training_args.do_train:
        column_names = datasets["train"].column_names
    else:
        column_names = datasets["validation"].column_names
    text_column_name = "text" if "text" in column_names else column_names[0]

    padding = "max_length" if data_args.pad_to_max_length else False

    def tokenize_function(examples):
        # Remove empty lines
        examples["text"] = [line for line in examples["text"] if len(line) > 0 and not line.isspace()]
        return tokenizer(examples["text"], padding=padding, truncation=True, max_length=data_args.max_seq_length)

    tokenized_datasets = datasets.map(
        tokenize_function,
        batched=True,
        num_proc=data_args.preprocessing_num_workers,
        remove_columns=[text_column_name],
        load_from_cache_file=not data_args.overwrite_cache,
    )

    # Add the chinese references if provided
    if data_args.train_ref_file is not None:
        tokenized_datasets["train"] = add_chinese_references(tokenized_datasets["train"], data_args.train_ref_file)
    if data_args.validation_ref_file is not None:
        tokenized_datasets["validation"] = add_chinese_references(
            tokenized_datasets["validation"], data_args.validation_ref_file
        )
    # If we have ref files, need to avoid it removed by trainer
    has_ref = data_args.train_ref_file or data_args.validation_ref_file
    if has_ref:
        training_args.remove_unused_columns = False

    # Data collator
    # This one will take care of randomly masking the tokens.
    data_collator = DataCollatorForWholeWordMask(tokenizer=tokenizer, mlm_probability=data_args.mlm_probability)

    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=tokenized_datasets["train"] if training_args.do_train else None,
        eval_dataset=tokenized_datasets["validation"] if training_args.do_eval else None,
        tokenizer=tokenizer,
        data_collator=data_collator,
    )
# Training
if training_args.do_train:
if last_checkpoint is not None:
A: Optional[int] = last_checkpoint
elif model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path ):
A: str = model_args.model_name_or_path
else:
A: List[str] = None
A: str = trainer.train(resume_from_checkpoint=__lowercase )
trainer.save_model() # Saves the tokenizer too for easy upload
A: Union[str, Any] = os.path.join(training_args.output_dir , '''train_results.txt''' )
if trainer.is_world_process_zero():
with open(__lowercase , '''w''' ) as writer:
logger.info('''***** Train results *****''' )
for key, value in sorted(train_result.metrics.items() ):
logger.info(F""" {key} = {value}""" )
writer.write(F"""{key} = {value}\n""" )
# Need to save the state, since Trainer.save_model saves only the tokenizer with the model
trainer.state.save_to_json(os.path.join(training_args.output_dir , '''trainer_state.json''' ) )
# Evaluation
A: Optional[int] = {}
if training_args.do_eval:
logger.info('''*** Evaluate ***''' )
A: Optional[Any] = trainer.evaluate()
A: Union[str, Any] = math.exp(eval_output['''eval_loss'''] )
A: Dict = perplexity
A: Any = os.path.join(training_args.output_dir , '''eval_results_mlm_wwm.txt''' )
if trainer.is_world_process_zero():
with open(__lowercase , '''w''' ) as writer:
logger.info('''***** Eval results *****''' )
for key, value in sorted(results.items() ):
logger.info(F""" {key} = {value}""" )
writer.write(F"""{key} = {value}\n""" )
return results
def SCREAMING_SNAKE_CASE( __lowercase ) -> List[Any]:
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
| 319 | 1 |
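# --- Illustrative sketch (not part of the original script) ---
# A minimal, self-contained demonstration of the whole-word-masking collator the
# script above wires into its Trainer. The checkpoint name "bert-base-uncased"
# is an assumption, chosen only because WWM needs a WordPiece-style tokenizer.
from transformers import AutoTokenizer, DataCollatorForWholeWordMask

tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
collator = DataCollatorForWholeWordMask(tokenizer=tokenizer, mlm_probability=0.15)

# Tokenize one line the same way tokenize_function does, then let the collator
# mask whole words and build MLM labels (-100 everywhere except masked positions).
features = [tokenizer("The quick brown fox jumps over the lazy dog.", truncation=True, max_length=32)]
batch = collator(features)
print(batch["input_ids"][0])
print(batch["labels"][0])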
"""simple docstring"""
from __future__ import annotations
SCREAMING_SNAKE_CASE = "Muhammad Umer Farooq"
SCREAMING_SNAKE_CASE = "MIT"
SCREAMING_SNAKE_CASE = "1.0.0"
SCREAMING_SNAKE_CASE = "Muhammad Umer Farooq"
SCREAMING_SNAKE_CASE = "[email protected]"
SCREAMING_SNAKE_CASE = "Alpha"
import re
from html.parser import HTMLParser
from urllib import parse
import requests
class Parser(HTMLParser):
    def __init__(self, domain: str) -> None:
        super().__init__()
        self.urls: list[str] = []
        self.domain = domain

    def handle_starttag(self, tag: str, attrs: list[tuple[str, str | None]]) -> None:
        # Only anchor tags carry hyperlinks.
        if tag == "a":
            # Check the list of defined attributes.
            for name, value in attrs:
                # If href is defined, and neither empty nor "#", keep it.
                if name == "href" and value != "#" and value != "":
                    # If not already in urls.
                    if value not in self.urls:
                        url = parse.urljoin(self.domain, value)
                        self.urls.append(url)


# Get main domain name (example.com)
def get_domain_name(url: str) -> str:
    return ".".join(get_sub_domain_name(url).split(".")[-2:])


# Get sub domain name (sub.example.com)
def get_sub_domain_name(url: str) -> str:
    return parse.urlparse(url).netloc


def emails_from_url(url: str = "https://github.com") -> list[str]:
    domain = get_domain_name(url)

    # Initialize the parser
    parser = Parser(url)

    try:
        # Open URL
        r = requests.get(url)

        # pass the raw HTML to the parser to get links
        parser.feed(r.text)

        # Get links and loop through
        valid_emails = set()
        for link in parser.urls:
            # open URL.
            try:
                read = requests.get(link)
                # Get the valid email.
                emails = re.findall("[a-zA-Z0-9]+@" + domain, read.text)
                # If not in list then append it.
                for email in emails:
                    valid_emails.add(email)
            except ValueError:
                pass
    except ValueError:
        raise SystemExit(1)

    # Finally return a sorted list of email addresses with no duplicates.
    return sorted(valid_emails)


if __name__ == "__main__":
    emails = emails_from_url("https://github.com")
print(f'{len(emails)} emails found:')
print("\n".join(sorted(emails)))
| 230 |
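# --- Illustrative sketch (not part of the original script) ---
# The crawler's email matching in isolation: the regex only accepts an
# alphanumeric local part directly followed by the crawled domain. The
# addresses below are made up for the demonstration.
import re

domain = "github.com"
html = 'Write to <a href="mailto:support@github.com">support</a> or to octocat@github.com for help.'
print(re.findall("[a-zA-Z0-9]+@" + domain, html))  # ['support@github.com', 'octocat@github.com']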
"""simple docstring"""
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class VisionTextDualEncoderProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "AutoImageProcessor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, image_processor, tokenizer):
        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        # Forward everything to the tokenizer's batch_decode.
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        # Forward everything to the tokenizer's decode.
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        return ["input_ids", "attention_mask", "pixel_values"]
| 230 | 1 |
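# --- Illustrative usage sketch (not part of the original module) ---
# How the processor above combines a tokenizer and an image processor; the two
# checkpoint names are assumptions picked only so the sketch is runnable.
import numpy as np
from PIL import Image
from transformers import AutoImageProcessor, AutoTokenizer

processor = VisionTextDualEncoderProcessor(
    AutoImageProcessor.from_pretrained("openai/clip-vit-base-patch32"),
    AutoTokenizer.from_pretrained("bert-base-uncased"),
)
image = Image.fromarray(np.zeros((224, 224, 3), dtype=np.uint8))
batch = processor(text=["a photo of a cat"], images=image, return_tensors="pt")
print(sorted(batch.keys()))  # ['attention_mask', 'input_ids', 'pixel_values']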
"""simple docstring"""
import argparse
import glob
import logging
import os
import time
from argparse import Namespace
import numpy as np
import torch
from lightning_base import BaseTransformer, add_generic_args, generic_train
from torch.utils.data import DataLoader, TensorDataset
from transformers import glue_compute_metrics as compute_metrics
from transformers import glue_convert_examples_to_features as convert_examples_to_features
from transformers import glue_output_modes, glue_tasks_num_labels
from transformers import glue_processors as processors
logger = logging.getLogger(__name__)
class GLUETransformer(BaseTransformer):
    mode = "sequence-classification"

    def __init__(self, hparams):
        if type(hparams) == dict:
            hparams = Namespace(**hparams)
        hparams.glue_output_mode = glue_output_modes[hparams.task]
        num_labels = glue_tasks_num_labels[hparams.task]

        super().__init__(hparams, num_labels, self.mode)
    def forward(self, **inputs):
        return self.model(**inputs)

    def training_step(self, batch, batch_idx):
        inputs = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}

        if self.config.model_type not in ["distilbert", "bart"]:
            inputs["token_type_ids"] = batch[2] if self.config.model_type in ["bert", "xlnet", "albert"] else None

        outputs = self(**inputs)
        loss = outputs[0]

        lr_scheduler = self.trainer.lr_schedulers[0]["scheduler"]
        tensorboard_logs = {"loss": loss, "rate": lr_scheduler.get_last_lr()[-1]}
        return {"loss": loss, "log": tensorboard_logs}

    def prepare_data(self):
        args = self.hparams
        processor = processors[args.task]()
        self.labels = processor.get_labels()

        for mode in ["train", "dev"]:
            cached_features_file = self._feature_file(mode)
            if os.path.exists(cached_features_file) and not args.overwrite_cache:
                logger.info("Loading features from cached file %s", cached_features_file)
            else:
                logger.info("Creating features from dataset file at %s", args.data_dir)
                examples = (
                    processor.get_dev_examples(args.data_dir)
                    if mode == "dev"
                    else processor.get_train_examples(args.data_dir)
                )
                features = convert_examples_to_features(
                    examples,
                    self.tokenizer,
                    max_length=args.max_seq_length,
                    label_list=self.labels,
                    output_mode=args.glue_output_mode,
                )
                logger.info("Saving features into cached file %s", cached_features_file)
                torch.save(features, cached_features_file)
    def get_dataloader(self, mode, batch_size, shuffle=False) -> DataLoader:
        mode = "dev" if mode == "test" else mode
        cached_features_file = self._feature_file(mode)
        logger.info("Loading features from cached file %s", cached_features_file)
        features = torch.load(cached_features_file)
        all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
        all_attention_mask = torch.tensor([f.attention_mask for f in features], dtype=torch.long)
        all_token_type_ids = torch.tensor([f.token_type_ids for f in features], dtype=torch.long)
        if self.hparams.glue_output_mode == "classification":
            all_labels = torch.tensor([f.label for f in features], dtype=torch.long)
        elif self.hparams.glue_output_mode == "regression":
            all_labels = torch.tensor([f.label for f in features], dtype=torch.float)

        return DataLoader(
            TensorDataset(all_input_ids, all_attention_mask, all_token_type_ids, all_labels),
            batch_size=batch_size,
            shuffle=shuffle,
        )

    def validation_step(self, batch, batch_idx):
        inputs = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}

        if self.config.model_type not in ["distilbert", "bart"]:
            inputs["token_type_ids"] = batch[2] if self.config.model_type in ["bert", "xlnet", "albert"] else None

        outputs = self(**inputs)
        tmp_eval_loss, logits = outputs[:2]
        preds = logits.detach().cpu().numpy()
        out_label_ids = inputs["labels"].detach().cpu().numpy()

        return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids}

    def _eval_end(self, outputs) -> tuple:
        val_loss_mean = torch.stack([x["val_loss"] for x in outputs]).mean().detach().cpu().item()
        preds = np.concatenate([x["pred"] for x in outputs], axis=0)

        if self.hparams.glue_output_mode == "classification":
            preds = np.argmax(preds, axis=1)
        elif self.hparams.glue_output_mode == "regression":
            preds = np.squeeze(preds)

        out_label_ids = np.concatenate([x["target"] for x in outputs], axis=0)
        out_label_list = [[] for _ in range(out_label_ids.shape[0])]
        preds_list = [[] for _ in range(out_label_ids.shape[0])]

        results = {**{"val_loss": val_loss_mean}, **compute_metrics(self.hparams.task, preds, out_label_ids)}

        ret = dict(results.items())
        ret["log"] = results
        return ret, preds_list, out_label_list
    def validation_epoch_end(self, outputs: list) -> dict:
        ret, preds, targets = self._eval_end(outputs)
        logs = ret["log"]
        return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs}

    def test_epoch_end(self, outputs) -> dict:
        ret, predictions, targets = self._eval_end(outputs)
        logs = ret["log"]
        # `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss`
        return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs}

    @staticmethod
    def add_model_specific_args(parser, root_dir):
        BaseTransformer.add_model_specific_args(parser, root_dir)
        parser.add_argument(
            "--max_seq_length",
            default=128,
            type=int,
            help=(
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            ),
        )
        parser.add_argument(
            "--task",
            default="",
            type=str,
            required=True,
            help="The GLUE task to run",
        )
        parser.add_argument(
            "--gpus",
            default=0,
            type=int,
            help="The number of GPUs allocated for this, it is by default 0 meaning none",
        )
        parser.add_argument(
            "--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets"
        )
        return parser
def main():
    parser = argparse.ArgumentParser()
    add_generic_args(parser, os.getcwd())
    parser = GLUETransformer.add_model_specific_args(parser, os.getcwd())
    args = parser.parse_args()

    # If output_dir not provided, a folder will be generated in pwd
    if args.output_dir is None:
        args.output_dir = os.path.join(
            "./results",
            f"{args.task}_{time.strftime('%Y%m%d_%H%M%S')}",
        )
        os.makedirs(args.output_dir)

    model = GLUETransformer(args)
    trainer = generic_train(model, args)

    # Optionally, predict on dev set and write to output_dir
    if args.do_predict:
        checkpoints = sorted(glob.glob(os.path.join(args.output_dir, "checkpoint-epoch=*.ckpt"), recursive=True))
        model = model.load_from_checkpoint(checkpoints[-1])
        return trainer.test(model)
if __name__ == "__main__":
main()
| 132 |
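# --- Illustrative sketch (not part of the original script) ---
# What the GLUE lookup tables used above resolve to for one concrete task;
# "mrpc" is just an example task name.
from transformers import glue_output_modes, glue_tasks_num_labels
from transformers import glue_processors as processors

processor = processors["mrpc"]()
print(processor.get_labels())          # ['0', '1']
print(glue_output_modes["mrpc"])       # 'classification'
print(glue_tasks_num_labels["mrpc"])   # 2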
"""simple docstring"""
from typing import Optional, Tuple
import jax
import jax.numpy as jnp
from flax import linen as nn
from flax.core.frozen_dict import FrozenDict
from transformers import CLIPConfig, FlaxPreTrainedModel
from transformers.models.clip.modeling_flax_clip import FlaxCLIPVisionModule
def jax_cosine_distance(emb_1, emb_2, eps=1e-12):
    norm_emb_1 = jnp.divide(emb_1.T, jnp.clip(jnp.linalg.norm(emb_1, axis=1), a_min=eps)).T
    norm_emb_2 = jnp.divide(emb_2.T, jnp.clip(jnp.linalg.norm(emb_2, axis=1), a_min=eps)).T
    return jnp.matmul(norm_emb_1, norm_emb_2.T)
class FlaxStableDiffusionSafetyCheckerModule(nn.Module):
    config: CLIPConfig
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        self.vision_model = FlaxCLIPVisionModule(self.config.vision_config)
        self.visual_projection = nn.Dense(self.config.projection_dim, use_bias=False, dtype=self.dtype)

        self.concept_embeds = self.param(
            "concept_embeds", jax.nn.initializers.ones, (17, self.config.projection_dim)
        )
        self.special_care_embeds = self.param(
            "special_care_embeds", jax.nn.initializers.ones, (3, self.config.projection_dim)
        )

        self.concept_embeds_weights = self.param("concept_embeds_weights", jax.nn.initializers.ones, (17,))
        self.special_care_embeds_weights = self.param("special_care_embeds_weights", jax.nn.initializers.ones, (3,))

    def __call__(self, clip_input):
        pooled_output = self.vision_model(clip_input)[1]
        image_embeds = self.visual_projection(pooled_output)

        special_cos_dist = jax_cosine_distance(image_embeds, self.special_care_embeds)
        cos_dist = jax_cosine_distance(image_embeds, self.concept_embeds)

        # increase this value to create a stronger `nsfw` filter
        # at the cost of increasing the possibility of filtering benign image inputs
        adjustment = 0.0

        special_scores = special_cos_dist - self.special_care_embeds_weights[None, :] + adjustment
        special_scores = jnp.round(special_scores, 3)
        is_special_care = jnp.any(special_scores > 0, axis=1, keepdims=True)
        # Use a lower threshold if an image has any special care concept
        special_adjustment = is_special_care * 0.01

        concept_scores = cos_dist - self.concept_embeds_weights[None, :] + special_adjustment
        concept_scores = jnp.round(concept_scores, 3)
        has_nsfw_concepts = jnp.any(concept_scores > 0, axis=1)

        return has_nsfw_concepts
class FlaxStableDiffusionSafetyChecker(FlaxPreTrainedModel):
    config_class = CLIPConfig
    main_input_name = "clip_input"
    module_class = FlaxStableDiffusionSafetyCheckerModule

    def __init__(
        self,
        config: CLIPConfig,
        input_shape: Optional[Tuple] = None,
        seed: int = 0,
        dtype: jnp.dtype = jnp.float32,
        _do_init: bool = True,
        **kwargs,
    ):
        if input_shape is None:
            input_shape = (1, 224, 224, 3)
        module = self.module_class(config=config, dtype=dtype, **kwargs)
        super().__init__(config, module, input_shape=input_shape, seed=seed, dtype=dtype, _do_init=_do_init)

    def init_weights(self, rng, input_shape: Tuple, params: FrozenDict = None) -> FrozenDict:
        clip_input = jax.random.normal(rng, input_shape)

        params_rng, dropout_rng = jax.random.split(rng)
        rngs = {"params": params_rng, "dropout": dropout_rng}

        random_params = self.module.init(rngs, clip_input)["params"]

        return random_params

    def __call__(self, clip_input, params: dict = None):
        clip_input = jnp.transpose(clip_input, (0, 2, 3, 1))

        return self.module.apply(
            {"params": params or self.params},
            jnp.array(clip_input, dtype=jnp.float32),
            rngs={},
        )
| 132 | 1 |
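# --- Illustrative sketch (not part of the original module) ---
# The cosine-similarity helper above on two tiny embedding matrices: rows are
# L2-normalized, so matching directions score 1.0 and orthogonal ones 0.0.
import jax.numpy as jnp

emb_1 = jnp.array([[1.0, 0.0], [0.0, 2.0]])
emb_2 = jnp.array([[3.0, 0.0]])
print(jax_cosine_distance(emb_1, emb_2))  # [[1.], [0.]]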
'''simple docstring'''
import math
import qiskit
def quantum_full_adder(input_1: int = 1, input_2: int = 1, carry_in: int = 1) -> qiskit.result.counts.Counts:
    if isinstance(input_1, str) or isinstance(input_2, str) or isinstance(carry_in, str):
        raise TypeError("inputs must be integers.")

    if (input_1 < 0) or (input_2 < 0) or (carry_in < 0):
        raise ValueError("inputs must be positive.")

    if (
        (math.floor(input_1) != input_1)
        or (math.floor(input_2) != input_2)
        or (math.floor(carry_in) != carry_in)
    ):
        raise ValueError("inputs must be exact integers.")

    if (input_1 > 2) or (input_2 > 2) or (carry_in > 2):
        raise ValueError("inputs must be less or equal to 2.")

    # build registers
    qr = qiskit.QuantumRegister(4, "qr")
    cr = qiskit.ClassicalRegister(2, "cr")
    # list the entries
    entry = [input_1, input_2, carry_in]

    quantum_circuit = qiskit.QuantumCircuit(qr, cr)

    for i in range(0, 3):
        if entry[i] == 2:
            quantum_circuit.h(i)  # for hadamard entries
        elif entry[i] == 1:
            quantum_circuit.x(i)  # for 1 entries
        elif entry[i] == 0:
            quantum_circuit.i(i)  # for 0 entries

    # build the circuit
    quantum_circuit.ccx(0, 1, 3)  # ccx = toffoli gate
    quantum_circuit.cx(0, 1)
    quantum_circuit.ccx(1, 2, 3)

    quantum_circuit.cx(1, 2)
    quantum_circuit.cx(0, 1)
    quantum_circuit.measure([2, 3], cr)  # measure the last two qbits

    backend = qiskit.Aer.get_backend("aer_simulator")
    job = qiskit.execute(quantum_circuit, backend, shots=1000)

    return job.result().get_counts(quantum_circuit)
if __name__ == "__main__":
print(f'''Total sum count for state is: {quantum_full_adder(1, 1, 1)}''')
| 361 |
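# --- Illustrative sketch (not part of the original script) ---
# Exhaustively checking the adder on classical 0/1 inputs. Each count key is
# the measured two-bit string "carry_out sum"; with classical inputs the
# distribution collapses onto a single outcome. Requires qiskit with Aer.
for a in (0, 1):
    for b in (0, 1):
        for c in (0, 1):
            counts = quantum_full_adder(a, b, c)
            print(a, b, c, "->", max(counts, key=counts.get))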
'''simple docstring'''
import inspect
import unittest
from transformers import ViTMSNConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMSNForImageClassification, ViTMSNModel
from transformers.models.vit_msn.modeling_vit_msn import VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class ViTMSNModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope

        # in ViT MSN, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return ViTMSNConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = ViTMSNModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = ViTMSNForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        print(f"Pixel and labels shape: {pixel_values.shape}, {labels.shape}")
        print(f"Labels: {labels}")
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = ViTMSNForImageClassification(config)
        model.to(torch_device)
        model.eval()

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class ViTMSNModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (ViTMSNModel, ViTMSNForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": ViTMSNModel, "image-classification": ViTMSNForImageClassification}
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = ViTMSNModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTMSNConfig, has_text_modality=False, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="ViTMSN does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass
    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ViTMSNModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
# We will verify our results on an image of cute cats
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class ViTMSNModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return ViTImageProcessor.from_pretrained("facebook/vit-msn-small") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        torch.manual_seed(2)
        model = ViTMSNForImageClassification.from_pretrained("facebook/vit-msn-small").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-0.0803, -0.4454, -0.2375]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
| 5 | 0 |
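# --- Illustrative sketch (not part of the original test file) ---
# The sequence length the tester above asserts on: one token per patch plus
# the [CLS] token, using the tester's default 30x30 images and 2x2 patches.
image_size, patch_size = 30, 2
num_patches = (image_size // patch_size) ** 2
print(num_patches + 1)  # 226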
"""simple docstring"""
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING, Dict, Optional
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.logging import get_logger
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import jax
import jaxlib
logger = get_logger()

DEVICE_MAPPING: Optional[dict] = None
class SCREAMING_SNAKE_CASE ( TensorFormatter[Mapping, "jax.Array", Mapping] ):
"""simple docstring"""
def __init__( self : Any ,lowercase_ : str=None ,lowercase_ : Union[str, Any]=None ,**lowercase_ : List[Any] ):
super().__init__(features=lowercase_ )
import jax
from jaxlib.xla_client import Device
if isinstance(lowercase_ ,lowercase_ ):
raise ValueError(
F'Expected {device} to be a `str` not {type(lowercase_ )}, as `jaxlib.xla_extension.Device` '
'''is not serializable neither with `pickle` nor with `dill`. Instead you can surround '''
'''the device with `str()` to get its string identifier that will be internally mapped '''
'''to the actual `jaxlib.xla_extension.Device`.''' )
lowerCAmelCase__ : Optional[Any] = device if isinstance(lowercase_ ,lowercase_ ) else str(jax.devices()[0] )
# using global variable since `jaxlib.xla_extension.Device` is not serializable neither
# with `pickle` nor with `dill`, so we need to use a global variable instead
global DEVICE_MAPPING
if DEVICE_MAPPING is None:
lowerCAmelCase__ : Dict = self._map_devices_to_str()
if self.device not in list(DEVICE_MAPPING.keys() ):
logger.warning(
F'Device with string identifier {self.device} not listed among the available '
F'devices: {list(DEVICE_MAPPING.keys() )}, so falling back to the default '
F'device: {str(jax.devices()[0] )}.' )
lowerCAmelCase__ : List[str] = str(jax.devices()[0] )
lowerCAmelCase__ : Optional[int] = jnp_array_kwargs
@staticmethod
def __lowerCAmelCase ( ):
import jax
return {str(lowercase_ ): device for device in jax.devices()}
    def _consolidate(self, column):
        import jax
        import jax.numpy as jnp

        if isinstance(column, list) and column:
            if all(
                isinstance(x, jax.Array) and x.shape == column[0].shape and x.dtype == column[0].dtype for x in column
            ):
                return jnp.stack(column, axis=0)
        return column
    def _tensorize(self, value):
        import jax
        import jax.numpy as jnp

        if isinstance(value, (str, bytes, type(None))):
            return value
        elif isinstance(value, (np.character, np.ndarray)) and np.issubdtype(value.dtype, np.character):
            return value.tolist()

        default_dtype = {}

        if isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.integer):
            # the default int precision depends on the jax config
            # see https://jax.readthedocs.io/en/latest/notebooks/Common_Gotchas_in_JAX.html#double-64bit-precision
            if jax.config.jax_enable_x64:
                default_dtype = {"dtype": jnp.int64}
            else:
                default_dtype = {"dtype": jnp.int32}
        elif isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.floating):
            default_dtype = {"dtype": jnp.float32}
        elif config.PIL_AVAILABLE and "PIL" in sys.modules:
            import PIL.Image

            if isinstance(value, PIL.Image.Image):
                value = np.asarray(value)

        # using global variable since `jaxlib.xla_extension.Device` is not serializable neither
        # with `pickle` nor with `dill`, so we need to use a global variable instead
        global DEVICE_MAPPING
        if DEVICE_MAPPING is None:
            DEVICE_MAPPING = self._map_devices_to_str()

        with jax.default_device(DEVICE_MAPPING[self.device]):
            # calling jnp.array on a np.ndarray does copy the data
            # see https://github.com/google/jax/issues/4486
            return jnp.array(value, **{**default_dtype, **self.jnp_array_kwargs})
    def _recursive_tensorize(self, data_struct):
        import jax

        # support for torch, tf, jax etc.
        if config.TORCH_AVAILABLE and "torch" in sys.modules:
            import torch

            if isinstance(data_struct, torch.Tensor):
                return self._tensorize(data_struct.detach().cpu().numpy()[()])
        if hasattr(data_struct, "__array__") and not isinstance(data_struct, jax.Array):
            data_struct = data_struct.__array__()
        # support for nested types like struct of list of struct
        if isinstance(data_struct, np.ndarray):
            if data_struct.dtype == object:  # jax arrays cannot be instantied from an array of objects
                return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
        elif isinstance(data_struct, (list, tuple)):
            return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
        return self._tensorize(data_struct)

    def recursive_tensorize(self, data_struct: dict):
        return map_nested(self._recursive_tensorize, data_struct, map_list=False)
    def format_row(self, pa_table: pa.Table) -> Mapping:
        row = self.numpy_arrow_extractor().extract_row(pa_table)
        row = self.python_features_decoder.decode_row(row)
        return self.recursive_tensorize(row)

    def format_column(self, pa_table: pa.Table) -> "jax.Array":
        column = self.numpy_arrow_extractor().extract_column(pa_table)
        column = self.python_features_decoder.decode_column(column, pa_table.column_names[0])
        column = self.recursive_tensorize(column)
        column = self._consolidate(column)
        return column

    def format_batch(self, pa_table: pa.Table) -> Mapping:
        batch = self.numpy_arrow_extractor().extract_batch(pa_table)
        batch = self.python_features_decoder.decode_batch(batch)
        batch = self.recursive_tensorize(batch)
        for column_name in batch:
            batch[column_name] = self._consolidate(batch[column_name])
        return batch
| 106 |
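# --- Illustrative sketch (not part of the original module) ---
# The dtype defaulting rule _tensorize applies to integer columns: 64-bit ids
# only when JAX's x64 mode is on, otherwise int32.
import jax
import jax.numpy as jnp
import numpy as np

value = np.array([1, 2, 3])
dtype = jnp.int64 if jax.config.jax_enable_x64 else jnp.int32
print(jnp.array(value, dtype=dtype).dtype)  # int32 unless JAX_ENABLE_X64 is set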
"""simple docstring"""
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import torch
from datasets import load_dataset
from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor
from torchvision.transforms.functional import InterpolationMode
import transformers
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
ViTImageProcessor,
ViTMAEConfig,
ViTMAEForPreTraining,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
logger = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('''4.31.0''')
require_version('''datasets>=1.8.0''', '''To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt''')
@dataclass
class DataTrainingArguments:
    dataset_name: Optional[str] = field(
        default="cifar10", metadata={"help": "Name of a dataset from the datasets package"}
    )
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    image_column_name: Optional[str] = field(
        default=None, metadata={"help": "The column name of the images in the files."}
    )
    train_dir: Optional[str] = field(default=None, metadata={"help": "A folder containing the training data."})
    validation_dir: Optional[str] = field(default=None, metadata={"help": "A folder containing the validation data."})
    train_val_split: Optional[float] = field(
        default=0.15, metadata={"help": "Percent to split off of train for validation."}
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )

    def __post_init__(self):
        data_files = {}
        if self.train_dir is not None:
            data_files["train"] = self.train_dir
        if self.validation_dir is not None:
            data_files["validation"] = self.validation_dir
        self.data_files = data_files if data_files else None
@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        default=None,
        metadata={
            "help": (
                "The model checkpoint for weights initialization.Don't set if you want to train a model from scratch."
            )
        },
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name_or_path"}
    )
    config_overrides: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "Override some existing default config settings when a model is trained from scratch. Example: "
                "n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index"
            )
        },
    )
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from s3"}
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    image_processor_name: str = field(default=None, metadata={"help": "Name or path of preprocessor config."})
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
    mask_ratio: float = field(
        default=0.75, metadata={"help": "The ratio of the number of masked tokens in the input sequence."}
    )
    norm_pix_loss: bool = field(
        default=True, metadata={"help": "Whether or not to train with normalized pixel values as target."}
    )


@dataclass
class CustomTrainingArguments(TrainingArguments):
    base_learning_rate: float = field(
        default=1e-3, metadata={"help": "Base learning rate: absolute_lr = base_lr * total_batch_size / 256."}
    )
def collate_fn(examples):
    pixel_values = torch.stack([example["pixel_values"] for example in examples])
    return {"pixel_values": pixel_values}


def main():
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, CustomTrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("run_mae", model_args, data_args)
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    transformers.utils.logging.set_verbosity(log_level)
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'''
+ F'''distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}''' )
logger.info(F'''Training/evaluation parameters {training_args}''' )
# Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F'''Output directory ({training_args.output_dir}) already exists and is not empty. '''
'Use --overwrite_output_dir to overcome.' )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '''
'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.' )
    # Initialize our dataset.
    ds = load_dataset(
        data_args.dataset_name,
        data_args.dataset_config_name,
        data_files=data_args.data_files,
        cache_dir=model_args.cache_dir,
        use_auth_token=True if model_args.use_auth_token else None,
    )

    # If we don't have a validation split, split off a percentage of train as validation.
    data_args.train_val_split = None if "validation" in ds.keys() else data_args.train_val_split
    if isinstance(data_args.train_val_split, float) and data_args.train_val_split > 0.0:
        split = ds["train"].train_test_split(data_args.train_val_split)
        ds["train"] = split["train"]
        ds["validation"] = split["test"]
# Load pretrained model and image processor
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config_kwargs = {
        "cache_dir": model_args.cache_dir,
        "revision": model_args.model_revision,
        "use_auth_token": True if model_args.use_auth_token else None,
    }
    if model_args.config_name:
        config = ViTMAEConfig.from_pretrained(model_args.config_name, **config_kwargs)
    elif model_args.model_name_or_path:
        config = ViTMAEConfig.from_pretrained(model_args.model_name_or_path, **config_kwargs)
    else:
        config = ViTMAEConfig()
        logger.warning("You are instantiating a new config instance from scratch.")
        if model_args.config_overrides is not None:
            logger.info(f"Overriding config: {model_args.config_overrides}")
            config.update_from_string(model_args.config_overrides)
            logger.info(f"New config: {config}")
# adapt config
config.update(
{
'mask_ratio': model_args.mask_ratio,
'norm_pix_loss': model_args.norm_pix_loss,
} )
    # create image processor
    if model_args.image_processor_name:
        image_processor = ViTImageProcessor.from_pretrained(model_args.image_processor_name, **config_kwargs)
    elif model_args.model_name_or_path:
        image_processor = ViTImageProcessor.from_pretrained(model_args.model_name_or_path, **config_kwargs)
    else:
        image_processor = ViTImageProcessor()

    # create model
    if model_args.model_name_or_path:
        model = ViTMAEForPreTraining.from_pretrained(
            model_args.model_name_or_path,
            from_tf=bool(".ckpt" in model_args.model_name_or_path),
            config=config,
            cache_dir=model_args.cache_dir,
            revision=model_args.model_revision,
            use_auth_token=True if model_args.use_auth_token else None,
        )
    else:
        logger.info("Training new model from scratch")
        model = ViTMAEForPreTraining(config)

    if training_args.do_train:
        column_names = ds["train"].column_names
    else:
        column_names = ds["validation"].column_names

    if data_args.image_column_name is not None:
        image_column_name = data_args.image_column_name
    elif "image" in column_names:
        image_column_name = "image"
    elif "img" in column_names:
        image_column_name = "img"
    else:
        image_column_name = column_names[0]
    # transformations as done in original MAE paper
    # source: https://github.com/facebookresearch/mae/blob/main/main_pretrain.py
    if "shortest_edge" in image_processor.size:
        size = image_processor.size["shortest_edge"]
    else:
        size = (image_processor.size["height"], image_processor.size["width"])
    transforms = Compose(
        [
            Lambda(lambda img: img.convert("RGB") if img.mode != "RGB" else img),
            RandomResizedCrop(size, scale=(0.2, 1.0), interpolation=InterpolationMode.BICUBIC),
            RandomHorizontalFlip(),
            ToTensor(),
            Normalize(mean=image_processor.image_mean, std=image_processor.image_std),
        ]
    )

    def preprocess_images(examples):
        examples["pixel_values"] = [transforms(image) for image in examples[image_column_name]]
        return examples

    if training_args.do_train:
        if "train" not in ds:
            raise ValueError("--do_train requires a train dataset")
        if data_args.max_train_samples is not None:
            ds["train"] = ds["train"].shuffle(seed=training_args.seed).select(range(data_args.max_train_samples))
        # Set the training transforms
        ds["train"].set_transform(preprocess_images)

    if training_args.do_eval:
        if "validation" not in ds:
            raise ValueError("--do_eval requires a validation dataset")
        if data_args.max_eval_samples is not None:
            ds["validation"] = (
                ds["validation"].shuffle(seed=training_args.seed).select(range(data_args.max_eval_samples))
            )
        # Set the validation transforms
        ds["validation"].set_transform(preprocess_images)

    # Compute absolute learning rate
    total_train_batch_size = (
        training_args.train_batch_size * training_args.gradient_accumulation_steps * training_args.world_size
    )
    if training_args.base_learning_rate is not None:
        training_args.learning_rate = training_args.base_learning_rate * total_train_batch_size / 256

    # Initialize our trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=ds["train"] if training_args.do_train else None,
        eval_dataset=ds["validation"] if training_args.do_eval else None,
        tokenizer=image_processor,
        data_collator=collate_fn,
    )
    # Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()
        trainer.log_metrics("train", train_result.metrics)
        trainer.save_metrics("train", train_result.metrics)
        trainer.save_state()

    # Evaluation
    if training_args.do_eval:
        metrics = trainer.evaluate()
        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)

    # Write model card and (optionally) push to hub
    kwargs = {
        "tasks": "masked-auto-encoding",
        "dataset": data_args.dataset_name,
        "tags": ["masked-auto-encoding"],
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
| 315 | 0 |
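# --- Illustrative sketch (not part of the original script) ---
# The linear learning-rate scaling rule applied above, shown standalone with
# made-up batch settings: absolute_lr = base_lr * total_batch_size / 256.
base_learning_rate = 1e-3
train_batch_size, gradient_accumulation_steps, world_size = 32, 2, 4
total_train_batch_size = train_batch_size * gradient_accumulation_steps * world_size
print(base_learning_rate * total_train_batch_size / 256)  # 0.001 for a total batch of 256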
import pickle
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, XLMRobertaTokenizer, XLMRobertaTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
@require_tokenizers
class XLMRobertaTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = XLMRobertaTokenizer
    rust_tokenizer_class = XLMRobertaTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = XLMRobertaTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)

    def test_convert_token_and_id(self):
        token = "<pad>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<s>")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(vocab_keys[-1], "<mask>")
        self.assertEqual(len(vocab_keys), 1002)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1002)
    def test_full_tokenizer(self):
        tokenizer = XLMRobertaTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
], )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
# ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^
], )
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""<unk>""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""<unk>""",
""".""",
], )
    def test_save_pretrained(self):
        if not self.test_slow_tokenizer:
            # as we don't have a slow version, we can't compare the outputs between slow and fast versions
            return

        self.tokenizers_list[0] = (self.rust_tokenizer_class, "hf-internal-testing/tiny-xlm-roberta", {})
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it save with the same files + the tokenizer.json file for the fast one
                self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files))
                tokenizer_r_files = tuple(f for f in tokenizer_r_files if "tokenizer.json" not in f)
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))
                # self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
                # self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))

                shutil.rmtree(tmpdirname2)

                # Save tokenizer rust, legacy_format=True
                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=True)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it save with the same files
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))

                shutil.rmtree(tmpdirname2)

                # Save tokenizer rust, legacy_format=False
                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=False)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it saved the tokenizer.json file
                self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files))

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))

                shutil.rmtree(tmpdirname2)
    @cached_property
    def big_tokenizer(self):
        return XLMRobertaTokenizer.from_pretrained("xlm-roberta-base")

    def test_picklable_without_disk(self):
        with tempfile.NamedTemporaryFile() as f:
            shutil.copyfile(SAMPLE_VOCAB, f.name)
            tokenizer = XLMRobertaTokenizer(f.name, keep_accents=True)
            pickled_tokenizer = pickle.dumps(tokenizer)
        pickle.loads(pickled_tokenizer)

    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
    @slow
    def test_tokenization_base_easy_symbols(self):
        symbols = "Hello World!"
        original_tokenizer_encodings = [0, 35378, 6661, 38, 2]
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base')  # xlmr.large has same tokenizer
        # xlmr.eval()
        # xlmr.encode(symbols)

        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
    @slow
    def test_tokenization_base_hard_symbols(self):
        symbols = (
            'This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'
            " add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"
        )
        original_tokenizer_encodings = [
0,
3293,
83,
10,
4552,
4989,
7986,
678,
10,
5915,
111,
179459,
124850,
4,
6044,
237,
12,
6,
5,
6,
4,
6780,
705,
15,
1388,
44,
378,
10114,
711,
152,
20,
6,
5,
22376,
642,
1221,
15190,
34153,
450,
5608,
959,
1119,
57702,
136,
186,
47,
1098,
29367,
47,
# 4426, # What fairseq tokenizes from "<unk>": "_<"
# 3678, # What fairseq tokenizes from "<unk>": "unk"
# 2740, # What fairseq tokenizes from "<unk>": ">"
3, # What we tokenize from "<unk>": "<unk>"
6, # Residue from the tokenization: an extra sentencepiece underline
4,
6044,
237,
6284,
50901,
528,
31,
90,
34,
927,
2,
]
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base')  # xlmr.large has same tokenizer
        # xlmr.eval()
        # xlmr.encode(symbols)

        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
@slow
def __snake_case (self ) -> Any:
UpperCAmelCase_: List[str] = {"""input_ids""": [[0, 11062, 82772, 7, 15, 82772, 538, 51529, 237, 17198, 1290, 206, 9, 215175, 1314, 136, 17198, 1290, 206, 9, 56359, 42, 122009, 9, 16466, 16, 87344, 4537, 9, 4717, 78381, 6, 159958, 7, 15, 24480, 618, 4, 527, 22693, 5428, 4, 2777, 24480, 9874, 4, 43523, 594, 4, 803, 18392, 33189, 18, 4, 43523, 24447, 12399, 100, 24955, 83658, 9626, 144057, 15, 839, 22335, 16, 136, 24955, 83658, 83479, 15, 39102, 724, 16, 678, 645, 2789, 1328, 4589, 42, 122009, 115774, 23, 805, 1328, 46876, 7, 136, 53894, 1940, 42227, 41159, 17721, 823, 425, 4, 27512, 98722, 206, 136, 5531, 4970, 919, 17336, 5, 2], [0, 20080, 618, 83, 82775, 47, 479, 9, 1517, 73, 53894, 333, 80581, 110117, 18811, 5256, 1295, 51, 152526, 297, 7986, 390, 124416, 538, 35431, 214, 98, 15044, 25737, 136, 7108, 43701, 23, 756, 135355, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 581, 63773, 119455, 6, 147797, 88203, 7, 645, 70, 21, 3285, 10269, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
        # fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=UpperCAmelCase_,
            model_name="xlm-roberta-base",
            revision="d9d8a8ea5eb94b1c6654ae9249df7793cd2933d3",
        )
| 360 |
import math
from typing import Optional
import numpy as np
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'facebook/encodec_24khz': 'https://huggingface.co/facebook/encodec_24khz/resolve/main/config.json',
'facebook/encodec_48khz': 'https://huggingface.co/facebook/encodec_48khz/resolve/main/config.json',
}
class EncodecConfig(PretrainedConfig):
    model_type = "encodec"
    def __init__(
        self,
        target_bandwidths=[1.5, 3.0, 6.0, 12.0, 24.0],
        sampling_rate=24000,
        audio_channels=1,
        normalize=False,
        chunk_length_s=None,
        overlap=None,
        hidden_size=128,
        num_filters=32,
        num_residual_layers=1,
        upsampling_ratios=[8, 5, 4, 2],
        norm_type="weight_norm",
        kernel_size=7,
        last_kernel_size=7,
        residual_kernel_size=3,
        dilation_growth_rate=2,
        use_causal_conv=True,
        pad_mode="reflect",
        compress=2,
        num_lstm_layers=2,
        trim_right_ratio=1.0,
        codebook_size=1024,
        codebook_dim=None,
        use_conv_shortcut=True,
        **kwargs,
    ):
        self.target_bandwidths = target_bandwidths
        self.sampling_rate = sampling_rate
        self.audio_channels = audio_channels
        self.normalize = normalize
        self.chunk_length_s = chunk_length_s
        self.overlap = overlap
        self.hidden_size = hidden_size
        self.num_filters = num_filters
        self.num_residual_layers = num_residual_layers
        self.upsampling_ratios = upsampling_ratios
        self.norm_type = norm_type
        self.kernel_size = kernel_size
        self.last_kernel_size = last_kernel_size
        self.residual_kernel_size = residual_kernel_size
        self.dilation_growth_rate = dilation_growth_rate
        self.use_causal_conv = use_causal_conv
        self.pad_mode = pad_mode
        self.compress = compress
        self.num_lstm_layers = num_lstm_layers
        self.trim_right_ratio = trim_right_ratio
        self.codebook_size = codebook_size
        self.codebook_dim = codebook_dim if codebook_dim is not None else hidden_size
        self.use_conv_shortcut = use_conv_shortcut

        if self.norm_type not in ["weight_norm", "time_group_norm"]:
            raise ValueError(
                f'self.norm_type must be one of `"weight_norm"`, `"time_group_norm"`, got {self.norm_type}'
            )

        super().__init__(**kwargs)
    # Derived quantities used by the feature extractor and the model.
    @property
    def chunk_length(self) -> Optional[int]:
        if self.chunk_length_s is None:
            return None
        return int(self.chunk_length_s * self.sampling_rate)

    @property
    def chunk_stride(self) -> Optional[int]:
        if self.chunk_length_s is None or self.overlap is None:
            return None
        return max(1, int((1.0 - self.overlap) * self.chunk_length))

    @property
    def frame_rate(self) -> int:
        hop_length = np.prod(self.upsampling_ratios)
        return math.ceil(self.sampling_rate / hop_length)

    @property
    def num_quantizers(self) -> int:
        return int(1000 * self.target_bandwidths[-1] // (self.frame_rate * 10))
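# Illustrative sanity check (hypothetical usage, not part of the library module;
# assumes a working `transformers` install so PretrainedConfig.__init__ resolves):
if __name__ == "__main__":
    config = EncodecConfig(chunk_length_s=1.0, overlap=0.01)
    print(config.chunk_length)  # 1.0 s * 24000 Hz -> 24000 samples
    print(config.chunk_stride)  # max(1, int((1 - 0.01) * 24000)) -> 23760
    print(config.frame_rate)    # ceil(24000 / prod([8, 5, 4, 2])) -> 75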
| 82 | 0 |
'''simple docstring'''
def bfs(graph, source, sink, parent) -> bool:
    # Return True if there is an augmenting path from source to sink in the
    # residual graph, recording each node's predecessor in parent[].
    visited = [False] * len(graph)
    queue = [source]
    visited[source] = True
    while queue:
        u = queue.pop(0)
        for ind in range(len(graph[u])):
            if visited[ind] is False and graph[u][ind] > 0:
                queue.append(ind)
                visited[ind] = True
                parent[ind] = u
    return visited[sink]


def ford_fulkerson(graph, source, sink) -> int:
    # parent[] is filled by BFS and stores the augmenting path
    parent = [-1] * len(graph)
    max_flow = 0
    while bfs(graph, source, sink, parent):
        path_flow = float("Inf")
        s = sink
        while s != source:
            # Find the minimum residual capacity along the selected path
            path_flow = min(path_flow, graph[parent[s]][s])
            s = parent[s]
        max_flow += path_flow
        # Update residual capacities along the path
        v = sink
        while v != source:
            u = parent[v]
            graph[u][v] -= path_flow
            graph[v][u] += path_flow
            v = parent[v]
    return max_flow
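# Note: selecting augmenting paths with breadth-first search makes this the
# Edmonds-Karp variant of Ford-Fulkerson, which runs in O(V * E^2) on an
# adjacency-matrix capacity network; for the classic example network below
# the maximum flow from node 0 to node 5 works out to 23.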
graph = [
    [0, 16, 13, 0, 0, 0],
    [0, 0, 10, 12, 0, 0],
    [0, 4, 0, 0, 14, 0],
    [0, 0, 9, 0, 0, 20],
    [0, 0, 0, 7, 0, 4],
    [0, 0, 0, 0, 0, 0],
]
source, sink = 0, 5
print(ford_fulkerson(graph, source, sink))
| 41 |
'''simple docstring'''
import argparse
from pathlib import Path
from typing import Dict, OrderedDict, Tuple
import torch
from audiocraft.models import MusicGen
from transformers import (
    AutoFeatureExtractor,
    AutoTokenizer,
    EncodecModel,
    MusicgenDecoderConfig,
    MusicgenForConditionalGeneration,
    MusicgenProcessor,
    T5EncoderModel,
)
from transformers.models.musicgen.modeling_musicgen import MusicgenForCausalLM
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

EXPECTED_MISSING_KEYS = ["model.decoder.embed_positions.weights"]
def rename_keys(name: str) -> str:
    if "emb" in name:
        name = name.replace("emb", "model.decoder.embed_tokens")
    if "transformer" in name:
        name = name.replace("transformer", "model.decoder")
    if "cross_attention" in name:
        name = name.replace("cross_attention", "encoder_attn")
    if "linear1" in name:
        name = name.replace("linear1", "fc1")
    if "linear2" in name:
        name = name.replace("linear2", "fc2")
    if "norm1" in name:
        name = name.replace("norm1", "self_attn_layer_norm")
    if "norm_cross" in name:
        name = name.replace("norm_cross", "encoder_attn_layer_norm")
    if "norm2" in name:
        name = name.replace("norm2", "final_layer_norm")
    if "out_norm" in name:
        name = name.replace("out_norm", "model.decoder.layer_norm")
    if "linears" in name:
        name = name.replace("linears", "lm_heads")
    if "condition_provider.conditioners.description.output_proj" in name:
        name = name.replace("condition_provider.conditioners.description.output_proj", "enc_to_dec_proj")
    return name
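# Net effect of rename_keys on a hypothetical fairseq checkpoint key:
#   "transformer.layers.0.cross_attention.out_proj.weight"
#       -> "model.decoder.layers.0.encoder_attn.out_proj.weight"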
def rename_state_dict(state_dict: OrderedDict, hidden_size: int) -> Tuple[Dict, Dict]:
    keys = list(state_dict.keys())
    enc_dec_proj_state_dict = {}
    for key in keys:
        val = state_dict.pop(key)
        key = rename_keys(key)
        if "in_proj_weight" in key:
            # split fused qkv projection into separate q, k, v projections
            state_dict[key.replace("in_proj_weight", "q_proj.weight")] = val[:hidden_size, :]
            state_dict[key.replace("in_proj_weight", "k_proj.weight")] = val[hidden_size : 2 * hidden_size, :]
            state_dict[key.replace("in_proj_weight", "v_proj.weight")] = val[-hidden_size:, :]
        elif "enc_to_dec_proj" in key:
            enc_dec_proj_state_dict[key[len("enc_to_dec_proj.") :]] = val
        else:
            state_dict[key] = val
    return state_dict, enc_dec_proj_state_dict
def decoder_config_from_checkpoint(checkpoint: str) -> MusicgenDecoderConfig:
    if checkpoint == "small":
        # default config values
        hidden_size = 1024
        num_hidden_layers = 24
        num_attention_heads = 16
    elif checkpoint == "medium":
        hidden_size = 1536
        num_hidden_layers = 48
        num_attention_heads = 24
    elif checkpoint == "large":
        hidden_size = 2048
        num_hidden_layers = 48
        num_attention_heads = 32
    else:
        raise ValueError(f"Checkpoint should be one of `['small', 'medium', 'large']`, got {checkpoint}.")
    config = MusicgenDecoderConfig(
        hidden_size=hidden_size,
        ffn_dim=hidden_size * 4,
        num_hidden_layers=num_hidden_layers,
        num_attention_heads=num_attention_heads,
    )
    return config
@torch.no_grad()
def convert_musicgen_checkpoint(checkpoint, pytorch_dump_folder=None, repo_id=None, device="cpu"):
    fairseq_model = MusicGen.get_pretrained(checkpoint, device=device)
    decoder_config = decoder_config_from_checkpoint(checkpoint)
    decoder_state_dict = fairseq_model.lm.state_dict()
    decoder_state_dict, enc_dec_proj_state_dict = rename_state_dict(
        decoder_state_dict, hidden_size=decoder_config.hidden_size)
    text_encoder = T5EncoderModel.from_pretrained("t5-base")
    audio_encoder = EncodecModel.from_pretrained("facebook/encodec_32khz")
    decoder = MusicgenForCausalLM(decoder_config).eval()
    # load all decoder weights - expect that we'll be missing embeddings and enc-dec projection
    missing_keys, unexpected_keys = decoder.load_state_dict(decoder_state_dict, strict=False)
    for key in missing_keys.copy():
        if key.startswith(("text_encoder", "audio_encoder")) or key in EXPECTED_MISSING_KEYS:
            missing_keys.remove(key)
    if len(missing_keys) > 0:
        raise ValueError(f"Missing key(s) in state_dict: {missing_keys}")
    if len(unexpected_keys) > 0:
        raise ValueError(f"Unexpected key(s) in state_dict: {unexpected_keys}")
    # init the composite model
    model = MusicgenForConditionalGeneration(text_encoder=text_encoder, audio_encoder=audio_encoder, decoder=decoder)
    # load the pre-trained enc-dec projection (from the decoder state dict)
    model.enc_to_dec_proj.load_state_dict(enc_dec_proj_state_dict)
    # check we can do a forward pass
    input_ids = torch.arange(0, 8, dtype=torch.long).reshape(2, -1)
    decoder_input_ids = input_ids.reshape(2 * 4, -1)
    with torch.no_grad():
        logits = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids).logits
    if logits.shape != (8, 1, 2048):
        raise ValueError("Incorrect shape for logits")
    # now construct the processor
    tokenizer = AutoTokenizer.from_pretrained("t5-base")
    feature_extractor = AutoFeatureExtractor.from_pretrained("facebook/encodec_32khz", padding_side="left")
    processor = MusicgenProcessor(feature_extractor=feature_extractor, tokenizer=tokenizer)
    # set the appropriate bos/pad token ids
    model.generation_config.decoder_start_token_id = 2048
    model.generation_config.pad_token_id = 2048
    # set other default generation config params
    model.generation_config.max_length = int(30 * audio_encoder.config.frame_rate)
    model.generation_config.do_sample = True
    model.generation_config.guidance_scale = 3.0
    if pytorch_dump_folder is not None:
        Path(pytorch_dump_folder).mkdir(exist_ok=True)
        logger.info(f"Saving model {checkpoint} to {pytorch_dump_folder}")
        model.save_pretrained(pytorch_dump_folder)
        processor.save_pretrained(pytorch_dump_folder)
    if repo_id:
        logger.info(f"Pushing model {checkpoint} to {repo_id}")
        model.push_to_hub(repo_id)
        processor.push_to_hub(repo_id)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--checkpoint''',
default='''small''',
type=str,
help='''Checkpoint size of the MusicGen model you\'d like to convert. Can be one of: `[\'small\', \'medium\', \'large\']`.''',
)
parser.add_argument(
'''--pytorch_dump_folder''',
required=True,
default=None,
type=str,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument(
'''--push_to_hub''', default=None, type=str, help='''Where to upload the converted model on the 🤗 hub.'''
)
parser.add_argument(
'''--device''', default='''cpu''', type=str, help='''Torch device to run the conversion, either cpu or cuda.'''
)
    args = parser.parse_args()
    convert_musicgen_checkpoint(args.checkpoint, args.pytorch_dump_folder, args.push_to_hub, device=args.device)
| 41 | 1 |
"""simple docstring"""
from collections.abc import Sequence
def max_subarray_sum(arr: Sequence[float], allow_empty_subarrays: bool = False) -> float:
    # Kadane's algorithm: track the best sum of a subarray ending at each element.
    if not arr:
        return 0
    max_sum = 0 if allow_empty_subarrays else float("-inf")
    curr_sum = 0.0
    for num in arr:
        curr_sum = max(0 if allow_empty_subarrays else num, curr_sum + num)
        max_sum = max(max_sum, curr_sum)
    return max_sum
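# Worked example: for arr = [-2, 1, -3, 4, -1, 2, 1, -5, 4] the best subarray is
# [4, -1, 2, 1] with sum 6, so max_subarray_sum(arr) == 6; with
# allow_empty_subarrays=True an all-negative input returns 0 (the empty subarray).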
if __name__ == "__main__":
from doctest import testmod
testmod()
    nums = [-2, 1, -3, 4, -1, 2, 1, -5, 4]
    print(f"{max_subarray_sum(nums) = }")
| 364 |
"""simple docstring"""
from .constants import (
MODEL_NAME,
OPTIMIZER_NAME,
RNG_STATE_NAME,
SAFE_WEIGHTS_INDEX_NAME,
SAFE_WEIGHTS_NAME,
SCALER_NAME,
SCHEDULER_NAME,
TORCH_LAUNCH_PARAMS,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
)
from .dataclasses import (
BnbQuantizationConfig,
ComputeEnvironment,
CustomDtype,
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
DynamoBackend,
FPaRecipeKwargs,
FullyShardedDataParallelPlugin,
GradientAccumulationPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
KwargsHandler,
LoggerType,
MegatronLMPlugin,
PrecisionType,
ProjectConfiguration,
RNGType,
SageMakerDistributedType,
TensorInformation,
TorchDynamoPlugin,
)
from .environment import get_int_from_env, parse_choice_from_env, parse_flag_from_env
from .imports import (
get_ccl_version,
    is_4bit_bnb_available,
    is_8bit_bnb_available,
is_aim_available,
is_bfaa_available,
is_bnb_available,
is_botoa_available,
is_ccl_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_fpa_available,
is_ipex_available,
is_megatron_lm_available,
is_mlflow_available,
is_mps_available,
is_npu_available,
is_rich_available,
is_safetensors_available,
is_sagemaker_available,
is_tensorboard_available,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
from .modeling import (
check_device_map,
check_tied_parameters_in_config,
check_tied_parameters_on_same_device,
compute_module_sizes,
convert_file_size_to_int,
dtype_byte_size,
find_tied_parameters,
get_balanced_memory,
get_max_layer_size,
get_max_memory,
get_mixed_precision_context_manager,
id_tensor_storage,
infer_auto_device_map,
load_checkpoint_in_model,
load_offloaded_weights,
load_state_dict,
named_module_tensors,
retie_parameters,
set_module_tensor_to_device,
shard_checkpoint,
)
from .offload import (
OffloadedWeightsLoader,
PrefixedDataset,
extract_submodules_state_dict,
load_offloaded_weight,
offload_state_dict,
offload_weight,
save_offload_index,
)
from .operations import (
broadcast,
broadcast_object_list,
concatenate,
convert_outputs_to_fpaa,
convert_to_fpaa,
find_batch_size,
find_device,
gather,
gather_object,
get_data_structure,
honor_type,
initialize_tensors,
is_namedtuple,
is_tensor_information,
is_torch_tensor,
listify,
pad_across_processes,
recursively_apply,
reduce,
send_to_device,
slice_tensors,
)
from .versions import compare_versions, is_torch_version
if is_deepspeed_available():
from .deepspeed import (
DeepSpeedEngineWrapper,
DeepSpeedOptimizerWrapper,
DeepSpeedSchedulerWrapper,
DummyOptim,
DummyScheduler,
HfDeepSpeedConfig,
)
from .bnb import has_abit_bnb_layers, load_and_quantize_model
from .fsdp_utils import load_fsdp_model, load_fsdp_optimizer, save_fsdp_model, save_fsdp_optimizer
from .launch import (
PrepareForLaunch,
_filter_args,
prepare_deepspeed_cmd_env,
prepare_multi_gpu_env,
prepare_sagemager_args_inputs,
prepare_simple_launcher_cmd_env,
prepare_tpu,
)
from .megatron_lm import (
AbstractTrainStep,
BertTrainStep,
GPTTrainStep,
MegatronEngine,
MegatronLMDummyDataLoader,
MegatronLMDummyScheduler,
MegatronLMOptimizerWrapper,
MegatronLMSchedulerWrapper,
TaTrainStep,
avg_losses_across_data_parallel_group,
gather_across_data_parallel_groups,
)
from .megatron_lm import initialize as megatron_lm_initialize
from .megatron_lm import prepare_data_loader as megatron_lm_prepare_data_loader
from .megatron_lm import prepare_model as megatron_lm_prepare_model
from .megatron_lm import prepare_optimizer as megatron_lm_prepare_optimizer
from .megatron_lm import prepare_scheduler as megatron_lm_prepare_scheduler
from .memory import find_executable_batch_size, release_memory
from .other import (
extract_model_from_parallel,
get_pretty_name,
is_port_in_use,
merge_dicts,
patch_environment,
save,
wait_for_everyone,
write_basic_config,
)
from .random import set_seed, synchronize_rng_state, synchronize_rng_states
from .torch_xla import install_xla
from .tqdm import tqdm
from .transformer_engine import convert_model, has_transformer_engine_layers
| 126 | 0 |
import torch
import torch.nn as nn
from transformers import CLIPConfig, CLIPVisionModel, PreTrainedModel
from ...utils import logging
logger = logging.get_logger(__name__)
def cosine_distance(image_embeds, text_embeds):
    """Cosine similarity between every image embedding and every concept embedding."""
    normalized_image_embeds = nn.functional.normalize(image_embeds)
    normalized_text_embeds = nn.functional.normalize(text_embeds)
    return torch.mm(normalized_image_embeds, normalized_text_embeds.t())
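# Shape sketch (illustrative): for a batch of 2 image embeddings and 3 concept
# embeddings, both of dim 4, the result is a (2, 3) matrix of cosine similarities:
#   cosine_distance(torch.randn(2, 4), torch.randn(3, 4)).shape == torch.Size([2, 3])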
class StableDiffusionSafetyChecker(PreTrainedModel):
    config_class = CLIPConfig
    _no_split_modules = ["CLIPEncoderLayer"]

    def __init__(self, config: CLIPConfig):
        super().__init__(config)
        self.vision_model = CLIPVisionModel(config.vision_config)
        self.visual_projection = nn.Linear(config.vision_config.hidden_size, config.projection_dim, bias=False)
        self.concept_embeds = nn.Parameter(torch.ones(17, config.projection_dim), requires_grad=False)
        self.special_care_embeds = nn.Parameter(torch.ones(3, config.projection_dim), requires_grad=False)
        self.concept_embeds_weights = nn.Parameter(torch.ones(17), requires_grad=False)
        self.special_care_embeds_weights = nn.Parameter(torch.ones(3), requires_grad=False)
    @torch.no_grad()
    def forward(self, clip_input, images):
        pooled_output = self.vision_model(clip_input)[1]  # pooled_output
        image_embeds = self.visual_projection(pooled_output)
        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        special_cos_dist = cosine_distance(image_embeds, self.special_care_embeds).cpu().float().numpy()
        cos_dist = cosine_distance(image_embeds, self.concept_embeds).cpu().float().numpy()
        result = []
        batch_size = image_embeds.shape[0]
        for i in range(batch_size):
            result_img = {"special_scores": {}, "special_care": [], "concept_scores": {}, "bad_concepts": []}
            # increase this value to create a stronger `nsfw` filter
            # at the cost of increasing the possibility of filtering benign images
            adjustment = 0.0
            for concept_idx in range(len(special_cos_dist[0])):
                concept_cos = special_cos_dist[i][concept_idx]
                concept_threshold = self.special_care_embeds_weights[concept_idx].item()
                result_img["special_scores"][concept_idx] = round(concept_cos - concept_threshold + adjustment, 3)
                if result_img["special_scores"][concept_idx] > 0:
                    result_img["special_care"].append({concept_idx, result_img["special_scores"][concept_idx]})
                    adjustment = 0.01
            for concept_idx in range(len(cos_dist[0])):
                concept_cos = cos_dist[i][concept_idx]
                concept_threshold = self.concept_embeds_weights[concept_idx].item()
                result_img["concept_scores"][concept_idx] = round(concept_cos - concept_threshold + adjustment, 3)
                if result_img["concept_scores"][concept_idx] > 0:
                    result_img["bad_concepts"].append(concept_idx)
            result.append(result_img)
        has_nsfw_concepts = [len(res["bad_concepts"]) > 0 for res in result]
        return images, has_nsfw_concepts
    @torch.no_grad()
    def forward_onnx(self, clip_input: torch.FloatTensor, images: torch.FloatTensor):
        pooled_output = self.vision_model(clip_input)[1]  # pooled_output
        image_embeds = self.visual_projection(pooled_output)
        special_cos_dist = cosine_distance(image_embeds, self.special_care_embeds)
        cos_dist = cosine_distance(image_embeds, self.concept_embeds)
        # increase this value to create a stronger `nsfw` filter
        # at the cost of increasing the possibility of filtering benign images
        adjustment = 0.0
        special_scores = special_cos_dist - self.special_care_embeds_weights + adjustment
        # special_scores = special_scores.round(decimals=3)
        special_care = torch.any(special_scores > 0, dim=1)
        special_adjustment = special_care * 0.01
        special_adjustment = special_adjustment.unsqueeze(1).expand(-1, cos_dist.shape[1])
        concept_scores = (cos_dist - self.concept_embeds_weights) + special_adjustment
        # concept_scores = concept_scores.round(decimals=3)
        has_nsfw_concepts = torch.any(concept_scores > 0, dim=1)
        return images, has_nsfw_concepts
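# Note: `forward` post-processes the per-image scores in Python/NumPy and keeps
# per-concept score dictionaries, whereas `forward_onnx` expresses the same
# thresholding purely with torch ops so the module can be traced and exported.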
| 230 |
import logging
import os
import sys
from dataclasses import dataclass, field
from importlib import import_module
from typing import Dict, List, Optional, Tuple
import numpy as np
from seqeval.metrics import accuracy_score, f1_score, precision_score, recall_score
from torch import nn
from utils_ner import Split, TokenClassificationDataset, TokenClassificationTask
import transformers
from transformers import (
AutoConfig,
AutoModelForTokenClassification,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
logger = logging.getLogger(__name__)
@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"})
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"})
    task_type: Optional[str] = field(
        default="NER", metadata={"help": "Task type to fine tune in training (e.g. NER, POS, etc)"})
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"})
    use_fast: bool = field(default=False, metadata={"help": "Set this flag to use fast tokenization."})
    # If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
    # or just modify its tokenizer_config.json.
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )


@dataclass
class DataTrainingArguments:
    data_dir: str = field(
        metadata={"help": "The input data dir. Should contain the .txt files for a CoNLL-2003-formatted task."})
    labels: Optional[str] = field(
        default=None,
        metadata={"help": "Path to a file containing all labels. If not specified, CoNLL-2003 labels are used."},
    )
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"})
def main() -> Dict:
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()
    if (
        os.path.exists(training_args.output_dir)
        and os.listdir(training_args.output_dir)
        and training_args.do_train
        and not training_args.overwrite_output_dir
    ):
        raise ValueError(
            f"Output directory ({training_args.output_dir}) already exists and is not empty. Use"
            " --overwrite_output_dir to overcome.")
    module = import_module("tasks")
    try:
        token_classification_task_clazz = getattr(module, model_args.task_type)
        token_classification_task: TokenClassificationTask = token_classification_task_clazz()
    except AttributeError:
        raise ValueError(
            f"Task {model_args.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. "
            f"Available tasks classes are: {TokenClassificationTask.__subclasses__()}")
    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN,
    )
    logger.warning(
        "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
        training_args.local_rank,
        training_args.device,
        training_args.n_gpu,
        bool(training_args.local_rank != -1),
        training_args.fp16,
    )
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank):
        transformers.utils.logging.set_verbosity_info()
        transformers.utils.logging.enable_default_handler()
        transformers.utils.logging.enable_explicit_format()
    logger.info("Training/evaluation parameters %s", training_args)
    # Set seed
    set_seed(training_args.seed)
    # Prepare CONLL-2003 task
    labels = token_classification_task.get_labels(data_args.labels)
    label_map: Dict[int, str] = dict(enumerate(labels))
    num_labels = len(labels)
    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        num_labels=num_labels,
        id2label=label_map,
        label2id={label: i for i, label in enumerate(labels)},
        cache_dir=model_args.cache_dir,
    )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        use_fast=model_args.use_fast,
    )
    model = AutoModelForTokenClassification.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool(".ckpt" in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
    )
    # Get datasets
    train_dataset = (
        TokenClassificationDataset(
            token_classification_task=token_classification_task,
            data_dir=data_args.data_dir,
            tokenizer=tokenizer,
            labels=labels,
            model_type=config.model_type,
            max_seq_length=data_args.max_seq_length,
            overwrite_cache=data_args.overwrite_cache,
            mode=Split.train,
        )
        if training_args.do_train
        else None
    )
    eval_dataset = (
        TokenClassificationDataset(
            token_classification_task=token_classification_task,
            data_dir=data_args.data_dir,
            tokenizer=tokenizer,
            labels=labels,
            model_type=config.model_type,
            max_seq_length=data_args.max_seq_length,
            overwrite_cache=data_args.overwrite_cache,
            mode=Split.dev,
        )
        if training_args.do_eval
        else None
    )

    def align_predictions(predictions, label_ids) -> Tuple[List[int], List[int]]:
        preds = np.argmax(predictions, axis=2)
        batch_size, seq_len = preds.shape
        out_label_list = [[] for _ in range(batch_size)]
        preds_list = [[] for _ in range(batch_size)]
        for i in range(batch_size):
            for j in range(seq_len):
                if label_ids[i, j] != nn.CrossEntropyLoss().ignore_index:
                    out_label_list[i].append(label_map[label_ids[i][j]])
                    preds_list[i].append(label_map[preds[i][j]])
        return preds_list, out_label_list

    def compute_metrics(p: EvalPrediction) -> Dict:
        preds_list, out_label_list = align_predictions(p.predictions, p.label_ids)
        return {
            "accuracy_score": accuracy_score(out_label_list, preds_list),
            "precision": precision_score(out_label_list, preds_list),
            "recall": recall_score(out_label_list, preds_list),
            "f1": f1_score(out_label_list, preds_list),
        }

    # Data collator
    data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8) if training_args.fp16 else None
    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        compute_metrics=compute_metrics,
        data_collator=data_collator,
    )
    # Training
    if training_args.do_train:
        trainer.train(
            model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path) else None)
        trainer.save_model()
        # For convenience, we also re-save the tokenizer to the same directory,
        # so that you can share your model easily on huggingface.co/models =)
        if trainer.is_world_process_zero():
            tokenizer.save_pretrained(training_args.output_dir)
    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")
        result = trainer.evaluate()
        output_eval_file = os.path.join(training_args.output_dir, "eval_results.txt")
        if trainer.is_world_process_zero():
            with open(output_eval_file, "w") as writer:
                logger.info("***** Eval results *****")
                for key, value in result.items():
                    logger.info("  %s = %s", key, value)
                    writer.write("%s = %s\n" % (key, value))
            results.update(result)
    # Predict
    if training_args.do_predict:
        test_dataset = TokenClassificationDataset(
            token_classification_task=token_classification_task,
            data_dir=data_args.data_dir,
            tokenizer=tokenizer,
            labels=labels,
            model_type=config.model_type,
            max_seq_length=data_args.max_seq_length,
            overwrite_cache=data_args.overwrite_cache,
            mode=Split.test,
        )
        predictions, label_ids, metrics = trainer.predict(test_dataset)
        preds_list, _ = align_predictions(predictions, label_ids)
        output_test_results_file = os.path.join(training_args.output_dir, "test_results.txt")
        if trainer.is_world_process_zero():
            with open(output_test_results_file, "w") as writer:
                for key, value in metrics.items():
                    logger.info("  %s = %s", key, value)
                    writer.write("%s = %s\n" % (key, value))
        # Save predictions
        output_test_predictions_file = os.path.join(training_args.output_dir, "test_predictions.txt")
        if trainer.is_world_process_zero():
            with open(output_test_predictions_file, "w") as writer:
                with open(os.path.join(data_args.data_dir, "test.txt"), "r") as f:
                    token_classification_task.write_predictions_to_file(writer, f, preds_list)
    return results


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
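# Hedged invocation sketch (hypothetical paths; the flags come from the dataclasses
# above plus transformers' TrainingArguments):
#   python run_ner.py --data_dir ./conll2003 --model_name_or_path bert-base-cased \
#       --output_dir ./out --do_train --do_eval --do_predict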
| 230 | 1 |
"""simple docstring"""
import json
import os
from datetime import date
from pathlib import Path
from tabulate import DataRow, TableFormat, tabulate
hf_table_format = TableFormat(
    lineabove=None,
    linebelowheader=None,
    linebetweenrows=None,
    linebelow=None,
    headerrow=DataRow('', '|', '|'),
    datarow=DataRow('', '|', '|'),
    padding=1,
    with_header_hide=None,
)

failed = []
group_info = []
no_error_payload = {'type': 'section', 'text': {'type': 'plain_text', 'text': 'No failed tests! 🤗', 'emoji': True}}
payload = [
{
'type': 'header',
'text': {
'type': 'plain_text',
'text': f"""🤗 Accelerate nightly {os.environ.get('TEST_TYPE', '')} test results""",
'emoji': True,
},
}
]
total_num_failed = 0
for log in Path().glob('*.log'):
    section_num_failed = 0
    with open(log, 'r') as f:
        for line in f:
            line = json.loads(line)
            if line.get('nodeid', '') != "":
                test = line['nodeid']
                if line.get('duration', None) is not None:
                    duration = f"{line['duration']:.4f}"
                    if line.get('outcome', '') == "failed":
                        section_num_failed += 1
                        failed.append([test, duration, log.name.split('_')[0]])
                        total_num_failed += 1
    group_info.append([str(log), section_num_failed, failed])
    failed = []
    log.unlink()

message = ''
all_filesafailed = []
if total_num_failed > 0:
    for name, num_failed, failed_tests in group_info:
        if num_failed > 0:
            if num_failed == 1:
                message += f"*{name[1:]}: {num_failed} failed test*\n"
            else:
                message += f"*{name[1:]}: {num_failed} failed tests*\n"
            failed_table = []
            filesafailed = {}
            for test in failed_tests:
                data = test[0].split('::')
                data[0] = data[0].split('/')[-1]
                if data[0] not in filesafailed:
                    filesafailed[data[0]] = [data[1:]]
                else:
                    filesafailed[data[0]] += [data[1:]]
                failed_table.append(data)
            files = [test[0] for test in failed_table]
            individual_files = list(set(files))
            # Count number of failures per file
            table = []
            for file in individual_files:
                table.append([file, len(filesafailed[file])])
            failed_table = tabulate(
                table,
                headers=['Test Location', 'Num Failed'],
                tablefmt=hf_table_format,
                stralign='right',
            )
            message += f"\n```\n{failed_table}\n```"
            all_filesafailed.append(filesafailed)
    if len(message) > 3000:
        err = 'Too many failed tests, please see the full report in the Action results.'
        offset = len(err) + 10
        message = message[: 3000 - offset] + f"\n...\n```\n{err}"
print(f"""### {message}""")
else:
    message = 'No failed tests! 🤗'
print(f"""## {message}""")
payload.append(no_error_payload)
if os.environ.get('TEST_TYPE', '') != "":
from slack_sdk import WebClient
    client = WebClient(token=os.environ['SLACK_API_TOKEN'])
if message != "No failed tests! 🤗":
        md_report = {
'type': 'section',
'text': {
'type': 'mrkdwn',
'text': message,
},
}
payload.append(md_report)
        action_button = {
'type': 'section',
'text': {
'type': 'mrkdwn',
'text': '*For more details:*',
},
'accessory': {
'type': 'button',
'text': {
'type': 'plain_text',
'text': 'Check Action results',
'emoji': True,
},
'url': f"""https://github.com/{os.environ['GITHUB_REPOSITORY']}/actions/runs/{os.environ['GITHUB_RUN_ID']}""",
},
}
payload.append(action_button)
        date_report = {
'type': 'context',
'elements': [
{
'type': 'plain_text',
'text': f"""Nightly {os.environ.get('TEST_TYPE')} test results for {date.today()}""",
}
],
}
payload.append(date_report)
    response = client.chat_postMessage(channel='#accelerate-ci-daily', text=message, blocks=payload)
    ts = response.data['ts']
for failed_file in all_filesafailed:
for test_location, test_failures in failed_file.items():
# Keep only the first instance of the test name
            test_class = ''
for i, row in enumerate(test_failures):
if row[0] != test_class:
                    test_class = row[0]
else:
                    row[0] = ''
            payload = {
'type': 'section',
'text': {
'type': 'mrkdwn',
'text': f"""Test location: {test_location}\n```\n{tabulate(test_failures, headers=['Class', 'Test'], tablefmt=hf_table_format, stralign='right')}\n```""",
},
}
client.chat_postMessage(
channel='#accelerate-ci-daily',
thread_ts=ts,
blocks=[payload],
)
| 23 |
"""simple docstring"""
import string
# frequency taken from https://en.wikipedia.org/wiki/Letter_frequency
english_letter_freq = {
'E': 12.70,
'T': 9.06,
'A': 8.17,
'O': 7.51,
'I': 6.97,
'N': 6.75,
'S': 6.33,
'H': 6.09,
'R': 5.99,
'D': 4.25,
'L': 4.03,
'C': 2.78,
'U': 2.76,
'M': 2.41,
'W': 2.36,
'F': 2.23,
'G': 2.02,
'Y': 1.97,
'P': 1.93,
'B': 1.29,
'V': 0.98,
'K': 0.77,
'J': 0.15,
'X': 0.15,
'Q': 0.10,
'Z': 0.07,
}
ETAOIN = 'ETAOINSHRDLCUMWFGYPBVKJXQZ'
LETTERS = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
def get_letter_count(message: str) -> dict[str, int]:
    letter_count = {letter: 0 for letter in string.ascii_uppercase}
    for letter in message.upper():
        if letter in LETTERS:
            letter_count[letter] += 1
    return letter_count
def get_item_at_index_zero(x: tuple) -> str:
    return x[0]
def get_frequency_order(message: str) -> str:
    letter_to_freq = get_letter_count(message)
    freq_to_letter: dict[int, list[str]] = {freq: [] for letter, freq in letter_to_freq.items()}
    for letter in LETTERS:
        freq_to_letter[letter_to_freq[letter]].append(letter)
    freq_to_letter_str: dict[int, str] = {}
    for freq in freq_to_letter:
        freq_to_letter[freq].sort(key=ETAOIN.find, reverse=True)
        freq_to_letter_str[freq] = "".join(freq_to_letter[freq])
    freq_pairs = list(freq_to_letter_str.items())
    freq_pairs.sort(key=get_item_at_index_zero, reverse=True)
    freq_order: list[str] = [freq_pair[1] for freq_pair in freq_pairs]
    return "".join(freq_order)
def english_freq_match_score(message: str) -> int:
    freq_order = get_frequency_order(message)
    match_score = 0
    for common_letter in ETAOIN[:6]:
        if common_letter in freq_order[:6]:
            match_score += 1
    for uncommon_letter in ETAOIN[-6:]:
        if uncommon_letter in freq_order[-6:]:
            match_score += 1
    return match_score
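# Usage sketch (illustrative): get_frequency_order("...") returns the 26 letters
# ordered from most to least frequent in the message; english_freq_match_score
# counts how many of its 6 most/least frequent letters agree with ETAOIN, so
# longer genuine English text typically scores near 12 while random text scores low.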
if __name__ == "__main__":
import doctest
doctest.testmod()
| 23 | 1 |
import argparse
import gc
import json
import os
import re
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoModelForCausalLM, AutoTokenizer, PreTrainedTokenizerFast, RwkvConfig
from transformers.modeling_utils import WEIGHTS_INDEX_NAME, shard_checkpoint
NUM_HIDDEN_LAYERS_MAPPING = {
    "169M": 12,
    "430M": 24,
    "1B5": 24,
    "3B": 32,
    "7B": 32,
    "14B": 40,
}

HIDDEN_SIZE_MAPPING = {
    "169M": 768,
    "430M": 1024,
    "1B5": 2048,
    "3B": 2560,
    "7B": 4096,
    "14B": 5120,
}
def convert_state_dict(state_dict):
    state_dict_keys = list(state_dict.keys())
    for name in state_dict_keys:
        weight = state_dict.pop(name)
        # emb -> embedding
        if name.startswith("emb."):
            name = name.replace("emb.", "embeddings.")
        # ln_0 -> pre_ln (only present at block 0)
        if name.startswith("blocks.0.ln0"):
            name = name.replace("blocks.0.ln0", "blocks.0.pre_ln")
        # att -> attention
        name = re.sub(r"blocks\.(\d+)\.att", r"blocks.\1.attention", name)
        # ffn -> feed_forward
        name = re.sub(r"blocks\.(\d+)\.ffn", r"blocks.\1.feed_forward", name)
        # time_mix_k -> time_mix_key
        if name.endswith(".time_mix_k"):
            name = name.replace(".time_mix_k", ".time_mix_key")
        # time_mix_v -> time_mix_value
        if name.endswith(".time_mix_v"):
            name = name.replace(".time_mix_v", ".time_mix_value")
        # time_mix_r -> time_mix_receptance
        if name.endswith(".time_mix_r"):
            name = name.replace(".time_mix_r", ".time_mix_receptance")
        if name != "head.weight":
            name = "rwkv." + name
        state_dict[name] = weight
    return state_dict
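# Net effect of convert_state_dict on a hypothetical RWKV checkpoint key:
#   "blocks.0.att.time_mix_k" -> "rwkv.blocks.0.attention.time_mix_key"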
def convert_rmkv_checkpoint_to_hf_format(
    repo_id, checkpoint_file, output_dir, size=None, tokenizer_file=None, push_to_hub=False, model_name=None
):
    # 1. Build the tokenizer
    if tokenizer_file is None:
        print("No `--tokenizer_file` provided, we will use the default tokenizer.")
        vocab_size = 50277
        tokenizer = AutoTokenizer.from_pretrained("EleutherAI/gpt-neox-20b")
    else:
        tokenizer = PreTrainedTokenizerFast(tokenizer_file=tokenizer_file)
        vocab_size = len(tokenizer)
    tokenizer.save_pretrained(output_dir)
    # 2. Build the config
    possible_sizes = list(NUM_HIDDEN_LAYERS_MAPPING.keys())
    if size is None:
        # Try to infer size from the checkpoint name
        for candidate in possible_sizes:
            if candidate in checkpoint_file:
                size = candidate
                break
        if size is None:
            raise ValueError("Could not infer the size, please provide it with the `--size` argument.")
    if size not in possible_sizes:
        raise ValueError(f"`size` should be one of {possible_sizes}, got {size}.")
    config = RwkvConfig(
        vocab_size=vocab_size,
        num_hidden_layers=NUM_HIDDEN_LAYERS_MAPPING[size],
        hidden_size=HIDDEN_SIZE_MAPPING[size],
    )
    config.save_pretrained(output_dir)
    # 3. Download model file then convert state_dict
    model_file = hf_hub_download(repo_id, checkpoint_file)
    state_dict = torch.load(model_file, map_location="cpu")
    state_dict = convert_state_dict(state_dict)
    # 4. Split in shards and save
    shards, index = shard_checkpoint(state_dict)
    for shard_file, shard in shards.items():
        torch.save(shard, os.path.join(output_dir, shard_file))
    if index is not None:
        save_index_file = os.path.join(output_dir, WEIGHTS_INDEX_NAME)
        # Save the index as well
        with open(save_index_file, "w", encoding="utf-8") as f:
            content = json.dumps(index, indent=2, sort_keys=True) + "\n"
            f.write(content)
    # 5. Clean up shards (for some reason the files PyTorch saves take the same space as the whole state_dict)
    print(
        "Cleaning up shards. This may error with an OOM error, if this is the case don't worry you still have converted the model.")
    shard_files = list(shards.keys())
    del state_dict
    del shards
    gc.collect()
    for shard_file in shard_files:
        state_dict = torch.load(os.path.join(output_dir, shard_file))
        torch.save({k: v.cpu().clone() for k, v in state_dict.items()}, os.path.join(output_dir, shard_file))
    del state_dict
    gc.collect()
    if push_to_hub:
        if model_name is None:
            raise ValueError("Please provide a `model_name` to push the model to the Hub.")
        model = AutoModelForCausalLM.from_pretrained(output_dir)
        model.push_to_hub(model_name, max_shard_size="2GB")
        tokenizer.push_to_hub(model_name)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--repo_id''', default=None, type=str, required=True, help='''Repo ID from which to pull the checkpoint.'''
)
parser.add_argument(
'''--checkpoint_file''', default=None, type=str, required=True, help='''Name of the checkpoint file in the repo.'''
)
parser.add_argument(
'''--output_dir''', default=None, type=str, required=True, help='''Where to save the converted model.'''
)
parser.add_argument(
'''--tokenizer_file''',
default=None,
type=str,
help='''Path to the tokenizer file to use (if not provided, only the model is converted).''',
)
parser.add_argument(
'''--size''',
default=None,
type=str,
help='''Size of the model. Will be inferred from the `checkpoint_file` if not passed.''',
)
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
help='''Push to the Hub the converted model.''',
)
parser.add_argument(
'''--model_name''',
default=None,
type=str,
help='''Name of the pushed model on the Hub, including the username / organization.''',
)
    args = parser.parse_args()
convert_rmkv_checkpoint_to_hf_format(
args.repo_id,
args.checkpoint_file,
args.output_dir,
size=args.size,
tokenizer_file=args.tokenizer_file,
push_to_hub=args.push_to_hub,
model_name=args.model_name,
)
| 322 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
    'configuration_xlm': ['XLM_PRETRAINED_CONFIG_ARCHIVE_MAP', 'XLMConfig', 'XLMOnnxConfig'],
    'tokenization_xlm': ['XLMTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_xlm'] = [
'''XLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''XLMForMultipleChoice''',
'''XLMForQuestionAnswering''',
'''XLMForQuestionAnsweringSimple''',
'''XLMForSequenceClassification''',
'''XLMForTokenClassification''',
'''XLMModel''',
'''XLMPreTrainedModel''',
'''XLMWithLMHeadModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_xlm'] = [
'''TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFXLMForMultipleChoice''',
'''TFXLMForQuestionAnsweringSimple''',
'''TFXLMForSequenceClassification''',
'''TFXLMForTokenClassification''',
'''TFXLMMainLayer''',
'''TFXLMModel''',
'''TFXLMPreTrainedModel''',
'''TFXLMWithLMHeadModel''',
]
if TYPE_CHECKING:
from .configuration_xlm import XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMConfig, XLMOnnxConfig
from .tokenization_xlm import XLMTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm import (
XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMPreTrainedModel,
XLMWithLMHeadModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlm import (
TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMForMultipleChoice,
TFXLMForQuestionAnsweringSimple,
TFXLMForSequenceClassification,
TFXLMForTokenClassification,
TFXLMMainLayer,
TFXLMModel,
TFXLMPreTrainedModel,
TFXLMWithLMHeadModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
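# With this lazy-module pattern, submodules are imported only on first attribute
# access: e.g. `from transformers.models.xlm import XLMTokenizer` triggers the
# tokenization_xlm import, while merely importing the package stays cheap.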
| 5 | 0 |
import re
from pathlib import Path
from unittest import TestCase
import pytest
@pytest.mark.integration
class DatasetScriptCodeQualityTest(TestCase):
    def _no_encoding_on_file_open(self, filepath: str):
        """Find any `open(...)` call that passes neither `encoding=` nor a binary/write mode."""
        with open(filepath, encoding="utf-8") as input_file:
            regexp = re.compile(r"(?!.*\b(?:encoding|rb|w|wb|w+|wb+|ab|ab+)\b)(?<=\s)(open)\((.*)\)")
            input_text = input_file.read()
            match = regexp.search(input_text)
        return match

    def _no_print_statements(self, filepath: str):
        """Find any `print(` that is not inside a comment or docstring."""
        with open(filepath, encoding="utf-8") as input_file:
            regexp = re.compile(r"#[^\r\n]*print\(|\"[^\r\n]*print\(|\"\"\".*?print\(.*?\"\"\"|(print\()", re.DOTALL)
            input_text = input_file.read()
            # use `re.finditer` to handle the case where the ignored groups would be matched first by `re.search`
            matches = regexp.finditer(input_text)
            matches = [match for match in matches if match is not None and match.group(1) is not None]
        return matches[0] if matches else None

    def test_no_encoding_on_file_open(self):
        dataset_paths = Path("./datasets")
        dataset_files = list(dataset_paths.absolute().glob("**/*.py"))
        for dataset in dataset_files:
            if self._no_encoding_on_file_open(str(dataset)):
                raise AssertionError(f"open(...) must use utf-8 encoding in {dataset}")

    def test_no_print_statements(self):
        dataset_paths = Path("./datasets")
        dataset_files = list(dataset_paths.absolute().glob("**/*.py"))
        for dataset in dataset_files:
            if self._no_print_statements(str(dataset)):
                raise AssertionError(f"print statement found in {dataset}. Use datasets.logger/logging instead.")
| 371 |
UpperCamelCase__ = """0.18.2"""
from .configuration_utils import ConfigMixin
from .utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_inflect_available,
is_invisible_watermark_available,
is_k_diffusion_available,
is_k_diffusion_version,
is_librosa_available,
is_note_seq_available,
is_onnx_available,
is_scipy_available,
is_torch_available,
is_torchsde_available,
is_transformers_available,
is_transformers_version,
is_unidecode_available,
logging,
)
try:
if not is_onnx_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_onnx_objects import * # noqa F403
else:
from .pipelines import OnnxRuntimeModel
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_pt_objects import * # noqa F403
else:
from .models import (
AutoencoderKL,
ControlNetModel,
ModelMixin,
PriorTransformer,
        T5FilmDecoder,
        Transformer2DModel,
        UNet1DModel,
        UNet2DConditionModel,
        UNet2DModel,
        UNet3DConditionModel,
VQModel,
)
from .optimization import (
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
get_scheduler,
)
from .pipelines import (
AudioPipelineOutput,
ConsistencyModelPipeline,
DanceDiffusionPipeline,
DDIMPipeline,
DDPMPipeline,
DiffusionPipeline,
DiTPipeline,
ImagePipelineOutput,
KarrasVePipeline,
LDMPipeline,
LDMSuperResolutionPipeline,
PNDMPipeline,
RePaintPipeline,
ScoreSdeVePipeline,
)
from .schedulers import (
CMStochasticIterativeScheduler,
DDIMInverseScheduler,
DDIMParallelScheduler,
DDIMScheduler,
DDPMParallelScheduler,
DDPMScheduler,
DEISMultistepScheduler,
DPMSolverMultistepInverseScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
HeunDiscreteScheduler,
IPNDMScheduler,
KarrasVeScheduler,
KDPMaAncestralDiscreteScheduler,
KDPMaDiscreteScheduler,
PNDMScheduler,
RePaintScheduler,
SchedulerMixin,
ScoreSdeVeScheduler,
UnCLIPScheduler,
UniPCMultistepScheduler,
VQDiffusionScheduler,
)
from .training_utils import EMAModel
try:
if not (is_torch_available() and is_scipy_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_scipy_objects import * # noqa F403
else:
from .schedulers import LMSDiscreteScheduler
try:
if not (is_torch_available() and is_torchsde_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_torchsde_objects import * # noqa F403
else:
from .schedulers import DPMSolverSDEScheduler
try:
if not (is_torch_available() and is_transformers_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipelines import (
AltDiffusionImgaImgPipeline,
AltDiffusionPipeline,
AudioLDMPipeline,
CycleDiffusionPipeline,
IFImgaImgPipeline,
IFImgaImgSuperResolutionPipeline,
IFInpaintingPipeline,
IFInpaintingSuperResolutionPipeline,
IFPipeline,
IFSuperResolutionPipeline,
ImageTextPipelineOutput,
KandinskyImgaImgPipeline,
KandinskyInpaintPipeline,
KandinskyPipeline,
KandinskyPriorPipeline,
KandinskyVaaControlnetImgaImgPipeline,
KandinskyVaaControlnetPipeline,
KandinskyVaaImgaImgPipeline,
KandinskyVaaInpaintPipeline,
KandinskyVaaPipeline,
KandinskyVaaPriorEmbaEmbPipeline,
KandinskyVaaPriorPipeline,
LDMTextToImagePipeline,
PaintByExamplePipeline,
SemanticStableDiffusionPipeline,
ShapEImgaImgPipeline,
ShapEPipeline,
StableDiffusionAttendAndExcitePipeline,
StableDiffusionControlNetImgaImgPipeline,
StableDiffusionControlNetInpaintPipeline,
StableDiffusionControlNetPipeline,
StableDiffusionDepthaImgPipeline,
StableDiffusionDiffEditPipeline,
StableDiffusionImageVariationPipeline,
StableDiffusionImgaImgPipeline,
StableDiffusionInpaintPipeline,
StableDiffusionInpaintPipelineLegacy,
StableDiffusionInstructPixaPixPipeline,
StableDiffusionLatentUpscalePipeline,
StableDiffusionLDMaDPipeline,
StableDiffusionModelEditingPipeline,
StableDiffusionPanoramaPipeline,
StableDiffusionParadigmsPipeline,
StableDiffusionPipeline,
StableDiffusionPipelineSafe,
StableDiffusionPixaPixZeroPipeline,
StableDiffusionSAGPipeline,
StableDiffusionUpscalePipeline,
StableUnCLIPImgaImgPipeline,
StableUnCLIPPipeline,
TextToVideoSDPipeline,
TextToVideoZeroPipeline,
UnCLIPImageVariationPipeline,
UnCLIPPipeline,
UniDiffuserModel,
UniDiffuserPipeline,
UniDiffuserTextDecoder,
VersatileDiffusionDualGuidedPipeline,
VersatileDiffusionImageVariationPipeline,
VersatileDiffusionPipeline,
VersatileDiffusionTextToImagePipeline,
VideoToVideoSDPipeline,
VQDiffusionPipeline,
)
try:
if not (is_torch_available() and is_transformers_available() and is_invisible_watermark_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_invisible_watermark_objects import * # noqa F403
else:
from .pipelines import StableDiffusionXLImgaImgPipeline, StableDiffusionXLPipeline
try:
if not (is_torch_available() and is_transformers_available() and is_k_diffusion_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_k_diffusion_objects import * # noqa F403
else:
from .pipelines import StableDiffusionKDiffusionPipeline
try:
if not (is_torch_available() and is_transformers_available() and is_onnx_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_onnx_objects import * # noqa F403
else:
from .pipelines import (
OnnxStableDiffusionImgaImgPipeline,
OnnxStableDiffusionInpaintPipeline,
OnnxStableDiffusionInpaintPipelineLegacy,
OnnxStableDiffusionPipeline,
OnnxStableDiffusionUpscalePipeline,
StableDiffusionOnnxPipeline,
)
try:
if not (is_torch_available() and is_librosa_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_librosa_objects import * # noqa F403
else:
from .pipelines import AudioDiffusionPipeline, Mel
try:
if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403
else:
from .pipelines import SpectrogramDiffusionPipeline
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_flax_objects import * # noqa F403
else:
from .models.controlnet_flax import FlaxControlNetModel
from .models.modeling_flax_utils import FlaxModelMixin
    from .models.unet_2d_condition_flax import FlaxUNet2DConditionModel
from .models.vae_flax import FlaxAutoencoderKL
from .pipelines import FlaxDiffusionPipeline
from .schedulers import (
FlaxDDIMScheduler,
FlaxDDPMScheduler,
FlaxDPMSolverMultistepScheduler,
FlaxKarrasVeScheduler,
FlaxLMSDiscreteScheduler,
FlaxPNDMScheduler,
FlaxSchedulerMixin,
FlaxScoreSdeVeScheduler,
)
try:
if not (is_flax_available() and is_transformers_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_flax_and_transformers_objects import * # noqa F403
else:
from .pipelines import (
FlaxStableDiffusionControlNetPipeline,
FlaxStableDiffusionImgaImgPipeline,
FlaxStableDiffusionInpaintPipeline,
FlaxStableDiffusionPipeline,
)
try:
if not (is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_note_seq_objects import * # noqa F403
else:
from .pipelines import MidiProcessor
| 102 | 0 |
import argparse
import torch
from ...utils import logging
from . import AlbertConfig, AlbertForPreTraining, load_tf_weights_in_albert
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, albert_config_file, pytorch_dump_path):
    # Initialise the PyTorch model from the json config
    config = AlbertConfig.from_json_file(albert_config_file)
    print(f'Building PyTorch model from configuration: {config}')
    model = AlbertForPreTraining(config)
    # Load weights from the TensorFlow checkpoint
    load_tf_weights_in_albert(model, config, tf_checkpoint_path)
    # Save the PyTorch model
    print(f'Save PyTorch model to {pytorch_dump_path}')
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--albert_config_file',
default=None,
type=str,
required=True,
help=(
'The config json file corresponding to the pre-trained ALBERT model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.albert_config_file, args.pytorch_dump_path)
| 30 |
from __future__ import annotations
import math
def is_prime(number: int) -> bool:
    """Return True if `number` is prime, using 6k +/- 1 trial division."""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False
    # All other primes are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
def list_truncated_nums(n: int) -> list[int]:
    """List every left- and right-truncation of n, including n itself."""
    str_num = str(n)
    list_nums = [n]
    for i in range(1, len(str_num)):
        list_nums.append(int(str_num[i:]))
        list_nums.append(int(str_num[:-i]))
    return list_nums
def _UpperCAmelCase ( snake_case ):
"""simple docstring"""
if len(str(snake_case ) ) > 3:
if not is_prime(int(str(snake_case )[-3:] ) ) or not is_prime(int(str(snake_case )[:3] ) ):
return False
return True
def _UpperCAmelCase ( snake_case = 11 ):
"""simple docstring"""
_lowerCAmelCase = []
_lowerCAmelCase = 13
while len(snake_case ) != count:
if validate(snake_case ):
_lowerCAmelCase = list_truncated_nums(snake_case )
if all(is_prime(snake_case ) for i in list_nums ):
list_truncated_primes.append(snake_case )
num += 2
return list_truncated_primes
def _UpperCAmelCase ( ):
"""simple docstring"""
return sum(compute_truncated_primes(11 ) )
if __name__ == "__main__":
print(f"{sum(compute_truncated_primes(11)) = }")
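# --- Added check (not part of the original file) ---
# Known result for Project Euler problem 37: the eleven truncatable primes are
# 23, 37, 53, 73, 313, 317, 373, 797, 3137, 3797, 739397, and they sum to 748317:
#   assert compute_truncated_primes(11)[-1] == 739397
#   assert sum(compute_truncated_primes(11)) == 748317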
| 82 | 0 |
"""simple docstring"""
MOD_ADLER = 65_521


def adler32(plain_text: str) -> int:
    a = 1
    b = 0
    for plain_chr in plain_text:
        a = (a + ord(plain_chr)) % MOD_ADLER
        b = (b + a) % MOD_ADLER
    return (b << 16) | a
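# --- Added cross-check (not part of the original file) ---
# The result can be compared against the reference implementation in the
# standard library (zlib operates on bytes rather than str):
if __name__ == "__main__":
    import zlib

    text = "Wikipedia"
    assert adler32(text) == zlib.adler32(text.encode("utf-8"))  # both yield 300286872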
| 314 |
"""simple docstring"""
import unittest
import torch
from diffusers import DDIMScheduler, DDPMScheduler, UNet2DModel
from diffusers.training_utils import set_seed
from diffusers.utils.testing_utils import slow
torch.backends.cuda.matmul.allow_tf32 = False


class snake_case_( unittest.TestCase ):
    def get_model_optimizer(self, resolution=3_2):
        set_seed(0)
        model = UNet2DModel(sample_size=resolution, in_channels=3, out_channels=3)
        optimizer = torch.optim.SGD(model.parameters(), lr=0.0_001)
        return model, optimizer

    @slow
    def test_training_step_equality(self):
        device = "cpu"  # ensure full determinism without setting the CUBLAS_WORKSPACE_CONFIG env variable
        ddpm_scheduler = DDPMScheduler(
            num_train_timesteps=1_0_0_0, beta_start=0.0_001, beta_end=0.02, beta_schedule="linear", clip_sample=True, )
        ddim_scheduler = DDIMScheduler(
            num_train_timesteps=1_0_0_0, beta_start=0.0_001, beta_end=0.02, beta_schedule="linear", clip_sample=True, )
        assert ddpm_scheduler.config.num_train_timesteps == ddim_scheduler.config.num_train_timesteps

        # shared batches for DDPM and DDIM
        set_seed(0)
        clean_images = [torch.randn((4, 3, 3_2, 3_2)).clip(-1, 1).to(device) for _ in range(4)]
        noise = [torch.randn((4, 3, 3_2, 3_2)).to(device) for _ in range(4)]
        timesteps = [torch.randint(0, 1_0_0_0, (4,)).long().to(device) for _ in range(4)]

        # train with a DDPM scheduler
        model, optimizer = self.get_model_optimizer(resolution=3_2)
        model.train().to(device)
        for i in range(4):
            optimizer.zero_grad()
            ddpm_noisy_images = ddpm_scheduler.add_noise(clean_images[i], noise[i], timesteps[i])
            ddpm_noise_pred = model(ddpm_noisy_images, timesteps[i]).sample
            loss = torch.nn.functional.mse_loss(ddpm_noise_pred, noise[i])
            loss.backward()
            optimizer.step()
        del model, optimizer

        # recreate the model and optimizer, and retry with DDIM
        model, optimizer = self.get_model_optimizer(resolution=3_2)
        model.train().to(device)
        for i in range(4):
            optimizer.zero_grad()
            ddim_noisy_images = ddim_scheduler.add_noise(clean_images[i], noise[i], timesteps[i])
            ddim_noise_pred = model(ddim_noisy_images, timesteps[i]).sample
            loss = torch.nn.functional.mse_loss(ddim_noise_pred, noise[i])
            loss.backward()
            optimizer.step()
        del model, optimizer

        self.assertTrue(torch.allclose(ddpm_noisy_images, ddim_noisy_images, atol=1E-5))
        self.assertTrue(torch.allclose(ddpm_noise_pred, ddim_noise_pred, atol=1E-5))
| 314 | 1 |
"""simple docstring"""
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"huggingface/time-series-transformer-tourism-monthly": (
"https://huggingface.co/huggingface/time-series-transformer-tourism-monthly/resolve/main/config.json"
),
# See all TimeSeriesTransformer models at https://huggingface.co/models?filter=time_series_transformer
}
class snake_case ( PretrainedConfig ):
    model_type = "time_series_transformer"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
        "num_hidden_layers": "encoder_layers",
    }

    def __init__(
        self,
        prediction_length: Optional[int] = None,
        context_length: Optional[int] = None,
        distribution_output: str = "student_t",
        loss: str = "nll",
        input_size: int = 1,
        lags_sequence: List[int] = [1, 2, 3, 4, 5, 6, 7],
        scaling: Optional[Union[str, bool]] = "mean",
        num_dynamic_real_features: int = 0,
        num_static_categorical_features: int = 0,
        num_static_real_features: int = 0,
        num_time_features: int = 0,
        cardinality: Optional[List[int]] = None,
        embedding_dimension: Optional[List[int]] = None,
        encoder_ffn_dim: int = 3_2,
        decoder_ffn_dim: int = 3_2,
        encoder_attention_heads: int = 2,
        decoder_attention_heads: int = 2,
        encoder_layers: int = 2,
        decoder_layers: int = 2,
        is_encoder_decoder: bool = True,
        activation_function: str = "gelu",
        d_model: int = 6_4,
        dropout: float = 0.1,
        encoder_layerdrop: float = 0.1,
        decoder_layerdrop: float = 0.1,
        attention_dropout: float = 0.1,
        activation_dropout: float = 0.1,
        num_parallel_samples: int = 1_0_0,
        init_std: float = 0.02,
        use_cache=True,
        **kwargs,
    ):
        self.prediction_length = prediction_length
        self.context_length = context_length or prediction_length
        self.distribution_output = distribution_output
        self.loss = loss
        self.input_size = input_size
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence
        self.scaling = scaling
        self.num_dynamic_real_features = num_dynamic_real_features
        self.num_static_real_features = num_static_real_features
        self.num_static_categorical_features = num_static_categorical_features
        if cardinality and num_static_categorical_features > 0:
            if len(cardinality) != num_static_categorical_features:
                raise ValueError(
                    "The cardinality should be a list of the same length as `num_static_categorical_features`")
            self.cardinality = cardinality
        else:
            self.cardinality = [0]
        if embedding_dimension and num_static_categorical_features > 0:
            if len(embedding_dimension) != num_static_categorical_features:
                raise ValueError(
                    "The embedding dimension should be a list of the same length as `num_static_categorical_features`")
            self.embedding_dimension = embedding_dimension
        else:
            self.embedding_dimension = [min(5_0, (cat + 1) // 2) for cat in self.cardinality]
        self.num_parallel_samples = num_parallel_samples
        # Transformer architecture configuration
        self.feature_size = input_size * len(lags_sequence) + self._number_of_features
        self.d_model = d_model
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_attention_heads = decoder_attention_heads
        self.encoder_ffn_dim = encoder_ffn_dim
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.activation_function = activation_function
        self.init_std = init_std
        self.use_cache = use_cache
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def _number_of_features(self) -> int:
        return (
            sum(self.embedding_dimension)
            + self.num_dynamic_real_features
            + self.num_time_features
            + self.num_static_real_features
            + self.input_size * 2  # the log1p(abs(loc)) and log(scale) features
        )
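# --- Added usage sketch (not part of the original file; values are illustrative) ---
# config = snake_case(prediction_length=24, context_length=48, num_time_features=2)
# config.feature_size then reflects input_size * len(lags_sequence) plus the
# static/dynamic dimensions computed by the _number_of_features property above.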
| 217 |
"""simple docstring"""
import pytest
from datasets.splits import SplitDict, SplitInfo
from datasets.utils.py_utils import asdict
@pytest.mark.parametrize(
'split_dict' , [
SplitDict(),
SplitDict({'train': SplitInfo(name='train' , num_bytes=1_3_3_7 , num_examples=4_2 , dataset_name='my_dataset' )} ),
SplitDict({'train': SplitInfo(name='train' , num_bytes=1_3_3_7 , num_examples=4_2 )} ),
SplitDict({'train': SplitInfo()} ),
] , )
def test_split_dict_to_yaml_list(split_dict: SplitDict):
    split_dict_yaml_list = split_dict._to_yaml_list()
    assert len(split_dict_yaml_list) == len(split_dict)
    reloaded = SplitDict._from_yaml_list(split_dict_yaml_list)
    for split_name, split_info in split_dict.items():
        # dataset_name field is deprecated, and is therefore not part of the YAML dump
        split_info.dataset_name = None
        # the split name of split_dict takes over the name of the split info object
        split_info.name = split_name
    assert split_dict == reloaded


@pytest.mark.parametrize(
    'split_info', [SplitInfo(), SplitInfo(dataset_name=None), SplitInfo(dataset_name='my_dataset')] )
def test_split_dict_asdict_has_dataset_name(split_info):
    # For backward compatibility, we need asdict(split_dict) to return split info dictionaries with the
    # "dataset_name" field even if it's deprecated. This way old versions of `datasets` can still reload
    # dataset_infos.json files
    split_dict_asdict = asdict(SplitDict({'train': split_info}))
    assert "dataset_name" in split_dict_asdict["train"]
    assert split_dict_asdict["train"]["dataset_name"] == split_info.dataset_name
'''simple docstring'''
def _modexpt(base: int, exponent: int, modulo_value: int) -> int:
    """Modular exponentiation by squaring."""
    if exponent == 1:
        return base
    if exponent % 2 == 0:
        x = _modexpt(base, exponent // 2, modulo_value) % modulo_value
        return (x * x) % modulo_value
    else:
        return (base * _modexpt(base, exponent - 1, modulo_value)) % modulo_value


def solution(base: int = 1777, height: int = 1855, digits: int = 8) -> int:
    """Last `digits` digits of the hyperexponentiation base↑↑height."""
    result = base
    for _ in range(1, height):
        result = _modexpt(base, result, 10**digits)
    return result
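# --- Added note (not part of the original file) ---
# For a single modular exponentiation the recursive helper is equivalent to
# Python's built-in three-argument pow:
#   _modexpt(3, 20, 1000) == pow(3, 20, 1000)
# The loop in solution() iterates this to compute the tetration 1777↑↑1855
# modulo 10**digits, i.e. only the last `digits` digits are kept at each step.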
if __name__ == "__main__":
print(f'''{solution() = }''')
| 354 |
'''simple docstring'''
import os
import torch
from ..logging import get_logger
from .constants import FSDP_PYTORCH_VERSION, MODEL_NAME, OPTIMIZER_NAME
from .versions import is_torch_version
if is_torch_version('>=', FSDP_PYTORCH_VERSION):
import torch.distributed.checkpoint as dist_cp
from torch.distributed.checkpoint.default_planner import DefaultLoadPlanner, DefaultSavePlanner
from torch.distributed.checkpoint.optimizer import load_sharded_optimizer_state_dict
from torch.distributed.fsdp.fully_sharded_data_parallel import FullyShardedDataParallel as FSDP
from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType
logger = get_logger(__name__)


def save_fsdp_model(fsdp_plugin, accelerator, model, output_dir, model_index=0):
    os.makedirs(output_dir, exist_ok=True)
    with FSDP.state_dict_type(
        model, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config ):
        state_dict = model.state_dict()
        if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
            weights_name = f"{MODEL_NAME}.bin" if model_index == 0 else f"{MODEL_NAME}_{model_index}.bin"
            output_model_file = os.path.join(output_dir, weights_name)
            if accelerator.process_index == 0:
                logger.info(f"Saving model to {output_model_file}")
                torch.save(state_dict, output_model_file)
                logger.info(f"Model saved to {output_model_file}")
        elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT:
            weights_name = (
                f"{MODEL_NAME}_rank{accelerator.process_index}.bin"
                if model_index == 0
                else f"{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin"
            )
            output_model_file = os.path.join(output_dir, weights_name)
            logger.info(f"Saving model to {output_model_file}")
            torch.save(state_dict, output_model_file)
            logger.info(f"Model saved to {output_model_file}")
        elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT:
            ckpt_dir = os.path.join(output_dir, f"{MODEL_NAME}_{model_index}")
            os.makedirs(ckpt_dir, exist_ok=True)
            logger.info(f"Saving model to {ckpt_dir}")
            state_dict = {"model": state_dict}
            dist_cp.save_state_dict(
                state_dict=state_dict, storage_writer=dist_cp.FileSystemWriter(ckpt_dir), planner=DefaultSavePlanner(), )
            logger.info(f"Model saved to {ckpt_dir}")
def load_fsdp_model(fsdp_plugin, accelerator, model, input_dir, model_index=0):
    accelerator.wait_for_everyone()
    with FSDP.state_dict_type(
        model, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config ):
        if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
            if type(model) != FSDP and accelerator.process_index != 0:
                if not fsdp_plugin.sync_module_states:
                    raise ValueError(
                        "Set the `sync_module_states` flag to `True` so that model states are synced across processes when "
                        "initializing FSDP object")
                return
            weights_name = f"{MODEL_NAME}.bin" if model_index == 0 else f"{MODEL_NAME}_{model_index}.bin"
            input_model_file = os.path.join(input_dir, weights_name)
            logger.info(f"Loading model from {input_model_file}")
            state_dict = torch.load(input_model_file)
            logger.info(f"Model loaded from {input_model_file}")
        elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT:
            weights_name = (
                f"{MODEL_NAME}_rank{accelerator.process_index}.bin"
                if model_index == 0
                else f"{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin"
            )
            input_model_file = os.path.join(input_dir, weights_name)
            logger.info(f"Loading model from {input_model_file}")
            state_dict = torch.load(input_model_file)
            logger.info(f"Model loaded from {input_model_file}")
        elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT:
            ckpt_dir = (
                os.path.join(input_dir, f"{MODEL_NAME}_{model_index}")
                if f"{MODEL_NAME}" not in input_dir
                else input_dir
            )
            logger.info(f"Loading model from {ckpt_dir}")
            state_dict = {"model": model.state_dict()}
            dist_cp.load_state_dict(
                state_dict=state_dict, storage_reader=dist_cp.FileSystemReader(ckpt_dir), planner=DefaultLoadPlanner(), )
            state_dict = state_dict["model"]
            logger.info(f"Model loaded from {ckpt_dir}")
        model.load_state_dict(state_dict)
def save_fsdp_optimizer(fsdp_plugin, accelerator, optimizer, model, output_dir, optimizer_index=0):
    os.makedirs(output_dir, exist_ok=True)
    with FSDP.state_dict_type(
        model, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config ):
        optim_state = FSDP.optim_state_dict(model, optimizer)
        if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
            if accelerator.process_index == 0:
                optim_state_name = (
                    f"{OPTIMIZER_NAME}.bin" if optimizer_index == 0 else f"{OPTIMIZER_NAME}_{optimizer_index}.bin"
                )
                output_optimizer_file = os.path.join(output_dir, optim_state_name)
                logger.info(f"Saving Optimizer state to {output_optimizer_file}")
                torch.save(optim_state, output_optimizer_file)
                logger.info(f"Optimizer state saved in {output_optimizer_file}")
        else:
            ckpt_dir = os.path.join(output_dir, f"{OPTIMIZER_NAME}_{optimizer_index}")
            os.makedirs(ckpt_dir, exist_ok=True)
            logger.info(f"Saving Optimizer state to {ckpt_dir}")
            dist_cp.save_state_dict(
                state_dict={"optimizer": optim_state}, storage_writer=dist_cp.FileSystemWriter(ckpt_dir), planner=DefaultSavePlanner(), )
            logger.info(f"Optimizer state saved in {ckpt_dir}")
def load_fsdp_optimizer(fsdp_plugin, accelerator, optimizer, model, input_dir, optimizer_index=0):
    accelerator.wait_for_everyone()
    with FSDP.state_dict_type(
        model, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config ):
        if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
            optim_state = None
            # below check should work but currently it isn't working (mostly pytorch issue),
            # in the meantime disabling it at the cost of excess memory usage
            # if accelerator.process_index == 0 or not fsdp_plugin.optim_state_dict_config.rank0_only:
            optimizer_name = (
                f"{OPTIMIZER_NAME}.bin" if optimizer_index == 0 else f"{OPTIMIZER_NAME}_{optimizer_index}.bin"
            )
            input_optimizer_file = os.path.join(input_dir, optimizer_name)
            logger.info(f"Loading Optimizer state from {input_optimizer_file}")
            optim_state = torch.load(input_optimizer_file)
            logger.info(f"Optimizer state loaded from {input_optimizer_file}")
        else:
            ckpt_dir = (
                os.path.join(input_dir, f"{OPTIMIZER_NAME}_{optimizer_index}")
                if f"{OPTIMIZER_NAME}" not in input_dir
                else input_dir
            )
            logger.info(f"Loading Optimizer from {ckpt_dir}")
            optim_state = load_sharded_optimizer_state_dict(
                model_state_dict=model.state_dict(), optimizer_key="optimizer", storage_reader=dist_cp.FileSystemReader(ckpt_dir), )
            optim_state = optim_state["optimizer"]
            logger.info(f"Optimizer loaded from {ckpt_dir}")
        flattened_osd = FSDP.optim_state_dict_to_load(optim_state, model, optimizer)
        optimizer.load_state_dict(flattened_osd)
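# --- Added usage sketch (not part of the original file; setup is hypothetical) ---
# Typical call sites, assuming an `accelerator` configured with an FSDP plugin
# and a prepared model/optimizer pair:
#
#   plugin = accelerator.state.fsdp_plugin
#   save_fsdp_model(plugin, accelerator, model, "ckpt/")
#   save_fsdp_optimizer(plugin, accelerator, optimizer, model, "ckpt/")
#   ...
#   load_fsdp_model(plugin, accelerator, model, "ckpt/")
#   load_fsdp_optimizer(plugin, accelerator, optimizer, model, "ckpt/")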
| 72 | 0 |
'''simple docstring'''
import os
from typing import Dict, List, Tuple, TypeVar, Union
UpperCamelCase__: List[str] = TypeVar("T")
UpperCamelCase__: Any = Union[List[T], Tuple[T, ...]]
UpperCamelCase__: Dict = Union[T, List[T], Dict[str, T]]
UpperCamelCase__: Optional[Any] = Union[str, bytes, os.PathLike]
| 23 |
'''simple docstring'''
from __future__ import annotations
def find_max(nums: list[int | float], left: int, right: int) -> int | float:
    if len(nums) == 0:
        raise ValueError("find_max() arg is an empty sequence")
    if (
        left >= len(nums)
        or left < -len(nums)
        or right >= len(nums)
        or right < -len(nums)
    ):
        raise IndexError("list index out of range")
    if left == right:
        return nums[left]
    mid = (left + right) >> 1  # the middle
    left_max = find_max(nums, left, mid)  # find max in range[left, mid]
    right_max = find_max(nums, mid + 1, right)  # find max in range[mid + 1, right]
    return left_max if left_max >= right_max else right_max
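# --- Added examples (not part of the original file) ---
# find_max([1, 3, 2], 0, 2)     -> 3
# find_max([-5, -1, -7], 0, 2)  -> -1
# find_max([10], 0, 0)          -> 10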
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
| 23 | 1 |
import importlib
import os
import sys
# This is required to make the module import works (when the python process is running from the root of the repo)
sys.path.append('.')
def get_module_path(test_file):
    components = test_file.split(os.path.sep)
    if components[0:2] != ["tests", "models"]:
        raise ValueError(
            "`test_file` should start with `tests/models/` (with `/` being the OS specific path separator). Got "
            f"{test_file} instead.")
    test_fn = components[-1]
    if not test_fn.endswith("py"):
        raise ValueError(f"`test_file` should be a python file. Got {test_fn} instead.")
    if not test_fn.startswith("test_modeling_"):
        raise ValueError(
            f"`test_file` should point to a file name of the form `test_modeling_*.py`. Got {test_fn} instead.")
    components = components[:-1] + [test_fn.replace(".py", "")]
    test_module_path = ".".join(components)
    return test_module_path


def get_test_module(test_file):
    test_module_path = get_module_path(test_file)
    test_module = importlib.import_module(test_module_path)
    return test_module


def get_tester_classes(test_file):
    tester_classes = []
    test_module = get_test_module(test_file)
    for attr in dir(test_module):
        if attr.endswith("ModelTester"):
            tester_classes.append(getattr(test_module, attr))
    # sort with class names
    return sorted(tester_classes, key=lambda x: x.__name__)


def get_test_classes(test_file):
    test_classes = []
    test_module = get_test_module(test_file)
    for attr in dir(test_module):
        test_class = getattr(test_module, attr)
        # (TF/Flax)ModelTesterMixin is also an attribute in specific model test module. Let's exclude them by checking
        # `all_model_classes` is not empty (which also excludes other special classes).
        model_classes = getattr(test_class, "all_model_classes", [])
        if len(model_classes) > 0:
            test_classes.append(test_class)
    # sort with class names
    return sorted(test_classes, key=lambda x: x.__name__)


def get_model_classes(test_file):
    test_classes = get_test_classes(test_file)
    model_classes = set()
    for test_class in test_classes:
        model_classes.update(test_class.all_model_classes)
    # sort with class names
    return sorted(model_classes, key=lambda x: x.__name__)


def get_model_tester_from_test_class(test_class):
    test = test_class()
    if hasattr(test, "setUp"):
        test.setUp()
    model_tester = None
    if hasattr(test, "model_tester"):
        # `(TF/Flax)ModelTesterMixin` has this attribute default to `None`. Let's skip this case.
        if test.model_tester is not None:
            model_tester = test.model_tester.__class__
    return model_tester


def get_test_classes_for_model(test_file, model_class):
    test_classes = get_test_classes(test_file)
    target_test_classes = []
    for test_class in test_classes:
        if model_class in test_class.all_model_classes:
            target_test_classes.append(test_class)
    # sort with class names
    return sorted(target_test_classes, key=lambda x: x.__name__)


def get_tester_classes_for_model(test_file, model_class):
    test_classes = get_test_classes_for_model(test_file, model_class)
    tester_classes = []
    for test_class in test_classes:
        tester_class = get_model_tester_from_test_class(test_class)
        if tester_class is not None:
            tester_classes.append(tester_class)
    # sort with class names
    return sorted(tester_classes, key=lambda x: x.__name__)


def get_test_to_tester_mapping(test_file):
    test_classes = get_test_classes(test_file)
    test_tester_mapping = {test_class: get_model_tester_from_test_class(test_class) for test_class in test_classes}
    return test_tester_mapping


def get_model_to_test_mapping(test_file):
    model_classes = get_model_classes(test_file)
    model_test_mapping = {
        model_class: get_test_classes_for_model(test_file, model_class) for model_class in model_classes
    }
    return model_test_mapping


def get_model_to_tester_mapping(test_file):
    model_classes = get_model_classes(test_file)
    model_to_tester_mapping = {
        model_class: get_tester_classes_for_model(test_file, model_class) for model_class in model_classes
    }
    return model_to_tester_mapping


def to_json(o):
    if isinstance(o, str):
        return o
    elif isinstance(o, type):
        return o.__name__
    elif isinstance(o, (list, tuple)):
        return [to_json(x) for x in o]
    elif isinstance(o, dict):
        return {to_json(k): to_json(v) for k, v in o.items()}
    else:
        return o
| 356 |
# Algorithm for the pigeonhole sorting
def pigeonhole_sort(a):
    min_val = min(a)  # min() finds the minimum value
    max_val = max(a)  # max() finds the maximum value
    size = max_val - min_val + 1  # size is difference of max and min values plus one
    # list of pigeonholes of size equal to the variable size
    holes = [0] * size
    # Populate the pigeonholes.
    for x in a:
        assert isinstance(x, int), "integers only please"
        holes[x - min_val] += 1
    # Putting the elements back into the array in an order.
    i = 0
    for count in range(size):
        while holes[count] > 0:
            holes[count] -= 1
            a[i] = count + min_val
            i += 1


def main():
    a = [8, 3, 2, 7, 4, 6, 8]
    pigeonhole_sort(a)
    print("Sorted order is:", " ".join(str(x) for x in a))


if __name__ == "__main__":
    main()
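# --- Added note and example (not part of the original file) ---
# Pigeonhole sort runs in O(n + range) time and O(range) extra space, so it is
# only attractive when the value range is close to the number of elements:
#
#   data = [5, 3, 5, 4, 3]
#   pigeonhole_sort(data)
#   assert data == [3, 3, 4, 5, 5]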
| 238 | 0 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roberta import RobertaTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_file''': '''tokenizer.json'''}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''roberta-base''': '''https://huggingface.co/roberta-base/resolve/main/vocab.json''',
'''roberta-large''': '''https://huggingface.co/roberta-large/resolve/main/vocab.json''',
'''roberta-large-mnli''': '''https://huggingface.co/roberta-large-mnli/resolve/main/vocab.json''',
'''distilroberta-base''': '''https://huggingface.co/distilroberta-base/resolve/main/vocab.json''',
'''roberta-base-openai-detector''': '''https://huggingface.co/roberta-base-openai-detector/resolve/main/vocab.json''',
'''roberta-large-openai-detector''': (
'''https://huggingface.co/roberta-large-openai-detector/resolve/main/vocab.json'''
),
},
'''merges_file''': {
'''roberta-base''': '''https://huggingface.co/roberta-base/resolve/main/merges.txt''',
'''roberta-large''': '''https://huggingface.co/roberta-large/resolve/main/merges.txt''',
'''roberta-large-mnli''': '''https://huggingface.co/roberta-large-mnli/resolve/main/merges.txt''',
'''distilroberta-base''': '''https://huggingface.co/distilroberta-base/resolve/main/merges.txt''',
'''roberta-base-openai-detector''': '''https://huggingface.co/roberta-base-openai-detector/resolve/main/merges.txt''',
'''roberta-large-openai-detector''': (
'''https://huggingface.co/roberta-large-openai-detector/resolve/main/merges.txt'''
),
},
'''tokenizer_file''': {
'''roberta-base''': '''https://huggingface.co/roberta-base/resolve/main/tokenizer.json''',
'''roberta-large''': '''https://huggingface.co/roberta-large/resolve/main/tokenizer.json''',
'''roberta-large-mnli''': '''https://huggingface.co/roberta-large-mnli/resolve/main/tokenizer.json''',
'''distilroberta-base''': '''https://huggingface.co/distilroberta-base/resolve/main/tokenizer.json''',
'''roberta-base-openai-detector''': (
'''https://huggingface.co/roberta-base-openai-detector/resolve/main/tokenizer.json'''
),
'''roberta-large-openai-detector''': (
'''https://huggingface.co/roberta-large-openai-detector/resolve/main/tokenizer.json'''
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''roberta-base''': 512,
'''roberta-large''': 512,
'''roberta-large-mnli''': 512,
'''distilroberta-base''': 512,
'''roberta-base-openai-detector''': 512,
'''roberta-large-openai-detector''': 512,
}
class _lowercase ( PreTrainedTokenizerFast ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = RobertaTokenizer
    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        trim_offsets=True,
        **kwargs,
    ):
        super().__init__(
            vocab_file, merges_file, tokenizer_file=tokenizer_file, errors=errors, bos_token=bos_token, eos_token=eos_token, sep_token=sep_token, cls_token=cls_token, unk_token=unk_token, pad_token=pad_token, mask_token=mask_token, add_prefix_space=add_prefix_space, trim_offsets=trim_offsets, **kwargs, )
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)
        self.add_prefix_space = add_prefix_space
        tokenizer_component = "post_processor"
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())
            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state["sep"] = tuple(state["sep"])
            if "cls" in state:
                state["cls"] = tuple(state["cls"])
            changes_to_apply = False
            if state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
                state["add_prefix_space"] = add_prefix_space
                changes_to_apply = True
            if state.get("trim_offsets", trim_offsets) != trim_offsets:
                state["trim_offsets"] = trim_offsets
                changes_to_apply = True
            if changes_to_apply:
                component_class = getattr(processors, state.pop("type"))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)
    @property
    def mask_token(self):
        if self._mask_token is None:
            if self.verbose:
                logger.error("Using mask_token, but it is not set yet.")
            return None
        return str(self._mask_token)

    @mask_token.setter
    def mask_token(self, value):
        # Mask token behaves like a normal word, i.e. include the space before it
        value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = value
    def _batch_encode_plus(self, *args, **kwargs):
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._batch_encode_plus(*args, **kwargs)
    def _encode_plus(self, *args, **kwargs):
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._encode_plus(*args, **kwargs)
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None):
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output
        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]
    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
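# --- Added usage sketch (not part of the original file) ---
# Via the upstream class name RobertaTokenizerFast, this tokenizer is used as:
#   tokenizer = RobertaTokenizerFast.from_pretrained("roberta-base")
#   enc = tokenizer("Hello world")
#   enc["input_ids"] starts with tokenizer.bos_token_id and ends with tokenizer.eos_token_id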
| 41 |
"""simple docstring"""
import logging
import math
import os
from dataclasses import dataclass, field
from glob import glob
from typing import Optional
from torch.utils.data import ConcatDataset
import transformers
from transformers import (
CONFIG_MAPPING,
MODEL_WITH_LM_HEAD_MAPPING,
AutoConfig,
AutoModelWithLMHead,
AutoTokenizer,
DataCollatorForLanguageModeling,
DataCollatorForPermutationLanguageModeling,
DataCollatorForWholeWordMask,
HfArgumentParser,
LineByLineTextDataset,
LineByLineWithRefDataset,
PreTrainedTokenizer,
TextDataset,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
logger = logging.getLogger(__name__)
MODEL_CONFIG_CLASSES = list(MODEL_WITH_LM_HEAD_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)


@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune, or train from scratch.
    """

    model_name_or_path: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "The model checkpoint for weights initialization. Leave None if you want to train a model from"
                " scratch."
            )
        },
    )
    model_type: Optional[str] = field(
        default=None,
        metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(MODEL_TYPES)},
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )


@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    train_data_file: Optional[str] = field(
        default=None, metadata={"help": "The input training data file (a text file)."}
    )
    train_data_files: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "The input training data files (multiple files in glob format). "
                "Very often splitting large files to smaller files can prevent tokenizer going out of memory"
            )
        },
    )
    eval_data_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."},
    )
    train_ref_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input train ref data file for whole word mask in Chinese."},
    )
    eval_ref_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input eval ref data file for whole word mask in Chinese."},
    )
    line_by_line: bool = field(
        default=False,
        metadata={"help": "Whether distinct lines of text in the dataset are to be handled as distinct sequences."},
    )
    mlm: bool = field(
        default=False, metadata={"help": "Train with masked-language modeling loss instead of language modeling."}
    )
    whole_word_mask: bool = field(default=False, metadata={"help": "Whether ot not to use whole word mask."})
    mlm_probability: float = field(
        default=0.15, metadata={"help": "Ratio of tokens to mask for masked language modeling loss"}
    )
    plm_probability: float = field(
        default=1 / 6,
        metadata={
            "help": (
                "Ratio of length of a span of masked tokens to surrounding context length for permutation language"
                " modeling."
            )
        },
    )
    max_span_length: int = field(
        default=5, metadata={"help": "Maximum length of a span of masked tokens for permutation language modeling."}
    )
    block_size: int = field(
        default=-1,
        metadata={
            "help": (
                "Optional input sequence length after tokenization."
                "The training dataset will be truncated in block of this size for training."
                "Default to the model max input length for single sentence inputs (take into account special tokens)."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
def get_dataset(
    args: DataTrainingArguments,
    tokenizer: PreTrainedTokenizer,
    evaluate: bool = False,
    cache_dir: Optional[str] = None,
):
    def _dataset(file_path, ref_path=None):
        if args.line_by_line:
            if ref_path is not None:
                if not args.whole_word_mask or not args.mlm:
                    raise ValueError("You need to set world whole masking and mlm to True for Chinese Whole Word Mask")
                return LineByLineWithRefDataset(
                    tokenizer=tokenizer, file_path=file_path, block_size=args.block_size, ref_path=ref_path, )
            return LineByLineTextDataset(tokenizer=tokenizer, file_path=file_path, block_size=args.block_size)
        else:
            return TextDataset(
                tokenizer=tokenizer, file_path=file_path, block_size=args.block_size, overwrite_cache=args.overwrite_cache, cache_dir=cache_dir, )

    if evaluate:
        return _dataset(args.eval_data_file, args.eval_ref_file)
    elif args.train_data_files:
        return ConcatDataset([_dataset(f) for f in glob(args.train_data_files)])
    else:
        return _dataset(args.train_data_file, args.train_ref_file)
def main():
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    if data_args.eval_data_file is None and training_args.do_eval:
        raise ValueError(
            "Cannot do evaluation without an evaluation data file. Either supply a file to --eval_data_file "
            "or remove the --do_eval argument.")
    if (
        os.path.exists(training_args.output_dir)
        and os.listdir(training_args.output_dir)
        and training_args.do_train
        and not training_args.overwrite_output_dir
    ):
        raise ValueError(
            f"Output directory ({training_args.output_dir}) already exists and is not empty. Use"
            " --overwrite_output_dir to overcome.")

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN, )
    logger.warning(
        "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s", training_args.local_rank, training_args.device, training_args.n_gpu, bool(training_args.local_rank != -1), training_args.fp16, )
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank):
        transformers.utils.logging.set_verbosity_info()
        transformers.utils.logging.enable_default_handler()
        transformers.utils.logging.enable_explicit_format()
    logger.info("Training/evaluation parameters %s", training_args)

    # Set seed
    set_seed(training_args.seed)

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    if model_args.config_name:
        config = AutoConfig.from_pretrained(model_args.config_name, cache_dir=model_args.cache_dir)
    elif model_args.model_name_or_path:
        config = AutoConfig.from_pretrained(model_args.model_name_or_path, cache_dir=model_args.cache_dir)
    else:
        config = CONFIG_MAPPING[model_args.model_type]()
        logger.warning("You are instantiating a new config instance from scratch.")

    if model_args.tokenizer_name:
        tokenizer = AutoTokenizer.from_pretrained(model_args.tokenizer_name, cache_dir=model_args.cache_dir)
    elif model_args.model_name_or_path:
        tokenizer = AutoTokenizer.from_pretrained(model_args.model_name_or_path, cache_dir=model_args.cache_dir)
    else:
        raise ValueError(
            "You are instantiating a new tokenizer from scratch. This is not supported, but you can do it from another"
            " script, save it,and load it from here, using --tokenizer_name")

    if model_args.model_name_or_path:
        model = AutoModelWithLMHead.from_pretrained(
            model_args.model_name_or_path, from_tf=bool(".ckpt" in model_args.model_name_or_path), config=config, cache_dir=model_args.cache_dir, )
    else:
        logger.info("Training new model from scratch")
        model = AutoModelWithLMHead.from_config(config)

    model.resize_token_embeddings(len(tokenizer))

    if config.model_type in ["bert", "roberta", "distilbert", "camembert"] and not data_args.mlm:
        raise ValueError(
            "BERT and RoBERTa-like models do not have LM heads but masked LM heads. They must be run using the"
            "--mlm flag (masked language modeling).")

    if data_args.block_size <= 0:
        data_args.block_size = tokenizer.max_len
        # Our input block size will be the max possible for the model
    else:
        data_args.block_size = min(data_args.block_size, tokenizer.max_len)

    # Get datasets
    train_dataset = (
        get_dataset(data_args, tokenizer=tokenizer, cache_dir=model_args.cache_dir) if training_args.do_train else None
    )
    eval_dataset = (
        get_dataset(data_args, tokenizer=tokenizer, evaluate=True, cache_dir=model_args.cache_dir)
        if training_args.do_eval
        else None
    )
    if config.model_type == "xlnet":
        data_collator = DataCollatorForPermutationLanguageModeling(
            tokenizer=tokenizer, plm_probability=data_args.plm_probability, max_span_length=data_args.max_span_length, )
    else:
        if data_args.mlm and data_args.whole_word_mask:
            data_collator = DataCollatorForWholeWordMask(
                tokenizer=tokenizer, mlm_probability=data_args.mlm_probability )
        else:
            data_collator = DataCollatorForLanguageModeling(
                tokenizer=tokenizer, mlm=data_args.mlm, mlm_probability=data_args.mlm_probability )

    # Initialize our Trainer
    trainer = Trainer(
        model=model, args=training_args, data_collator=data_collator, train_dataset=train_dataset, eval_dataset=eval_dataset, prediction_loss_only=True, )

    # Training
    if training_args.do_train:
        model_path = (
            model_args.model_name_or_path
            if model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path)
            else None
        )
        trainer.train(model_path=model_path)
        trainer.save_model()
        # For convenience, we also re-save the tokenizer to the same directory,
        # so that you can share your model easily on huggingface.co/models =)
        if trainer.is_world_master():
            tokenizer.save_pretrained(training_args.output_dir)

    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")
        eval_output = trainer.evaluate()
        perplexity = math.exp(eval_output["eval_loss"])
        result = {"perplexity": perplexity}
        output_eval_file = os.path.join(training_args.output_dir, "eval_results_lm.txt")
        if trainer.is_world_master():
            with open(output_eval_file, "w") as writer:
                logger.info("***** Eval results *****")
                for key in sorted(result.keys()):
                    logger.info("  %s = %s", key, str(result[key]))
                    writer.write("%s = %s\n" % (key, str(result[key])))
        results.update(result)
    return results


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
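# --- Added usage example (not part of the original script; paths/flags are illustrative) ---
# python run_language_modeling.py \
#     --model_name_or_path roberta-base \
#     --train_data_file ./data/train.txt \
#     --eval_data_file ./data/valid.txt \
#     --mlm --do_train --do_eval \
#     --output_dir ./lm_output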
| 102 | 0 |
import warnings
from ...utils import logging
from .image_processing_yolos import YolosImageProcessor
lowerCAmelCase = logging.get_logger(__name__)
class _a ( YolosImageProcessor ):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class YolosFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use YolosImageProcessor instead.", FutureWarning, )
        super().__init__(*args, **kwargs)
| 93 |
import logging
from transformers import PretrainedConfig
lowerCAmelCase = logging.getLogger(__name__)
lowerCAmelCase = {
'bertabs-finetuned-cnndm': 'https://huggingface.co/remi/bertabs-finetuned-cnndm-extractive-abstractive-summarization/resolve/main/config.json',
}
class _a ( PretrainedConfig ):
    model_type = "bertabs"

    def __init__(
        self, vocab_size=30_522, max_pos=512, enc_layers=6, enc_hidden_size=512, enc_heads=8, enc_ff_size=512, enc_dropout=0.2, dec_layers=6, dec_hidden_size=768, dec_heads=8, dec_ff_size=2_048, dec_dropout=0.2, **kwargs, ):
        super().__init__(**kwargs)
        self.vocab_size = vocab_size
        self.max_pos = max_pos
        self.enc_layers = enc_layers
        self.enc_hidden_size = enc_hidden_size
        self.enc_heads = enc_heads
        self.enc_ff_size = enc_ff_size
        self.enc_dropout = enc_dropout
        self.dec_layers = dec_layers
        self.dec_hidden_size = dec_hidden_size
        self.dec_heads = dec_heads
        self.dec_ff_size = dec_ff_size
        self.dec_dropout = dec_dropout
| 93 | 1 |
import collections
import gzip
import os
import urllib
import numpy
from tensorflow.python.framework import dtypes, random_seed
from tensorflow.python.platform import gfile
from tensorflow.python.util.deprecation import deprecated
_Datasets = collections.namedtuple("_Datasets", ["train", "validation", "test"])

# CVDF mirror of http://yann.lecun.com/exdb/mnist/
DEFAULT_SOURCE_URL = "https://storage.googleapis.com/cvdf-datasets/mnist/"
def _read32(bytestream):
    dt = numpy.dtype(numpy.uint32).newbyteorder(">")
    return numpy.frombuffer(bytestream.read(4), dtype=dt)[0]


@deprecated(None, "Please use tf.data to implement this functionality.")
def _extract_images(f):
    """Extract the images into a 4D uint8 numpy array [index, y, x, depth]."""
    print("Extracting", f.name)
    with gzip.GzipFile(fileobj=f) as bytestream:
        magic = _read32(bytestream)
        if magic != 2051:
            raise ValueError(
                "Invalid magic number %d in MNIST image file: %s" % (magic, f.name))
        num_images = _read32(bytestream)
        rows = _read32(bytestream)
        cols = _read32(bytestream)
        buf = bytestream.read(rows * cols * num_images)
        data = numpy.frombuffer(buf, dtype=numpy.uint8)
        data = data.reshape(num_images, rows, cols, 1)
        return data


@deprecated(None, "Please use tf.one_hot on tensors.")
def _dense_to_one_hot(labels_dense, num_classes):
    """Convert class labels from scalars to one-hot vectors."""
    num_labels = labels_dense.shape[0]
    index_offset = numpy.arange(num_labels) * num_classes
    labels_one_hot = numpy.zeros((num_labels, num_classes))
    labels_one_hot.flat[index_offset + labels_dense.ravel()] = 1
    return labels_one_hot


@deprecated(None, "Please use tf.data to implement this functionality.")
def _extract_labels(f, one_hot=False, num_classes=10):
    """Extract the labels into a 1D uint8 numpy array [index]."""
    print("Extracting", f.name)
    with gzip.GzipFile(fileobj=f) as bytestream:
        magic = _read32(bytestream)
        if magic != 2049:
            raise ValueError(
                "Invalid magic number %d in MNIST label file: %s" % (magic, f.name))
        num_items = _read32(bytestream)
        buf = bytestream.read(num_items)
        labels = numpy.frombuffer(buf, dtype=numpy.uint8)
        if one_hot:
            return _dense_to_one_hot(labels, num_classes)
        return labels
class _DataSet:
    """Container class for a dataset (deprecated)."""

    @deprecated(
        None, "Please use alternatives such as official/mnist/_DataSet.py"
        " from tensorflow/models.", )
    def __init__(
        self, images, labels, fake_data=False, one_hot=False, dtype=dtypes.float32, reshape=True, seed=None, ):
        seed1, seed2 = random_seed.get_seed(seed)
        # If op level seed is not set, use whatever graph level seed is returned
        numpy.random.seed(seed1 if seed is None else seed2)
        dtype = dtypes.as_dtype(dtype).base_dtype
        if dtype not in (dtypes.uint8, dtypes.float32):
            raise TypeError("Invalid image dtype %r, expected uint8 or float32" % dtype)
        if fake_data:
            self._num_examples = 1_0000
            self.one_hot = one_hot
        else:
            assert (
                images.shape[0] == labels.shape[0]
            ), f"images.shape: {images.shape} labels.shape: {labels.shape}"
            self._num_examples = images.shape[0]
            # Convert shape from [num examples, rows, columns, depth]
            # to [num examples, rows*columns] (assuming depth == 1)
            if reshape:
                assert images.shape[3] == 1
                images = images.reshape(
                    images.shape[0], images.shape[1] * images.shape[2])
            if dtype == dtypes.float32:
                # Convert from [0, 255] -> [0.0, 1.0].
                images = images.astype(numpy.float32)
                images = numpy.multiply(images, 1.0 / 255.0)
        self._images = images
        self._labels = labels
        self._epochs_completed = 0
        self._index_in_epoch = 0

    @property
    def images(self):
        return self._images

    @property
    def labels(self):
        return self._labels

    @property
    def num_examples(self):
        return self._num_examples

    @property
    def epochs_completed(self):
        return self._epochs_completed

    def next_batch(self, batch_size, fake_data=False, shuffle=True):
        """Return the next `batch_size` examples from this data set."""
        if fake_data:
            fake_image = [1] * 784
            fake_label = [1] + [0] * 9 if self.one_hot else 0
            return (
                [fake_image for _ in range(batch_size)],
                [fake_label for _ in range(batch_size)],
            )
        start = self._index_in_epoch
        # Shuffle for the first epoch
        if self._epochs_completed == 0 and start == 0 and shuffle:
            perm0 = numpy.arange(self._num_examples)
            numpy.random.shuffle(perm0)
            self._images = self.images[perm0]
            self._labels = self.labels[perm0]
        # Go to the next epoch
        if start + batch_size > self._num_examples:
            # Finished epoch
            self._epochs_completed += 1
            # Get the rest examples in this epoch
            rest_num_examples = self._num_examples - start
            images_rest_part = self._images[start : self._num_examples]
            labels_rest_part = self._labels[start : self._num_examples]
            # Shuffle the data
            if shuffle:
                perm = numpy.arange(self._num_examples)
                numpy.random.shuffle(perm)
                self._images = self.images[perm]
                self._labels = self.labels[perm]
            # Start next epoch
            start = 0
            self._index_in_epoch = batch_size - rest_num_examples
            end = self._index_in_epoch
            images_new_part = self._images[start:end]
            labels_new_part = self._labels[start:end]
            return (
                numpy.concatenate((images_rest_part, images_new_part), axis=0),
                numpy.concatenate((labels_rest_part, labels_new_part), axis=0),
            )
        else:
            self._index_in_epoch += batch_size
            end = self._index_in_epoch
            return self._images[start:end], self._labels[start:end]
@deprecated(None, "Please write your own downloading logic.")
def _maybe_download(filename, work_directory, source_url):
    """Download the data from `source_url`, unless it's already here."""
    if not gfile.Exists(work_directory):
        gfile.MakeDirs(work_directory)
    filepath = os.path.join(work_directory, filename)
    if not gfile.Exists(filepath):
        urllib.request.urlretrieve(source_url, filepath)  # noqa: S310
        with gfile.GFile(filepath) as f:
            size = f.size()
        print("Successfully downloaded", filename, size, "bytes.")
    return filepath
@deprecated(
    None, "Please use alternatives such as:" " tensorflow_datasets.load('mnist')")
def read_data_sets(
    train_dir, fake_data=False, one_hot=False, dtype=dtypes.float32, reshape=True, validation_size=50_00, seed=None, source_url=DEFAULT_SOURCE_URL, ):
    if fake_data:

        def fake():
            return _DataSet(
                [], [], fake_data=True, one_hot=one_hot, dtype=dtype, seed=seed)

        train = fake()
        validation = fake()
        test = fake()
        return _Datasets(train=train, validation=validation, test=test)

    if not source_url:  # empty string check
        source_url = DEFAULT_SOURCE_URL

    train_images_file = "train-images-idx3-ubyte.gz"
    train_labels_file = "train-labels-idx1-ubyte.gz"
    test_images_file = "t10k-images-idx3-ubyte.gz"
    test_labels_file = "t10k-labels-idx1-ubyte.gz"

    local_file = _maybe_download(
        train_images_file, train_dir, source_url + train_images_file)
    with gfile.Open(local_file, "rb") as f:
        train_images = _extract_images(f)

    local_file = _maybe_download(
        train_labels_file, train_dir, source_url + train_labels_file)
    with gfile.Open(local_file, "rb") as f:
        train_labels = _extract_labels(f, one_hot=one_hot)

    local_file = _maybe_download(
        test_images_file, train_dir, source_url + test_images_file)
    with gfile.Open(local_file, "rb") as f:
        test_images = _extract_images(f)

    local_file = _maybe_download(
        test_labels_file, train_dir, source_url + test_labels_file)
    with gfile.Open(local_file, "rb") as f:
        test_labels = _extract_labels(f, one_hot=one_hot)

    if not 0 <= validation_size <= len(train_images):
        msg = (
            "Validation size should be between 0 and "
            f"{len(train_images)}. Received: {validation_size}."
        )
        raise ValueError(msg)

    validation_images = train_images[:validation_size]
    validation_labels = train_labels[:validation_size]
    train_images = train_images[validation_size:]
    train_labels = train_labels[validation_size:]

    options = {"dtype": dtype, "reshape": reshape, "seed": seed}
    train = _DataSet(train_images, train_labels, **options)
    validation = _DataSet(validation_images, validation_labels, **options)
    test = _DataSet(test_images, test_labels, **options)
    return _Datasets(train=train, validation=validation, test=test)
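# --- Added usage sketch (not part of the original file; directory is illustrative) ---
# data = read_data_sets("/tmp/mnist_data", one_hot=True)
# batch_images, batch_labels = data.train.next_batch(100)
# batch_images.shape == (100, 784); batch_labels.shape == (100, 10)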
| 314 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_SCREAMING_SNAKE_CASE : str = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE : Union[str, Any] = {
'''unc-nlp/lxmert-base-uncased''': '''https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/config.json''',
}
class UpperCAmelCase__ ( PretrainedConfig ):
    model_type = "lxmert"
    attribute_map = {}

    def __init__(
        self,
        vocab_size=3_0522,
        hidden_size=768,
        num_attention_heads=12,
        num_qa_labels=9500,
        num_object_labels=1600,
        num_attr_labels=400,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        l_layers=9,
        x_layers=5,
        r_layers=5,
        visual_feat_dim=2048,
        visual_pos_dim=4,
        visual_loss_normalizer=6.67,
        task_matched=True,
        task_mask_lm=True,
        task_obj_predict=True,
        task_qa=True,
        visual_obj_loss=True,
        visual_attr_loss=True,
        visual_feat_loss=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.num_qa_labels = num_qa_labels
        self.num_object_labels = num_object_labels
        self.num_attr_labels = num_attr_labels
        self.l_layers = l_layers
        self.x_layers = x_layers
        self.r_layers = r_layers
        self.visual_feat_dim = visual_feat_dim
        self.visual_pos_dim = visual_pos_dim
        self.visual_loss_normalizer = visual_loss_normalizer
        self.task_matched = task_matched
        self.task_mask_lm = task_mask_lm
        self.task_obj_predict = task_obj_predict
        self.task_qa = task_qa
        self.visual_obj_loss = visual_obj_loss
        self.visual_attr_loss = visual_attr_loss
        self.visual_feat_loss = visual_feat_loss
        self.num_hidden_layers = {"vision": r_layers, "cross_encoder": x_layers, "language": l_layers}
        super().__init__(**kwargs)
| 314 | 1 |
from ..utils import DummyObject, requires_backends
class __A ( metaclass=DummyObject ):
    _backends = ["transformers", "torch", "note_seq"]
def __init__( self , *UpperCAmelCase_ , **UpperCAmelCase_ ):
requires_backends(self , ["""transformers""", """torch""", """note_seq"""] )
@classmethod
def _snake_case ( cls , *UpperCAmelCase_ , **UpperCAmelCase_ ):
requires_backends(cls , ["""transformers""", """torch""", """note_seq"""] )
@classmethod
def _snake_case ( cls , *UpperCAmelCase_ , **UpperCAmelCase_ ):
requires_backends(cls , ["""transformers""", """torch""", """note_seq"""] )
| 363 |
import json
import os
import unittest
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES, BioGptTokenizer
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class __A ( a , unittest.TestCase ):
__A = BioGptTokenizer
__A = False
def _snake_case ( self ):
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
lowerCamelCase =[
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""w</w>""",
"""r</w>""",
"""t</w>""",
"""lo""",
"""low""",
"""er</w>""",
"""low</w>""",
"""lowest</w>""",
"""newer</w>""",
"""wider</w>""",
"""<unk>""",
]
lowerCamelCase =dict(zip(UpperCAmelCase_ , range(len(UpperCAmelCase_ ) ) ) )
lowerCamelCase =["""l o 123""", """lo w 1456""", """e r</w> 1789""", """"""]
lowerCamelCase =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
lowerCamelCase =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] )
with open(self.vocab_file , """w""" ) as fp:
fp.write(json.dumps(UpperCAmelCase_ ) )
with open(self.merges_file , """w""" ) as fp:
fp.write("""\n""".join(UpperCAmelCase_ ) )
def _snake_case ( self , UpperCAmelCase_ ):
lowerCamelCase ="""lower newer"""
lowerCamelCase ="""lower newer"""
return input_text, output_text
def _snake_case ( self ):
lowerCamelCase =BioGptTokenizer(self.vocab_file , self.merges_file )
lowerCamelCase ="""lower"""
lowerCamelCase =["""low""", """er</w>"""]
lowerCamelCase =tokenizer.tokenize(UpperCAmelCase_ )
self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_ )
lowerCamelCase =tokens + ["""<unk>"""]
lowerCamelCase =[14, 15, 20]
self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCAmelCase_ ) , UpperCAmelCase_ )
@slow
def _snake_case ( self ):
lowerCamelCase =BioGptTokenizer.from_pretrained("""microsoft/biogpt""" )
lowerCamelCase =tokenizer.encode("""sequence builders""" , add_special_tokens=UpperCAmelCase_ )
lowerCamelCase =tokenizer.encode("""multi-sequence build""" , add_special_tokens=UpperCAmelCase_ )
lowerCamelCase =tokenizer.build_inputs_with_special_tokens(UpperCAmelCase_ )
lowerCamelCase =tokenizer.build_inputs_with_special_tokens(UpperCAmelCase_ , UpperCAmelCase_ )
self.assertTrue(encoded_sentence == [2] + text )
self.assertTrue(encoded_pair == [2] + text + [2] + text_a )
| 262 | 0 |
'''simple docstring'''
import inspect
import unittest
from transformers import MobileViTVaConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation, MobileViTVaModel
from transformers.models.mobilevitva.modeling_mobilevitva import (
MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST,
make_divisible,
)
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class lowerCamelCase_ ( _lowercase ):
def lowercase_ ( self : List[Any] ):
'''simple docstring'''
UpperCAmelCase__ : int = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(__lowerCAmelCase , '''width_multiplier''' ) )
class lowerCamelCase_ :
def __init__( self : List[str] , _A : Optional[Any] , _A : str=13 , _A : List[Any]=64 , _A : int=2 , _A : Dict=3 , _A : int="swish" , _A : List[Any]=3 , _A : List[Any]=32 , _A : Tuple=0.1 , _A : Any=0.0_2 , _A : int=True , _A : Any=True , _A : Tuple=10 , _A : List[str]=None , _A : Tuple=0.2_5 , _A : Union[str, Any]=0.0 , _A : List[str]=0.0 , ):
'''simple docstring'''
UpperCAmelCase__ : List[str] = parent
UpperCAmelCase__ : Tuple = batch_size
UpperCAmelCase__ : Optional[int] = image_size
UpperCAmelCase__ : Union[str, Any] = patch_size
UpperCAmelCase__ : Optional[int] = num_channels
UpperCAmelCase__ : Union[str, Any] = make_divisible(512 * width_multiplier , divisor=8 )
UpperCAmelCase__ : Union[str, Any] = hidden_act
UpperCAmelCase__ : Dict = conv_kernel_size
UpperCAmelCase__ : List[Any] = output_stride
UpperCAmelCase__ : Any = classifier_dropout_prob
UpperCAmelCase__ : Tuple = use_labels
UpperCAmelCase__ : List[str] = is_training
UpperCAmelCase__ : Optional[int] = num_labels
UpperCAmelCase__ : str = initializer_range
UpperCAmelCase__ : List[Any] = scope
UpperCAmelCase__ : Any = width_multiplier
UpperCAmelCase__ : int = ffn_dropout
UpperCAmelCase__ : Union[str, Any] = attn_dropout
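    # Worked example for the hidden size above (hedged, using the defaults):
    # width_multiplier=0.25 gives 512 * 0.25 = 128.0, and make_divisible
    # rounds to the nearest multiple of divisor=8, so last_hidden_size == 128.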
def lowercase_ ( self : Any ):
'''simple docstring'''
UpperCAmelCase__ : Tuple = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCAmelCase__ : Optional[int] = None
UpperCAmelCase__ : Union[str, Any] = None
if self.use_labels:
UpperCAmelCase__ : List[Any] = ids_tensor([self.batch_size] , self.num_labels )
UpperCAmelCase__ : Any = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
UpperCAmelCase__ : int = self.get_config()
return config, pixel_values, labels, pixel_labels
def lowercase_ ( self : int ):
'''simple docstring'''
return MobileViTVaConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_act=self.hidden_act , conv_kernel_size=self.conv_kernel_size , output_stride=self.output_stride , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , width_multiplier=self.width_multiplier , ffn_dropout=self.ffn_dropout , attn_dropout=self.attn_dropout , )
def lowercase_ ( self : Optional[Any] , _A : List[Any] , _A : Dict , _A : Optional[Any] , _A : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase__ : Union[str, Any] = MobileViTVaModel(config=__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
UpperCAmelCase__ : Optional[Any] = model(__lowerCAmelCase )
self.parent.assertEqual(
result.last_hidden_state.shape , (
self.batch_size,
self.last_hidden_size,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def lowercase_ ( self : Any , _A : Dict , _A : Tuple , _A : List[str] , _A : str ):
'''simple docstring'''
UpperCAmelCase__ : List[Any] = self.num_labels
UpperCAmelCase__ : Optional[Any] = MobileViTVaForImageClassification(__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
UpperCAmelCase__ : Any = model(__lowerCAmelCase , labels=__lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowercase_ ( self : str , _A : str , _A : Any , _A : Tuple , _A : int ):
'''simple docstring'''
UpperCAmelCase__ : List[str] = self.num_labels
UpperCAmelCase__ : List[str] = MobileViTVaForSemanticSegmentation(__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
UpperCAmelCase__ : str = model(__lowerCAmelCase )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
UpperCAmelCase__ : Union[str, Any] = model(__lowerCAmelCase , labels=__lowerCAmelCase )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def lowercase_ ( self : Any ):
'''simple docstring'''
UpperCAmelCase__ : Union[str, Any] = self.prepare_config_and_inputs()
config , pixel_values , labels , pixel_labels = config_and_inputs
UpperCAmelCase__ : Any = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class lowerCamelCase_ ( _lowercase , _lowercase , unittest.TestCase ):
lowerCAmelCase__ = (
(MobileViTVaModel, MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation)
if is_torch_available()
else ()
)
lowerCAmelCase__ = (
{
"feature-extraction": MobileViTVaModel,
"image-classification": MobileViTVaForImageClassification,
"image-segmentation": MobileViTVaForSemanticSegmentation,
}
if is_torch_available()
else {}
)
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = False
def lowercase_ ( self : str ):
'''simple docstring'''
UpperCAmelCase__ : int = MobileViTVaModelTester(self )
UpperCAmelCase__ : Union[str, Any] = MobileViTVaConfigTester(self , config_class=__lowerCAmelCase , has_text_modality=__lowerCAmelCase )
def lowercase_ ( self : Union[str, Any] ):
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason='''MobileViTV2 does not use inputs_embeds''' )
def lowercase_ ( self : Tuple ):
'''simple docstring'''
pass
@unittest.skip(reason='''MobileViTV2 does not support input and output embeddings''' )
def lowercase_ ( self : List[Any] ):
'''simple docstring'''
pass
@unittest.skip(reason='''MobileViTV2 does not output attentions''' )
def lowercase_ ( self : Optional[Any] ):
'''simple docstring'''
pass
@require_torch_multi_gpu
@unittest.skip(reason='''Got `CUDA error: misaligned address` for tests after this one being run.''' )
def lowercase_ ( self : int ):
'''simple docstring'''
pass
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def lowercase_ ( self : str ):
'''simple docstring'''
pass
def lowercase_ ( self : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase__ : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase__ : Any = model_class(__lowerCAmelCase )
UpperCAmelCase__ : Any = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase__ : Dict = [*signature.parameters.keys()]
UpperCAmelCase__ : List[str] = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , __lowerCAmelCase )
def lowercase_ ( self : List[Any] ):
'''simple docstring'''
UpperCAmelCase__ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__lowerCAmelCase )
def lowercase_ ( self : str ):
'''simple docstring'''
def check_hidden_states_output(_A : int , _A : Tuple , _A : Optional[int] ):
UpperCAmelCase__ : Tuple = model_class(__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
with torch.no_grad():
UpperCAmelCase__ : Dict = model(**self._prepare_for_class(__lowerCAmelCase , __lowerCAmelCase ) )
UpperCAmelCase__ : List[Any] = outputs.hidden_states
UpperCAmelCase__ : List[str] = 5
self.assertEqual(len(__lowerCAmelCase ) , __lowerCAmelCase )
# MobileViTV2's feature maps are of shape (batch_size, num_channels, height, width)
# with the width and height being successively divided by 2.
UpperCAmelCase__ : Union[str, Any] = 2
for i in range(len(__lowerCAmelCase ) ):
self.assertListEqual(
list(hidden_states[i].shape[-2:] ) , [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor] , )
divisor *= 2
self.assertEqual(self.model_tester.output_stride , divisor // 2 )
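        # Worked example (hedged, using the tester defaults image_size=64,
        # output_stride=32): the five hidden states are checked at spatial sizes
        # 64//2=32, 64//4=16, 64//8=8, 64//16=4 and 64//32=2; after the loop
        # divisor == 64, so the final assertion checks output_stride == 64 // 2 == 32.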
UpperCAmelCase__ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase__ : Any = True
check_hidden_states_output(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
UpperCAmelCase__ : List[Any] = True
check_hidden_states_output(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
def lowercase_ ( self : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase__ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__lowerCAmelCase )
def lowercase_ ( self : List[Any] ):
'''simple docstring'''
UpperCAmelCase__ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*__lowerCAmelCase )
@slow
def lowercase_ ( self : Dict ):
'''simple docstring'''
for model_name in MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase__ : List[Any] = MobileViTVaModel.from_pretrained(__lowerCAmelCase )
self.assertIsNotNone(__lowerCAmelCase )
def a__ ( ) -> str:
UpperCAmelCase__ : List[Any] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
class lowerCamelCase_ ( unittest.TestCase ):
@cached_property
def lowercase_ ( self : List[Any] ):
'''simple docstring'''
return (
MobileViTImageProcessor.from_pretrained('''apple/mobilevitv2-1.0-imagenet1k-256''' )
if is_vision_available()
else None
)
@slow
def lowercase_ ( self : Tuple ):
'''simple docstring'''
UpperCAmelCase__ : Optional[Any] = MobileViTVaForImageClassification.from_pretrained('''apple/mobilevitv2-1.0-imagenet1k-256''' ).to(
__lowerCAmelCase )
UpperCAmelCase__ : int = self.default_image_processor
UpperCAmelCase__ : Optional[Any] = prepare_img()
UpperCAmelCase__ : Any = image_processor(images=__lowerCAmelCase , return_tensors='''pt''' ).to(__lowerCAmelCase )
# forward pass
with torch.no_grad():
UpperCAmelCase__ : Any = model(**__lowerCAmelCase )
# verify the logits
UpperCAmelCase__ : List[Any] = torch.Size((1, 1_000) )
self.assertEqual(outputs.logits.shape , __lowerCAmelCase )
UpperCAmelCase__ : int = torch.tensor([-1.63_36e00, -7.32_04e-02, -5.18_83e-01] ).to(__lowerCAmelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __lowerCAmelCase , atol=1e-4 ) )
@slow
def lowercase_ ( self : Optional[int] ):
'''simple docstring'''
UpperCAmelCase__ : int = MobileViTVaForSemanticSegmentation.from_pretrained('''shehan97/mobilevitv2-1.0-voc-deeplabv3''' )
UpperCAmelCase__ : Any = model.to(__lowerCAmelCase )
UpperCAmelCase__ : Tuple = MobileViTImageProcessor.from_pretrained('''shehan97/mobilevitv2-1.0-voc-deeplabv3''' )
UpperCAmelCase__ : Dict = prepare_img()
UpperCAmelCase__ : Union[str, Any] = image_processor(images=__lowerCAmelCase , return_tensors='''pt''' ).to(__lowerCAmelCase )
# forward pass
with torch.no_grad():
UpperCAmelCase__ : Optional[int] = model(**__lowerCAmelCase )
UpperCAmelCase__ : Optional[Any] = outputs.logits
# verify the logits
UpperCAmelCase__ : Dict = torch.Size((1, 21, 32, 32) )
self.assertEqual(logits.shape , __lowerCAmelCase )
UpperCAmelCase__ : Optional[Any] = torch.tensor(
[
[[7.0_8_6_3, 7.1_5_2_5, 6.8_2_0_1], [6.6_9_3_1, 6.8_7_7_0, 6.8_9_3_3], [6.2_9_7_8, 7.0_3_6_6, 6.9_6_3_6]],
[[-3.7_1_3_4, -3.6_7_1_2, -3.6_6_7_5], [-3.5_8_2_5, -3.3_5_4_9, -3.4_7_7_7], [-3.3_4_3_5, -3.3_9_7_9, -3.2_8_5_7]],
[[-2.9_3_2_9, -2.8_0_0_3, -2.7_3_6_9], [-3.0_5_6_4, -2.4_7_8_0, -2.0_2_0_7], [-2.6_8_8_9, -1.9_2_9_8, -1.7_6_4_0]],
] , device=__lowerCAmelCase , )
self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , __lowerCAmelCase , atol=1e-4 ) )
@slow
def lowercase_ ( self : str ):
'''simple docstring'''
UpperCAmelCase__ : Optional[Any] = MobileViTVaForSemanticSegmentation.from_pretrained('''shehan97/mobilevitv2-1.0-voc-deeplabv3''' )
UpperCAmelCase__ : Tuple = model.to(__lowerCAmelCase )
UpperCAmelCase__ : Tuple = MobileViTImageProcessor.from_pretrained('''shehan97/mobilevitv2-1.0-voc-deeplabv3''' )
UpperCAmelCase__ : List[str] = prepare_img()
UpperCAmelCase__ : Optional[int] = image_processor(images=__lowerCAmelCase , return_tensors='''pt''' ).to(__lowerCAmelCase )
# forward pass
with torch.no_grad():
UpperCAmelCase__ : Dict = model(**__lowerCAmelCase )
UpperCAmelCase__ : List[str] = outputs.logits.detach().cpu()
UpperCAmelCase__ : Optional[Any] = image_processor.post_process_semantic_segmentation(outputs=__lowerCAmelCase , target_sizes=[(50, 60)] )
UpperCAmelCase__ : Dict = torch.Size((50, 60) )
self.assertEqual(segmentation[0].shape , __lowerCAmelCase )
UpperCAmelCase__ : List[Any] = image_processor.post_process_semantic_segmentation(outputs=__lowerCAmelCase )
UpperCAmelCase__ : str = torch.Size((32, 32) )
self.assertEqual(segmentation[0].shape , __lowerCAmelCase )
| 181 |
"""simple docstring"""
import argparse
import torch
from transformers import LxmertConfig, LxmertForPreTraining, load_tf_weights_in_lxmert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, config_file, pytorch_dump_path):
    '''simple docstring'''
    config = LxmertConfig.from_json_file(config_file)
    print(F'''Building PyTorch model from configuration: {config}''')
    model = LxmertForPreTraining(config)
    # Load weights from tf checkpoint
    load_tf_weights_in_lxmert(model, config, tf_checkpoint_path)
    # Save pytorch-model
    print(F'''Save PyTorch model to {pytorch_dump_path}''')
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help='''The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.''',
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
lowerCAmelCase__ = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
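# Example invocation (file paths and script name are hypothetical):
#   python convert_lxmert_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path ./lxmert/model.ckpt \
#       --config_file ./lxmert/config.json \
#       --pytorch_dump_path ./lxmert/pytorch_model.bin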
| 72 | 0 |
'''simple docstring'''
from __future__ import annotations
def pigeon_sort(array) -> list[int]:
    '''simple docstring'''
    if len(array) == 0:
        return array
    _min, _max = min(array), max(array)
    # Compute the variables
    holes_range = _max - _min + 1
    holes, holes_repeat = [0] * holes_range, [0] * holes_range
    # Make the sorting.
    for i in array:
        index = i - _min
        holes[index] = i
        holes_repeat[index] += 1
    # Makes the array back by replacing the numbers.
    index = 0
    for i in range(holes_range):
        while holes_repeat[i] > 0:
            array[index] = holes[i]
            index += 1
            holes_repeat[i] -= 1
    # Returns the sorted array.
    return array
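# Worked example: pigeon_sort([8, 3, 2, 7])
#   _min = 2, _max = 8, holes_range = 7
#   holes        -> [2, 3, 0, 0, 0, 7, 8]   (value i stored at index i - _min)
#   holes_repeat -> [1, 1, 0, 0, 0, 1, 1]
#   rebuilt array -> [2, 3, 7, 8]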
if __name__ == "__main__":
import doctest
doctest.testmod()
user_input = input('Enter numbers separated by comma:\n')
unsorted = [int(x) for x in user_input.split(',')]
print(pigeon_sort(unsorted))
| 363 |
'''simple docstring'''
import os
import re
import sys
import traceback
import warnings
from pathlib import Path
from typing import Dict, Optional, Union
from uuid import uuida
from huggingface_hub import HfFolder, ModelCard, ModelCardData, hf_hub_download, whoami
from huggingface_hub.file_download import REGEX_COMMIT_HASH
from huggingface_hub.utils import (
EntryNotFoundError,
RepositoryNotFoundError,
RevisionNotFoundError,
is_jinja_available,
)
from packaging import version
from requests import HTTPError
from .. import __version__
from .constants import (
DEPRECATED_REVISION_ARGS,
DIFFUSERS_CACHE,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
SAFETENSORS_WEIGHTS_NAME,
WEIGHTS_NAME,
)
from .import_utils import (
ENV_VARS_TRUE_VALUES,
_flax_version,
_jax_version,
_onnxruntime_version,
_torch_version,
is_flax_available,
is_onnx_available,
is_torch_available,
)
from .logging import get_logger
a : Any = get_logger(__name__)
a : Union[str, Any] = Path(__file__).parent / 'model_card_template.md'
a : List[Any] = uuida().hex
a : List[str] = os.getenv('HF_HUB_OFFLINE', '').upper() in ENV_VARS_TRUE_VALUES
a : str = os.getenv('DISABLE_TELEMETRY', '').upper() in ENV_VARS_TRUE_VALUES
a : Optional[Any] = HUGGINGFACE_CO_RESOLVE_ENDPOINT + '/api/telemetry/'
def __magic_name__ ( __UpperCAmelCase = None ) -> str:
'''simple docstring'''
snake_case_ = F"diffusers/{__version__}; python/{sys.version.split()[0]}; session_id/{SESSION_ID}"
if DISABLE_TELEMETRY or HF_HUB_OFFLINE:
return ua + "; telemetry/off"
if is_torch_available():
ua += F"; torch/{_torch_version}"
if is_flax_available():
ua += F"; jax/{_jax_version}"
ua += F"; flax/{_flax_version}"
if is_onnx_available():
ua += F"; onnxruntime/{_onnxruntime_version}"
# CI will set this value to True
if os.environ.get('''DIFFUSERS_IS_CI''', '''''' ).upper() in ENV_VARS_TRUE_VALUES:
ua += "; is_ci/true"
if isinstance(user_agent, dict ):
ua += "; " + "; ".join(F"{k}/{v}" for k, v in user_agent.items() )
elif isinstance(user_agent, str ):
ua += "; " + user_agent
return ua
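# Example of the string built above (version numbers and session id are
# hypothetical): calling it with {"pipeline_class": "StableDiffusionPipeline"}
# on a torch-only install yields something like
#   "diffusers/0.16.0; python/3.10.4; session_id/abc123; torch/2.0.0; pipeline_class/StableDiffusionPipeline"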
def __magic_name__ ( __UpperCAmelCase, __UpperCAmelCase = None, __UpperCAmelCase = None ) -> Optional[Any]:
'''simple docstring'''
if token is None:
snake_case_ = HfFolder.get_token()
if organization is None:
snake_case_ = whoami(__UpperCAmelCase )['''name''']
return F"{username}/{model_id}"
else:
return F"{organization}/{model_id}"
def __magic_name__ ( __UpperCAmelCase, __UpperCAmelCase ) -> Union[str, Any]:
'''simple docstring'''
if not is_jinja_available():
raise ValueError(
'''Modelcard rendering is based on Jinja templates.'''
''' Please make sure to have `jinja` installed before using `create_model_card`.'''
''' To install it, please run `pip install Jinja2`.''' )
if hasattr(__UpperCAmelCase, '''local_rank''' ) and args.local_rank not in [-1, 0]:
return
snake_case_ = args.hub_token if hasattr(__UpperCAmelCase, '''hub_token''' ) else None
snake_case_ = get_full_repo_name(__UpperCAmelCase, token=__UpperCAmelCase )
snake_case_ = ModelCard.from_template(
card_data=ModelCardData( # Card metadata object that will be converted to YAML block
language='''en''', license='''apache-2.0''', library_name='''diffusers''', tags=[], datasets=args.dataset_name, metrics=[], ), template_path=__UpperCAmelCase, model_name=__UpperCAmelCase, repo_name=__UpperCAmelCase, dataset_name=args.dataset_name if hasattr(__UpperCAmelCase, '''dataset_name''' ) else None, learning_rate=args.learning_rate, train_batch_size=args.train_batch_size, eval_batch_size=args.eval_batch_size, gradient_accumulation_steps=(
args.gradient_accumulation_steps if hasattr(__UpperCAmelCase, '''gradient_accumulation_steps''' ) else None
), adam_betaa=args.adam_betaa if hasattr(__UpperCAmelCase, '''adam_beta1''' ) else None, adam_betaa=args.adam_betaa if hasattr(__UpperCAmelCase, '''adam_beta2''' ) else None, adam_weight_decay=args.adam_weight_decay if hasattr(__UpperCAmelCase, '''adam_weight_decay''' ) else None, adam_epsilon=args.adam_epsilon if hasattr(__UpperCAmelCase, '''adam_epsilon''' ) else None, lr_scheduler=args.lr_scheduler if hasattr(__UpperCAmelCase, '''lr_scheduler''' ) else None, lr_warmup_steps=args.lr_warmup_steps if hasattr(__UpperCAmelCase, '''lr_warmup_steps''' ) else None, ema_inv_gamma=args.ema_inv_gamma if hasattr(__UpperCAmelCase, '''ema_inv_gamma''' ) else None, ema_power=args.ema_power if hasattr(__UpperCAmelCase, '''ema_power''' ) else None, ema_max_decay=args.ema_max_decay if hasattr(__UpperCAmelCase, '''ema_max_decay''' ) else None, mixed_precision=args.mixed_precision, )
snake_case_ = os.path.join(args.output_dir, '''README.md''' )
model_card.save(__UpperCAmelCase )
def __magic_name__ ( __UpperCAmelCase, __UpperCAmelCase = None ) -> Optional[Any]:
'''simple docstring'''
if resolved_file is None or commit_hash is not None:
return commit_hash
snake_case_ = str(Path(__UpperCAmelCase ).as_posix() )
snake_case_ = re.search(r'''snapshots/([^/]+)/''', __UpperCAmelCase )
if search is None:
return None
snake_case_ = search.groups()[0]
return commit_hash if REGEX_COMMIT_HASH.match(__UpperCAmelCase ) else None
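# Worked example (path hypothetical): for a resolved file such as
#   ~/.cache/huggingface/hub/models--x--y/snapshots/<40-hex-hash>/config.json
# the regex captures "<40-hex-hash>"; it is returned only if it also matches
# REGEX_COMMIT_HASH, otherwise the function returns None.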
# Old default cache path, potentially to be migrated.
# This logic was more or less taken from `transformers`, with the following differences:
# - Diffusers doesn't use custom environment variables to specify the cache path.
# - There is no need to migrate the cache format, just move the files to the new location.
a : str = os.path.expanduser(
os.getenv('HF_HOME', os.path.join(os.getenv('XDG_CACHE_HOME', '~/.cache'), 'huggingface'))
)
a : Optional[Any] = os.path.join(hf_cache_home, 'diffusers')
def __magic_name__ ( __UpperCAmelCase = None, __UpperCAmelCase = None ) -> None:
'''simple docstring'''
if new_cache_dir is None:
snake_case_ = DIFFUSERS_CACHE
if old_cache_dir is None:
snake_case_ = old_diffusers_cache
snake_case_ = Path(__UpperCAmelCase ).expanduser()
snake_case_ = Path(__UpperCAmelCase ).expanduser()
for old_blob_path in old_cache_dir.glob('''**/blobs/*''' ):
if old_blob_path.is_file() and not old_blob_path.is_symlink():
snake_case_ = new_cache_dir / old_blob_path.relative_to(__UpperCAmelCase )
new_blob_path.parent.mkdir(parents=__UpperCAmelCase, exist_ok=__UpperCAmelCase )
os.replace(__UpperCAmelCase, __UpperCAmelCase )
try:
os.symlink(__UpperCAmelCase, __UpperCAmelCase )
except OSError:
logger.warning(
'''Could not create symlink between old cache and new cache. If you use an older version of diffusers again, files will be re-downloaded.''' )
# At this point, old_cache_dir contains symlinks to the new cache (it can still be used).
a : Tuple = os.path.join(DIFFUSERS_CACHE, 'version_diffusers_cache.txt')
if not os.path.isfile(cache_version_file):
a : Tuple = 0
else:
with open(cache_version_file) as f:
try:
a : Optional[Any] = int(f.read())
except ValueError:
a : List[str] = 0
if cache_version < 1:
a : Tuple = os.path.isdir(old_diffusers_cache) and len(os.listdir(old_diffusers_cache)) > 0
if old_cache_is_not_empty:
logger.warning(
'The cache for model files in Diffusers v0.14.0 has moved to a new location. Moving your '
'existing cached models. This is a one-time operation, you can interrupt it or run it '
'later by calling `diffusers.utils.hub_utils.move_cache()`.'
)
try:
move_cache()
except Exception as e:
a : str = '\n'.join(traceback.format_tb(e.__traceback__))
logger.error(
f'''There was a problem when trying to move your cache:\n\n{trace}\n{e.__class__.__name__}: {e}\n\nPlease '''
'file an issue at https://github.com/huggingface/diffusers/issues/new/choose, copy paste this whole '
'message and we will do our best to help.'
)
if cache_version < 1:
try:
os.makedirs(DIFFUSERS_CACHE, exist_ok=True)
with open(cache_version_file, 'w') as f:
f.write('1')
except Exception:
logger.warning(
f'''There was a problem when trying to write in your cache folder ({DIFFUSERS_CACHE}). Please, ensure '''
'the directory exists and can be written to.'
)
def __magic_name__ ( __UpperCAmelCase, __UpperCAmelCase = None ) -> str:
'''simple docstring'''
if variant is not None:
snake_case_ = weights_name.split('''.''' )
snake_case_ = splits[:-1] + [variant] + splits[-1:]
snake_case_ = '''.'''.join(__UpperCAmelCase )
return weights_name
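# Worked example of the variant insertion above:
#   weights_name="diffusion_pytorch_model.bin", variant="fp16"
#   splits -> ["diffusion_pytorch_model", "bin"]
#   result -> "diffusion_pytorch_model.fp16.bin"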
def __magic_name__ ( __UpperCAmelCase, *,
__UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase=None, ) -> int:
'''simple docstring'''
snake_case_ = str(__UpperCAmelCase )
if os.path.isfile(__UpperCAmelCase ):
return pretrained_model_name_or_path
elif os.path.isdir(__UpperCAmelCase ):
if os.path.isfile(os.path.join(__UpperCAmelCase, __UpperCAmelCase ) ):
# Load from a PyTorch checkpoint
snake_case_ = os.path.join(__UpperCAmelCase, __UpperCAmelCase )
return model_file
elif subfolder is not None and os.path.isfile(
os.path.join(__UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase ) ):
snake_case_ = os.path.join(__UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase )
return model_file
else:
raise EnvironmentError(
F"Error no file named {weights_name} found in directory {pretrained_model_name_or_path}." )
else:
# 1. First check if deprecated way of loading from branches is used
if (
revision in DEPRECATED_REVISION_ARGS
and (weights_name == WEIGHTS_NAME or weights_name == SAFETENSORS_WEIGHTS_NAME)
and version.parse(version.parse(__version__ ).base_version ) >= version.parse('''0.20.0''' )
):
try:
snake_case_ = hf_hub_download(
__UpperCAmelCase, filename=_add_variant(__UpperCAmelCase, __UpperCAmelCase ), cache_dir=__UpperCAmelCase, force_download=__UpperCAmelCase, proxies=__UpperCAmelCase, resume_download=__UpperCAmelCase, local_files_only=__UpperCAmelCase, use_auth_token=__UpperCAmelCase, user_agent=__UpperCAmelCase, subfolder=__UpperCAmelCase, revision=revision or commit_hash, )
warnings.warn(
F"Loading the variant {revision} from {pretrained_model_name_or_path} via `revision='{revision}'` is deprecated. Loading instead from `revision='main'` with `variant={revision}`. Loading model variants via `revision='{revision}'` will be removed in diffusers v1. Please use `variant='{revision}'` instead.", __UpperCAmelCase, )
return model_file
except: # noqa: E722
warnings.warn(
F"You are loading the variant {revision} from {pretrained_model_name_or_path} via `revision='{revision}'`. This behavior is deprecated and will be removed in diffusers v1. One should use `variant='{revision}'` instead. However, it appears that {pretrained_model_name_or_path} currently does not have a {_add_variant(__UpperCAmelCase, __UpperCAmelCase )} file in the 'main' branch of {pretrained_model_name_or_path}. \n The Diffusers team and community would be very grateful if you could open an issue: https://github.com/huggingface/diffusers/issues/new with the title '{pretrained_model_name_or_path} is missing {_add_variant(__UpperCAmelCase, __UpperCAmelCase )}' so that the correct variant file can be added.", __UpperCAmelCase, )
try:
# 2. Load model file as usual
snake_case_ = hf_hub_download(
__UpperCAmelCase, filename=__UpperCAmelCase, cache_dir=__UpperCAmelCase, force_download=__UpperCAmelCase, proxies=__UpperCAmelCase, resume_download=__UpperCAmelCase, local_files_only=__UpperCAmelCase, use_auth_token=__UpperCAmelCase, user_agent=__UpperCAmelCase, subfolder=__UpperCAmelCase, revision=revision or commit_hash, )
return model_file
except RepositoryNotFoundError:
raise EnvironmentError(
F"{pretrained_model_name_or_path} is not a local folder and is not a valid model identifier "
'''listed on \'https://huggingface.co/models\'\nIf this is a private repository, make sure to pass a '''
'''token having permission to this repo with `use_auth_token` or log in with `huggingface-cli '''
'''login`.''' )
except RevisionNotFoundError:
raise EnvironmentError(
F"{revision} is not a valid git identifier (branch name, tag name or commit id) that exists for "
'''this model name. Check the model page at '''
F"'https://huggingface.co/{pretrained_model_name_or_path}' for available revisions." )
except EntryNotFoundError:
raise EnvironmentError(
F"{pretrained_model_name_or_path} does not appear to have a file named {weights_name}." )
except HTTPError as err:
raise EnvironmentError(
F"There was a specific connection error when trying to load {pretrained_model_name_or_path}:\n{err}" )
except ValueError:
raise EnvironmentError(
F"We couldn't connect to '{HUGGINGFACE_CO_RESOLVE_ENDPOINT}' to load this model, couldn't find it"
F" in the cached files and it looks like {pretrained_model_name_or_path} is not the path to a"
F" directory containing a file named {weights_name} or"
''' \nCheckout your internet connection or see how to run the library in'''
''' offline mode at \'https://huggingface.co/docs/diffusers/installation#offline-mode\'.''' )
except EnvironmentError:
raise EnvironmentError(
F"Can't load the model for '{pretrained_model_name_or_path}'. If you were trying to load it from "
'''\'https://huggingface.co/models\', make sure you don\'t have a local directory with the same name. '''
F"Otherwise, make sure '{pretrained_model_name_or_path}' is the correct path to a directory "
F"containing a file named {weights_name}" )
| 72 | 0 |
from typing import Optional, Tuple, Union
import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict
from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_ad_blocks_flax import (
FlaxCrossAttnDownBlockaD,
FlaxDownBlockaD,
FlaxUNetMidBlockaDCrossAttn,
)
@flax.struct.dataclass
class lowercase_ ( lowercase ):
'''simple docstring'''
__snake_case = 42
__snake_case = 42
class lowercase_ ( nn.Module ):
'''simple docstring'''
__snake_case = 42
__snake_case = (16, 32, 96, 2_56)
__snake_case = jnp.floataa
def __lowerCAmelCase ( self : Dict ) ->int:
"""simple docstring"""
a = nn.Conv(
self.block_out_channels[0] , kernel_size=(3, 3) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
a = []
for i in range(len(self.block_out_channels ) - 1 ):
a = self.block_out_channels[i]
a = self.block_out_channels[i + 1]
a = nn.Conv(
__UpperCAmelCase , kernel_size=(3, 3) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
blocks.append(__UpperCAmelCase )
a = nn.Conv(
__UpperCAmelCase , kernel_size=(3, 3) , strides=(2, 2) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
blocks.append(__UpperCAmelCase )
a = blocks
a = nn.Conv(
self.conditioning_embedding_channels , kernel_size=(3, 3) , padding=((1, 1), (1, 1)) , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
def __call__( self : List[Any] , __UpperCAmelCase : Any ) ->Dict:
"""simple docstring"""
a = self.conv_in(__UpperCAmelCase )
a = nn.silu(__UpperCAmelCase )
for block in self.blocks:
a = block(__UpperCAmelCase )
a = nn.silu(__UpperCAmelCase )
a = self.conv_out(__UpperCAmelCase )
return embedding
@flax_register_to_config
class lowercase_ ( nn.Module , lowercase , lowercase ):
'''simple docstring'''
__snake_case = 32
__snake_case = 4
__snake_case = (
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"DownBlock2D",
)
__snake_case = False
__snake_case = (3_20, 6_40, 12_80, 12_80)
__snake_case = 2
__snake_case = 8
__snake_case = None
__snake_case = 12_80
__snake_case = 0.0
__snake_case = False
__snake_case = jnp.floataa
__snake_case = True
__snake_case = 0
__snake_case = "rgb"
__snake_case = (16, 32, 96, 2_56)
def __lowerCAmelCase ( self : Dict , __UpperCAmelCase : jax.random.KeyArray ) ->FrozenDict:
"""simple docstring"""
a = (1, self.in_channels, self.sample_size, self.sample_size)
a = jnp.zeros(__UpperCAmelCase , dtype=jnp.floataa )
a = jnp.ones((1,) , dtype=jnp.intaa )
a = jnp.zeros((1, 1, self.cross_attention_dim) , dtype=jnp.floataa )
a = (1, 3, self.sample_size * 8, self.sample_size * 8)
a = jnp.zeros(__UpperCAmelCase , dtype=jnp.floataa )
params_rng , dropout_rng = jax.random.split(__UpperCAmelCase )
a = {'''params''': params_rng, '''dropout''': dropout_rng}
return self.init(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )["params"]
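    # Note (hedged): the conditioning image is initialized at 8x the latent
    # sample_size because Stable Diffusion's VAE downsamples pixel space by a
    # factor of 8, so a (1, 3, 8*S, 8*S) image hint pairs with a
    # (1, C, S, S) latent sample.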
def __lowerCAmelCase ( self : int ) ->List[str]:
"""simple docstring"""
a = self.block_out_channels
a = block_out_channels[0] * 4
# If `num_attention_heads` is not defined (which is the case for most models)
# it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
# The reason for this behavior is to correct for incorrectly named variables that were introduced
# when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
# Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
# which is why we correct for the naming here.
a = self.num_attention_heads or self.attention_head_dim
# input
a = nn.Conv(
block_out_channels[0] , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
# time
a = FlaxTimesteps(
block_out_channels[0] , flip_sin_to_cos=self.flip_sin_to_cos , freq_shift=self.config.freq_shift )
a = FlaxTimestepEmbedding(__UpperCAmelCase , dtype=self.dtype )
a = FlaxControlNetConditioningEmbedding(
conditioning_embedding_channels=block_out_channels[0] , block_out_channels=self.conditioning_embedding_out_channels , )
a = self.only_cross_attention
if isinstance(only_cross_attention , bool ):
a = (only_cross_attention,) * len(self.down_block_types )
if isinstance(num_attention_heads , int ):
a = (num_attention_heads,) * len(self.down_block_types )
# down
a = []
a = []
a = block_out_channels[0]
a = nn.Conv(
__UpperCAmelCase , kernel_size=(1, 1) , padding='''VALID''' , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
controlnet_down_blocks.append(__UpperCAmelCase )
for i, down_block_type in enumerate(self.down_block_types ):
a = output_channel
a = block_out_channels[i]
a = i == len(__UpperCAmelCase ) - 1
if down_block_type == "CrossAttnDownBlock2D":
a = FlaxCrossAttnDownBlockaD(
in_channels=__UpperCAmelCase , out_channels=__UpperCAmelCase , dropout=self.dropout , num_layers=self.layers_per_block , num_attention_heads=num_attention_heads[i] , add_downsample=not is_final_block , use_linear_projection=self.use_linear_projection , only_cross_attention=only_cross_attention[i] , dtype=self.dtype , )
else:
a = FlaxDownBlockaD(
in_channels=__UpperCAmelCase , out_channels=__UpperCAmelCase , dropout=self.dropout , num_layers=self.layers_per_block , add_downsample=not is_final_block , dtype=self.dtype , )
down_blocks.append(__UpperCAmelCase )
for _ in range(self.layers_per_block ):
a = nn.Conv(
__UpperCAmelCase , kernel_size=(1, 1) , padding='''VALID''' , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
controlnet_down_blocks.append(__UpperCAmelCase )
if not is_final_block:
a = nn.Conv(
__UpperCAmelCase , kernel_size=(1, 1) , padding='''VALID''' , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
controlnet_down_blocks.append(__UpperCAmelCase )
a = down_blocks
a = controlnet_down_blocks
# mid
a = block_out_channels[-1]
a = FlaxUNetMidBlockaDCrossAttn(
in_channels=__UpperCAmelCase , dropout=self.dropout , num_attention_heads=num_attention_heads[-1] , use_linear_projection=self.use_linear_projection , dtype=self.dtype , )
a = nn.Conv(
__UpperCAmelCase , kernel_size=(1, 1) , padding='''VALID''' , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
def __call__( self : Any , __UpperCAmelCase : List[str] , __UpperCAmelCase : str , __UpperCAmelCase : int , __UpperCAmelCase : Tuple , __UpperCAmelCase : float = 1.0 , __UpperCAmelCase : bool = True , __UpperCAmelCase : bool = False , ) ->Union[FlaxControlNetOutput, Tuple]:
"""simple docstring"""
a = self.controlnet_conditioning_channel_order
if channel_order == "bgr":
a = jnp.flip(__UpperCAmelCase , axis=1 )
# 1. time
if not isinstance(__UpperCAmelCase , jnp.ndarray ):
a = jnp.array([timesteps] , dtype=jnp.intaa )
elif isinstance(__UpperCAmelCase , jnp.ndarray ) and len(timesteps.shape ) == 0:
a = timesteps.astype(dtype=jnp.floataa )
a = jnp.expand_dims(__UpperCAmelCase , 0 )
a = self.time_proj(__UpperCAmelCase )
a = self.time_embedding(__UpperCAmelCase )
# 2. pre-process
a = jnp.transpose(__UpperCAmelCase , (0, 2, 3, 1) )
a = self.conv_in(__UpperCAmelCase )
a = jnp.transpose(__UpperCAmelCase , (0, 2, 3, 1) )
a = self.controlnet_cond_embedding(__UpperCAmelCase )
sample += controlnet_cond
# 3. down
a = (sample,)
for down_block in self.down_blocks:
if isinstance(down_block , FlaxCrossAttnDownBlockaD ):
sample , res_samples = down_block(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , deterministic=not train )
else:
sample , res_samples = down_block(__UpperCAmelCase , __UpperCAmelCase , deterministic=not train )
down_block_res_samples += res_samples
# 4. mid
a = self.mid_block(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , deterministic=not train )
# 5. controlnet blocks
a = ()
for down_block_res_sample, controlnet_block in zip(__UpperCAmelCase , self.controlnet_down_blocks ):
a = controlnet_block(__UpperCAmelCase )
controlnet_down_block_res_samples += (down_block_res_sample,)
a = controlnet_down_block_res_samples
a = self.controlnet_mid_block(__UpperCAmelCase )
# 6. scaling
a = [sample * conditioning_scale for sample in down_block_res_samples]
mid_block_res_sample *= conditioning_scale
if not return_dict:
return (down_block_res_samples, mid_block_res_sample)
return FlaxControlNetOutput(
down_block_res_samples=__UpperCAmelCase , mid_block_res_sample=__UpperCAmelCase )
| 0 |
"""simple docstring"""
import copy
import inspect
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import VideoMAEConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING,
VideoMAEForPreTraining,
VideoMAEForVideoClassification,
VideoMAEModel,
)
from transformers.models.videomae.modeling_videomae import VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from transformers import VideoMAEImageProcessor
class __SCREAMING_SNAKE_CASE :
'''simple docstring'''
def __init__( self : Optional[Any], lowerCamelCase : Any, lowerCamelCase : List[Any]=13, lowerCamelCase : Any=10, lowerCamelCase : Optional[Any]=3, lowerCamelCase : Union[str, Any]=2, lowerCamelCase : Dict=2, lowerCamelCase : Tuple=2, lowerCamelCase : List[str]=True, lowerCamelCase : Optional[int]=True, lowerCamelCase : Dict=32, lowerCamelCase : Any=5, lowerCamelCase : Dict=4, lowerCamelCase : Any=37, lowerCamelCase : Union[str, Any]="gelu", lowerCamelCase : Dict=0.1, lowerCamelCase : Union[str, Any]=0.1, lowerCamelCase : Dict=10, lowerCamelCase : str=0.02, lowerCamelCase : List[Any]=0.9, lowerCamelCase : List[Any]=None, )-> str:
lowerCamelCase__ : List[str] =parent
lowerCamelCase__ : Any =batch_size
lowerCamelCase__ : str =image_size
lowerCamelCase__ : Optional[Any] =num_channels
lowerCamelCase__ : Optional[int] =patch_size
lowerCamelCase__ : List[str] =tubelet_size
lowerCamelCase__ : Optional[Any] =num_frames
lowerCamelCase__ : Any =is_training
lowerCamelCase__ : List[Any] =use_labels
lowerCamelCase__ : Union[str, Any] =hidden_size
lowerCamelCase__ : List[str] =num_hidden_layers
lowerCamelCase__ : str =num_attention_heads
lowerCamelCase__ : List[Any] =intermediate_size
lowerCamelCase__ : Any =hidden_act
lowerCamelCase__ : int =hidden_dropout_prob
lowerCamelCase__ : Optional[int] =attention_probs_dropout_prob
lowerCamelCase__ : Optional[Any] =type_sequence_label_size
lowerCamelCase__ : int =initializer_range
lowerCamelCase__ : Optional[Any] =mask_ratio
lowerCamelCase__ : Any =scope
# in VideoMAE, the number of tokens equals num_frames/tubelet_size * num_patches per frame
lowerCamelCase__ : Optional[Any] =(image_size // patch_size) ** 2
lowerCamelCase__ : Any =(num_frames // tubelet_size) * self.num_patches_per_frame
# use this variable to define bool_masked_pos
lowerCamelCase__ : List[Any] =int(mask_ratio * self.seq_length )
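    # Worked example with the defaults above: image_size=10 and patch_size=2
    # give (10 // 2) ** 2 = 25 patches per frame; num_frames=2 with
    # tubelet_size=2 gives (2 // 2) * 25 = 25 tokens, and mask_ratio=0.9
    # masks int(0.9 * 25) = 22 of them.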
def snake_case ( self : Dict )-> Union[str, Any]:
lowerCamelCase__ : str =floats_tensor(
[self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size] )
lowerCamelCase__ : Any =None
if self.use_labels:
lowerCamelCase__ : Union[str, Any] =ids_tensor([self.batch_size], self.type_sequence_label_size )
lowerCamelCase__ : Optional[Any] =self.get_config()
return config, pixel_values, labels
def snake_case ( self : Union[str, Any] )-> Optional[int]:
return VideoMAEConfig(
image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, num_frames=self.num_frames, tubelet_size=self.tubelet_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=lowerCamelCase, initializer_range=self.initializer_range, )
def snake_case ( self : Dict, lowerCamelCase : Tuple, lowerCamelCase : Optional[Any], lowerCamelCase : Any )-> Union[str, Any]:
lowerCamelCase__ : List[str] =VideoMAEModel(config=lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
lowerCamelCase__ : int =model(lowerCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) )
def snake_case ( self : Any, lowerCamelCase : str, lowerCamelCase : Optional[int], lowerCamelCase : str )-> Dict:
lowerCamelCase__ : int =VideoMAEForPreTraining(lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
# important: each video needs to have the same number of masked patches
# hence we define a single mask, which we then repeat for each example in the batch
lowerCamelCase__ : Optional[int] =torch.ones((self.num_masks,) )
lowerCamelCase__ : List[str] =torch.cat([mask, torch.zeros(self.seq_length - mask.size(0 ) )] )
lowerCamelCase__ : int =mask.expand(self.batch_size, -1 ).bool()
lowerCamelCase__ : Any =model(lowerCamelCase, lowerCamelCase )
# model only returns predictions for masked patches
lowerCamelCase__ : Optional[int] =mask.sum().item()
lowerCamelCase__ : Dict =3 * self.tubelet_size * self.patch_size**2
self.parent.assertEqual(result.logits.shape, (self.batch_size, num_masked_patches, decoder_num_labels) )
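    # With the defaults this asserts logits of shape (13, 22, 24):
    # batch_size=13, 22 masked tokens (see num_masks above) and
    # decoder_num_labels = 3 * tubelet_size * patch_size**2 = 3 * 2 * 4 = 24.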
def snake_case ( self : Optional[Any] )-> Tuple:
lowerCamelCase__ : Tuple =self.prepare_config_and_inputs()
config , pixel_values , labels = config_and_inputs
lowerCamelCase__ : List[str] ={'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class __SCREAMING_SNAKE_CASE ( lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ):
'''simple docstring'''
_a = (
(VideoMAEModel, VideoMAEForPreTraining, VideoMAEForVideoClassification) if is_torch_available() else ()
)
_a = (
{'feature-extraction': VideoMAEModel, 'video-classification': VideoMAEForVideoClassification}
if is_torch_available()
else {}
)
_a = False
_a = False
_a = False
_a = False
def snake_case ( self : List[Any] )-> Tuple:
lowerCamelCase__ : int =VideoMAEModelTester(self )
lowerCamelCase__ : Optional[int] =ConfigTester(self, config_class=lowerCamelCase, has_text_modality=lowerCamelCase, hidden_size=37 )
def snake_case ( self : Any, lowerCamelCase : List[Any], lowerCamelCase : Optional[Any], lowerCamelCase : List[str]=False )-> Tuple:
lowerCamelCase__ : str =copy.deepcopy(lowerCamelCase )
if model_class == VideoMAEForPreTraining:
# important: each video needs to have the same number of masked patches
# hence we define a single mask, which we then repeat for each example in the batch
lowerCamelCase__ : Any =torch.ones((self.model_tester.num_masks,) )
lowerCamelCase__ : Dict =torch.cat([mask, torch.zeros(self.model_tester.seq_length - mask.size(0 ) )] )
lowerCamelCase__ : Optional[int] =mask.expand(self.model_tester.batch_size, -1 ).bool()
lowerCamelCase__ : int =bool_masked_pos.to(lowerCamelCase )
if return_labels:
if model_class in [
*get_values(lowerCamelCase ),
]:
lowerCamelCase__ : List[str] =torch.zeros(
self.model_tester.batch_size, dtype=torch.long, device=lowerCamelCase )
return inputs_dict
def snake_case ( self : List[Any] )-> int:
self.config_tester.run_common_tests()
@unittest.skip(reason='''VideoMAE does not use inputs_embeds''' )
def snake_case ( self : List[str] )-> Tuple:
pass
def snake_case ( self : Union[str, Any] )-> Union[str, Any]:
lowerCamelCase__ , lowerCamelCase__ : Optional[int] =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase__ : List[str] =model_class(lowerCamelCase )
self.assertIsInstance(model.get_input_embeddings(), (nn.Module) )
lowerCamelCase__ : Optional[Any] =model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowerCamelCase, nn.Linear ) )
def snake_case ( self : Optional[int] )-> Optional[Any]:
lowerCamelCase__ , lowerCamelCase__ : Any =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase__ : Optional[int] =model_class(lowerCamelCase )
lowerCamelCase__ : Dict =inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCamelCase__ : Tuple =[*signature.parameters.keys()]
lowerCamelCase__ : List[str] =['''pixel_values''']
self.assertListEqual(arg_names[:1], lowerCamelCase )
def snake_case ( self : Tuple )-> Optional[int]:
lowerCamelCase__ : Any =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase )
def snake_case ( self : List[Any] )-> Union[str, Any]:
lowerCamelCase__ : Optional[int] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*lowerCamelCase )
@slow
def snake_case ( self : List[Any] )-> Dict:
for model_name in VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase__ : str =VideoMAEModel.from_pretrained(lowerCamelCase )
self.assertIsNotNone(lowerCamelCase )
def snake_case ( self : List[str] )-> Optional[int]:
if not self.has_attentions:
pass
else:
lowerCamelCase__ , lowerCamelCase__ : Union[str, Any] =self.model_tester.prepare_config_and_inputs_for_common()
lowerCamelCase__ : Tuple =True
for model_class in self.all_model_classes:
lowerCamelCase__ : Any =self.model_tester.seq_length - self.model_tester.num_masks
lowerCamelCase__ : Any =(
num_visible_patches if model_class == VideoMAEForPreTraining else self.model_tester.seq_length
)
lowerCamelCase__ : Optional[int] =True
lowerCamelCase__ : Optional[int] =False
lowerCamelCase__ : Optional[int] =True
lowerCamelCase__ : int =model_class(lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
with torch.no_grad():
lowerCamelCase__ : Union[str, Any] =model(**self._prepare_for_class(lowerCamelCase, lowerCamelCase ) )
lowerCamelCase__ : str =outputs.attentions
self.assertEqual(len(lowerCamelCase ), self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
lowerCamelCase__ : Tuple =True
lowerCamelCase__ : Union[str, Any] =model_class(lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
with torch.no_grad():
lowerCamelCase__ : List[str] =model(**self._prepare_for_class(lowerCamelCase, lowerCamelCase ) )
lowerCamelCase__ : int =outputs.attentions
self.assertEqual(len(lowerCamelCase ), self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ), [self.model_tester.num_attention_heads, seq_len, seq_len], )
lowerCamelCase__ : Union[str, Any] =len(lowerCamelCase )
# Check attention is always last and order is fine
lowerCamelCase__ : List[Any] =True
lowerCamelCase__ : Union[str, Any] =True
lowerCamelCase__ : Dict =model_class(lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
with torch.no_grad():
lowerCamelCase__ : Any =model(**self._prepare_for_class(lowerCamelCase, lowerCamelCase ) )
self.assertEqual(out_len + 1, len(lowerCamelCase ) )
lowerCamelCase__ : Optional[Any] =outputs.attentions
self.assertEqual(len(lowerCamelCase ), self.model_tester.num_hidden_layers )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ), [self.model_tester.num_attention_heads, seq_len, seq_len], )
def snake_case ( self : str )-> int:
def check_hidden_states_output(lowerCamelCase : Optional[Any], lowerCamelCase : List[str], lowerCamelCase : Optional[Any] ):
lowerCamelCase__ : List[Any] =model_class(lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
with torch.no_grad():
lowerCamelCase__ : Optional[Any] =model(**self._prepare_for_class(lowerCamelCase, lowerCamelCase ) )
lowerCamelCase__ : Dict =outputs.hidden_states
lowerCamelCase__ : Any =self.model_tester.num_hidden_layers + 1
self.assertEqual(len(lowerCamelCase ), lowerCamelCase )
lowerCamelCase__ : Any =self.model_tester.seq_length - self.model_tester.num_masks
lowerCamelCase__ : str =num_visible_patches if model_class == VideoMAEForPreTraining else self.model_tester.seq_length
self.assertListEqual(
list(hidden_states[0].shape[-2:] ), [seq_length, self.model_tester.hidden_size], )
lowerCamelCase__ , lowerCamelCase__ : List[str] =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase__ : Union[str, Any] =True
check_hidden_states_output(lowerCamelCase, lowerCamelCase, lowerCamelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowerCamelCase__ : int =True
check_hidden_states_output(lowerCamelCase, lowerCamelCase, lowerCamelCase )
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def snake_case ( self : Optional[int] )-> int:
pass
def snake_case__ ( ):
"""simple docstring"""
lowerCamelCase__ : int =hf_hub_download(
repo_id='''hf-internal-testing/spaghetti-video''' , filename='''eating_spaghetti.npy''' , repo_type='''dataset''' )
lowerCamelCase__ : str =np.load(__lowerCamelCase )
return list(__lowerCamelCase )
@require_torch
@require_vision
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def snake_case ( self : List[str] )-> List[Any]:
# logits were tested with a different mean and std, so we use the same here
return (
VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5] )
if is_vision_available()
else None
)
@slow
def snake_case ( self : Optional[Any] )-> Dict:
lowerCamelCase__ : str =VideoMAEForVideoClassification.from_pretrained('''MCG-NJU/videomae-base-finetuned-kinetics''' ).to(
lowerCamelCase )
lowerCamelCase__ : Optional[Any] =self.default_image_processor
lowerCamelCase__ : List[str] =prepare_video()
lowerCamelCase__ : Union[str, Any] =image_processor(lowerCamelCase, return_tensors='''pt''' ).to(lowerCamelCase )
# forward pass
with torch.no_grad():
lowerCamelCase__ : Tuple =model(**lowerCamelCase )
# verify the logits
lowerCamelCase__ : Union[str, Any] =torch.Size((1, 400) )
self.assertEqual(outputs.logits.shape, lowerCamelCase )
lowerCamelCase__ : Tuple =torch.tensor([0.3_669, -0.0_688, -0.2_421] ).to(lowerCamelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3], lowerCamelCase, atol=1E-4 ) )
@slow
def snake_case ( self : Any )-> Tuple:
lowerCamelCase__ : Tuple =VideoMAEForPreTraining.from_pretrained('''MCG-NJU/videomae-base-short''' ).to(lowerCamelCase )
lowerCamelCase__ : Optional[int] =self.default_image_processor
lowerCamelCase__ : Dict =prepare_video()
lowerCamelCase__ : Dict =image_processor(lowerCamelCase, return_tensors='''pt''' ).to(lowerCamelCase )
# add boolean mask, indicating which patches to mask
lowerCamelCase__ : str =hf_hub_download(repo_id='''hf-internal-testing/bool-masked-pos''', filename='''bool_masked_pos.pt''' )
lowerCamelCase__ : Dict =torch.load(lowerCamelCase )
# forward pass
with torch.no_grad():
lowerCamelCase__ : Union[str, Any] =model(**lowerCamelCase )
# verify the logits
lowerCamelCase__ : Dict =torch.Size([1, 1408, 1536] )
lowerCamelCase__ : Union[str, Any] =torch.tensor(
[[0.7_994, 0.9_612, 0.8_508], [0.7_401, 0.8_958, 0.8_302], [0.5_862, 0.7_468, 0.7_325]], device=lowerCamelCase )
self.assertEqual(outputs.logits.shape, lowerCamelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3], lowerCamelCase, atol=1E-4 ) )
# verify the loss (`config.norm_pix_loss` = `True`)
lowerCamelCase__ : Optional[int] =torch.tensor([0.5_142], device=lowerCamelCase )
self.assertTrue(torch.allclose(outputs.loss, lowerCamelCase, atol=1E-4 ) )
# verify the loss (`config.norm_pix_loss` = `False`)
lowerCamelCase__ : Union[str, Any] =VideoMAEForPreTraining.from_pretrained('''MCG-NJU/videomae-base-short''', norm_pix_loss=lowerCamelCase ).to(
lowerCamelCase )
with torch.no_grad():
lowerCamelCase__ : Union[str, Any] =model(**lowerCamelCase )
lowerCamelCase__ : Union[str, Any] =torch.tensor([0.6_469], device=lowerCamelCase )
self.assertTrue(torch.allclose(outputs.loss, lowerCamelCase, atol=1E-4 ) )
| 238 | 0 |
'''simple docstring'''
from typing import Callable, List, Optional, Tuple, Union
import torch
from transformers import CLIPTextModel, CLIPTokenizer
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin, TransformeraDModel, VQModel
from ...schedulers import VQDiffusionScheduler
from ...utils import logging
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
__UpperCAmelCase =logging.get_logger(__name__) # pylint: disable=invalid-name
class LearnedClassifierFreeSamplingEmbeddings(ModelMixin, ConfigMixin):
    @register_to_config
    def __init__(self, learnable: bool, hidden_size: Optional[int] = None, length: Optional[int] = None):
        """simple docstring"""
        super().__init__()

        self.learnable = learnable

        if self.learnable:
            assert hidden_size is not None, "learnable=True requires `hidden_size` to be set"
            assert length is not None, "learnable=True requires `length` to be set"

            embeddings = torch.zeros(length, hidden_size)
        else:
            embeddings = None

        self.embeddings = torch.nn.Parameter(embeddings)
class VQDiffusionPipeline(DiffusionPipeline):
    vqvae: VQModel
    text_encoder: CLIPTextModel
    tokenizer: CLIPTokenizer
    transformer: Transformer2DModel
    learned_classifier_free_sampling_embeddings: LearnedClassifierFreeSamplingEmbeddings
    scheduler: VQDiffusionScheduler

    def __init__(self, vqvae: VQModel, text_encoder: CLIPTextModel, tokenizer: CLIPTokenizer, transformer: Transformer2DModel, scheduler: VQDiffusionScheduler, learned_classifier_free_sampling_embeddings: LearnedClassifierFreeSamplingEmbeddings):
        """simple docstring"""
        super().__init__()
        self.register_modules(
            vqvae=vqvae, transformer=transformer, text_encoder=text_encoder, tokenizer=tokenizer, scheduler=scheduler, learned_classifier_free_sampling_embeddings=learned_classifier_free_sampling_embeddings, )
    def _encode_prompt(self, prompt, num_images_per_prompt, do_classifier_free_guidance):
        """simple docstring"""
        batch_size = len(prompt) if isinstance(prompt, list) else 1
# get prompt text embeddings
__lowerCamelCase = self.tokenizer(
a , padding='''max_length''' , max_length=self.tokenizer.model_max_length , return_tensors='''pt''' , )
__lowerCamelCase = text_inputs.input_ids
if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
__lowerCamelCase = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
logger.warning(
'''The following part of your input was truncated because CLIP can only handle sequences up to'''
f""" {self.tokenizer.model_max_length} tokens: {removed_text}""" )
__lowerCamelCase = text_input_ids[:, : self.tokenizer.model_max_length]
__lowerCamelCase = self.text_encoder(text_input_ids.to(self.device ) )[0]
# NOTE: This additional step of normalizing the text embeddings is from VQ-Diffusion.
# While CLIP does normalize the pooled output of the text transformer when combining
# the image and text embeddings, CLIP does not directly normalize the last hidden state.
#
# CLIP normalizing the pooled output.
# https://github.com/huggingface/transformers/blob/d92e22d1f28324f513f3080e5c47c071a3916721/src/transformers/models/clip/modeling_clip.py#L1052-L1053
__lowerCamelCase = prompt_embeds / prompt_embeds.norm(dim=-1 , keepdim=a )
# duplicate text embeddings for each generation per prompt
__lowerCamelCase = prompt_embeds.repeat_interleave(a , dim=0 )
if do_classifier_free_guidance:
if self.learned_classifier_free_sampling_embeddings.learnable:
__lowerCamelCase = self.learned_classifier_free_sampling_embeddings.embeddings
__lowerCamelCase = negative_prompt_embeds.unsqueeze(0 ).repeat(a , 1 , 1 )
else:
__lowerCamelCase = [''''''] * batch_size
__lowerCamelCase = text_input_ids.shape[-1]
__lowerCamelCase = self.tokenizer(
a , padding='''max_length''' , max_length=a , truncation=a , return_tensors='''pt''' , )
__lowerCamelCase = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# See comment for normalizing text embeddings
__lowerCamelCase = negative_prompt_embeds / negative_prompt_embeds.norm(dim=-1 , keepdim=a )
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
__lowerCamelCase = negative_prompt_embeds.shape[1]
__lowerCamelCase = negative_prompt_embeds.repeat(1 , a , 1 )
__lowerCamelCase = negative_prompt_embeds.view(batch_size * num_images_per_prompt , a , -1 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
__lowerCamelCase = torch.cat([negative_prompt_embeds, prompt_embeds] )
return prompt_embeds
@torch.no_grad()
    def __call__(self, prompt: Union[str, List[str]], num_inference_steps: int = 100, guidance_scale: float = 5.0, truncation_rate: float = 1.0, num_images_per_prompt: int = 1, generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, latents: Optional[torch.FloatTensor] = None, output_type: Optional[str] = "pil", return_dict: bool = True, callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, callback_steps: int = 1, ) -> Union[ImagePipelineOutput, Tuple]:
"""simple docstring"""
        if isinstance(prompt, str):
            batch_size = 1
        elif isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            raise ValueError(f"""`prompt` has to be of type `str` or `list` but is {type(prompt)}""" )
        batch_size = batch_size * num_images_per_prompt
        do_classifier_free_guidance = guidance_scale > 1.0
        prompt_embeds = self._encode_prompt(prompt, num_images_per_prompt, do_classifier_free_guidance)
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(a , a ) or callback_steps <= 0)
):
raise ValueError(
f"""`callback_steps` has to be a positive integer but is {callback_steps} of type"""
f""" {type(a )}.""" )
# get the initial completely masked latents unless the user supplied it
        latents_shape = (batch_size, self.transformer.num_latent_pixels)
if latents is None:
__lowerCamelCase = self.transformer.num_vector_embeds - 1
__lowerCamelCase = torch.full(a , a ).to(self.device )
else:
if latents.shape != latents_shape:
raise ValueError(f"""Unexpected latents shape, got {latents.shape}, expected {latents_shape}""" )
if (latents < 0).any() or (latents >= self.transformer.num_vector_embeds).any():
raise ValueError(
'''Unexpected latents value(s). All latents be valid embedding indices i.e. in the range 0,'''
f""" {self.transformer.num_vector_embeds - 1} (inclusive).""" )
__lowerCamelCase = latents.to(self.device )
# set timesteps
self.scheduler.set_timesteps(a , device=self.device )
__lowerCamelCase = self.scheduler.timesteps.to(self.device )
__lowerCamelCase = latents
for i, t in enumerate(self.progress_bar(a ) ):
# expand the sample if we are doing classifier free guidance
__lowerCamelCase = torch.cat([sample] * 2 ) if do_classifier_free_guidance else sample
# predict the un-noised image
# model_output == `log_p_x_0`
__lowerCamelCase = self.transformer(a , encoder_hidden_states=a , timestep=a ).sample
if do_classifier_free_guidance:
__lowerCamelCase , __lowerCamelCase = model_output.chunk(2 )
__lowerCamelCase = model_output_uncond + guidance_scale * (model_output_text - model_output_uncond)
model_output -= torch.logsumexp(a , dim=1 , keepdim=a )
__lowerCamelCase = self.truncate(a , a )
# remove `log(0)`'s (`-inf`s)
__lowerCamelCase = model_output.clamp(-70 )
# compute the previous noisy sample x_t -> x_t-1
__lowerCamelCase = self.scheduler.step(a , timestep=a , sample=a , generator=a ).prev_sample
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(a , a , a )
__lowerCamelCase = self.vqvae.config.vq_embed_dim
__lowerCamelCase = (batch_size, self.transformer.height, self.transformer.width, embedding_channels)
__lowerCamelCase = self.vqvae.quantize.get_codebook_entry(a , shape=a )
__lowerCamelCase = self.vqvae.decode(a , force_not_quantize=a ).sample
__lowerCamelCase = (image / 2 + 0.5).clamp(0 , 1 )
__lowerCamelCase = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
__lowerCamelCase = self.numpy_to_pil(a )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=a )
    def truncate(self, log_p_x_0: torch.FloatTensor, truncation_rate: float) -> torch.FloatTensor:
        """simple docstring"""
        sorted_log_p_x_0, indices = torch.sort(log_p_x_0, 1, descending=True)
        sorted_p_x_0 = torch.exp(sorted_log_p_x_0)
        keep_mask = sorted_p_x_0.cumsum(dim=1) < truncation_rate

        # Ensure that at least the largest probability is not zeroed out
        all_true = torch.full_like(keep_mask[:, 0:1, :], True)
        keep_mask = torch.cat((all_true, keep_mask), dim=1)
        keep_mask = keep_mask[:, :-1, :]

        keep_mask = keep_mask.gather(1, indices.argsort(1))

        rv = log_p_x_0.clone()

        rv[~keep_mask] = -torch.inf  # -inf = log(0)

        return rv
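# Hypothetical usage sketch for the pipeline above. "microsoft/vq-diffusion-ithq"
# is the checkpoint the diffusers documentation pairs with VQDiffusionPipeline;
# the prompt and step count are illustrative.
import torch
from diffusers import VQDiffusionPipeline

device = "cuda" if torch.cuda.is_available() else "cpu"
pipe = VQDiffusionPipeline.from_pretrained("microsoft/vq-diffusion-ithq").to(device)
image = pipe("teddy bear playing in the pool", num_inference_steps=100, truncation_rate=1.0).images[0]
image.save("vq_diffusion_sample.png")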
| 361 | '''simple docstring'''
import os
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from datasets import Dataset
from transformers.models.realm.configuration_realm import RealmConfig
from transformers.models.realm.retrieval_realm import _REALM_BLOCK_RECORDS_FILENAME, RealmRetriever
from transformers.models.realm.tokenization_realm import VOCAB_FILES_NAMES, RealmTokenizer
class RealmRetrieverTest(TestCase):
    def setUp( self : Optional[Any] ):
"""simple docstring"""
__lowerCamelCase = tempfile.mkdtemp()
__lowerCamelCase = 5
# Realm tok
__lowerCamelCase = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''[PAD]''',
'''[MASK]''',
'''test''',
'''question''',
'''this''',
'''is''',
'''the''',
'''first''',
'''second''',
'''third''',
'''fourth''',
'''fifth''',
'''record''',
'''want''',
'''##want''',
'''##ed''',
'''wa''',
'''un''',
'''runn''',
'''##ing''',
''',''',
'''low''',
'''lowest''',
]
__lowerCamelCase = os.path.join(self.tmpdirname , '''realm_tokenizer''' )
os.makedirs(a , exist_ok=a )
__lowerCamelCase = os.path.join(a , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
__lowerCamelCase = os.path.join(self.tmpdirname , '''realm_block_records''' )
os.makedirs(a , exist_ok=a )
    def get_tokenizer( self : Optional[Any] ):
"""simple docstring"""
return RealmTokenizer.from_pretrained(os.path.join(self.tmpdirname , '''realm_tokenizer''' ) )
    def tearDown( self : Any ):
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
    def get_config( self : Optional[Any] ):
"""simple docstring"""
__lowerCamelCase = RealmConfig(num_block_records=self.num_block_records )
return config
    def get_dummy_dataset( self : int ):
"""simple docstring"""
__lowerCamelCase = Dataset.from_dict(
{
'''id''': ['''0''', '''1'''],
'''question''': ['''foo''', '''bar'''],
'''answers''': [['''Foo''', '''Bar'''], ['''Bar''']],
} )
return dataset
    def get_dummy_block_records( self : Optional[Any] ):
"""simple docstring"""
__lowerCamelCase = np.array(
[
b'''This is the first record''',
b'''This is the second record''',
b'''This is the third record''',
b'''This is the fourth record''',
b'''This is the fifth record''',
b'''This is a longer longer longer record''',
] , dtype=a , )
return block_records
    def get_dummy_retriever( self : str ):
"""simple docstring"""
__lowerCamelCase = RealmRetriever(
block_records=self.get_dummy_block_records() , tokenizer=self.get_tokenizer() , )
return retriever
    def test_retrieve( self : Any ):
"""simple docstring"""
__lowerCamelCase = self.get_config()
__lowerCamelCase = self.get_dummy_retriever()
__lowerCamelCase = retriever.tokenizer
__lowerCamelCase = np.array([0, 3] , dtype='''long''' )
__lowerCamelCase = tokenizer(['''Test question'''] ).input_ids
__lowerCamelCase = tokenizer(
['''the fourth'''] , add_special_tokens=a , return_token_type_ids=a , return_attention_mask=a , ).input_ids
__lowerCamelCase = config.reader_seq_len
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase = retriever(
a , a , answer_ids=a , max_length=a , return_tensors='''np''' )
self.assertEqual(len(a ) , 2 )
self.assertEqual(len(a ) , 2 )
self.assertEqual(len(a ) , 2 )
self.assertEqual(concat_inputs.input_ids.shape , (2, 10) )
self.assertEqual(concat_inputs.attention_mask.shape , (2, 10) )
self.assertEqual(concat_inputs.token_type_ids.shape , (2, 10) )
self.assertEqual(concat_inputs.special_tokens_mask.shape , (2, 10) )
self.assertEqual(
tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[0] ) , ['''[CLS]''', '''test''', '''question''', '''[SEP]''', '''this''', '''is''', '''the''', '''first''', '''record''', '''[SEP]'''] , )
self.assertEqual(
tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[1] ) , ['''[CLS]''', '''test''', '''question''', '''[SEP]''', '''this''', '''is''', '''the''', '''fourth''', '''record''', '''[SEP]'''] , )
    def test_block_has_answer( self : Optional[Any] ):
"""simple docstring"""
__lowerCamelCase = self.get_config()
__lowerCamelCase = self.get_dummy_retriever()
__lowerCamelCase = retriever.tokenizer
__lowerCamelCase = np.array([0, 3, 5] , dtype='''long''' )
__lowerCamelCase = tokenizer(['''Test question'''] ).input_ids
__lowerCamelCase = tokenizer(
['''the fourth''', '''longer longer'''] , add_special_tokens=a , return_token_type_ids=a , return_attention_mask=a , ).input_ids
__lowerCamelCase = config.reader_seq_len
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase = retriever(
a , a , answer_ids=a , max_length=a , return_tensors='''np''' )
self.assertEqual([False, True, True] , a )
self.assertEqual([[-1, -1, -1], [6, -1, -1], [6, 7, 8]] , a )
self.assertEqual([[-1, -1, -1], [7, -1, -1], [7, 8, 9]] , a )
    def test_save_load_pretrained( self : int ):
"""simple docstring"""
__lowerCamelCase = self.get_dummy_retriever()
retriever.save_pretrained(os.path.join(self.tmpdirname , '''realm_block_records''' ) )
# Test local path
__lowerCamelCase = retriever.from_pretrained(os.path.join(self.tmpdirname , '''realm_block_records''' ) )
self.assertEqual(retriever.block_records[0] , b'''This is the first record''' )
# Test mocked remote path
with patch('''transformers.models.realm.retrieval_realm.hf_hub_download''' ) as mock_hf_hub_download:
__lowerCamelCase = os.path.join(
os.path.join(self.tmpdirname , '''realm_block_records''' ) , _REALM_BLOCK_RECORDS_FILENAME )
__lowerCamelCase = RealmRetriever.from_pretrained('''google/realm-cc-news-pretrained-openqa''' )
self.assertEqual(retriever.block_records[0] , b'''This is the first record''' )
| 237 | 0 |
'''simple docstring'''
def binary_recursive( decimal : int ) -> str:
    """simple docstring"""
    decimal = int(decimal)
    if decimal in (0, 1):  # Exit cases for the recursion
        return str(decimal)
    div, mod = divmod(decimal, 2)
    return binary_recursive(div) + str(mod)
def main( number : str ) -> str:
    """simple docstring"""
    number = str(number).strip()
    if not number:
        raise ValueError('''No input value was provided''' )
    negative = '''-''' if number.startswith('''-''' ) else ''''''
    number = number.lstrip('''-''' )
    if not number.isnumeric():
        raise ValueError('''Input value is not an integer''' )
    return F'''{negative}0b{binary_recursive(int(number))}'''
if __name__ == "__main__":
from doctest import testmod
testmod()
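# Worked examples for the two helpers above; each result can be cross-checked
# against Python's built-in bin().
assert main("0") == "0b0"
assert main("40") == "0b101000"
assert main("-40") == "-0b101000"
assert bin(40) == "0b101000"  # built-in reference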
| 93 |
'''simple docstring'''
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowercase : Optional[Any] = logging.get_logger(__name__)
_lowercase : List[str] = {
"google/pix2struct-textcaps-base": (
"https://huggingface.co/google/pix2struct-textcaps-base/resolve/main/config.json"
),
}
class Pix2StructTextConfig( PretrainedConfig ):
    model_type = '''pix2struct_text_model'''
    keys_to_ignore_at_inference = ['''past_key_values''']
    attribute_map = {
'''hidden_size''': '''hidden_size''',
'''num_attention_heads''': '''num_heads''',
'''num_hidden_layers''': '''num_layers''',
}
    def __init__( self , vocab_size=5_02_44 , hidden_size=7_68 , d_kv=64 , d_ff=20_48 , num_layers=12 , num_heads=12 , relative_attention_num_buckets=32 , relative_attention_max_distance=1_28 , dropout_rate=0.1 , layer_norm_epsilon=1E-6 , initializer_factor=1.0 , dense_act_fn="gelu_new" , decoder_start_token_id=0 , use_cache=False , pad_token_id=0 , eos_token_id=1 , tie_word_embeddings=False , is_decoder=True , **kwargs , ):
"""simple docstring"""
lowercase_ : Any = vocab_size
lowercase_ : Tuple = hidden_size
lowercase_ : Optional[Any] = d_kv
lowercase_ : List[str] = d_ff
lowercase_ : List[str] = num_layers
lowercase_ : Optional[Any] = num_heads
lowercase_ : Union[str, Any] = relative_attention_num_buckets
lowercase_ : Optional[int] = relative_attention_max_distance
lowercase_ : Union[str, Any] = dropout_rate
lowercase_ : Dict = layer_norm_epsilon
lowercase_ : Dict = initializer_factor
lowercase_ : List[Any] = use_cache
lowercase_ : Optional[int] = eos_token_id
lowercase_ : Optional[int] = decoder_start_token_id
# for backwards compatibility
lowercase_ : Any = dense_act_fn
        super().__init__(
            pad_token_id=pad_token_id , eos_token_id=eos_token_id , decoder_start_token_id=decoder_start_token_id , tie_word_embeddings=tie_word_embeddings , is_decoder=is_decoder , **kwargs , )
@classmethod
    def from_pretrained( cls , pretrained_model_name_or_path , **kwargs ):
        """simple docstring"""
        cls._set_token_in_kwargs(kwargs)
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path , **kwargs)
        # get the text config dict if we are loading from Pix2StructConfig
        if config_dict.get('''model_type''' ) == "pix2struct":
            config_dict = config_dict['''text_config''']
        if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                F'''You are using a model of type {config_dict['model_type']} to instantiate a model of type '''
                F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
        return cls.from_dict(config_dict , **kwargs )
class Pix2StructVisionConfig( PretrainedConfig ):
    model_type = '''pix2struct_vision_model'''
    def __init__( self , hidden_size=7_68 , patch_embed_hidden_size=7_68 , d_ff=20_48 , d_kv=64 , num_hidden_layers=12 , num_attention_heads=12 , dense_act_fn="gelu_new" , layer_norm_eps=1E-6 , dropout_rate=0.0 , attention_dropout=0.0 , initializer_range=1E-1_0 , initializer_factor=1.0 , seq_len=40_96 , relative_attention_num_buckets=32 , relative_attention_max_distance=1_28 , **kwargs , ):
        """simple docstring"""
        super().__init__(**kwargs )
lowercase_ : Union[str, Any] = hidden_size
lowercase_ : Any = patch_embed_hidden_size
lowercase_ : List[Any] = d_ff
lowercase_ : Dict = dropout_rate
lowercase_ : Any = num_hidden_layers
lowercase_ : Any = num_attention_heads
lowercase_ : int = initializer_range
lowercase_ : Dict = initializer_factor
lowercase_ : Dict = attention_dropout
lowercase_ : Optional[Any] = layer_norm_eps
lowercase_ : str = dense_act_fn
lowercase_ : Dict = seq_len
lowercase_ : List[Any] = relative_attention_num_buckets
lowercase_ : int = relative_attention_max_distance
lowercase_ : Optional[int] = d_kv
@classmethod
    def from_pretrained( cls , pretrained_model_name_or_path , **kwargs ):
        """simple docstring"""
        cls._set_token_in_kwargs(kwargs)
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path , **kwargs)
        # get the vision config dict if we are loading from Pix2StructConfig
        if config_dict.get('''model_type''' ) == "pix2struct":
            config_dict = config_dict['''vision_config''']
        if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                F'''You are using a model of type {config_dict['model_type']} to instantiate a model of type '''
                F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
        return cls.from_dict(config_dict , **kwargs )
class Pix2StructConfig( PretrainedConfig ):
    model_type = '''pix2struct'''
    is_composition = True
    def __init__( self , text_config=None , vision_config=None , initializer_factor=1.0 , initializer_range=0.02 , is_vqa=False , tie_word_embeddings=False , is_encoder_decoder=True , **kwargs , ):
        """simple docstring"""
        super().__init__(tie_word_embeddings=tie_word_embeddings , is_encoder_decoder=is_encoder_decoder , **kwargs )
if text_config is None:
            text_config = {}
logger.info('''text_config is None. Initializing the Pix2StructTextConfig with default values.''' )
if vision_config is None:
            vision_config = {}
logger.info('''vision_config is None. Initializing the Pix2StructVisionConfig with default values.''' )
        self.text_config = Pix2StructTextConfig(**text_config )
        self.vision_config = Pix2StructVisionConfig(**vision_config )
lowercase_ : Optional[Any] = self.text_config.decoder_start_token_id
lowercase_ : Union[str, Any] = self.text_config.pad_token_id
lowercase_ : Union[str, Any] = self.text_config.eos_token_id
lowercase_ : int = initializer_factor
lowercase_ : Any = initializer_range
lowercase_ : str = self.initializer_range
lowercase_ : str = self.initializer_range
lowercase_ : int = is_vqa
@classmethod
    def from_text_vision_configs( cls , text_config : Pix2StructTextConfig , vision_config : Pix2StructVisionConfig , **kwargs ):
"""simple docstring"""
return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **__SCREAMING_SNAKE_CASE )
    def to_dict( self ):
        """simple docstring"""
        output = copy.deepcopy(self.__dict__ )
        output['''text_config'''] = self.text_config.to_dict()
        output['''vision_config'''] = self.vision_config.to_dict()
        output['''model_type'''] = self.__class__.model_type
        return output
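# Hypothetical construction sketch for the composite config above (behaves as
# shown against the upstream transformers implementation these classes mirror);
# the sub-config overrides are illustrative, not recommended values.
text_cfg = Pix2StructTextConfig(hidden_size=768, num_layers=12)
vision_cfg = Pix2StructVisionConfig(hidden_size=768, num_hidden_layers=12)
config = Pix2StructConfig.from_text_vision_configs(text_cfg, vision_cfg)
print(config.text_config.hidden_size, config.vision_config.hidden_size)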
| 93 | 1 |
"""simple docstring"""
import argparse
import glob
import importlib.util
import os
import re
import black
from doc_builder.style_doc import style_docstrings_in_code
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
DIFFUSERS_PATH = '''src/diffusers'''
REPO_PATH = '''.'''

# This is to make sure the diffusers module imported is the one in the repo.
spec = importlib.util.spec_from_file_location(
    '''diffusers''',
    os.path.join(DIFFUSERS_PATH, '''__init__.py'''),
    submodule_search_locations=[DIFFUSERS_PATH],
)
diffusers = spec.loader.load_module()
def _should_continue( line : str , indent : str ) -> bool:
    '''simple docstring'''
    return line.startswith(indent ) or len(line ) <= 1 or re.search(r"^\s*\)(\s*->.*:|:)\s*$" , line ) is not None
def find_code_in_diffusers( object_name : str ) -> str:
    '''simple docstring'''
    parts = object_name.split("." )
    i = 0
    # First let's find the module where our object lives.
    module = parts[i]
    while i < len(parts ) and not os.path.isfile(os.path.join(DIFFUSERS_PATH , f"""{module}.py""" ) ):
        i += 1
        if i < len(parts ):
            module = os.path.join(module , parts[i] )
    if i >= len(parts ):
        raise ValueError(f"""`object_name` should begin with the name of a module of diffusers but got {object_name}.""" )

    with open(os.path.join(DIFFUSERS_PATH , f"""{module}.py""" ) , "r" , encoding="utf-8" , newline="\n" ) as f:
        lines = f.readlines()
    # Now let's find the class / func in the code!
    indent = ""
    line_index = 0
    for name in parts[i + 1 :]:
        while (
            line_index < len(lines ) and re.search(rf"""^{indent}(class|def)\s+{name}(\(|\:)""" , lines[line_index] ) is None
        ):
            line_index += 1
        indent += "    "
        line_index += 1
    if line_index >= len(lines ):
        raise ValueError(f""" {object_name} does not match any function or class in {module}.""" )
    # We found the beginning of the class / func, now let's find the end (when the indent diminishes).
    start_index = line_index
    while line_index < len(lines ) and _should_continue(lines[line_index] , indent ):
        line_index += 1
    # Clean up empty lines at the end (if any).
    while len(lines[line_index - 1] ) <= 1:
        line_index -= 1
    code_lines = lines[start_index:line_index]
    return "".join(code_lines )
_re_copy_warning = re.compile(r'''^(\s*)#\s*Copied from\s+diffusers\.(\S+\.\S+)\s*($|\S.*$)''')
_re_replace_pattern = re.compile(r'''^\s*(\S+)->(\S+)(\s+.*|$)''')
_re_fill_pattern = re.compile(r'''<FILL\s+[^>]*>''')
def get_indent( code : str ) -> str:
    '''simple docstring'''
    lines = code.split("\n" )
    idx = 0
    while idx < len(lines ) and len(lines[idx] ) == 0:
        idx += 1
    if idx < len(lines ):
        return re.search(r"^(\s*)\S" , lines[idx] ).groups()[0]
    return ""
def blackify( code : str ) -> str:
    '''simple docstring'''
    has_indent = len(get_indent(code ) ) > 0
    if has_indent:
        code = f"""class Bla:\n{code}"""
    mode = black.Mode(target_versions={black.TargetVersion.PY37} , line_length=1_1_9 , preview=True )
    result = black.format_str(code , mode=mode )
    result, _ = style_docstrings_in_code(result )
    return result[len("class Bla:\n" ) :] if has_indent else result
def is_copy_consistent( filename : str , overwrite : bool = False ):
    '''simple docstring'''
    with open(filename , "r" , encoding="utf-8" , newline="\n" ) as f:
        lines = f.readlines()
    diffs = []
    line_index = 0
    # Not a for loop cause `lines` is going to change (if `overwrite=True`).
    while line_index < len(lines ):
        search = _re_copy_warning.search(lines[line_index] )
        if search is None:
            line_index += 1
            continue
        # There is some copied code here, let's retrieve the original.
        indent, object_name, replace_pattern = search.groups()
        theoretical_code = find_code_in_diffusers(object_name )
        theoretical_indent = get_indent(theoretical_code )
        start_index = line_index + 1 if indent == theoretical_indent else line_index + 2
        indent = theoretical_indent
        line_index = start_index
        # Loop to check the observed code, stop when indentation diminishes or if we see a End copy comment.
        should_continue = True
        while line_index < len(lines ) and should_continue:
            line_index += 1
            if line_index >= len(lines ):
                break
            line = lines[line_index]
            should_continue = _should_continue(line , indent ) and re.search(f"""^{indent}# End copy""" , line ) is None
        # Clean up empty lines at the end (if any).
        while len(lines[line_index - 1] ) <= 1:
            line_index -= 1
        observed_code_lines = lines[start_index:line_index]
        observed_code = "".join(observed_code_lines )
        # Remove any nested `Copied from` comments to avoid circular copies
        theoretical_code = [line for line in theoretical_code.split("\n" ) if _re_copy_warning.search(line ) is None]
        theoretical_code = "\n".join(theoretical_code )
        # Before comparing, use the `replace_pattern` on the original code.
        if len(replace_pattern ) > 0:
            patterns = replace_pattern.replace("with" , "" ).split("," )
            patterns = [_re_replace_pattern.search(p ) for p in patterns]
            for pattern in patterns:
                if pattern is None:
                    continue
                obj1, obj2, option = pattern.groups()
                theoretical_code = re.sub(obj1 , obj2 , theoretical_code )
                if option.strip() == "all-casing":
                    theoretical_code = re.sub(obj1.lower() , obj2.lower() , theoretical_code )
                    theoretical_code = re.sub(obj1.upper() , obj2.upper() , theoretical_code )
            # Blackify after replacement. To be able to do that, we need the header (class or function definition)
            # from the previous line
            theoretical_code = blackify(lines[start_index - 1] + theoretical_code )
            theoretical_code = theoretical_code[len(lines[start_index - 1] ) :]
        # Test for a diff and act accordingly.
        if observed_code != theoretical_code:
            diffs.append([object_name, start_index] )
            if overwrite:
                lines = lines[:start_index] + [theoretical_code] + lines[line_index:]
                line_index = start_index + 1
    if overwrite and len(diffs ) > 0:
        # Warn the user a file has been modified.
        print(f"""Detected changes, rewriting {filename}.""" )
        with open(filename , "w" , encoding="utf-8" , newline="\n" ) as f:
            f.writelines(lines )
    return diffs
def check_copies( overwrite : bool = False ):
    '''simple docstring'''
    all_files = glob.glob(os.path.join(REPO_PATH , "**/*.py" ) , recursive=True )
    diffs = []
    for filename in all_files:
        new_diffs = is_copy_consistent(filename , overwrite )
        diffs += [f"""- {filename}: copy does not match {d[0]} at line {d[1]}""" for d in new_diffs]
    if not overwrite and len(diffs ) > 0:
        diff = "\n".join(diffs )
        raise Exception(
            "Found the following copy inconsistencies:\n"
            + diff
            + "\nRun `make fix-copies` or `python utils/check_copies.py --fix_and_overwrite` to fix them." )
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
parser.add_argument('''--fix_and_overwrite''', action='''store_true''', help='''Whether to fix inconsistencies.''')
lowerCAmelCase__ = parser.parse_args()
check_copies(args.fix_and_overwrite)
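# For reference, this is the comment convention the script enforces (the class
# and target names below are made up). The body of `AltFeedForward` must stay
# identical to the referenced original after applying the
# `FeedForward->AltFeedForward` rename, or `make fix-copies` will rewrite it:
#
#     # Copied from diffusers.models.attention.FeedForward with FeedForward->AltFeedForward
#     class AltFeedForward(nn.Module):
#         ...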
| 133 |
"""simple docstring"""
from typing import List, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
'''huggingface/autoformer-tourism-monthly''': '''https://huggingface.co/huggingface/autoformer-tourism-monthly/resolve/main/config.json''',
}
class AutoformerConfig( PretrainedConfig ):
    """simple docstring"""

    model_type = "autoformer"
    attribute_map = {
"hidden_size": "d_model",
"num_attention_heads": "encoder_attention_heads",
"num_hidden_layers": "encoder_layers",
}
    def __init__( self , prediction_length = None , context_length = None , distribution_output = "student_t" , loss = "nll" , input_size = 1 , lags_sequence = [1, 2, 3, 4, 5, 6, 7] , scaling = True , num_time_features = 0 , num_dynamic_real_features = 0 , num_static_categorical_features = 0 , num_static_real_features = 0 , cardinality = None , embedding_dimension = None , d_model = 64 , encoder_attention_heads = 2 , decoder_attention_heads = 2 , encoder_layers = 2 , decoder_layers = 2 , encoder_ffn_dim = 32 , decoder_ffn_dim = 32 , activation_function = "gelu" , dropout = 0.1 , encoder_layerdrop = 0.1 , decoder_layerdrop = 0.1 , attention_dropout = 0.1 , activation_dropout = 0.1 , num_parallel_samples = 100 , init_std = 0.02 , use_cache = True , is_encoder_decoder=True , label_length = 10 , moving_average = 25 , autocorrelation_factor = 3 , **kwargs , ):
"""simple docstring"""
lowerCAmelCase : Any = prediction_length
lowerCAmelCase : Dict = context_length if context_length is not None else prediction_length
lowerCAmelCase : Tuple = distribution_output
lowerCAmelCase : List[Any] = loss
lowerCAmelCase : int = input_size
lowerCAmelCase : str = num_time_features
lowerCAmelCase : str = lags_sequence
lowerCAmelCase : List[str] = scaling
lowerCAmelCase : List[Any] = num_dynamic_real_features
lowerCAmelCase : Tuple = num_static_real_features
lowerCAmelCase : Dict = num_static_categorical_features
if cardinality is not None and num_static_categorical_features > 0:
if len(snake_case__ ) != num_static_categorical_features:
raise ValueError(
"The cardinality should be a list of the same length as `num_static_categorical_features`" )
lowerCAmelCase : Any = cardinality
else:
lowerCAmelCase : Union[str, Any] = [0]
if embedding_dimension is not None and num_static_categorical_features > 0:
if len(snake_case__ ) != num_static_categorical_features:
raise ValueError(
"The embedding dimension should be a list of the same length as `num_static_categorical_features`" )
lowerCAmelCase : Tuple = embedding_dimension
else:
lowerCAmelCase : Any = [min(50 , (cat + 1) // 2 ) for cat in self.cardinality]
lowerCAmelCase : Any = num_parallel_samples
# Transformer architecture configuration
lowerCAmelCase : str = input_size * len(self.lags_sequence ) + self._number_of_features
lowerCAmelCase : Any = d_model
lowerCAmelCase : List[str] = encoder_attention_heads
lowerCAmelCase : Union[str, Any] = decoder_attention_heads
lowerCAmelCase : Optional[int] = encoder_ffn_dim
lowerCAmelCase : Optional[Any] = decoder_ffn_dim
lowerCAmelCase : int = encoder_layers
lowerCAmelCase : int = decoder_layers
lowerCAmelCase : List[Any] = dropout
lowerCAmelCase : Optional[int] = attention_dropout
lowerCAmelCase : Union[str, Any] = activation_dropout
lowerCAmelCase : Optional[int] = encoder_layerdrop
lowerCAmelCase : Dict = decoder_layerdrop
lowerCAmelCase : Tuple = activation_function
lowerCAmelCase : Optional[Any] = init_std
lowerCAmelCase : List[Any] = use_cache
# Autoformer
lowerCAmelCase : Any = label_length
lowerCAmelCase : Any = moving_average
lowerCAmelCase : Optional[Any] = autocorrelation_factor
        super().__init__(is_encoder_decoder=is_encoder_decoder , **kwargs )
@property
    def _number_of_features( self ) -> int:
"""simple docstring"""
return (
sum(self.embedding_dimension )
+ self.num_dynamic_real_features
+ self.num_time_features
+ self.num_static_real_features
+ self.input_size * 2 # the log1p(abs(loc)) and log(scale) features
)
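# Hypothetical construction sketch for the config above (behaves as shown
# against the upstream transformers implementation this class mirrors); the
# horizon, lag and feature counts are illustrative. `feature_size` is derived
# in __init__ as input_size * len(lags_sequence) + the number of extra features.
config = AutoformerConfig(
    prediction_length=24,
    context_length=48,
    lags_sequence=[1, 2, 3],
    num_time_features=2,
)
print(config.d_model, config.feature_size)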
| 133 | 1 |
import gc
import unittest
from diffusers import FlaxControlNetModel, FlaxStableDiffusionControlNetPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class __A( unittest.TestCase ):
    def tearDown( self ) -> None:
'''simple docstring'''
super().tearDown()
gc.collect()
    def test_canny( self ) -> None:
'''simple docstring'''
        controlnet, controlnet_params = FlaxControlNetModel.from_pretrained(
            '''lllyasviel/sd-controlnet-canny''' , from_pt=True , dtype=jnp.bfloat16 )
        pipe, params = FlaxStableDiffusionControlNetPipeline.from_pretrained(
            '''runwayml/stable-diffusion-v1-5''' , controlnet=controlnet , from_pt=True , dtype=jnp.bfloat16 )
        params['''controlnet'''] = controlnet_params
__a = '''bird'''
__a = jax.device_count()
__a = pipe.prepare_text_inputs([prompts] * num_samples )
__a = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png''' )
__a = pipe.prepare_image_inputs([canny_image] * num_samples )
__a = jax.random.PRNGKey(0 )
__a = jax.random.split(_snake_case , jax.device_count() )
__a = replicate(_snake_case )
__a = shard(_snake_case )
__a = shard(_snake_case )
__a = pipe(
prompt_ids=_snake_case , image=_snake_case , params=_snake_case , prng_seed=_snake_case , num_inference_steps=50 , jit=_snake_case , ).images
assert images.shape == (jax.device_count(), 1, 768, 512, 3)
__a = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
__a = images[0, 253:256, 253:256, -1]
__a = jnp.asarray(jax.device_get(image_slice.flatten() ) )
__a = jnp.array(
[0.16_7969, 0.11_6699, 0.08_1543, 0.15_4297, 0.13_2812, 0.10_8887, 0.16_9922, 0.16_9922, 0.20_5078] )
print(F"""output_slice: {output_slice}""" )
assert jnp.abs(output_slice - expected_slice ).max() < 1E-2
    def test_pose( self ) -> None:
'''simple docstring'''
        controlnet, controlnet_params = FlaxControlNetModel.from_pretrained(
            '''lllyasviel/sd-controlnet-openpose''' , from_pt=True , dtype=jnp.bfloat16 )
        pipe, params = FlaxStableDiffusionControlNetPipeline.from_pretrained(
            '''runwayml/stable-diffusion-v1-5''' , controlnet=controlnet , from_pt=True , dtype=jnp.bfloat16 )
        params['''controlnet'''] = controlnet_params
__a = '''Chef in the kitchen'''
__a = jax.device_count()
__a = pipe.prepare_text_inputs([prompts] * num_samples )
__a = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/pose.png''' )
__a = pipe.prepare_image_inputs([pose_image] * num_samples )
__a = jax.random.PRNGKey(0 )
__a = jax.random.split(_snake_case , jax.device_count() )
__a = replicate(_snake_case )
__a = shard(_snake_case )
__a = shard(_snake_case )
__a = pipe(
prompt_ids=_snake_case , image=_snake_case , params=_snake_case , prng_seed=_snake_case , num_inference_steps=50 , jit=_snake_case , ).images
assert images.shape == (jax.device_count(), 1, 768, 512, 3)
__a = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
__a = images[0, 253:256, 253:256, -1]
__a = jnp.asarray(jax.device_get(image_slice.flatten() ) )
__a = jnp.array(
[[0.27_1484, 0.26_1719, 0.27_5391, 0.27_7344, 0.27_9297, 0.29_1016, 0.29_4922, 0.30_2734, 0.30_2734]] )
print(F"""output_slice: {output_slice}""" )
assert jnp.abs(output_slice - expected_slice ).max() < 1E-2 | 6 |
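# The tests above use jax.pmap-style data parallelism: parameters are
# replicated across devices and the batch is sharded along its leading axis. A
# self-contained sketch of that pattern with dummy data (shapes illustrative):
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard

params = {"w": jnp.ones((4, 4))}
batch = jnp.ones((jax.device_count() * 2, 4))  # global batch

params = replicate(params)  # adds a leading device axis to every leaf
batch = shard(batch)        # (num_devices, per_device_batch, 4)

@jax.pmap
def apply(params, x):
    return x @ params["w"]

out = apply(params, batch)  # (num_devices, per_device_batch, 4)
print(out.shape)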
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version(""">=""", """4.25.0""")):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
from .pipeline_unclip import UnCLIPPipeline
from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
from .text_proj import UnCLIPTextProjModel | 262 | 0 |
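# The try/except guard above is the standard optional-dependency pattern: when
# the requirement check fails, stub objects that raise only on use are exported
# instead. A self-contained sketch of the same idea (the failing check is
# simulated; the stub name is made up):
_REQUIREMENTS_MET = False  # stand-in for the transformers>=4.25.0 check above

if not _REQUIREMENTS_MET:
    class UnCLIPPipelineStub:
        """Placeholder that fails loudly only when actually instantiated."""

        def __init__(self, *args, **kwargs):
            raise ImportError("UnCLIPPipeline requires torch and transformers>=4.25.0.")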
'''simple docstring'''
import argparse
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import BigBirdPegasusConfig, BigBirdPegasusForConditionalGeneration
INIT_COMMON = [
# tf -> hf
('/', '.'),
('layer_', 'layers.'),
('kernel', 'weight'),
('beta', 'bias'),
('gamma', 'weight'),
('pegasus', 'model'),
]
END_COMMON = [
('.output.dense', '.fc2'),
('intermediate.LayerNorm', 'final_layer_norm'),
('intermediate.dense', 'fc1'),
]
DECODER_PATTERNS = (
INIT_COMMON
+ [
('attention.self.LayerNorm', 'self_attn_layer_norm'),
('attention.output.dense', 'self_attn.out_proj'),
('attention.self', 'self_attn'),
('attention.encdec.LayerNorm', 'encoder_attn_layer_norm'),
('attention.encdec_output.dense', 'encoder_attn.out_proj'),
('attention.encdec', 'encoder_attn'),
('key', 'k_proj'),
('value', 'v_proj'),
('query', 'q_proj'),
('decoder.LayerNorm', 'decoder.layernorm_embedding'),
]
+ END_COMMON
)
REMAINING_PATTERNS = (
INIT_COMMON
+ [
('embeddings.word_embeddings', 'shared.weight'),
('embeddings.position_embeddings', 'embed_positions.weight'),
('attention.self.LayerNorm', 'self_attn_layer_norm'),
('attention.output.dense', 'self_attn.output'),
('attention.self', 'self_attn.self'),
('encoder.LayerNorm', 'encoder.layernorm_embedding'),
]
+ END_COMMON
)
KEYS_TO_IGNORE = [
'encdec/key/bias',
'encdec/query/bias',
'encdec/value/bias',
'self/key/bias',
'self/query/bias',
'self/value/bias',
'encdec_output/dense/bias',
'attention/output/dense/bias',
]
def rename_state_dict_key(k, patterns) ->str:
    """simple docstring"""
    for tf_name, hf_name in patterns:
        k = k.replace(tf_name, hf_name)
    return k
def convert_bigbird_pegasus(tf_weights, config_update) ->BigBirdPegasusForConditionalGeneration:
    """simple docstring"""
    cfg = BigBirdPegasusConfig(**config_update)
    torch_model = BigBirdPegasusForConditionalGeneration(cfg)
    state_dict = torch_model.state_dict()
    mapping = {}
    # separating decoder weights
    decoder_weights = {k: tf_weights[k] for k in tf_weights if k.startswith("""pegasus/decoder""" )}
    remaining_weights = {k: tf_weights[k] for k in tf_weights if not k.startswith("""pegasus/decoder""" )}
    for k, v in tqdm(decoder_weights.items() , """tf -> hf conversion""" ):
        conditions = [k.endswith(ending) for ending in KEYS_TO_IGNORE]
        if any(conditions):
            continue
        patterns = DECODER_PATTERNS
        new_k = rename_state_dict_key(k , patterns)
        if new_k not in state_dict:
            raise ValueError(f"""could not find new key {new_k} in state dict. (converted from {k})""" )
        if any(True if i in k else False for i in ["""dense""", """query""", """key""", """value"""] ):
            v = v.T
        mapping[new_k] = torch.from_numpy(v)
        assert v.shape == state_dict[new_k].shape, f"""{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}"""
    for k, v in tqdm(remaining_weights.items() , """tf -> hf conversion""" ):
        conditions = [k.endswith(ending) for ending in KEYS_TO_IGNORE]
        if any(conditions):
            continue
        patterns = REMAINING_PATTERNS
        new_k = rename_state_dict_key(k , patterns)
        if new_k not in state_dict and k != "pegasus/embeddings/position_embeddings":
            raise ValueError(f"""could not find new key {new_k} in state dict. (converted from {k})""" )
        if any(True if i in k else False for i in ["""dense""", """query""", """key""", """value"""] ):
            v = v.T
        mapping[new_k] = torch.from_numpy(v)
        if k != "pegasus/embeddings/position_embeddings":
            assert v.shape == state_dict[new_k].shape, f"""{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}"""
    mapping["""model.encoder.embed_positions.weight"""] = mapping["""model.embed_positions.weight"""]
    mapping["""model.decoder.embed_positions.weight"""] = mapping.pop("""model.embed_positions.weight""" )
    missing, extra = torch_model.load_state_dict(mapping , strict=False )
    unexpected_missing = [
        k
        for k in missing
        if k
        not in [
            """final_logits_bias""",
            """model.encoder.embed_tokens.weight""",
            """model.decoder.embed_tokens.weight""",
            """lm_head.weight""",
        ]
    ]
    assert unexpected_missing == [], f"""no matches found for the following torch keys {unexpected_missing}"""
    assert extra == [], f"""no matches found for the following tf keys {extra}"""
    return torch_model
def get_tf_weights_as_numpy(path) ->Dict:
    """simple docstring"""
    init_vars = tf.train.list_variables(path)
    tf_weights = {}
    ignore_name = ["""global_step"""]
    for name, shape in tqdm(init_vars , desc="""converting tf checkpoint to dict""" ):
        skip_key = any(pat in name for pat in ignore_name )
        if skip_key:
            continue
        array = tf.train.load_variable(path , name )
        tf_weights[name] = array
    return tf_weights
def convert_bigbird_pegasus_ckpt_to_pytorch(ckpt_path, save_dir, config_update) ->None:
    """simple docstring"""
    tf_weights = get_tf_weights_as_numpy(ckpt_path)
    torch_model = convert_bigbird_pegasus(tf_weights , config_update)
    torch_model.save_pretrained(save_dir)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--tf_ckpt_path', type=str, help='passed to tf.train.list_variables')
parser.add_argument('--save_dir', default=None, type=str, help='Path to the output PyTorch model.')
    args = parser.parse_args()
    config_update = {}
convert_bigbird_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir, config_update=config_update)
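# Typical invocation (paths are placeholders):
#
#   python convert_bigbird_pegasus_tf_checkpoint_to_pytorch.py \
#       --tf_ckpt_path /path/to/bigbird_pegasus_tf_ckpt \
#       --save_dir /path/to/pytorch_dump
#
# The key-renaming helper can also be exercised on its own; for a decoder
# self-attention query kernel the patterns compose as follows:
example_key = "pegasus/decoder/layer_0/attention/self/query/kernel"
assert rename_state_dict_key(example_key, DECODER_PATTERNS) == "model.decoder.layers.0.self_attn.q_proj.weight"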
| 337 |
'''simple docstring'''
ERROR_MSG = 'Input must be a string of 8 numbers plus letter'
LOOKUP_LETTERS = 'TRWAGMYFPDXBNJZSQVHLCKE'
def is_spain_national_id(spanish_id) ->bool:
    """simple docstring"""
    if not isinstance(spanish_id , str ):
        msg = f"""Expected string as input, found {type(spanish_id).__name__}"""
        raise TypeError(msg)
    spanish_id_clean = spanish_id.replace("""-""" , """""" ).upper()
    if len(spanish_id_clean) != 9:
        raise ValueError(ERROR_MSG)
    try:
        number = int(spanish_id_clean[0:8] )
        letter = spanish_id_clean[8]
    except ValueError as ex:
        raise ValueError(ERROR_MSG) from ex
    if letter.isdigit():
        raise ValueError(ERROR_MSG)
    return letter == LOOKUP_LETTERS[number % 23]
if __name__ == "__main__":
import doctest
doctest.testmod()
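# Worked checks for the validator above. For "12345678Z": 12345678 % 23 == 14
# and LOOKUP_LETTERS[14] == "Z", so the control letter matches.
assert is_spain_national_id("12345678Z")
assert is_spain_national_id("12345678-Z")  # dashes are stripped
assert not is_spain_national_id("12345678T")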
| 337 | 1 |
from ....configuration_utils import PretrainedConfig
from ....utils import logging
_lowerCamelCase : List[str] = logging.get_logger(__name__)
# TODO: upload to AWS
_lowerCamelCase : List[str] = {
"""yjernite/retribert-base-uncased""": (
"""https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/config.json"""
),
}
class RetriBertConfig( PretrainedConfig ):
    '''simple docstring'''
    model_type = "retribert"
    def __init__( self , vocab_size=30_522 , hidden_size=768 , num_hidden_layers=8 , num_attention_heads=12 , intermediate_size=3_072 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1e-12 , share_encoders=True , projection_dim=128 , pad_token_id=0 , **kwargs , ) ->Any:
        '''simple docstring'''
        super().__init__(pad_token_id=pad_token_id , **kwargs)
A__ = vocab_size
A__ = hidden_size
A__ = num_hidden_layers
A__ = num_attention_heads
A__ = hidden_act
A__ = intermediate_size
A__ = hidden_dropout_prob
A__ = attention_probs_dropout_prob
A__ = max_position_embeddings
A__ = type_vocab_size
A__ = initializer_range
A__ = layer_norm_eps
A__ = share_encoders
A__ = projection_dim
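# Hypothetical construction sketch (behaves as shown against the upstream
# transformers implementation this class mirrors); the overrides are
# illustrative.
config = RetriBertConfig(projection_dim=128, share_encoders=True)
print(config.hidden_size, config.projection_dim)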
| 14 |
"""simple docstring"""
def find_minimum_change( denominations : list[int], value : str ):
    '''simple docstring'''
    total_value = int(value)
    # Initialize Result
    answer = []
    # Traverse through all denomination
    for denomination in reversed(denominations):
        # Find denominations
        while int(total_value) >= int(denomination):
            total_value -= int(denomination)
            answer.append(denomination)  # Append the "answers" array
    return answer
# Driver Code
if __name__ == "__main__":
lowerCAmelCase__ = []
lowerCAmelCase__ = '''0'''
if (
input('''Do you want to enter your denominations ? (yY/n): ''').strip().lower()
== "y"
):
lowerCAmelCase__ = int(input('''Enter the number of denominations you want to add: ''').strip())
for i in range(0, n):
denominations.append(int(input(F"""Denomination {i}: """).strip()))
lowerCAmelCase__ = input('''Enter the change you want to make in Indian Currency: ''').strip()
else:
# All denominations of Indian Currency if user does not enter
lowerCAmelCase__ = [1, 2, 5, 10, 20, 50, 100, 500, 2000]
lowerCAmelCase__ = input('''Enter the change you want to make: ''').strip()
if int(value) == 0 or int(value) < 0:
print('''The total value cannot be zero or negative.''')
else:
print(F"""Following is minimal change for {value}: """)
lowerCAmelCase__ = find_minimum_change(denominations, value)
# Print result
for i in range(len(answer)):
print(answer[i], end=''' ''')
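# A worked greedy example with the default Indian denominations:
# 987 = 500 + 100 + 100 + 100 + 100 + 50 + 20 + 10 + 5 + 2.
assert find_minimum_change([1, 2, 5, 10, 20, 50, 100, 500, 2000], 987) == [
    500, 100, 100, 100, 100, 50, 20, 10, 5, 2,
]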
| 72 | 0 |
'''simple docstring'''
import datasets
from .evaluate import evaluate
__snake_case : List[str] = '\\n@article{hendrycks2021cuad,\n title={CUAD: An Expert-Annotated NLP Dataset for Legal Contract Review},\n author={Dan Hendrycks and Collin Burns and Anya Chen and Spencer Ball},\n journal={arXiv preprint arXiv:2103.06268},\n year={2021}\n}\n'
__snake_case : Tuple = '\nThis metric wrap the official scoring script for version 1 of the Contract\nUnderstanding Atticus Dataset (CUAD).\nContract Understanding Atticus Dataset (CUAD) v1 is a corpus of more than 13,000 labels in 510\ncommercial legal contracts that have been manually labeled to identify 41 categories of important\nclauses that lawyers look for when reviewing contracts in connection with corporate transactions.\n'
__snake_case : Tuple = '\nComputes CUAD scores (EM, F1, AUPR, Precision@80%Recall, and Precision@90%Recall).\nArgs:\n predictions: List of question-answers dictionaries with the following key-values:\n - \'id\': id of the question-answer pair as given in the references (see below)\n - \'prediction_text\': list of possible texts for the answer, as a list of strings\n depending on a threshold on the confidence probability of each prediction.\n references: List of question-answers dictionaries with the following key-values:\n - \'id\': id of the question-answer pair (see above),\n - \'answers\': a Dict in the CUAD dataset format\n {\n \'text\': list of possible texts for the answer, as a list of strings\n \'answer_start\': list of start positions for the answer, as a list of ints\n }\n Note that answer_start values are not taken into account to compute the metric.\nReturns:\n \'exact_match\': Exact match (the normalized answer exactly match the gold answer)\n \'f1\': The F-score of predicted tokens versus the gold answer\n \'aupr\': Area Under the Precision-Recall curve\n \'prec_at_80_recall\': Precision at 80% recall\n \'prec_at_90_recall\': Precision at 90% recall\nExamples:\n >>> predictions = [{\'prediction_text\': [\'The seller:\', \'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.\'], \'id\': \'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties\'}]\n >>> references = [{\'answers\': {\'answer_start\': [143, 49], \'text\': [\'The seller:\', \'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.\']}, \'id\': \'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties\'}]\n >>> cuad_metric = datasets.load_metric("cuad")\n >>> results = cuad_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'exact_match\': 100.0, \'f1\': 100.0, \'aupr\': 0.0, \'prec_at_80_recall\': 1.0, \'prec_at_90_recall\': 1.0}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowerCamelCase ( datasets.Metric ):
'''simple docstring'''
    def _info( self : Optional[Any] ) -> List[Any]:
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": {
"""id""": datasets.Value("""string""" ),
"""prediction_text""": datasets.features.Sequence(datasets.Value("""string""" ) ),
},
"""references""": {
"""id""": datasets.Value("""string""" ),
"""answers""": datasets.features.Sequence(
{
"""text""": datasets.Value("""string""" ),
"""answer_start""": datasets.Value("""int32""" ),
} ),
},
} ) , codebase_urls=["""https://www.atticusprojectai.org/cuad"""] , reference_urls=["""https://www.atticusprojectai.org/cuad"""] , )
    def _compute( self , predictions , references ):
        '''simple docstring'''
        pred_dict = {prediction["""id"""]: prediction["""prediction_text"""] for prediction in predictions}
        dataset = [
            {
                """paragraphs""": [
                    {
                        """qas""": [
                            {
                                """answers""": [{"""text""": answer_text} for answer_text in ref["""answers"""]["""text"""]],
                                """id""": ref["""id"""],
                            }
                            for ref in references
                        ]
                    }
                ]
            }
        ]
        score = evaluate(dataset=dataset , predictions=pred_dict )
        return score
| 362 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__snake_case : Union[str, Any] = logging.get_logger(__name__)
__snake_case : Optional[int] = {
'bigcode/gpt_bigcode-santacoder': 'https://huggingface.co/bigcode/gpt_bigcode-santacoder/resolve/main/config.json',
}
class GPTBigCodeConfig( PretrainedConfig ):
    '''simple docstring'''
    model_type = 'gpt_bigcode'
    keys_to_ignore_at_inference = ['past_key_values']
    attribute_map = {
'hidden_size': 'n_embd',
'max_position_embeddings': 'n_positions',
'num_attention_heads': 'n_head',
'num_hidden_layers': 'n_layer',
}
    def __init__( self , vocab_size=5_02_57 , n_positions=10_24 , n_embd=7_68 , n_layer=12 , n_head=12 , n_inner=None , activation_function="gelu_pytorch_tanh" , resid_pdrop=0.1 , embd_pdrop=0.1 , attn_pdrop=0.1 , layer_norm_epsilon=1e-5 , initializer_range=0.02 , scale_attn_weights=True , use_cache=True , bos_token_id=5_02_56 , eos_token_id=5_02_56 , attention_softmax_in_fp32=True , scale_attention_softmax_in_fp32=True , multi_query=True , **kwargs , ) -> None:
'''simple docstring'''
A__ : Optional[Any] =vocab_size
A__ : Optional[Any] =n_positions
A__ : List[str] =n_embd
A__ : str =n_layer
A__ : Optional[int] =n_head
A__ : Optional[int] =n_inner
A__ : int =activation_function
A__ : int =resid_pdrop
A__ : int =embd_pdrop
A__ : Dict =attn_pdrop
A__ : Any =layer_norm_epsilon
A__ : List[Any] =initializer_range
A__ : Dict =scale_attn_weights
A__ : Any =use_cache
        A__ : List[Any] =attention_softmax_in_fp32
        A__ : Optional[int] =scale_attention_softmax_in_fp32
A__ : Dict =multi_query
A__ : List[str] =bos_token_id
A__ : Any =eos_token_id
        super().__init__(bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
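# Hypothetical construction sketch (behaves as shown against the upstream
# transformers implementation this class mirrors). multi_query=True is the
# distinguishing attention layout of GPTBigCode: one shared key/value head.
config = GPTBigCodeConfig(n_embd=768, n_layer=12, n_head=12, multi_query=True)
print(config.hidden_size, config.num_hidden_layers)  # resolved via attribute_map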
| 136 | 0 |
"""simple docstring"""
from typing import Callable, List, Optional, Tuple, Union
import torch
from transformers import CLIPTextModel, CLIPTokenizer
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin, Transformer2DModel, VQModel
from ...schedulers import VQDiffusionScheduler
from ...utils import logging
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
__SCREAMING_SNAKE_CASE =logging.get_logger(__name__) # pylint: disable=invalid-name
class LearnedClassifierFreeSamplingEmbeddings( ModelMixin , ConfigMixin ):
    @register_to_config
    def __init__( self , learnable: bool , hidden_size: Optional[int] = None , length: Optional[int] = None ) -> None:
        '''simple docstring'''
        super().__init__()

        self.learnable = learnable

        if self.learnable:
            assert hidden_size is not None, "learnable=True requires `hidden_size` to be set"
            assert length is not None, "learnable=True requires `length` to be set"

            embeddings = torch.zeros(length , hidden_size )
        else:
            embeddings = None

        self.embeddings = torch.nn.Parameter(embeddings )
class VQDiffusionPipeline( DiffusionPipeline ):
    vqvae: VQModel
    text_encoder: CLIPTextModel
    tokenizer: CLIPTokenizer
    transformer: Transformer2DModel
    learned_classifier_free_sampling_embeddings: LearnedClassifierFreeSamplingEmbeddings
    scheduler: VQDiffusionScheduler

    def __init__( self , vqvae: VQModel , text_encoder: CLIPTextModel , tokenizer: CLIPTokenizer , transformer: Transformer2DModel , scheduler: VQDiffusionScheduler , learned_classifier_free_sampling_embeddings: LearnedClassifierFreeSamplingEmbeddings , ) -> None:
        '''simple docstring'''
        super().__init__()
        self.register_modules(
            vqvae=vqvae , transformer=transformer , text_encoder=text_encoder , tokenizer=tokenizer , scheduler=scheduler , learned_classifier_free_sampling_embeddings=learned_classifier_free_sampling_embeddings , )
    def _encode_prompt( self , prompt , num_images_per_prompt , do_classifier_free_guidance ) -> Dict:
        '''simple docstring'''
        batch_size = len(prompt ) if isinstance(prompt , list ) else 1
# get prompt text embeddings
lowercase_ : Union[str, Any] = self.tokenizer(
lowercase_ ,padding='max_length' ,max_length=self.tokenizer.model_max_length ,return_tensors='pt' ,)
lowercase_ : Dict = text_inputs.input_ids
if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
lowercase_ : List[str] = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
logger.warning(
'The following part of your input was truncated because CLIP can only handle sequences up to'
f''' {self.tokenizer.model_max_length} tokens: {removed_text}''' )
lowercase_ : Optional[int] = text_input_ids[:, : self.tokenizer.model_max_length]
lowercase_ : Union[str, Any] = self.text_encoder(text_input_ids.to(self.device ) )[0]
# NOTE: This additional step of normalizing the text embeddings is from VQ-Diffusion.
# While CLIP does normalize the pooled output of the text transformer when combining
# the image and text embeddings, CLIP does not directly normalize the last hidden state.
#
# CLIP normalizing the pooled output.
# https://github.com/huggingface/transformers/blob/d92e22d1f28324f513f3080e5c47c071a3916721/src/transformers/models/clip/modeling_clip.py#L1052-L1053
lowercase_ : str = prompt_embeds / prompt_embeds.norm(dim=-1 ,keepdim=lowercase_ )
# duplicate text embeddings for each generation per prompt
lowercase_ : Tuple = prompt_embeds.repeat_interleave(lowercase_ ,dim=0 )
if do_classifier_free_guidance:
if self.learned_classifier_free_sampling_embeddings.learnable:
lowercase_ : Tuple = self.learned_classifier_free_sampling_embeddings.embeddings
lowercase_ : Union[str, Any] = negative_prompt_embeds.unsqueeze(0 ).repeat(lowercase_ ,1 ,1 )
else:
lowercase_ : Optional[Any] = [''] * batch_size
lowercase_ : str = text_input_ids.shape[-1]
lowercase_ : List[Any] = self.tokenizer(
lowercase_ ,padding='max_length' ,max_length=lowercase_ ,truncation=lowercase_ ,return_tensors='pt' ,)
lowercase_ : Optional[int] = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# See comment for normalizing text embeddings
lowercase_ : str = negative_prompt_embeds / negative_prompt_embeds.norm(dim=-1 ,keepdim=lowercase_ )
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
lowercase_ : List[Any] = negative_prompt_embeds.shape[1]
lowercase_ : int = negative_prompt_embeds.repeat(1 ,lowercase_ ,1 )
lowercase_ : List[str] = negative_prompt_embeds.view(batch_size * num_images_per_prompt ,lowercase_ ,-1 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
lowercase_ : Optional[Any] = torch.cat([negative_prompt_embeds, prompt_embeds] )
return prompt_embeds
    @torch.no_grad()
    def __call__(self, prompt: Union[str, List[str]], num_inference_steps: int = 100, guidance_scale: float = 5.0, truncation_rate: float = 1.0, num_images_per_prompt: int = 1, generator: Optional[torch.Generator] = None, latents: Optional[torch.FloatTensor] = None, output_type: Optional[str] = "pil", return_dict: bool = True, callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, callback_steps: int = 1) -> Union[ImagePipelineOutput, Tuple]:
        if isinstance(prompt, str):
            batch_size = 1
        elif isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")

        batch_size = batch_size * num_images_per_prompt

        do_classifier_free_guidance = guidance_scale > 1.0

        prompt_embeds = self._encode_prompt(prompt, num_images_per_prompt, do_classifier_free_guidance)

        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
        ):
            raise ValueError(
                f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
                f" {type(callback_steps)}."
            )

        # get the initial completely masked latents unless the user supplied it
        latents_shape = (batch_size, self.transformer.num_latent_pixels)
        if latents is None:
            mask_class = self.transformer.num_vector_embeds - 1
            latents = torch.full(latents_shape, mask_class).to(self.device)
        else:
            if latents.shape != latents_shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}")
            if (latents < 0).any() or (latents >= self.transformer.num_vector_embeds).any():
                raise ValueError(
                    "Unexpected latents value(s). All latents must be valid embedding indices i.e. in the range 0,"
                    f" {self.transformer.num_vector_embeds - 1} (inclusive)."
                )
            latents = latents.to(self.device)

        # set timesteps
        self.scheduler.set_timesteps(num_inference_steps, device=self.device)

        timesteps_tensor = self.scheduler.timesteps.to(self.device)

        sample = latents

        for i, t in enumerate(self.progress_bar(timesteps_tensor)):
            # expand the sample if we are doing classifier free guidance
            latent_model_input = torch.cat([sample] * 2) if do_classifier_free_guidance else sample

            # predict the un-noised image
            # model_output == `log_p_x_0`
            model_output = self.transformer(latent_model_input, encoder_hidden_states=prompt_embeds, timestep=t).sample

            if do_classifier_free_guidance:
                model_output_uncond, model_output_text = model_output.chunk(2)
                model_output = model_output_uncond + guidance_scale * (model_output_text - model_output_uncond)
                model_output -= torch.logsumexp(model_output, dim=1, keepdim=True)

            model_output = self.truncate(model_output, truncation_rate)

            # remove `log(0)`'s (`-inf`s)
            model_output = model_output.clamp(-70)

            # compute the previous noisy sample x_t -> x_t-1
            sample = self.scheduler.step(model_output, timestep=t, sample=sample, generator=generator).prev_sample

            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                callback(i, t, sample)

        embedding_channels = self.vqvae.config.vq_embed_dim
        embeddings_shape = (batch_size, self.transformer.height, self.transformer.width, embedding_channels)
        embeddings = self.vqvae.quantize.get_codebook_entry(sample, shape=embeddings_shape)
        image = self.vqvae.decode(embeddings, force_not_quantize=True).sample

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
    def truncate(self, log_p_x_0: torch.FloatTensor, truncation_rate: float) -> torch.FloatTensor:
        """
        Truncates `log_p_x_0` such that for each column vector, the total cumulative probability of the kept
        values is `truncation_rate`. The lowest probabilities that would raise the cumulative probability above
        the truncation rate are set to zero, i.e. their log probability is set to -inf.
        """
        sorted_log_p_x_0, indices = torch.sort(log_p_x_0, 1, descending=True)
        sorted_p_x_0 = torch.exp(sorted_log_p_x_0)
        keep_mask = sorted_p_x_0.cumsum(dim=1) < truncation_rate

        # Ensure that at least the largest probability is not zeroed out
        all_true = torch.full_like(keep_mask[:, 0:1, :], True)
        keep_mask = torch.cat((all_true, keep_mask), dim=1)
        keep_mask = keep_mask[:, :-1, :]

        keep_mask = keep_mask.gather(1, indices.argsort(1))

        rv = log_p_x_0.clone()

        rv[~keep_mask] = -torch.inf  # -inf = log(0)

        return rv
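# --- Editor's usage sketch (not part of the original file) ---------------------
# How a pipeline like the one above is typically driven. The checkpoint name is
# assumed from the diffusers docs and may differ in your environment.
#
#   pipe = VQDiffusionPipeline.from_pretrained("microsoft/vq-diffusion-ithq")
#   image = pipe("teddy bear playing in the pool", truncation_rate=0.86).images[0]
#   image.save("teddy_bear.png")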
| 213 |
'''simple docstring'''
import inspect
import tempfile
from collections import OrderedDict, UserDict
from collections.abc import MutableMapping
from contextlib import ExitStack, contextmanager
from dataclasses import fields
from enum import Enum
from typing import Any, ContextManager, List, Tuple
import numpy as np
from .import_utils import is_flax_available, is_tf_available, is_torch_available, is_torch_fx_proxy
if is_flax_available():
import jax.numpy as jnp
class cached_property(property):
    """Descriptor that mimics @property but caches output in a member variable."""

    def __get__(self, obj, objtype=None):
        # See docs.python.org/3/howto/descriptor.html#properties
        if obj is None:
            return self
        if self.fget is None:
            raise AttributeError("unreadable attribute")
        attr = "__cached_" + self.fget.__name__
        cached = getattr(obj, attr, None)
        if cached is None:
            cached = self.fget(obj)
            setattr(obj, attr, cached)
        return cached
def strtobool(val):
    """Convert a string representation of truth to `1` (true) or `0` (false)."""
    val = val.lower()
    if val in {"y", "yes", "t", "true", "on", "1"}:
        return 1
    if val in {"n", "no", "f", "false", "off", "0"}:
        return 0
    raise ValueError(f"invalid truth value {val!r}")
def is_tensor(x):
    """Tests if `x` is a `torch.Tensor`, `tf.Tensor`, `jnp.ndarray` or `np.ndarray`."""
    if is_torch_fx_proxy(x):
        return True
    if is_torch_available():
        import torch

        if isinstance(x, torch.Tensor):
            return True
    if is_tf_available():
        import tensorflow as tf

        if isinstance(x, tf.Tensor):
            return True

    if is_flax_available():
        import jax.numpy as jnp
        from jax.core import Tracer

        if isinstance(x, (jnp.ndarray, Tracer)):
            return True

    return isinstance(x, np.ndarray)
def _is_numpy(x):
    return isinstance(x, np.ndarray)


def is_numpy_array(x):
    return _is_numpy(x)


def _is_torch(x):
    import torch

    return isinstance(x, torch.Tensor)


def is_torch_tensor(x):
    return False if not is_torch_available() else _is_torch(x)


def _is_torch_device(x):
    import torch

    return isinstance(x, torch.device)


def is_torch_device(x):
    return False if not is_torch_available() else _is_torch_device(x)


def _is_torch_dtype(x):
    import torch

    if isinstance(x, str):
        if hasattr(torch, x):
            x = getattr(torch, x)
        else:
            return False
    return isinstance(x, torch.dtype)


def is_torch_dtype(x):
    return False if not is_torch_available() else _is_torch_dtype(x)


def _is_tensorflow(x):
    import tensorflow as tf

    return isinstance(x, tf.Tensor)


def is_tf_tensor(x):
    return False if not is_tf_available() else _is_tensorflow(x)


def _is_tf_symbolic_tensor(x):
    import tensorflow as tf

    # the `is_symbolic_tensor` predicate is only available starting with TF 2.14
    if hasattr(tf, "is_symbolic_tensor"):
        return tf.is_symbolic_tensor(x)
    return type(x) == tf.Tensor


def is_tf_symbolic_tensor(x):
    return False if not is_tf_available() else _is_tf_symbolic_tensor(x)


def _is_jax(x):
    import jax.numpy as jnp  # noqa: F811

    return isinstance(x, jnp.ndarray)


def is_jax_tensor(x):
    return False if not is_flax_available() else _is_jax(x)
def to_py_obj(obj):
    """Convert a TensorFlow tensor, PyTorch tensor, Numpy array or python list to a python list."""
    if isinstance(obj, (dict, UserDict)):
        return {k: to_py_obj(v) for k, v in obj.items()}
    elif isinstance(obj, (list, tuple)):
        return [to_py_obj(o) for o in obj]
    elif is_tf_tensor(obj):
        return obj.numpy().tolist()
    elif is_torch_tensor(obj):
        return obj.detach().cpu().tolist()
    elif is_jax_tensor(obj):
        return np.asarray(obj).tolist()
    elif isinstance(obj, (np.ndarray, np.number)):  # tolist also works on 0d np arrays
        return obj.tolist()
    else:
        return obj


def to_numpy(obj):
    """Convert a TensorFlow tensor, PyTorch tensor, Numpy array or python list to a Numpy array."""
    if isinstance(obj, (dict, UserDict)):
        return {k: to_numpy(v) for k, v in obj.items()}
    elif isinstance(obj, (list, tuple)):
        return np.array(obj)
    elif is_tf_tensor(obj):
        return obj.numpy()
    elif is_torch_tensor(obj):
        return obj.detach().cpu().numpy()
    elif is_jax_tensor(obj):
        return np.asarray(obj)
    else:
        return obj
class ModelOutput(OrderedDict):
    """
    Base class for all model outputs as dataclass. Has a `__getitem__` that allows indexing by integer or slice
    (like a tuple) or strings (like a dictionary) that will ignore the `None` attributes.
    """

    def __post_init__(self):
        class_fields = fields(self)

        # Safety and consistency checks
        if not len(class_fields):
            raise ValueError(f"{self.__class__.__name__} has no fields.")
        if not all(field.default is None for field in class_fields[1:]):
            raise ValueError(f"{self.__class__.__name__} should not have more than one required field.")

        first_field = getattr(self, class_fields[0].name)
        other_fields_are_none = all(getattr(self, field.name) is None for field in class_fields[1:])

        if other_fields_are_none and not is_tensor(first_field):
            if isinstance(first_field, dict):
                iterator = first_field.items()
                first_field_iterator = True
            else:
                try:
                    iterator = iter(first_field)
                    first_field_iterator = True
                except TypeError:
                    first_field_iterator = False

            # if we provided an iterator as first field and the iterator is a (key, value) iterator
            # set the associated fields
            if first_field_iterator:
                for idx, element in enumerate(iterator):
                    if (
                        not isinstance(element, (list, tuple))
                        or not len(element) == 2
                        or not isinstance(element[0], str)
                    ):
                        if idx == 0:
                            # If we do not have an iterator of key/values, set it as attribute
                            self[class_fields[0].name] = first_field
                        else:
                            # If we have a mixed iterator, raise an error
                            raise ValueError(
                                f"Cannot set key/value for {element}. It needs to be a tuple (key, value)."
                            )
                        break
                    setattr(self, element[0], element[1])
                    if element[1] is not None:
                        self[element[0]] = element[1]
        elif first_field is not None:
            self[class_fields[0].name] = first_field
        else:
            for field in class_fields:
                v = getattr(self, field.name)
                if v is not None:
                    self[field.name] = v
    def __delitem__(self, *args, **kwargs):
        raise Exception(f"You cannot use ``__delitem__`` on a {self.__class__.__name__} instance.")

    def setdefault(self, *args, **kwargs):
        raise Exception(f"You cannot use ``setdefault`` on a {self.__class__.__name__} instance.")

    def pop(self, *args, **kwargs):
        raise Exception(f"You cannot use ``pop`` on a {self.__class__.__name__} instance.")

    def update(self, *args, **kwargs):
        raise Exception(f"You cannot use ``update`` on a {self.__class__.__name__} instance.")

    def __getitem__(self, k):
        if isinstance(k, str):
            inner_dict = dict(self.items())
            return inner_dict[k]
        else:
            return self.to_tuple()[k]

    def __setattr__(self, name, value):
        if name in self.keys() and value is not None:
            # Don't call self.__setitem__ to avoid recursion errors
            super().__setitem__(name, value)
        super().__setattr__(name, value)

    def __setitem__(self, key, value):
        # Will raise a KeyException if needed
        super().__setitem__(key, value)
        # Don't call self.__setattr__ to avoid recursion errors
        super().__setattr__(key, value)

    def to_tuple(self) -> Tuple[Any]:
        """Convert self to a tuple containing all the attributes/keys that are not `None`."""
        return tuple(self[k] for k in self.keys())
class ExplicitEnum(str, Enum):
    """Enum with more explicit error message for missing values."""

    @classmethod
    def _missing_(cls, value):
        raise ValueError(
            f"{value} is not a valid {cls.__name__}, please select one of {list(cls._value2member_map_.keys())}"
        )


class PaddingStrategy(ExplicitEnum):
    LONGEST = "longest"
    MAX_LENGTH = "max_length"
    DO_NOT_PAD = "do_not_pad"


class TensorType(ExplicitEnum):
    PYTORCH = "pt"
    TENSORFLOW = "tf"
    NUMPY = "np"
    JAX = "jax"
class ContextManagers:
    """Wrapper for `contextlib.ExitStack` which enters a collection of context managers."""

    def __init__(self, context_managers: List[ContextManager]):
        self.context_managers = context_managers
        self.stack = ExitStack()

    def __enter__(self):
        for context_manager in self.context_managers:
            self.stack.enter_context(context_manager)

    def __exit__(self, *args, **kwargs):
        self.stack.__exit__(*args, **kwargs)
def can_return_loss(model_class):
    """Check whether a given model class can return loss."""
    framework = infer_framework(model_class)
    if framework == "tf":
        signature = inspect.signature(model_class.call)  # TensorFlow models
    elif framework == "pt":
        signature = inspect.signature(model_class.forward)  # PyTorch models
    else:
        signature = inspect.signature(model_class.__call__)  # Flax models

    for p in signature.parameters:
        if p == "return_loss" and signature.parameters[p].default is True:
            return True

    return False


def find_labels(model_class):
    """Find the label arguments used by a given model class."""
    model_name = model_class.__name__
    framework = infer_framework(model_class)
    if framework == "tf":
        signature = inspect.signature(model_class.call)  # TensorFlow models
    elif framework == "pt":
        signature = inspect.signature(model_class.forward)  # PyTorch models
    else:
        signature = inspect.signature(model_class.__call__)  # Flax models

    if "QuestionAnswering" in model_name:
        return [p for p in signature.parameters if "label" in p or p in ("start_positions", "end_positions")]
    else:
        return [p for p in signature.parameters if "label" in p]
def flatten_dict(d: MutableMapping, parent_key: str = "", delimiter: str = "."):
    """Flatten a nested dict into a single-level dict."""

    def _flatten_dict(d, parent_key="", delimiter="."):
        for k, v in d.items():
            key = str(parent_key) + delimiter + str(k) if parent_key else k
            if v and isinstance(v, MutableMapping):
                yield from flatten_dict(v, key, delimiter=delimiter).items()
            else:
                yield key, v

    return dict(_flatten_dict(d, parent_key, delimiter))
@contextmanager
def working_or_temp_dir(working_dir, use_temp_dir: bool = False):
    if use_temp_dir:
        with tempfile.TemporaryDirectory() as tmp_dir:
            yield tmp_dir
    else:
        yield working_dir
def transpose(array, axes=None):
    """Framework-agnostic version of `numpy.transpose` for torch/TF/jax/numpy tensors."""
    if is_numpy_array(array):
        return np.transpose(array, axes=axes)
    elif is_torch_tensor(array):
        return array.T if axes is None else array.permute(*axes)
    elif is_tf_tensor(array):
        import tensorflow as tf

        return tf.transpose(array, perm=axes)
    elif is_jax_tensor(array):
        return jnp.transpose(array, axes=axes)
    else:
        raise ValueError(f"Type not supported for transpose: {type(array)}.")


def reshape(array, newshape):
    """Framework-agnostic version of `numpy.reshape`."""
    if is_numpy_array(array):
        return np.reshape(array, newshape)
    elif is_torch_tensor(array):
        return array.reshape(*newshape)
    elif is_tf_tensor(array):
        import tensorflow as tf

        return tf.reshape(array, newshape)
    elif is_jax_tensor(array):
        return jnp.reshape(array, newshape)
    else:
        raise ValueError(f"Type not supported for reshape: {type(array)}.")


def squeeze(array, axis=None):
    """Framework-agnostic version of `numpy.squeeze`."""
    if is_numpy_array(array):
        return np.squeeze(array, axis=axis)
    elif is_torch_tensor(array):
        return array.squeeze() if axis is None else array.squeeze(dim=axis)
    elif is_tf_tensor(array):
        import tensorflow as tf

        return tf.squeeze(array, axis=axis)
    elif is_jax_tensor(array):
        return jnp.squeeze(array, axis=axis)
    else:
        raise ValueError(f"Type not supported for squeeze: {type(array)}.")


def expand_dims(array, axis):
    """Framework-agnostic version of `numpy.expand_dims`."""
    if is_numpy_array(array):
        return np.expand_dims(array, axis)
    elif is_torch_tensor(array):
        return array.unsqueeze(dim=axis)
    elif is_tf_tensor(array):
        import tensorflow as tf

        return tf.expand_dims(array, axis=axis)
    elif is_jax_tensor(array):
        return jnp.expand_dims(array, axis=axis)
    else:
        raise ValueError(f"Type not supported for expand_dims: {type(array)}.")


def tensor_size(array):
    """Framework-agnostic version of `numpy.size`."""
    if is_numpy_array(array):
        return np.size(array)
    elif is_torch_tensor(array):
        return array.numel()
    elif is_tf_tensor(array):
        import tensorflow as tf

        return tf.size(array)
    elif is_jax_tensor(array):
        return array.size
    else:
        raise ValueError(f"Type not supported for tensor_size: {type(array)}.")
def add_model_info_to_auto_map(auto_map, repo_id):
    """Adds the information of the repo_id to a given auto map."""
    for key, value in auto_map.items():
        if isinstance(value, (tuple, list)):
            auto_map[key] = [f"{repo_id}--{v}" if (v is not None and "--" not in v) else v for v in value]
        elif value is not None and "--" not in value:
            auto_map[key] = f"{repo_id}--{value}"

    return auto_map


def infer_framework(model_class):
    """Infers the framework of a given model class by inspecting its MRO, without using isinstance()."""
    for base_class in inspect.getmro(model_class):
        module = base_class.__module__
        name = base_class.__name__
        if module.startswith("tensorflow") or module.startswith("keras") or name == "TFPreTrainedModel":
            return "tf"
        elif module.startswith("torch") or name == "PreTrainedModel":
            return "pt"
        elif module.startswith("flax") or module.startswith("jax") or name == "FlaxPreTrainedModel":
            return "flax"
    else:
        raise TypeError(f"Could not infer framework from class {model_class}.")
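# Editor's illustration (not in the original module): `ModelOutput` subclasses act
# both as tuples and as dicts, skipping `None` fields. `MyOutput` is hypothetical.
#
#   from dataclasses import dataclass
#   from typing import Optional
#
#   @dataclass
#   class MyOutput(ModelOutput):
#       loss: Optional[float] = None
#       logits: Optional[list] = None
#
#   out = MyOutput(logits=[1, 2, 3])
#   assert out["logits"] == out.logits == out[0]  # loss is None, so logits is index 0
#   assert flatten_dict({"a": 1, "b": {"c": 2}}) == {"a": 1, "b.c": 2}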
| 237 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {"configuration_swin": ["SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP", "SwinConfig", "SwinOnnxConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_swin"] = [
"SWIN_PRETRAINED_MODEL_ARCHIVE_LIST",
"SwinForImageClassification",
"SwinForMaskedImageModeling",
"SwinModel",
"SwinPreTrainedModel",
"SwinBackbone",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_swin"] = [
"TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFSwinForImageClassification",
"TFSwinForMaskedImageModeling",
"TFSwinModel",
"TFSwinPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_swin import SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP, SwinConfig, SwinOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swin import (
SWIN_PRETRAINED_MODEL_ARCHIVE_LIST,
SwinBackbone,
SwinForImageClassification,
SwinForMaskedImageModeling,
SwinModel,
SwinPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_swin import (
TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST,
TFSwinForImageClassification,
TFSwinForMaskedImageModeling,
TFSwinModel,
TFSwinPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
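# Editor's note (added): `_LazyModule` defers the heavy torch/TF imports until an
# attribute is first accessed. A minimal sketch of the idea under that assumption
# (the real implementation lives in `transformers.utils` and does more):
#
#   import importlib
#   import types
#
#   class LazyModule(types.ModuleType):
#       def __init__(self, name, import_structure):
#           super().__init__(name)
#           self._class_to_module = {
#               cls: mod for mod, classes in import_structure.items() for cls in classes
#           }
#
#       def __getattr__(self, attr):
#           submodule = importlib.import_module("." + self._class_to_module[attr], self.__name__)
#           return getattr(submodule, attr)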
| 361 |
import copy
import os
from typing import TYPE_CHECKING, List, Union
if TYPE_CHECKING:
pass
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

ALIGN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "kakaobrain/align-base": "https://huggingface.co/kakaobrain/align-base/resolve/main/config.json",
}
class AlignTextConfig(PretrainedConfig):
    model_type = "align_text_model"

    def __init__(self, vocab_size=30_522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3_072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=0, position_embedding_type="absolute", use_cache=True, **kwargs):
        super().__init__(**kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.pad_token_id = pad_token_id
    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the text config dict if we are loading from AlignConfig
        if config_dict.get("model_type") == "align":
            config_dict = config_dict["text_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)
class AlignVisionConfig(PretrainedConfig):
    model_type = "align_vision_model"

    def __init__(self, num_channels: int = 3, image_size: int = 600, width_coefficient: float = 2.0, depth_coefficient: float = 3.1, depth_divisor: int = 8, kernel_sizes: List[int] = [3, 3, 5, 3, 5, 5, 3], in_channels: List[int] = [32, 16, 24, 40, 80, 112, 192], out_channels: List[int] = [16, 24, 40, 80, 112, 192, 320], depthwise_padding: List[int] = [], strides: List[int] = [1, 2, 2, 2, 1, 2, 1], num_block_repeats: List[int] = [1, 2, 2, 3, 3, 4, 1], expand_ratios: List[int] = [1, 6, 6, 6, 6, 6, 6], squeeze_expansion_ratio: float = 0.25, hidden_act: str = "swish", hidden_dim: int = 2_560, pooling_type: str = "mean", initializer_range: float = 0.02, batch_norm_eps: float = 0.001, batch_norm_momentum: float = 0.99, drop_connect_rate: float = 0.2, **kwargs):
        super().__init__(**kwargs)

        self.num_channels = num_channels
        self.image_size = image_size
        self.width_coefficient = width_coefficient
        self.depth_coefficient = depth_coefficient
        self.depth_divisor = depth_divisor
        self.kernel_sizes = kernel_sizes
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.depthwise_padding = depthwise_padding
        self.strides = strides
        self.num_block_repeats = num_block_repeats
        self.expand_ratios = expand_ratios
        self.squeeze_expansion_ratio = squeeze_expansion_ratio
        self.hidden_act = hidden_act
        self.hidden_dim = hidden_dim
        self.pooling_type = pooling_type
        self.initializer_range = initializer_range
        self.batch_norm_eps = batch_norm_eps
        self.batch_norm_momentum = batch_norm_momentum
        self.drop_connect_rate = drop_connect_rate
        self.num_hidden_layers = sum(num_block_repeats) * 4
    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from AlignConfig
        if config_dict.get("model_type") == "align":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)
class AlignConfig(PretrainedConfig):
    model_type = "align"
    is_composition = True

    def __init__(self, text_config=None, vision_config=None, projection_dim=640, temperature_init_value=1.0, initializer_range=0.02, **kwargs):
        super().__init__(**kwargs)

        if text_config is None:
            text_config = {}
            logger.info("text_config is None. Initializing the AlignTextConfig with default values.")

        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. Initializing the AlignVisionConfig with default values.")

        self.text_config = AlignTextConfig(**text_config)
        self.vision_config = AlignVisionConfig(**vision_config)

        self.projection_dim = projection_dim
        self.temperature_init_value = temperature_init_value
        self.initializer_range = initializer_range

    @classmethod
    def from_text_vision_configs(cls, text_config: AlignTextConfig, vision_config: AlignVisionConfig, **kwargs):
        return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
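# Usage sketch (editor addition, not in the original file): composing the joint
# config from its two halves and round-tripping through `to_dict`.
#
#   config = AlignConfig.from_text_vision_configs(AlignTextConfig(), AlignVisionConfig())
#   assert config.text_config.vocab_size == 30_522
#   assert config.to_dict()["model_type"] == "align"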
| 305 | 0 |
import unittest
from pathlib import Path
from tempfile import TemporaryDirectory
from transformers import AutoConfig, TFAutoModel, is_tensorflow_text_available, is_tf_available
from transformers.models.bert.tokenization_bert import BertTokenizer
from transformers.testing_utils import require_tensorflow_text, require_tf, slow
if is_tf_available():
import tensorflow as tf
if is_tensorflow_text_available():
from transformers.models.bert import TFBertTokenizer
TOKENIZER_CHECKPOINTS = ["bert-base-uncased", "bert-base-cased"]
TINY_MODEL_CHECKPOINT = "hf-internal-testing/tiny-bert-tf-only"
if is_tf_available():
    class ModelToSave(tf.keras.Model):
        def __init__(self, tokenizer):
            super().__init__()
            self.tokenizer = tokenizer
            config = AutoConfig.from_pretrained(TINY_MODEL_CHECKPOINT)
            self.bert = TFAutoModel.from_config(config)

        def call(self, inputs):
            tokenized = self.tokenizer(inputs)
            out = self.bert(**tokenized)
            return out["pooler_output"]
@require_tf
@require_tensorflow_text
class BertTokenizationTest(unittest.TestCase):
    def setUp(self):
        super().setUp()

        self.tokenizers = [
            BertTokenizer.from_pretrained(checkpoint) for checkpoint in (TOKENIZER_CHECKPOINTS * 2)
        ]  # repeat for when fast_bert_tokenizer=false
        self.tf_tokenizers = [TFBertTokenizer.from_pretrained(checkpoint) for checkpoint in TOKENIZER_CHECKPOINTS] + [
            TFBertTokenizer.from_pretrained(checkpoint, use_fast_bert_tokenizer=False)
            for checkpoint in TOKENIZER_CHECKPOINTS
        ]
        assert len(self.tokenizers) == len(self.tf_tokenizers)

        self.test_sentences = [
            "This is a straightforward English test sentence.",
            "This one has some weird characters\rto\nsee\r\nif those\u00E9break things.",
            "Now we're going to add some Chinese: 一 二 三 一二三",
            "And some much more rare Chinese: 齉 堃 齉堃",
            "Je vais aussi écrire en français pour tester les accents",
            "Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ",
        ]
        self.paired_sentences = list(zip(self.test_sentences, self.test_sentences[::-1]))
    def test_output_equivalence(self):
        for tokenizer, tf_tokenizer in zip(self.tokenizers, self.tf_tokenizers):
            for test_inputs in (self.test_sentences, self.paired_sentences):
                python_outputs = tokenizer(test_inputs, return_tensors="tf", padding="longest")
                tf_outputs = tf_tokenizer(test_inputs)

                for key in python_outputs.keys():
                    self.assertTrue(tf.reduce_all(python_outputs[key].shape == tf_outputs[key].shape))
                    self.assertTrue(tf.reduce_all(tf.cast(python_outputs[key], tf.int64) == tf_outputs[key]))
    @slow
    def test_different_pairing_styles(self):
        for tf_tokenizer in self.tf_tokenizers:
            merged_outputs = tf_tokenizer(self.paired_sentences)
            separated_outputs = tf_tokenizer(
                text=[sentence[0] for sentence in self.paired_sentences],
                text_pair=[sentence[1] for sentence in self.paired_sentences],
            )
            for key in merged_outputs.keys():
                self.assertTrue(tf.reduce_all(tf.cast(merged_outputs[key], tf.int64) == separated_outputs[key]))
    @slow
    def test_graph_mode(self):
        for tf_tokenizer in self.tf_tokenizers:
            compiled_tokenizer = tf.function(tf_tokenizer)
            for test_inputs in (self.test_sentences, self.paired_sentences):
                test_inputs = tf.constant(test_inputs)
                compiled_outputs = compiled_tokenizer(test_inputs)
                eager_outputs = tf_tokenizer(test_inputs)

                for key in eager_outputs.keys():
                    self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key]))
    @slow
    def test_saved_model(self):
        for tf_tokenizer in self.tf_tokenizers:
            model = ModelToSave(tokenizer=tf_tokenizer)
            test_inputs = tf.convert_to_tensor(self.test_sentences)
            out = model(test_inputs)  # Build model with some sample inputs
            with TemporaryDirectory() as tempdir:
                save_path = Path(tempdir) / "saved.model"
                model.save(save_path)
                loaded_model = tf.keras.models.load_model(save_path)
                loaded_output = loaded_model(test_inputs)
                # We may see small differences because the loaded model is compiled, so we need an epsilon for the test
                self.assertLessEqual(tf.reduce_max(tf.abs(out - loaded_output)), 1e-5)
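# Editor's note (added): the point of `TFBertTokenizer` is that tokenization runs
# inside the TF graph, so an exported SavedModel accepts raw strings directly.
# A hedged end-to-end sketch:
#
#   tf_tokenizer = TFBertTokenizer.from_pretrained("bert-base-uncased")
#   tokens = tf_tokenizer(tf.constant(["hello world"]))
#   # -> dict of int tensors (input_ids, token_type_ids, attention_mask),
#   #    with no Python-side preprocessing required at serving time.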
| 133 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP = {"ctrl": "https://huggingface.co/ctrl/resolve/main/config.json"}


class CTRLConfig(PretrainedConfig):
    model_type = "ctrl"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(self, vocab_size=246_534, n_positions=256, n_embd=1_280, dff=8_192, n_layer=48, n_head=16, resid_pdrop=0.1, embd_pdrop=0.1, layer_norm_epsilon=1e-6, initializer_range=0.02, use_cache=True, **kwargs):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.dff = dff
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range

        self.use_cache = use_cache

        super().__init__(**kwargs)
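# Usage sketch (editor addition): instantiating the config; `attribute_map` lets
# generic code read `hidden_size` while the stored field stays `n_embd`.
#
#   configuration = CTRLConfig(n_layer=2)  # tiny variant for quick tests
#   assert configuration.hidden_size == configuration.n_embd == 1_280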
| 133 | 1 |
'''simple docstring'''
import argparse
import re
import torch
from CLAP import create_model
from transformers import AutoFeatureExtractor, ClapConfig, ClapModel
KEYS_TO_MODIFY_MAPPING = {
'''text_branch''': '''text_model''',
'''audio_branch''': '''audio_model.audio_encoder''',
'''attn''': '''attention.self''',
'''self.proj''': '''output.dense''',
'''attention.self_mask''': '''attn_mask''',
'''mlp.fc1''': '''intermediate.dense''',
'''mlp.fc2''': '''output.dense''',
'''norm1''': '''layernorm_before''',
'''norm2''': '''layernorm_after''',
'''bn0''': '''batch_norm''',
}
processor = AutoFeatureExtractor.from_pretrained("laion/clap-htsat-unfused", truncation="rand_trunc")
def init_clap(checkpoint_path, enable_fusion=False):
    model, model_cfg = create_model(
        "HTSAT-tiny",
        "roberta",
        checkpoint_path,
        precision="fp32",
        device="cuda:0" if torch.cuda.is_available() else "cpu",
        enable_fusion=enable_fusion,
        fusion_type="aff_2d" if enable_fusion else None,
    )
    return model, model_cfg
def rename_state_dict(state_dict):
    model_state_dict = {}

    sequential_layers_pattern = r".*sequential.(\d+).*"
    text_projection_pattern = r".*_projection.(\d+).*"

    for key, value in state_dict.items():
        # check if any key needs to be modified
        for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
            if key_to_modify in key:
                key = key.replace(key_to_modify, new_key)

        if re.match(sequential_layers_pattern, key):
            # replace sequential layers with list
            sequential_layer = re.match(sequential_layers_pattern, key).group(1)
            key = key.replace(f"sequential.{sequential_layer}.", f"layers.{int(sequential_layer)//3}.linear.")
        elif re.match(text_projection_pattern, key):
            projection_layer = int(re.match(text_projection_pattern, key).group(1))
            # Because in CLAP they use `nn.Sequential`...
            transformers_projection_layer = 1 if projection_layer == 0 else 2
            key = key.replace(f"_projection.{projection_layer}.", f"_projection.linear{transformers_projection_layer}.")

        if "audio" in key and "qkv" in key:
            # split qkv into query, key and value
            mixed_qkv = value
            qkv_dim = mixed_qkv.size(0) // 3

            query_layer = mixed_qkv[:qkv_dim]
            key_layer = mixed_qkv[qkv_dim : qkv_dim * 2]
            value_layer = mixed_qkv[qkv_dim * 2 :]

            model_state_dict[key.replace("qkv", "query")] = query_layer
            model_state_dict[key.replace("qkv", "key")] = key_layer
            model_state_dict[key.replace("qkv", "value")] = value_layer
        else:
            model_state_dict[key] = value

    return model_state_dict
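# Worked illustration (editor addition): the qkv split above on a toy tensor.
# A fused qkv weight of shape (3*d, d) is cut into three (d, d) blocks.
#
#   import torch
#   d = 4
#   mixed_qkv = torch.arange(3 * d * d, dtype=torch.float32).reshape(3 * d, d)
#   q, k, v = mixed_qkv[:d], mixed_qkv[d : 2 * d], mixed_qkv[2 * d :]
#   assert q.shape == k.shape == v.shape == (d, d)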
def convert_clap_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path, enable_fusion=False):
    clap_model, clap_model_cfg = init_clap(checkpoint_path, enable_fusion=enable_fusion)

    clap_model.eval()
    state_dict = clap_model.state_dict()
    state_dict = rename_state_dict(state_dict)

    transformers_config = ClapConfig()
    transformers_config.audio_config.enable_fusion = enable_fusion
    model = ClapModel(transformers_config)

    # ignore the spectrogram embedding layer
    model.load_state_dict(state_dict, strict=False)

    model.save_pretrained(pytorch_dump_folder_path)
    transformers_config.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
lowercase__ : str = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
parser.add_argument('''--enable_fusion''', action='''store_true''', help='''Whether to enable fusion or not''')
lowercase__ : List[str] = parser.parse_args()
convert_clap_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.enable_fusion) | 190 |
'''simple docstring'''
edges = {"a": ["c", "b"], "b": ["d", "e"], "c": [], "d": [], "e": []}
vertices = ["a", "b", "c", "d", "e"]
def topological_sort(start, visited, sort):
    """Perform topological sort on a directed acyclic graph."""
    current = start
    # add current to visited
    visited.append(current)
    neighbors = edges[current]
    for neighbor in neighbors:
        # if neighbor not in visited, visit
        if neighbor not in visited:
            sort = topological_sort(neighbor, visited, sort)
    # if all neighbors visited add current to sort
    sort.append(current)
    # if all vertices haven't been visited select a new one to visit
    if len(visited) != len(vertices):
        for vertice in vertices:
            if vertice not in visited:
                sort = topological_sort(vertice, visited, sort)
    # return sort
    return sort
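# Editor's note (added): this DFS variant appends a vertex only after all of its
# neighbors, so for the edge lists above it yields a reverse topological order.
# Hand-tracing the example graph suggests:
#
#   assert topological_sort("a", [], []) == ["c", "d", "e", "b", "a"]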
if __name__ == "__main__":
lowercase__ : Tuple = topological_sort('''a''', [], [])
print(sort) | 190 | 1 |
from collections.abc import Callable
from math import pi, sqrt
from random import uniform
from statistics import mean
def __lowercase ( _UpperCamelCase ) ->int:
"""simple docstring"""
def is_in_circle(_UpperCamelCase, _UpperCamelCase ) -> bool:
lowercase : List[str] = sqrt((x**2) + (y**2) )
# Our circle has a radius of 1, so a distance
# greater than 1 would land outside the circle.
return distance_from_centre <= 1
# The proportion of guesses that landed in the circle
lowercase : List[str] = mean(
int(is_in_circle(uniform(-1.0, 1.0 ), uniform(-1.0, 1.0 ) ) )
for _ in range(_UpperCamelCase ) )
# The ratio of the area for circle to square is pi/4.
lowercase : str = proportion * 4
print(f"""The estimated value of pi is {pi_estimate}""" )
print(f"""The numpy value of pi is {pi}""" )
print(f"""The total error is {abs(pi - pi_estimate )}""" )
def area_under_curve_estimator(iterations: int, function_to_integrate: Callable[[float], float], min_value: float = 0.0, max_value: float = 1.0) -> float:
    """Monte Carlo estimate of the definite integral of `function_to_integrate` on [min_value, max_value]."""
    return mean(
        function_to_integrate(uniform(min_value, max_value)) for _ in range(iterations)
    ) * (max_value - min_value)
def area_under_line_estimator_check(iterations: int, min_value: float = 0.0, max_value: float = 1.0) -> None:
    """Checks `area_under_curve_estimator` against the closed form for f(x) = x."""

    def identity_function(x: float) -> float:
        return x

    estimated_value = area_under_curve_estimator(iterations, identity_function, min_value, max_value)
    expected_value = (max_value * max_value - min_value * min_value) / 2

    print("******************")
    print(f"Estimating area under y=x where x varies from {min_value} to {max_value}")
    print(f"Estimated value is {estimated_value}")
    print(f"Expected value is {expected_value}")
    print(f"Total error is {abs(estimated_value - expected_value)}")
    print("******************")
def __lowercase ( _UpperCamelCase ) ->None:
"""simple docstring"""
def function_to_integrate(_UpperCamelCase ) -> float:
return sqrt(4.0 - x * x )
lowercase : int = area_under_curve_estimator(
_UpperCamelCase, _UpperCamelCase, 0.0, 2.0 )
print('''******************''' )
print('''Estimating pi using area_under_curve_estimator''' )
print(f"""Estimated value is {estimated_value}""" )
print(f"""Expected value is {pi}""" )
print(f"""Total error is {abs(estimated_value - pi )}""" )
print('''******************''' )
if __name__ == "__main__":
import doctest
doctest.testmod()
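# Editor's illustration (added): the same estimator on f(x) = x**2 over [0, 1],
# whose true integral is 1/3. The tolerance below is a loose, assumed bound.
#
#   from random import seed
#   seed(0)
#   approx = area_under_curve_estimator(100_000, lambda x: x * x, 0.0, 1.0)
#   assert abs(approx - 1 / 3) < 0.01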
| 337 |
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import torch
from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available
@dataclass
class TextToVideoSDPipelineOutput(BaseOutput):
    frames: Union[List[np.ndarray], torch.FloatTensor]
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipeline_text_to_video_synth import TextToVideoSDPipeline
from .pipeline_text_to_video_synth_imgaimg import VideoToVideoSDPipeline # noqa: F401
from .pipeline_text_to_video_zero import TextToVideoZeroPipeline
| 337 | 1 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

LEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""facebook/levit-128S""": """https://huggingface.co/facebook/levit-128S/resolve/main/config.json""",
# See all LeViT models at https://huggingface.co/models?filter=levit
}
class LevitConfig(PretrainedConfig):
    model_type = "levit"

    def __init__(self, image_size=224, num_channels=3, kernel_size=3, stride=2, padding=1, patch_size=16, hidden_sizes=[128, 256, 384], num_attention_heads=[4, 8, 12], depths=[4, 4, 4], key_dim=[16, 16, 16], drop_path_rate=0, mlp_ratio=[2, 2, 2], attention_ratio=[2, 2, 2], initializer_range=0.02, **kwargs):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.num_channels = num_channels
        self.kernel_size = kernel_size
        self.stride = stride
        self.padding = padding
        self.hidden_sizes = hidden_sizes
        self.num_attention_heads = num_attention_heads
        self.depths = depths
        self.key_dim = key_dim
        self.drop_path_rate = drop_path_rate
        self.patch_size = patch_size
        self.attention_ratio = attention_ratio
        self.mlp_ratio = mlp_ratio
        self.initializer_range = initializer_range
        self.down_ops = [
            ["Subsample", key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
            ["Subsample", key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
        ]
class LevitOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
| 356 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
logger = logging.get_logger(__name__)
if is_vision_available():
import PIL
class CLIPImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(self, do_resize: bool = True, size: Dict[str, int] = None, resample: PILImageResampling = PILImageResampling.BICUBIC, do_center_crop: bool = True, crop_size: Dict[str, int] = None, do_rescale: bool = True, rescale_factor: Union[int, float] = 1 / 255, do_normalize: bool = True, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, do_convert_rgb: bool = True, **kwargs) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, default_to_square=True, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
        self.do_convert_rgb = do_convert_rgb
    def resize(self, image: np.ndarray, size: Dict[str, int], resample: PILImageResampling = PILImageResampling.BICUBIC, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(self, image: np.ndarray, size: Dict[str, int], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` parameter must contain the keys (height, width). Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(self, image: np.ndarray, scale: Union[int, float], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs):
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image: np.ndarray, mean: Union[float, List[float]], std: Union[float, List[float]], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
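    # Editor's illustration (not in the original file): `preprocess` below applies
    # the enabled steps in the order resize -> center_crop -> rescale -> normalize,
    # so a typical call looks like:
    #
    #   image_processor = CLIPImageProcessor()  # defaults: 224x224 crop
    #   batch = image_processor(images=pil_image, return_tensors="pt")
    #   batch["pixel_values"].shape             # (1, 3, 224, 224)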
    def preprocess(self, images: ImageInput, do_resize: bool = None, size: Dict[str, int] = None, resample: PILImageResampling = None, do_center_crop: bool = None, crop_size: Dict[str, int] = None, do_rescale: bool = None, rescale_factor: float = None, do_normalize: bool = None, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, do_convert_rgb: bool = None, return_tensors: Optional[Union[str, TensorType]] = None, data_format: Optional[ChannelDimension] = ChannelDimension.FIRST, **kwargs) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, param_name="size", default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size", default_to_square=True)
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")

        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
| 306 | 0 |
"""simple docstring"""
from typing import Dict, Optional
import numpy as np
import datasets
_DESCRIPTION = "\nIoU is the area of overlap between the predicted segmentation and the ground truth divided by the area of union\nbetween the predicted segmentation and the ground truth. For binary (two classes) or multi-class segmentation,\nthe mean IoU of the image is calculated by taking the IoU of each class and averaging them.\n"
_KWARGS_DESCRIPTION = "\nArgs:\n    predictions (`List[ndarray]`):\n        List of predicted segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.\n    references (`List[ndarray]`):\n        List of ground truth segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.\n    num_labels (`int`):\n        Number of classes (categories).\n    ignore_index (`int`):\n        Index that will be ignored during evaluation.\n    nan_to_num (`int`, *optional*):\n        If specified, NaN values will be replaced by the number defined by the user.\n    label_map (`dict`, *optional*):\n        If specified, dictionary mapping old label indices to new label indices.\n    reduce_labels (`bool`, *optional*, defaults to `False`):\n        Whether or not to reduce all label values of segmentation maps by 1. Usually used for datasets where 0 is used for background,\n        and background itself is not included in all classes of a dataset (e.g. ADE20k). The background label will be replaced by 255.\n\nReturns:\n    `Dict[str, float | ndarray]` comprising various elements:\n    - *mean_iou* (`float`):\n        Mean Intersection-over-Union (IoU averaged over all categories).\n    - *mean_accuracy* (`float`):\n        Mean accuracy (averaged over all categories).\n    - *overall_accuracy* (`float`):\n        Overall accuracy on all images.\n    - *per_category_accuracy* (`ndarray` of shape `(num_labels,)`):\n        Per category accuracy.\n    - *per_category_iou* (`ndarray` of shape `(num_labels,)`):\n        Per category IoU.\n\nExamples:\n\n    >>> import numpy as np\n\n    >>> mean_iou = datasets.load_metric(\"mean_iou\")\n\n    >>> # suppose one has 3 different segmentation maps predicted\n    >>> predicted_1 = np.array([[1, 2], [3, 4], [5, 255]])\n    >>> actual_1 = np.array([[0, 3], [5, 4], [6, 255]])\n\n    >>> predicted_2 = np.array([[2, 7], [9, 2], [3, 6]])\n    >>> actual_2 = np.array([[1, 7], [9, 2], [3, 6]])\n\n    >>> predicted_3 = np.array([[2, 2, 3], [8, 2, 4], [3, 255, 2]])\n    >>> actual_3 = np.array([[1, 2, 2], [8, 2, 1], [3, 255, 1]])\n\n    >>> predicted = [predicted_1, predicted_2, predicted_3]\n    >>> ground_truth = [actual_1, actual_2, actual_3]\n\n    >>> results = mean_iou.compute(predictions=predicted, references=ground_truth, num_labels=10, ignore_index=255, reduce_labels=False)\n    >>> print(results) # doctest: +NORMALIZE_WHITESPACE\n    {'mean_iou': 0.47750000000000004, 'mean_accuracy': 0.5916666666666666, 'overall_accuracy': 0.5263157894736842, 'per_category_iou': array([0.   , 0.   , 0.375, 0.4  , 0.5  , 0.   , 0.5  , 1.   , 1.   , 1.   ]), 'per_category_accuracy': array([0.        , 0.        , 0.75      , 0.66666667, 1.        , 0.        , 0.5       , 1.        , 1.        , 1.        ])}\n"
_CITATION = "\\n@software{MMSegmentation_Contributors_OpenMMLab_Semantic_Segmentation_2020,\nauthor = {{MMSegmentation Contributors}},\nlicense = {Apache-2.0},\nmonth = {7},\ntitle = {{OpenMMLab Semantic Segmentation Toolbox and Benchmark}},\nurl = {https://github.com/open-mmlab/mmsegmentation},\nyear = {2020}\n}"
def intersect_and_union(pred_label, label, num_labels, ignore_index, label_map=None, reduce_labels=False):
    """Calculate intersection and union for a single (prediction, ground truth) pair."""
    if label_map is not None:
        for old_id, new_id in label_map.items():
            label[label == old_id] = new_id

    # turn into Numpy arrays
    pred_label = np.array(pred_label)
    label = np.array(label)

    if reduce_labels:
        label[label == 0] = 255
        label = label - 1
        label[label == 254] = 255

    mask = label != ignore_index
    mask = np.not_equal(label, ignore_index)
    pred_label = pred_label[mask]
    label = np.array(label)[mask]

    intersect = pred_label[pred_label == label]

    area_intersect = np.histogram(intersect, bins=num_labels, range=(0, num_labels - 1))[0]
    area_pred_label = np.histogram(pred_label, bins=num_labels, range=(0, num_labels - 1))[0]
    area_label = np.histogram(label, bins=num_labels, range=(0, num_labels - 1))[0]

    area_union = area_pred_label + area_label - area_intersect

    return area_intersect, area_union, area_pred_label, area_label
def total_intersect_and_union(results, gt_seg_maps, num_labels, ignore_index, label_map=None, reduce_labels=False):
    """Calculate total intersection and union over a list of images."""
    total_area_intersect = np.zeros((num_labels,), dtype=np.float64)
    total_area_union = np.zeros((num_labels,), dtype=np.float64)
    total_area_pred_label = np.zeros((num_labels,), dtype=np.float64)
    total_area_label = np.zeros((num_labels,), dtype=np.float64)
    for result, gt_seg_map in zip(results, gt_seg_maps):
        area_intersect, area_union, area_pred_label, area_label = intersect_and_union(
            result, gt_seg_map, num_labels, ignore_index, label_map, reduce_labels
        )
        total_area_intersect += area_intersect
        total_area_union += area_union
        total_area_pred_label += area_pred_label
        total_area_label += area_label
    return total_area_intersect, total_area_union, total_area_pred_label, total_area_label
def mean_iou(results, gt_seg_maps, num_labels, ignore_index, nan_to_num=None, label_map=None, reduce_labels=False):
    """Calculate mean Intersection-over-Union (mIoU) plus mean and overall accuracy."""
    total_area_intersect, total_area_union, total_area_pred_label, total_area_label = total_intersect_and_union(
        results, gt_seg_maps, num_labels, ignore_index, label_map, reduce_labels
    )

    # compute metrics
    metrics = {}

    all_acc = total_area_intersect.sum() / total_area_label.sum()
    iou = total_area_intersect / total_area_union
    acc = total_area_intersect / total_area_label

    metrics["mean_iou"] = np.nanmean(iou)
    metrics["mean_accuracy"] = np.nanmean(acc)
    metrics["overall_accuracy"] = all_acc
    metrics["per_category_iou"] = iou
    metrics["per_category_accuracy"] = acc

    if nan_to_num is not None:
        metrics = {metric: np.nan_to_num(metric_value, nan=nan_to_num) for metric, metric_value in metrics.items()}

    return metrics
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class MeanIoU(datasets.Metric):
    """Mean IoU metric, following the mmsegmentation reference implementation."""

    def _info(self) -> datasets.MetricInfo:
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                # 1st Seq - height dim, 2nd - width dim
                {
                    "predictions": datasets.Sequence(datasets.Sequence(datasets.Value("uint16"))),
                    "references": datasets.Sequence(datasets.Sequence(datasets.Value("uint16"))),
                }
            ),
            reference_urls=[
                "https://github.com/open-mmlab/mmsegmentation/blob/71c201b1813267d78764f306a297ca717827c4bf/mmseg/core/evaluation/metrics.py"
            ],
        )

    def _compute(self, predictions, references, num_labels, ignore_index, nan_to_num=None, label_map=None, reduce_labels=False):
        iou_result = mean_iou(
            results=predictions,
            gt_seg_maps=references,
            num_labels=num_labels,
            ignore_index=ignore_index,
            nan_to_num=nan_to_num,
            label_map=label_map,
            reduce_labels=reduce_labels,
        )
        return iou_result
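# Example usage (sketch; assumes this file is loaded as a `datasets` metric module):
# metric = MeanIoU()
# metric.compute(predictions=[pred_map], references=[gt_map], num_labels=19, ignore_index=255)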
"""simple docstring"""
import math
from typing import Optional
import numpy as np
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase : Optional[Any] = logging.get_logger(__name__)
UpperCAmelCase : int = {
"facebook/encodec_24khz": "https://huggingface.co/facebook/encodec_24khz/resolve/main/config.json",
"facebook/encodec_48khz": "https://huggingface.co/facebook/encodec_48khz/resolve/main/config.json",
}
class SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase ):
lowercase__ = "encodec"
def __init__( self : Union[str, Any] , lowerCAmelCase_ : Tuple=[1.5, 3.0, 6.0, 12.0, 24.0] , lowerCAmelCase_ : Tuple=2_4_0_0_0 , lowerCAmelCase_ : List[str]=1 , lowerCAmelCase_ : Tuple=False , lowerCAmelCase_ : List[Any]=None , lowerCAmelCase_ : int=None , lowerCAmelCase_ : Dict=1_2_8 , lowerCAmelCase_ : Dict=3_2 , lowerCAmelCase_ : Tuple=1 , lowerCAmelCase_ : Dict=[8, 5, 4, 2] , lowerCAmelCase_ : Optional[Any]="weight_norm" , lowerCAmelCase_ : Any=7 , lowerCAmelCase_ : int=7 , lowerCAmelCase_ : str=3 , lowerCAmelCase_ : Optional[int]=2 , lowerCAmelCase_ : Union[str, Any]=True , lowerCAmelCase_ : int="reflect" , lowerCAmelCase_ : Optional[Any]=2 , lowerCAmelCase_ : Optional[int]=2 , lowerCAmelCase_ : List[Any]=1.0 , lowerCAmelCase_ : Dict=1_0_2_4 , lowerCAmelCase_ : int=None , lowerCAmelCase_ : Optional[int]=True , **lowerCAmelCase_ : List[str] , ):
"""simple docstring"""
lowercase_ = target_bandwidths
lowercase_ = sampling_rate
lowercase_ = audio_channels
lowercase_ = normalize
lowercase_ = chunk_length_s
lowercase_ = overlap
lowercase_ = hidden_size
lowercase_ = num_filters
lowercase_ = num_residual_layers
lowercase_ = upsampling_ratios
lowercase_ = norm_type
lowercase_ = kernel_size
lowercase_ = last_kernel_size
lowercase_ = residual_kernel_size
lowercase_ = dilation_growth_rate
lowercase_ = use_causal_conv
lowercase_ = pad_mode
lowercase_ = compress
lowercase_ = num_lstm_layers
lowercase_ = trim_right_ratio
lowercase_ = codebook_size
lowercase_ = codebook_dim if codebook_dim is not None else hidden_size
lowercase_ = use_conv_shortcut
if self.norm_type not in ["weight_norm", "time_group_norm"]:
raise ValueError(
F'''self.norm_type must be one of `"weight_norm"`, `"time_group_norm"`), got {self.norm_type}''')
super().__init__(**lowerCAmelCase_)
@property
def _UpperCAmelCase ( self : Dict):
"""simple docstring"""
if self.chunk_length_s is None:
return None
else:
return int(self.chunk_length_s * self.sampling_rate)
@property
def _UpperCAmelCase ( self : Optional[int]):
"""simple docstring"""
if self.chunk_length_s is None or self.overlap is None:
return None
else:
return max(1 , int((1.0 - self.overlap) * self.chunk_length))
@property
def _UpperCAmelCase ( self : Optional[Any]):
"""simple docstring"""
lowercase_ = np.prod(self.upsampling_ratios)
return math.ceil(self.sampling_rate / hop_length)
@property
def _UpperCAmelCase ( self : Optional[int]):
"""simple docstring"""
return int(1_0_0_0 * self.target_bandwidths[-1] // (self.frame_rate * 1_0))
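# Example: with the defaults above, hop_length = 8 * 5 * 4 * 2 = 320 samples,
# so config.frame_rate == math.ceil(24_000 / 320) == 75 frames per second.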
from pathlib import Path

import cv2
import numpy as np
from matplotlib import pyplot as plt


def get_rotation(img: np.ndarray, pt1: np.ndarray, pt2: np.ndarray, rows: int, cols: int) -> np.ndarray:
    """Rotate the image by mapping the three points in pt1 onto the three points in pt2."""
    matrix = cv2.getAffineTransform(pt1, pt2)
    return cv2.warpAffine(img, matrix, (rows, cols))


if __name__ == "__main__":
    # read original image
    image = cv2.imread(
        str(Path(__file__).resolve().parent.parent / "image_data" / "lena.jpg")
    )
    # turn image in gray scale value
    gray_img = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    # get image shape
    img_rows, img_cols = gray_img.shape

    # set different points to rotate image
    pts1 = np.array([[50, 50], [200, 50], [50, 200]], np.float32)
    pts2 = np.array([[10, 100], [200, 50], [100, 250]], np.float32)
    pts3 = np.array([[50, 50], [150, 50], [120, 200]], np.float32)
    pts4 = np.array([[10, 100], [80, 50], [180, 250]], np.float32)
    # add all rotated images in a list
    images = [
        gray_img,
        get_rotation(gray_img, pts1, pts2, img_rows, img_cols),
        get_rotation(gray_img, pts2, pts3, img_rows, img_cols),
        get_rotation(gray_img, pts2, pts4, img_rows, img_cols),
    ]

    # plot different image rotations
    fig = plt.figure(1)
    titles = ["Original", "Rotation 1", "Rotation 2", "Rotation 3"]
    for i, image in enumerate(images):
        plt.subplot(2, 2, i + 1), plt.imshow(image, "gray")
        plt.title(titles[i])
        plt.axis("off")
    plt.subplots_adjust(left=0.0, bottom=0.05, right=1.0, top=0.95)
    plt.show()
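# cv2.getAffineTransform derives the 2x3 affine matrix that maps the three source
# points onto the three destination points; cv2.warpAffine then applies that matrix
# to every pixel of the image.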
import pandas as pd
from matplotlib import pyplot as plt
from sklearn.linear_model import LinearRegression
# Splitting the dataset into the Training set and Test set
from sklearn.model_selection import train_test_split
# Fitting Polynomial Regression to the dataset
from sklearn.preprocessing import PolynomialFeatures
# Importing the dataset
dataset = pd.read_csv(
    "https://s3.us-west-2.amazonaws.com/public.gamelab.fun/dataset/"
    "position_salaries.csv"
)
X = dataset.iloc[:, 1:2].values
y = dataset.iloc[:, 2].values

X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)

poly_reg = PolynomialFeatures(degree=4)
X_poly = poly_reg.fit_transform(X)
pol_reg = LinearRegression()
pol_reg.fit(X_poly, y)


# Visualizing the Polynomial Regression results
def viz_polynomial() -> None:
    plt.scatter(X, y, color="red")
    plt.plot(X, pol_reg.predict(poly_reg.fit_transform(X)), color="blue")
    plt.title("Truth or Bluff (Linear Regression)")
    plt.xlabel("Position level")
    plt.ylabel("Salary")
    plt.show()


if __name__ == "__main__":
    viz_polynomial()

    # Predicting a new result with Polynomial Regression
    pol_reg.predict(poly_reg.fit_transform([[5.5]]))
    # output should be 132148.43750003
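# Note: poly_reg.fit_transform expands each position level x into [1, x, x**2, x**3, x**4]
# (degree=4), so pol_reg above is an ordinary linear regression fit on those expanded features.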
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "tiiuae/falcon-40b": "https://huggingface.co/tiiuae/falcon-40b/resolve/main/config.json",
    "tiiuae/falcon-7b": "https://huggingface.co/tiiuae/falcon-7b/resolve/main/config.json",
}


class FalconConfig(PretrainedConfig):
    model_type = "falcon"
    keys_to_ignore_at_inference = ["past_key_values"]

    def __init__(
        self,
        vocab_size=65024,
        hidden_size=4544,
        num_hidden_layers=32,
        num_attention_heads=71,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        use_cache=True,
        hidden_dropout=0.0,
        attention_dropout=0.0,
        num_kv_heads=None,
        alibi=False,
        new_decoder_architecture=False,
        multi_query=True,
        parallel_attn=True,
        bias=False,
        bos_token_id=11,
        eos_token_id=11,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        # Backward compatibility with n_embed kwarg
        n_embed = kwargs.pop("n_embed", None)
        self.hidden_size = hidden_size if n_embed is None else n_embed
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.num_kv_heads = num_attention_heads if num_kv_heads is None else num_kv_heads
        self.alibi = alibi
        self.new_decoder_architecture = new_decoder_architecture
        self.multi_query = multi_query  # Ignored when new_decoder_architecture is True
        self.parallel_attn = parallel_attn
        self.bias = bias

        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

    @property
    def head_dim(self):
        return self.hidden_size // self.num_attention_heads

    @property
    def rotary(self):
        return not self.alibi
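# Example (sketch): load a released configuration and inspect the derived values.
# config = FalconConfig.from_pretrained("tiiuae/falcon-7b")
# config.head_dim  # hidden_size // num_attention_heads
# config.rotary    # True whenever alibi is disabled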
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {"google/pegasus-xsum": "https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model"}
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/pegasus-xsum": 512,
}

logger = logging.get_logger(__name__)


class PegasusTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        pad_token="<pad>",
        eos_token="</s>",
        unk_token="<unk>",
        mask_token="<mask_2>",
        mask_token_sent="<mask_1>",
        additional_special_tokens=None,
        offset=103,
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        self.offset = offset
        if additional_special_tokens is not None:
            if not isinstance(additional_special_tokens, list):
                raise TypeError(
                    f"additional_special_tokens should be of type {type(list)}, but is"
                    f" {type(additional_special_tokens)}"
                )
            additional_special_tokens_extended = (
                ([mask_token_sent] + additional_special_tokens)
                if mask_token_sent not in additional_special_tokens and mask_token_sent is not None
                else additional_special_tokens
            )
            # fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken
            additional_special_tokens_extended += [
                f"<unk_{i}>" for i in range(len(additional_special_tokens_extended), self.offset - 1)
            ]
            if len(set(additional_special_tokens_extended)) != len(additional_special_tokens_extended):
                raise ValueError(
                    "Please make sure that the provided additional_special_tokens do not contain an incorrectly"
                    f" shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}."
                )
            additional_special_tokens = additional_special_tokens_extended
        else:
            additional_special_tokens = [mask_token_sent] if mask_token_sent is not None else []
            additional_special_tokens += [f"<unk_{i}>" for i in range(2, self.offset)]

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            eos_token=eos_token,
            unk_token=unk_token,
            mask_token=mask_token,
            pad_token=pad_token,
            mask_token_sent=mask_token_sent,
            offset=offset,
            additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )
        self.mask_token_sent = mask_token_sent
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

        # add special tokens to encoder dict
        self.encoder: Dict[int, str] = {
            0: self.pad_token,
            1: self.eos_token,
        }
        if self.mask_token_sent is not None:
            self.encoder.update(
                {
                    2: self.mask_token_sent,
                    3: self.mask_token,
                }
            )
        if self.offset > 0:
            # entries 2-104 are only used for pretraining and called <mask_1>, <mask_2>, unk_2, ...unk_102
            # mask_token_sent is already added to list -> so start at 1
            self.encoder.update({i + 3: additional_special_tokens[i] for i in range(1, self.offset - 1)})
        self.decoder: Dict[str, int] = {v: k for k, v in self.encoder.items()}
    @property
    def vocab_size(self) -> int:
        return len(self.sp_model) + self.offset

    def get_vocab(self) -> Dict[str, int]:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token: str) -> int:
        """Converts a token (str) to an id using the vocab, shifted by the offset."""
        if token in self.decoder:
            return self.decoder[token]
        elif token in self.added_tokens_decoder:
            return self.added_tokens_decoder[token]
        sp_id = self.sp_model.piece_to_id(token)
        return sp_id + self.offset

    def _convert_id_to_token(self, index: int) -> str:
        """Converts an index (integer) to a token (str) using the vocab."""
        if index in self.encoder:
            return self.encoder[index]
        elif index in self.added_tokens_encoder:
            return self.added_tokens_encoder[index]
        else:
            token = self.sp_model.IdToPiece(index - self.offset)
        return token

    def convert_tokens_to_string(self, tokens) -> str:
        """Converts a sequence of tokens back into a single string."""
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def num_special_tokens_to_add(self, pair=False) -> int:
        """Just EOS."""
        return 1

    def _special_token_mask(self, seq) -> List[int]:
        all_special_ids = set(self.all_special_ids)  # call it once instead of inside list comp
        all_special_ids.remove(self.unk_token_id)  # <unk> is only sometimes special
        return [1 if x in all_special_ids else 0 for x in seq]

    def get_special_tokens_mask(
        self, token_ids_0: List, token_ids_1: Optional[List] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return self._special_token_mask(token_ids_0)
        elif token_ids_1 is None:
            return self._special_token_mask(token_ids_0) + [1]
        else:
            return self._special_token_mask(token_ids_0 + token_ids_1) + [1]

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        if token_ids_1 is None:
            return token_ids_0 + [self.eos_token_id]
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return token_ids_0 + token_ids_1 + [self.eos_token_id]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)
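# Example usage (sketch; requires the checkpoint's sentencepiece vocab file):
# tokenizer = PegasusTokenizer.from_pretrained("google/pegasus-xsum")
# ids = tokenizer("The quick brown fox.").input_ids  # sequence ends with eos_token_id == 1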
from __future__ import annotations
import time
import numpy as np
test_claim_vector = [8, 5, 9, 7]
test_allocated_res_table = [
    [2, 0, 1, 1],
    [0, 1, 2, 1],
    [4, 0, 0, 3],
    [0, 2, 1, 0],
    [1, 0, 3, 0],
]
test_maximum_claim_table = [
    [3, 2, 1, 4],
    [0, 2, 5, 2],
    [5, 1, 0, 5],
    [1, 5, 3, 0],
    [3, 0, 3, 3],
]


class BankersAlgorithm:
    def __init__(
        self,
        claim_vector: list[int],
        allocated_resources_table: list[list[int]],
        maximum_claim_table: list[list[int]],
    ) -> None:
        self.__claim_vector = claim_vector
        self.__allocated_resources_table = allocated_resources_table
        self.__maximum_claim_table = maximum_claim_table

    def __processes_resource_summation(self) -> list[int]:
        """Sum the currently allocated amount of each resource over all processes."""
        return [
            sum(p_item[i] for p_item in self.__allocated_resources_table)
            for i in range(len(self.__allocated_resources_table[0]))
        ]

    def __available_resources(self) -> list[int]:
        """Resources still available: the claim vector minus what is allocated."""
        return np.array(self.__claim_vector) - np.array(
            self.__processes_resource_summation()
        )

    def __need(self) -> list[list[int]]:
        """Per-process remaining need: maximum claim minus current allocation."""
        return [
            list(np.array(self.__maximum_claim_table[i]) - np.array(allocated_resource))
            for i, allocated_resource in enumerate(self.__allocated_resources_table)
        ]

    def __need_index_manager(self) -> dict[int, list[int]]:
        return {self.__need().index(i): i for i in self.__need()}

    def main(self, **kwargs) -> None:
        """Run the safety check, executing processes whenever their need can be met."""
        need_list = self.__need()
        alloc_resources_table = self.__allocated_resources_table
        available_resources = self.__available_resources()
        need_index_manager = self.__need_index_manager()
        for kw, val in kwargs.items():
            if kw and val is True:
                self.__pretty_data()
        print("_" * 50 + "\n")
        while need_list:
            safe = False
            for each_need in need_list:
                execution = True
                for index, need in enumerate(each_need):
                    if need > available_resources[index]:
                        execution = False
                        break
                if execution:
                    safe = True
                    # get the original index of the process from ind_ctrl db
                    for original_need_index, need_clone in need_index_manager.items():
                        if each_need == need_clone:
                            process_number = original_need_index
                    print(f"Process {process_number + 1} is executing.")
                    # remove the process run from stack
                    need_list.remove(each_need)
                    # update available/freed resources stack
                    available_resources = np.array(available_resources) + np.array(
                        alloc_resources_table[process_number]
                    )
                    print(
                        "Updated available resource stack for processes: "
                        + " ".join([str(x) for x in available_resources])
                    )
                    break
            if safe:
                print("The process is in a safe state.\n")
            else:
                print("System in unsafe state. Aborting...\n")
                break

    def __pretty_data(self):
        """Print the allocation and claim tables together with the resource vectors."""
        print(" " * 9 + "Allocated Resource Table")
        for item in self.__allocated_resources_table:
            print(
                f"P{self.__allocated_resources_table.index(item) + 1}"
                + " ".join(f"{it:>8}" for it in item)
                + "\n"
            )
        print(" " * 9 + "System Resource Table")
        for item in self.__maximum_claim_table:
            print(
                f"P{self.__maximum_claim_table.index(item) + 1}"
                + " ".join(f"{it:>8}" for it in item)
                + "\n"
            )
        print(
            "Current Usage by Active Processes: "
            + " ".join(str(x) for x in self.__claim_vector)
        )
        print(
            "Initial Available Resources: "
            + " ".join(str(x) for x in self.__available_resources())
        )
        time.sleep(1)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
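# Example run (sketch): check whether the test tables above describe a safe state.
# BankersAlgorithm(test_claim_vector, test_allocated_res_table,
#                  test_maximum_claim_table).main(describe=True)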
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding


class GitProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "AutoImageProcessor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, image_processor, tokenizer):
        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        return ["input_ids", "attention_mask", "pixel_values"]
import random


def random_graph(vertices_number: int, probability: float, directed: bool = False) -> dict:
    """Generate a random graph with `vertices_number` vertices and the given edge probability."""
    graph: dict = {i: [] for i in range(vertices_number)}

    # if probability is greater or equal than 1, then generate a complete graph
    if probability >= 1:
        return complete_graph(vertices_number)
    # if probability is lower or equal than 0, then return a graph without edges
    if probability <= 0:
        return graph

    # for each couple of nodes, add an edge from u to v
    # if the randomly generated number is lower than probability
    for i in range(vertices_number):
        for j in range(i + 1, vertices_number):
            if random.random() < probability:
                graph[i].append(j)
                if not directed:
                    # if the graph is undirected, add an edge from j to i as well
                    graph[j].append(i)
    return graph


def complete_graph(vertices_number: int) -> dict:
    """Generate a complete graph: every vertex is connected to every other vertex."""
    return {
        i: [j for j in range(vertices_number) if i != j] for i in range(vertices_number)
    }


if __name__ == "__main__":
    import doctest

    doctest.testmod()
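# Example (deterministic case):
# complete_graph(3)  # -> {0: [1, 2], 1: [0, 2], 2: [0, 1]}
# random_graph(4, 0.5)  # random undirected graph on 4 vertices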
"""simple docstring"""
import numpy as np
from sklearn.datasets import fetch_california_housing
from sklearn.metrics import mean_absolute_error, mean_squared_error
from sklearn.model_selection import train_test_split
from xgboost import XGBRegressor
def _snake_case ( lowerCamelCase__ : dict ) -> tuple:
return (data["data"], data["target"])
def _snake_case ( lowerCamelCase__ : np.ndarray , lowerCamelCase__ : np.ndarray , lowerCamelCase__ : np.ndarray ) -> np.ndarray:
lowerCamelCase_ : Any =XGBRegressor(verbosity=0 , random_state=42 )
xgb.fit(lowerCamelCase__ , lowerCamelCase__ )
# Predict target for test data
lowerCamelCase_ : Optional[Any] =xgb.predict(lowerCamelCase__ )
lowerCamelCase_ : str =predictions.reshape(len(lowerCamelCase__ ) , 1 )
return predictions
def _snake_case ( ) -> None:
lowerCamelCase_ : Tuple =fetch_california_housing()
lowerCamelCase_ , lowerCamelCase_ : Union[str, Any] =data_handling(lowerCamelCase__ )
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ : List[str] =train_test_split(
lowerCamelCase__ , lowerCamelCase__ , test_size=0.25 , random_state=1 )
lowerCamelCase_ : Optional[Any] =xgboost(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
# Error printing
print(F"""Mean Absolute Error : {mean_absolute_error(lowerCamelCase__ , lowerCamelCase__ )}""" )
print(F"""Mean Square Error : {mean_squared_error(lowerCamelCase__ , lowerCamelCase__ )}""" )
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
main()
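# Both printed metrics are computed on the held-out 25% test split; lower is better.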
"""simple docstring"""
import copy
import json
import os
import tempfile
from transformers import is_torch_available
from .test_configuration_utils import config_common_kwargs
class lowercase__ ( snake_case__ ):
def __init__( self : Tuple , snake_case__ : Optional[int] , snake_case__ : int=None , snake_case__ : Union[str, Any]=True , snake_case__ : Optional[int]=None , **snake_case__ : Optional[int] ):
lowerCamelCase_ : Dict =parent
lowerCamelCase_ : List[str] =config_class
lowerCamelCase_ : Union[str, Any] =has_text_modality
lowerCamelCase_ : Optional[int] =kwargs
lowerCamelCase_ : List[str] =common_properties
def UpperCAmelCase__ ( self : Optional[Any] ):
lowerCamelCase_ : List[str] =self.config_class(**self.inputs_dict )
lowerCamelCase_ : Any =(
["hidden_size", "num_attention_heads", "num_hidden_layers"]
if self.common_properties is None
else self.common_properties
)
# Add common fields for text models
if self.has_text_modality:
common_properties.extend(["vocab_size"] )
# Test that config has the common properties as getters
for prop in common_properties:
self.parent.assertTrue(hasattr(snake_case__ , snake_case__ ) , msg=F"""`{prop}` does not exist""" )
# Test that config has the common properties as setter
for idx, name in enumerate(snake_case__ ):
try:
setattr(snake_case__ , snake_case__ , snake_case__ )
self.parent.assertEqual(
getattr(snake_case__ , snake_case__ ) , snake_case__ , msg=F"""`{name} value {idx} expected, but was {getattr(snake_case__ , snake_case__ )}""" )
except NotImplementedError:
# Some models might not be able to implement setters for common_properties
# In that case, a NotImplementedError is raised
pass
# Test if config class can be called with Config(prop_name=..)
for idx, name in enumerate(snake_case__ ):
try:
lowerCamelCase_ : Dict =self.config_class(**{name: idx} )
self.parent.assertEqual(
getattr(snake_case__ , snake_case__ ) , snake_case__ , msg=F"""`{name} value {idx} expected, but was {getattr(snake_case__ , snake_case__ )}""" )
except NotImplementedError:
# Some models might not be able to implement setters for common_properties
# In that case, a NotImplementedError is raised
pass
def UpperCAmelCase__ ( self : Any ):
lowerCamelCase_ : Tuple =self.config_class(**self.inputs_dict )
lowerCamelCase_ : Any =json.loads(config.to_json_string() )
for key, value in self.inputs_dict.items():
self.parent.assertEqual(obj[key] , snake_case__ )
def UpperCAmelCase__ ( self : int ):
lowerCamelCase_ : Tuple =self.config_class(**self.inputs_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
lowerCamelCase_ : List[Any] =os.path.join(snake_case__ , "config.json" )
config_first.to_json_file(snake_case__ )
lowerCamelCase_ : Optional[int] =self.config_class.from_json_file(snake_case__ )
self.parent.assertEqual(config_second.to_dict() , config_first.to_dict() )
def UpperCAmelCase__ ( self : Dict ):
lowerCamelCase_ : Dict =self.config_class(**self.inputs_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
config_first.save_pretrained(snake_case__ )
lowerCamelCase_ : Optional[int] =self.config_class.from_pretrained(snake_case__ )
self.parent.assertEqual(config_second.to_dict() , config_first.to_dict() )
def UpperCAmelCase__ ( self : str ):
lowerCamelCase_ : Dict =self.config_class(**self.inputs_dict )
lowerCamelCase_ : Dict ="test"
with tempfile.TemporaryDirectory() as tmpdirname:
lowerCamelCase_ : str =os.path.join(snake_case__ , snake_case__ )
config_first.save_pretrained(snake_case__ )
lowerCamelCase_ : Optional[Any] =self.config_class.from_pretrained(snake_case__ , subfolder=snake_case__ )
self.parent.assertEqual(config_second.to_dict() , config_first.to_dict() )
def UpperCAmelCase__ ( self : Optional[Any] ):
lowerCamelCase_ : Optional[Any] =self.config_class(**self.inputs_dict , num_labels=5 )
self.parent.assertEqual(len(config.idalabel ) , 5 )
self.parent.assertEqual(len(config.labelaid ) , 5 )
lowerCamelCase_ : List[Any] =3
self.parent.assertEqual(len(config.idalabel ) , 3 )
self.parent.assertEqual(len(config.labelaid ) , 3 )
def UpperCAmelCase__ ( self : List[Any] ):
if self.config_class.is_composition:
return
lowerCamelCase_ : Tuple =self.config_class()
self.parent.assertIsNotNone(snake_case__ )
def UpperCAmelCase__ ( self : List[Any] ):
lowerCamelCase_ : List[str] =copy.deepcopy(snake_case__ )
lowerCamelCase_ : Optional[int] =self.config_class(**snake_case__ )
lowerCamelCase_ : Union[str, Any] =[]
for key, value in config_common_kwargs.items():
if key == "torch_dtype":
if not is_torch_available():
continue
else:
import torch
if config.torch_dtype != torch.floataa:
wrong_values.append(("torch_dtype", config.torch_dtype, torch.floataa) )
elif getattr(snake_case__ , snake_case__ ) != value:
wrong_values.append((key, getattr(snake_case__ , snake_case__ ), value) )
if len(snake_case__ ) > 0:
lowerCamelCase_ : Any ="\n".join([F"""- {v[0]}: got {v[1]} instead of {v[2]}""" for v in wrong_values] )
raise ValueError(F"""The following keys were not properly set in the config:\n{errors}""" )
def UpperCAmelCase__ ( self : int ):
self.create_and_test_config_common_properties()
self.create_and_test_config_to_json_string()
self.create_and_test_config_to_json_file()
self.create_and_test_config_from_and_save_pretrained()
self.create_and_test_config_from_and_save_pretrained_subfolder()
self.create_and_test_config_with_num_labels()
self.check_config_can_be_init_without_params()
self.check_config_arguments_init()
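# Typical usage from a model test's setUp (see the MRA tests later in this file set):
# self.config_tester = ConfigTester(self, config_class=MraConfig, hidden_size=37)
# self.config_tester.run_common_tests()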
import string
def decrypt(message: str) -> None:
    """Brute-force: print the decryption of `message` under every possible Caesar key."""
    for key in range(len(string.ascii_uppercase)):
        translated = ""
        for symbol in message:
            if symbol in string.ascii_uppercase:
                num = string.ascii_uppercase.find(symbol)
                num = num - key
                if num < 0:
                    num = num + len(string.ascii_uppercase)
                translated = translated + string.ascii_uppercase[num]
            else:
                translated = translated + symbol
        print(f"Decryption using Key #{key}: {translated}")


def main() -> None:
    message = input("Encrypted message: ")
    message = message.upper()
    decrypt(message)
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
    "configuration_wav2vec2": ["WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP", "Wav2Vec2Config"],
    "feature_extraction_wav2vec2": ["Wav2Vec2FeatureExtractor"],
    "processing_wav2vec2": ["Wav2Vec2Processor"],
    "tokenization_wav2vec2": ["Wav2Vec2CTCTokenizer", "Wav2Vec2Tokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_wav2vec2"] = [
        "WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Wav2Vec2ForAudioFrameClassification",
        "Wav2Vec2ForCTC",
        "Wav2Vec2ForMaskedLM",
        "Wav2Vec2ForPreTraining",
        "Wav2Vec2ForSequenceClassification",
        "Wav2Vec2ForXVector",
        "Wav2Vec2Model",
        "Wav2Vec2PreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_wav2vec2"] = [
        "TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFWav2Vec2ForCTC",
        "TFWav2Vec2Model",
        "TFWav2Vec2PreTrainedModel",
        "TFWav2Vec2ForSequenceClassification",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_wav2vec2"] = [
        "FlaxWav2Vec2ForCTC",
        "FlaxWav2Vec2ForPreTraining",
        "FlaxWav2Vec2Model",
        "FlaxWav2Vec2PreTrainedModel",
    ]
if TYPE_CHECKING:
    from .configuration_wav2vec2 import WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP, Wav2Vec2Config
    from .feature_extraction_wav2vec2 import Wav2Vec2FeatureExtractor
    from .processing_wav2vec2 import Wav2Vec2Processor
    from .tokenization_wav2vec2 import Wav2Vec2CTCTokenizer, Wav2Vec2Tokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_wav2vec2 import (
            WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
            Wav2Vec2ForAudioFrameClassification,
            Wav2Vec2ForCTC,
            Wav2Vec2ForMaskedLM,
            Wav2Vec2ForPreTraining,
            Wav2Vec2ForSequenceClassification,
            Wav2Vec2ForXVector,
            Wav2Vec2Model,
            Wav2Vec2PreTrainedModel,
        )
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_tf_wav2vec2 import (
            TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFWav2Vec2ForCTC,
            TFWav2Vec2ForSequenceClassification,
            TFWav2Vec2Model,
            TFWav2Vec2PreTrainedModel,
        )
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_flax_wav2vec2 import (
            FlaxWav2Vec2ForCTC,
            FlaxWav2Vec2ForPreTraining,
            FlaxWav2Vec2Model,
            FlaxWav2Vec2PreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
from string import ascii_uppercase

dict1 = {char: i for i, char in enumerate(ascii_uppercase)}
dict2 = dict(enumerate(ascii_uppercase))


def generate_key(message: str, key: str) -> str:
    """Repeat the key until it matches the length of the message."""
    x = len(message)
    i = 0
    while True:
        if x == i:
            i = 0
        if len(key) == len(message):
            break
        key += key[i]
        i += 1
    return key


def cipher_text(message: str, key_new: str) -> str:
    """Encrypt: shift each letter back by the corresponding key letter (mod 26)."""
    cipher_text = ""
    i = 0
    for letter in message:
        if letter == " ":
            cipher_text += " "
        else:
            x = (dict1[letter] - dict1[key_new[i]]) % 26
            i += 1
            cipher_text += dict2[x]
    return cipher_text


def original_text(cipher_text: str, key_new: str) -> str:
    """Decrypt: add the key letter back to recover the original message."""
    or_txt = ""
    i = 0
    for letter in cipher_text:
        if letter == " ":
            or_txt += " "
        else:
            x = (dict1[letter] + dict1[key_new[i]] + 26) % 26
            i += 1
            or_txt += dict2[x]
    return or_txt


def main() -> None:
    message = "THE GERMAN ATTACK"
    key = "SECRET"
    key_new = generate_key(message, key)
    s = cipher_text(message, key_new)
    print(f"Encrypted Text = {s}")
    print(f"Original Text = {original_text(s, key_new)}")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
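# Note: cipher_text subtracts the key letter while original_text adds it back (mod 26),
# so original_text(cipher_text(msg, key_new), key_new) recovers the uppercase input.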
import unittest
from transformers import MraConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_torch_available():
import torch
from transformers import (
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraModel,
)
from transformers.models.mra.modeling_mra import MRA_PRETRAINED_MODEL_ARCHIVE_LIST
class MraModelTester:
    def __init__(
        self,
        parent,
        batch_size=2,
        seq_length=8,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=16,
        num_hidden_layers=5,
        num_attention_heads=2,
        intermediate_size=36,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return MraConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

    def get_pipeline_config(self):
        config = self.get_config()
        config.vocab_size = 300
        return config
    def prepare_config_and_inputs_for_decoder(self):
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = self.prepare_config_and_inputs()

        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        return (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )
    def create_and_check_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = MraModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_model_as_decoder(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask):
        config.add_cross_attention = True
        model = MraModel(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
        )
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            encoder_hidden_states=encoder_hidden_states,
        )
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_for_masked_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = MraForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_question_answering(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = MraForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_for_sequence_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = MraForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = MraForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_multiple_choice(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_choices = self.num_choices
        model = MraForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class MraModelTest(ModelTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            MraModel,
            MraForMaskedLM,
            MraForMultipleChoice,
            MraForQuestionAnswering,
            MraForSequenceClassification,
            MraForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    test_pruning = False
    test_headmasking = False
    test_torchscript = False
    has_attentions = False
    all_generative_model_classes = ()

    def setUp(self):
        self.model_tester = MraModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MraConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in MRA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MraModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip(reason="MRA does not output attentions")
    def test_attention_outputs(self):
        return
@require_torch
class MraModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = MraModel.from_pretrained("uw-madison/mra-base-512-4")
        input_ids = torch.arange(256).unsqueeze(0)
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size((1, 256, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-0.0140, 0.0830, -0.0381], [0.1546, 0.1402, 0.0220], [0.1162, 0.0851, 0.0165]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_masked_lm(self):
        model = MraForMaskedLM.from_pretrained("uw-madison/mra-base-512-4")
        input_ids = torch.arange(256).unsqueeze(0)
        with torch.no_grad():
            output = model(input_ids)[0]
        vocab_size = 50265
        expected_shape = torch.Size((1, 256, vocab_size))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[9.2595, -3.6038, 11.8819], [9.3869, -3.2693, 11.0956], [11.8524, -3.4938, 13.1210]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_masked_lm_long_input(self):
        model = MraForMaskedLM.from_pretrained("uw-madison/mra-base-4096-8-d3")
        input_ids = torch.arange(4096).unsqueeze(0)
        with torch.no_grad():
            output = model(input_ids)[0]
        vocab_size = 50265
        expected_shape = torch.Size((1, 4096, vocab_size))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[5.4789, -2.3564, 7.5064], [7.9067, -1.3369, 9.9668], [9.0712, -1.8106, 7.0380]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
"""simple docstring"""
from dataclasses import dataclass
from typing import Optional, Tuple
import torch
from torch import nn
from transformers import RobertaPreTrainedModel, XLMRobertaConfig, XLMRobertaModel
from transformers.utils import ModelOutput
@dataclass
class a ( a_ ):
UpperCAmelCase_ : Optional[torch.FloatTensor] =None
UpperCAmelCase_ : torch.FloatTensor =None
UpperCAmelCase_ : Optional[Tuple[torch.FloatTensor]] =None
UpperCAmelCase_ : Optional[Tuple[torch.FloatTensor]] =None
class a ( a_ ):
def __init__( self , _lowerCamelCase=1 , _lowerCamelCase=0 , _lowerCamelCase=2 , _lowerCamelCase=5_1_2 , _lowerCamelCase="cls" , _lowerCamelCase=False , _lowerCamelCase=True , **_lowerCamelCase , ):
super().__init__(pad_token_id=_lowerCamelCase , bos_token_id=_lowerCamelCase , eos_token_id=_lowerCamelCase , **_lowerCamelCase )
lowercase = project_dim
lowercase = pooler_fn
lowercase = learn_encoder
lowercase = use_attention_mask
class a ( a_ ):
UpperCAmelCase_ : Optional[int] =[R"pooler", R"logit_scale"]
UpperCAmelCase_ : int =[R"position_ids", R"predictions.decoder.bias"]
UpperCAmelCase_ : List[str] ="roberta"
UpperCAmelCase_ : Dict =RobertaSeriesConfig
def __init__( self , _lowerCamelCase ):
super().__init__(_lowerCamelCase )
lowercase = XLMRobertaModel(_lowerCamelCase )
lowercase = nn.Linear(config.hidden_size , config.project_dim )
lowercase = getattr(_lowerCamelCase , 'has_pre_transformation' , _lowerCamelCase )
if self.has_pre_transformation:
lowercase = nn.Linear(config.hidden_size , config.project_dim )
lowercase = nn.LayerNorm(config.hidden_size , eps=config.layer_norm_eps )
self.post_init()
def UpperCamelCase_ ( self , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = None , ):
lowercase = return_dict if return_dict is not None else self.config.use_return_dict
lowercase = self.base_model(
input_ids=_lowerCamelCase , attention_mask=_lowerCamelCase , token_type_ids=_lowerCamelCase , position_ids=_lowerCamelCase , head_mask=_lowerCamelCase , inputs_embeds=_lowerCamelCase , encoder_hidden_states=_lowerCamelCase , encoder_attention_mask=_lowerCamelCase , output_attentions=_lowerCamelCase , output_hidden_states=True if self.has_pre_transformation else output_hidden_states , return_dict=_lowerCamelCase , )
if self.has_pre_transformation:
lowercase = outputs['hidden_states'][-2]
lowercase = self.pre_LN(_lowerCamelCase )
lowercase = self.transformation_pre(_lowerCamelCase )
return TransformationModelOutput(
projection_state=_lowerCamelCase , last_hidden_state=outputs.last_hidden_state , hidden_states=outputs.hidden_states , attentions=outputs.attentions , )
else:
lowercase = self.transformation(outputs.last_hidden_state )
return TransformationModelOutput(
projection_state=_lowerCamelCase , last_hidden_state=outputs.last_hidden_state , hidden_states=outputs.hidden_states , attentions=outputs.attentions , )
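# When `has_pre_transformation` is set on the config, the projection is taken from the
# penultimate hidden state (after an extra LayerNorm) instead of the final hidden state.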
"""simple docstring"""
from argparse import ArgumentParser
from .env import EnvironmentCommand
def _SCREAMING_SNAKE_CASE ( ):
'''simple docstring'''
lowercase = ArgumentParser('Diffusers CLI tool' , usage='diffusers-cli <command> [<args>]' )
lowercase = parser.add_subparsers(help='diffusers-cli command helpers' )
# Register commands
EnvironmentCommand.register_subcommand(__snake_case )
# Let's go
lowercase = parser.parse_args()
if not hasattr(__snake_case , 'func' ):
parser.print_help()
exit(1 )
# Run
lowercase = args.func(__snake_case )
service.run()
if __name__ == "__main__":
main()
import math
import qiskit
def quantum_full_adder(input_a: int = 1, input_b: int = 1, carry_in: int = 1):
    """simple docstring"""
    if (
        isinstance(input_a, str)
        or isinstance(input_b, str)
        or isinstance(carry_in, str)
    ):
        raise TypeError("inputs must be integers.")

    if (input_a < 0) or (input_b < 0) or (carry_in < 0):
        raise ValueError("inputs must be positive.")

    if (
        (math.floor(input_a) != input_a)
        or (math.floor(input_b) != input_b)
        or (math.floor(carry_in) != carry_in)
    ):
        raise ValueError("inputs must be exact integers.")

    if (input_a > 2) or (input_b > 2) or (carry_in > 2):
        raise ValueError("inputs must be less or equal to 2.")

    # build registers
    qr = qiskit.QuantumRegister(4, "qr")
    cr = qiskit.ClassicalRegister(2, "cr")
    # list the entries
    entry = [input_a, input_b, carry_in]

    quantum_circuit = qiskit.QuantumCircuit(qr, cr)

    for i in range(0, 3):
        if entry[i] == 2:
            quantum_circuit.h(i)  # for hadamard entries
        elif entry[i] == 1:
            quantum_circuit.x(i)  # for 1 entries
        elif entry[i] == 0:
            quantum_circuit.i(i)  # for 0 entries

    # build the circuit
    quantum_circuit.ccx(0, 1, 3)  # ccx = toffoli gate
    quantum_circuit.cx(0, 1)
    quantum_circuit.ccx(1, 2, 3)
    quantum_circuit.cx(1, 2)
    quantum_circuit.cx(0, 1)

    quantum_circuit.measure([2, 3], cr)  # measure the last two qbits

    backend = qiskit.Aer.get_backend("aer_simulator")
    job = qiskit.execute(quantum_circuit, backend, shots=1000)

    return job.result().get_counts(quantum_circuit)
if __name__ == "__main__":
print(F"Total sum count for state is: {quantum_full_adder(1, 1, 1)}") | 329 |
import os
try:
from .build_directory_md import good_file_paths
except ImportError:
from build_directory_md import good_file_paths # type: ignore
filepaths = list(good_file_paths())
assert filepaths, "good_file_paths() failed!"
upper_files = [file for file in filepaths if file != file.lower()]
if upper_files:
print(F"{len(upper_files)} files contain uppercase characters:")
print('\n'.join(upper_files) + '\n')
space_files = [file for file in filepaths if ' ' in file]
if space_files:
print(F"{len(space_files)} files contain space characters:")
print('\n'.join(space_files) + '\n')
hyphen_files = [file for file in filepaths if '-' in file]
if hyphen_files:
print(F"{len(hyphen_files)} files contain hyphen characters:")
print('\n'.join(hyphen_files) + '\n')
nodir_files = [file for file in filepaths if os.sep not in file]
if nodir_files:
print(F"{len(nodir_files)} files are not in a directory:")
print('\n'.join(nodir_files) + '\n')
bad_files = len(upper_files + space_files + hyphen_files + nodir_files)
if bad_files:
import sys
sys.exit(bad_files) | 329 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'sayakpaul/vit-msn-base': 'https://huggingface.co/sayakpaul/vit-msn-base/resolve/main/config.json',
# See all ViT MSN models at https://huggingface.co/models?filter=vit_msn
}
class ViTMSNConfig(PretrainedConfig):
    '''simple docstring'''
    model_type = 'vit_msn'

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act='gelu',
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-06,
        image_size=224,
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias | 232 |
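# Usage sketch for the config class above; values shown are the defaults, and
# overrides work as with any PretrainedConfig.
config = ViTMSNConfig()
print(config.model_type)   # "vit_msn"
print(config.hidden_size)  # 768

small = ViTMSNConfig(hidden_size=384, num_hidden_layers=6)
print(small.to_dict()["hidden_size"])  # 384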
from PIL import Image
def change_contrast(img: Image, level: int) -> Image:
    '''simple docstring'''
    factor = (259 * (level + 255)) / (255 * (259 - level))

    def contrast(c: int) -> int:
        return int(128 + factor * (c - 128))

    return img.point(contrast)
if __name__ == "__main__":
# Load image
with Image.open('image_data/lena.jpg') as img:
# Change contrast to 170
        cont_img = change_contrast(img, 170)
cont_img.save('image_data/lena_high_contrast.png', format='png') | 232 | 1 |
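# The factor above is the standard contrast mapping
#     F = 259 * (L + 255) / (255 * (259 - L)),
# with midtone 128 as a fixed point: pixels above 128 are pushed up, pixels
# below are pushed down. A quick numeric check:
level = 170
factor = (259 * (level + 255)) / (255 * (259 - level))
print(round(factor, 3))  # 4.85

to_contrast = lambda c: int(128 + factor * (c - 128))
print(to_contrast(128))                    # 128: midtone unchanged
print(to_contrast(140), to_contrast(116))  # pushed apart: 186 and 69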
'''simple docstring'''
def solution(limit: int = 28_123) -> int:
    sum_divs = [1] * (limit + 1)

    for i in range(2, int(limit**0.5) + 1):
        sum_divs[i * i] += i
        for k in range(i + 1, limit // i + 1):
            sum_divs[k * i] += k + i

    abundants = set()
    res = 0

    for n in range(1, limit + 1):
        if sum_divs[n] > n:
            abundants.add(n)

        if not any((n - a in abundants) for a in abundants):
            res += n

    return res
if __name__ == "__main__":
print(solution())
| 351 |
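# Sanity check for the abundance test used above (independent of the sieve):
# 12 is the smallest abundant number, so nothing below it can be the sum of
# two abundant numbers. The published answer for solution() is 4179871.
def sum_proper_divisors(n: int) -> int:
    return sum(d for d in range(1, n) if n % d == 0)


assert sum_proper_divisors(12) == 16 > 12                       # 1+2+3+4+6 = 16: abundant
assert all(sum_proper_divisors(k) <= k for k in range(1, 12))   # none before 12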
'''simple docstring'''
import gc
import importlib.metadata
import tempfile
import unittest
from packaging import version
from transformers import (
AutoModel,
AutoModelForCausalLM,
    AutoModelForSeq2SeqLM,
AutoModelForSequenceClassification,
AutoTokenizer,
BitsAndBytesConfig,
pipeline,
)
from transformers.testing_utils import (
is_torch_available,
require_accelerate,
require_bitsandbytes,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
def get_some_linear_layer(model):
    if model.config.model_type == "gpt2":
        return model.transformer.h[0].mlp.c_fc
    return model.transformer.h[0].mlp.dense_4h_to_h
if is_torch_available():
import torch
import torch.nn as nn
    class LoRALayer(nn.Module):
        """Wraps a linear layer with a small LoRA-style adapter; the wrapped weights stay frozen."""

        def __init__(self, module: nn.Module, rank: int):
            super().__init__()
            self.module = module
            self.adapter = nn.Sequential(
                nn.Linear(module.in_features, rank, bias=False), nn.Linear(rank, module.out_features, bias=False), )
            small_std = (2.0 / (5 * min(module.in_features, module.out_features))) ** 0.5
            nn.init.normal_(self.adapter[0].weight, std=small_std)
            nn.init.zeros_(self.adapter[1].weight)
            self.adapter.to(module.weight.device)

        def forward(self, input, *args, **kwargs):
            '''simple docstring'''
            return self.module(input, *args, **kwargs) + self.adapter(input)
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class Base4bitTest(unittest.TestCase):
# We keep the constants inside the init function and model loading inside setUp function
# We need to test on relatively large models (aka >1b parameters otherwise the quantiztion may not work as expected)
# Therefore here we use only bloom-1b3 to test our module
A_ = "bigscience/bloom-1b7"
# Constant values
A_ = 2.109659552692574
A_ = "Hello my name is"
A_ = set()
EXPECTED_OUTPUTS.add("Hello my name is John and I am a professional photographer. I" )
EXPECTED_OUTPUTS.add("Hello my name is John.\nI am a friend of your father.\n" )
EXPECTED_OUTPUTS.add("Hello my name is John Doe, I am a student at the University" )
A_ = 10
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : List[Any] = AutoTokenizer.from_pretrained(self.model_name )
class Bnb4BitTest(Base4bitTest):
    def setUp(self):
        '''simple docstring'''
        super().setUp()

        # Models and tokenizer
        self.model_fp16 = AutoModelForCausalLM.from_pretrained(
            self.model_name, torch_dtype=torch.float16, device_map='auto')
        self.model_4bit = AutoModelForCausalLM.from_pretrained(self.model_name, load_in_4bit=True, device_map='auto')

    def tearDown(self):
        '''simple docstring'''
        del self.model_fp16
        del self.model_4bit

        gc.collect()
        torch.cuda.empty_cache()
    def test_quantization_config_json_serialization(self):
        '''simple docstring'''
        config = self.model_4bit.config

        self.assertTrue(hasattr(config, 'quantization_config'))

        _ = config.to_dict()
        _ = config.to_diff_dict()
        _ = config.to_json_string()

    def test_memory_footprint(self):
        '''simple docstring'''
        from bitsandbytes.nn import Params4bit

        mem_fp16 = self.model_fp16.get_memory_footprint()
        mem_4bit = self.model_4bit.get_memory_footprint()

        self.assertAlmostEqual(mem_fp16 / mem_4bit, self.EXPECTED_RELATIVE_DIFFERENCE)
        linear = get_some_linear_layer(self.model_4bit)
        self.assertTrue(linear.weight.__class__ == Params4bit)

    def test_linear_are_4bit(self):
        '''simple docstring'''
        from transformers import T5PreTrainedModel

        self.model_fp16.get_memory_footprint()
        self.model_4bit.get_memory_footprint()

        for name, module in self.model_4bit.named_modules():
            if isinstance(module, torch.nn.Linear):
                if name not in ["lm_head"] + T5PreTrainedModel._keep_in_fp32_modules:
                    # 4-bit parameters are packed in uint8 variables
                    self.assertTrue(module.weight.dtype == torch.uint8)

    def test_generate_quality(self):
        '''simple docstring'''
        encoded_input = self.tokenizer(self.input_text, return_tensors='pt')
        output_sequences = self.model_4bit.generate(input_ids=encoded_input['input_ids'].to(0), max_new_tokens=10)

        self.assertIn(self.tokenizer.decode(output_sequences[0], skip_special_tokens=True), self.EXPECTED_OUTPUTS)

    def test_generate_quality_config(self):
        '''simple docstring'''
        bnb_config = BitsAndBytesConfig()
        bnb_config.load_in_4bit = True

        model_4bit_from_config = AutoModelForCausalLM.from_pretrained(
            self.model_name, quantization_config=bnb_config, device_map='auto')

        encoded_input = self.tokenizer(self.input_text, return_tensors='pt')
        output_sequences = model_4bit_from_config.generate(
            input_ids=encoded_input['input_ids'].to(0), max_new_tokens=10)

        self.assertIn(self.tokenizer.decode(output_sequences[0], skip_special_tokens=True), self.EXPECTED_OUTPUTS)

    def test_raise_on_save_pretrained(self):
        '''simple docstring'''
        with self.assertRaises(NotImplementedError), tempfile.TemporaryDirectory() as tmpdirname:
            self.model_4bit.save_pretrained(tmpdirname)

    def test_raise_if_config_and_load_in_4bit(self):
        '''simple docstring'''
        bnb_config = BitsAndBytesConfig()
        with self.assertRaises(ValueError):
            _ = AutoModelForCausalLM.from_pretrained(
                self.model_name, quantization_config=bnb_config, load_in_4bit=True, device_map='auto', bnb_4bit_quant_type='nf4', )

    def test_device_and_dtype_assignment(self):
        '''simple docstring'''
        with self.assertRaises(ValueError):
            # Tries with `str`
            self.model_4bit.to('cpu')

        with self.assertRaises(ValueError):
            # Tries with a `dtype``
            self.model_4bit.to(torch.float16)

        with self.assertRaises(ValueError):
            # Tries with a `device`
            self.model_4bit.to(torch.device('cuda:0'))

        with self.assertRaises(ValueError):
            # Tries with a `device`
            self.model_4bit.float()

        with self.assertRaises(ValueError):
            # Tries with a `device`
            self.model_4bit.half()

        # Test if we did not break anything
        encoded_input = self.tokenizer(self.input_text, return_tensors='pt')
        self.model_fp16 = self.model_fp16.to(torch.float32)
        _ = self.model_fp16.generate(input_ids=encoded_input['input_ids'].to(0), max_new_tokens=10)

        # Check this does not throw an error
        _ = self.model_fp16.to('cpu')

        # Check this does not throw an error
        _ = self.model_fp16.half()

        # Check this does not throw an error
        _ = self.model_fp16.float()
    def test_fp32_4bit_conversion(self):
        '''simple docstring'''
        model = AutoModelForSeq2SeqLM.from_pretrained('t5-small', load_in_4bit=True, device_map='auto')
        self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wo.weight.dtype == torch.float32)
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class Bnb4BitT5Test(unittest.TestCase):
@classmethod
    def setUpClass(cls):
        '''simple docstring'''
        cls.model_name = 't5-small'
        cls.dense_act_model_name = 'google/flan-t5-small'  # flan-t5 uses dense-act instead of dense-relu-dense
        cls.tokenizer = AutoTokenizer.from_pretrained(cls.model_name)
        cls.input_text = 'Translate in German: Hello, my dog is cute'
    def tearDown(self):
        '''simple docstring'''
        gc.collect()
        torch.cuda.empty_cache()
    def test_inference_without_keep_in_fp32(self):
        '''simple docstring'''
        from transformers import T5ForConditionalGeneration

        modules = T5ForConditionalGeneration._keep_in_fp32_modules
        T5ForConditionalGeneration._keep_in_fp32_modules = None

        # test with `t5-small`
        model = T5ForConditionalGeneration.from_pretrained(self.model_name, load_in_4bit=True, device_map='auto')
        encoded_input = self.tokenizer(self.input_text, return_tensors='pt').to(0)
        _ = model.generate(**encoded_input)

        # test with `flan-t5-small`
        model = T5ForConditionalGeneration.from_pretrained(
            self.dense_act_model_name, load_in_4bit=True, device_map='auto')
        encoded_input = self.tokenizer(self.input_text, return_tensors='pt').to(0)
        _ = model.generate(**encoded_input)
        T5ForConditionalGeneration._keep_in_fp32_modules = modules
    def test_inference_with_keep_in_fp32(self):
        '''simple docstring'''
        import bitsandbytes as bnb

        from transformers import T5ForConditionalGeneration

        # test with `t5-small`
        model = T5ForConditionalGeneration.from_pretrained(self.model_name, load_in_4bit=True, device_map='auto')

        # there was a bug with decoders - this test checks that it is fixed
        self.assertTrue(isinstance(model.decoder.block[0].layer[0].SelfAttention.q, bnb.nn.Linear4bit))

        encoded_input = self.tokenizer(self.input_text, return_tensors='pt').to(0)
        _ = model.generate(**encoded_input)

        # test with `flan-t5-small`
        model = T5ForConditionalGeneration.from_pretrained(
            self.dense_act_model_name, load_in_4bit=True, device_map='auto')
        encoded_input = self.tokenizer(self.input_text, return_tensors='pt').to(0)
        _ = model.generate(**encoded_input)
class Classes4BitModelTest(Base4bitTest):
    def setUp(self):
        '''simple docstring'''
        super().setUp()
        # model_name
        self.model_name = 'bigscience/bloom-560m'
        self.seq_to_seq_name = 't5-small'

        # Different types of model
        self.base_model = AutoModel.from_pretrained(self.model_name, load_in_4bit=True, device_map='auto')
        # Sequence classification model
        self.sequence_model = AutoModelForSequenceClassification.from_pretrained(
            self.model_name, load_in_4bit=True, device_map='auto')
        # CausalLM model
        self.model_4bit = AutoModelForCausalLM.from_pretrained(self.model_name, load_in_4bit=True, device_map='auto')
        # Seq2seq model
        self.seq_to_seq_model = AutoModelForSeq2SeqLM.from_pretrained(
            self.seq_to_seq_name, load_in_4bit=True, device_map='auto')

    def tearDown(self):
        '''simple docstring'''
        del self.base_model
        del self.sequence_model
        del self.model_4bit
        del self.seq_to_seq_model

        gc.collect()
        torch.cuda.empty_cache()

    def test_correct_head_class(self):
        '''simple docstring'''
        from bitsandbytes.nn import Params4bit

        self.assertTrue(self.base_model.h[-1].mlp.dense_4h_to_h.weight.__class__ == Params4bit)

        # Other heads should be nn.Parameter
        self.assertTrue(self.model_4bit.lm_head.weight.__class__ == torch.nn.Parameter)
        self.assertTrue(self.sequence_model.score.weight.__class__ == torch.nn.Parameter)
        self.assertTrue(self.seq_to_seq_model.lm_head.weight.__class__ == torch.nn.Parameter)
class Pipeline4BitTest(Base4bitTest):
    def setUp(self):
        '''simple docstring'''
        super().setUp()

    def tearDown(self):
        '''simple docstring'''
        del self.pipe

        gc.collect()
        torch.cuda.empty_cache()

    def test_pipeline(self):
        '''simple docstring'''
        self.pipe = pipeline(
            'text-generation', model=self.model_name, model_kwargs={'device_map': 'auto', 'load_in_4bit': True, 'torch_dtype': torch.float16}, max_new_tokens=self.MAX_NEW_TOKENS, )

        # Real second forward pass
        pipeline_output = self.pipe(self.input_text)
        self.assertIn(pipeline_output[0]['generated_text'], self.EXPECTED_OUTPUTS)
@require_torch_multi_gpu
class Bnb4BitTestMultiGpu(Base4bitTest):
    def setUp(self):
        '''simple docstring'''
        super().setUp()

    def test_multi_gpu_loading(self):
        '''simple docstring'''
        model_parallel = AutoModelForCausalLM.from_pretrained(
            self.model_name, load_in_4bit=True, device_map='balanced')

        # Check correct device map
        self.assertEqual(set(model_parallel.hf_device_map.values()), {0, 1})

        # Check that inference pass works on the model
        encoded_input = self.tokenizer(self.input_text, return_tensors='pt')

        # Second real batch
        output_parallel = model_parallel.generate(input_ids=encoded_input['input_ids'].to(0), max_new_tokens=10)
        self.assertIn(self.tokenizer.decode(output_parallel[0], skip_special_tokens=True), self.EXPECTED_OUTPUTS)
class Bnb4BitTestTraining(Base4bitTest):
    def setUp(self):
        '''simple docstring'''
        self.model_name = 'facebook/opt-350m'
        super().setUp()

    def test_training(self):
        '''simple docstring'''
        if version.parse(importlib.metadata.version('bitsandbytes')) < version.parse('0.37.0'):
            return

        # Step 1: freeze all parameters
        model = AutoModelForCausalLM.from_pretrained(self.model_name, load_in_4bit=True)
        self.assertEqual(set(model.hf_device_map.values()), {torch.cuda.current_device()})

        for param in model.parameters():
            param.requires_grad = False  # freeze the model - train adapters later
            if param.ndim == 1:
                # cast the small parameters (e.g. layernorm) to fp32 for stability
                param.data = param.data.to(torch.float32)

        # Step 2: add adapters
        for _, module in model.named_modules():
            if "OPTAttention" in repr(type(module)):
                module.q_proj = LoRALayer(module.q_proj, rank=16)
                module.k_proj = LoRALayer(module.k_proj, rank=16)
                module.v_proj = LoRALayer(module.v_proj, rank=16)

        # Step 3: dummy batch
        batch = self.tokenizer('Test batch ', return_tensors='pt').to(0)

        # Step 4: Check if the gradient is not None
        with torch.cuda.amp.autocast():
            out = model.forward(**batch)
            out.logits.norm().backward()

        for module in model.modules():
            if isinstance(module, LoRALayer):
                self.assertTrue(module.adapter[1].weight.grad is not None)
                self.assertTrue(module.adapter[1].weight.grad.norm().item() > 0)
            elif isinstance(module, nn.Embedding):
                self.assertTrue(module.weight.grad is None)
class Bnb4BitGPT2Test(Bnb4BitTest):
    model_name = "gpt2-xl"
    EXPECTED_RELATIVE_DIFFERENCE = 3.3191854854152187
| 294 | 0 |
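# The LoRALayer defined in these tests needs only plain PyTorch, so the
# freeze-base/train-adapter pattern can be exercised without bitsandbytes or a
# GPU. A minimal sketch; the layer sizes are illustrative.
import torch
import torch.nn as nn

base = nn.Linear(16, 8)
for p in base.parameters():
    p.requires_grad = False  # freeze the wrapped weights

lora = LoRALayer(base, rank=4)  # class defined in the tests above

out = lora(torch.randn(2, 16))
out.sum().backward()

assert base.weight.grad is None                 # frozen base gets no gradient
assert lora.adapter[0].weight.grad is not None  # the adapter does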
import pytest
import datasets
# Import fixture modules as plugins
_a = ["tests.fixtures.files", "tests.fixtures.hub", "tests.fixtures.fsspec"]
def lowerCAmelCase__(__snake_case ,__snake_case ) -> Union[str, Any]:
'''simple docstring'''
for item in items:
if any(marker in item.keywords for marker in ['''integration''', '''unit'''] ):
continue
item.add_marker(pytest.mark.unit )
def lowerCAmelCase__(__snake_case ) -> List[str]:
'''simple docstring'''
config.addinivalue_line('''markers''' ,'''torchaudio_latest: mark test to run with torchaudio>=0.12''' )
@pytest.fixture(autouse=__snake_case )
def lowerCAmelCase__(__snake_case ,__snake_case ) -> int:
'''simple docstring'''
lowerCamelCase__ = tmp_path_factory.getbasetemp() / '''cache'''
lowerCamelCase__ = test_hf_cache_home / '''datasets'''
lowerCamelCase__ = test_hf_cache_home / '''metrics'''
lowerCamelCase__ = test_hf_cache_home / '''modules'''
monkeypatch.setattr('''datasets.config.HF_DATASETS_CACHE''' ,str(__snake_case ) )
monkeypatch.setattr('''datasets.config.HF_METRICS_CACHE''' ,str(__snake_case ) )
monkeypatch.setattr('''datasets.config.HF_MODULES_CACHE''' ,str(__snake_case ) )
lowerCamelCase__ = test_hf_datasets_cache / '''downloads'''
monkeypatch.setattr('''datasets.config.DOWNLOADED_DATASETS_PATH''' ,str(__snake_case ) )
lowerCamelCase__ = test_hf_datasets_cache / '''downloads''' / '''extracted'''
monkeypatch.setattr('''datasets.config.EXTRACTED_DATASETS_PATH''' ,str(__snake_case ) )
@pytest.fixture(autouse=__snake_case ,scope='''session''' )
def lowerCAmelCase__() -> List[str]:
'''simple docstring'''
datasets.disable_progress_bar()
@pytest.fixture(autouse=__snake_case )
def lowerCAmelCase__(__snake_case ) -> Dict:
'''simple docstring'''
monkeypatch.setattr('''datasets.config.HF_UPDATE_DOWNLOAD_COUNTS''' ,__snake_case )
@pytest.fixture
def lowerCAmelCase__(__snake_case ) -> str:
'''simple docstring'''
monkeypatch.setattr('''sqlalchemy.util.deprecations.SILENCE_UBER_WARNING''' ,__snake_case )
| 209 |
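# Effect of the pytest_collection_modifyitems hook above (illustrative tests,
# not part of the suite): anything without an explicit marker becomes a unit test.
import pytest


@pytest.mark.integration
def test_talks_to_network():
    """Keeps its explicit marker; the hook leaves it alone."""


def test_pure_logic():
    """Has no marker, so the hook tags it with pytest.mark.unit."""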
import json
import os
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union
import sentencepiece
from ...tokenization_utils import BatchEncoding, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
_a = "▁"
VOCAB_FILES_NAMES = {
"vocab_file": "vocab.json",
"spm_file": "sentencepiece.bpe.model",
"tokenizer_config_file": "tokenizer_config.json",
}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"facebook/m2m100_418M": "https://huggingface.co/facebook/m2m100_418M/resolve/main/vocab.json",
"facebook/m2m100_1.2B": "https://huggingface.co/facebook/m2m100_1.2B/resolve/main/vocab.json",
},
"spm_file": {
"facebook/m2m100_418M": "https://huggingface.co/facebook/m2m100_418M/resolve/main/sentencepiece.bpe.model",
"facebook/m2m100_1.2B": "https://huggingface.co/facebook/m2m100_1.2B/resolve/main/sentencepiece.bpe.model",
},
"tokenizer_config_file": {
"facebook/m2m100_418M": "https://huggingface.co/facebook/m2m100_418M/resolve/main/tokenizer_config.json",
"facebook/m2m100_1.2B": "https://huggingface.co/facebook/m2m100_1.2B/resolve/main/tokenizer_config.json",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"facebook/m2m100_418M": 1_024,
}
# fmt: off
FAIRSEQ_LANGUAGE_CODES = {
"m2m100": ["af", "am", "ar", "ast", "az", "ba", "be", "bg", "bn", "br", "bs", "ca", "ceb", "cs", "cy", "da", "de", "el", "en", "es", "et", "fa", "ff", "fi", "fr", "fy", "ga", "gd", "gl", "gu", "ha", "he", "hi", "hr", "ht", "hu", "hy", "id", "ig", "ilo", "is", "it", "ja", "jv", "ka", "kk", "km", "kn", "ko", "lb", "lg", "ln", "lo", "lt", "lv", "mg", "mk", "ml", "mn", "mr", "ms", "my", "ne", "nl", "no", "ns", "oc", "or", "pa", "pl", "ps", "pt", "ro", "ru", "sd", "si", "sk", "sl", "so", "sq", "sr", "ss", "su", "sv", "sw", "ta", "th", "tl", "tn", "tr", "uk", "ur", "uz", "vi", "wo", "xh", "yi", "yo", "zh", "zu"],
"wmt21": ["en", "ha", "is", "ja", "cs", "ru", "zh", "de"]
}
class M2M100Tokenizer(PreTrainedTokenizer):
    '''simple docstring'''
    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["""input_ids""", """attention_mask"""]

    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []
    def __init__(
        self,
        vocab_file,
        spm_file,
        src_lang=None,
        tgt_lang=None,
        bos_token='<s>',
        eos_token='</s>',
        sep_token='</s>',
        pad_token='<pad>',
        unk_token='<unk>',
        language_codes='m2m100',
        sp_model_kwargs=None,
        num_madeup_words=8,
        **kwargs,
    ):
        '''simple docstring'''
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        self.language_codes = language_codes
        fairseq_language_code = FAIRSEQ_LANGUAGE_CODES[language_codes]
        self.lang_code_to_token = {lang_code: F'__{lang_code}__' for lang_code in fairseq_language_code}

        kwargs['additional_special_tokens'] = kwargs.get('additional_special_tokens', []) or []
        kwargs["additional_special_tokens"] += [
            self.get_lang_token(lang_code)
            for lang_code in fairseq_language_code
            if self.get_lang_token(lang_code) not in kwargs["additional_special_tokens"]
        ]

        super().__init__(
            src_lang=src_lang, tgt_lang=tgt_lang, bos_token=bos_token, eos_token=eos_token, sep_token=sep_token, unk_token=unk_token, pad_token=pad_token, language_codes=language_codes, sp_model_kwargs=self.sp_model_kwargs, num_madeup_words=num_madeup_words, **kwargs, )

        self.vocab_file = vocab_file
        self.encoder = load_json(vocab_file)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.spm_file = spm_file
        self.sp_model = load_spm(spm_file, self.sp_model_kwargs)

        self.encoder_size = len(self.encoder)

        self.lang_token_to_id = {
            self.get_lang_token(lang_code): self.encoder_size + i for i, lang_code in enumerate(fairseq_language_code)
        }
        self.lang_code_to_id = {lang_code: self.encoder_size + i for i, lang_code in enumerate(fairseq_language_code)}
        self.id_to_lang_token = {v: k for k, v in self.lang_token_to_id.items()}

        self._src_lang = src_lang if src_lang is not None else '''en'''
        self.tgt_lang = tgt_lang
        self.cur_lang_id = self.get_lang_id(self._src_lang)
        self.set_src_lang_special_tokens(self._src_lang)

        self.num_madeup_words = num_madeup_words
    @property
    def vocab_size(self) -> int:
        '''simple docstring'''
        return len(self.encoder) + len(self.lang_token_to_id)

    @property
    def src_lang(self) -> str:
        '''simple docstring'''
        return self._src_lang

    @src_lang.setter
    def src_lang(self, new_src_lang: str) -> None:
        '''simple docstring'''
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)

    def _tokenize(self, text: str):
        '''simple docstring'''
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        '''simple docstring'''
        if token in self.lang_token_to_id:
            return self.lang_token_to_id[token]
        return self.encoder.get(token, self.encoder[self.unk_token])

    def _convert_id_to_token(self, index: int) -> str:
        '''simple docstring'''
        if index in self.id_to_lang_token:
            return self.id_to_lang_token[index]
        return self.decoder.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens):
        '''simple docstring'''
        current_sub_tokens = []
        out_string = ''''''
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        '''simple docstring'''
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)

        prefix_ones = [1] * len(self.prefix_tokens)
        suffix_ones = [1] * len(self.suffix_tokens)
        if token_ids_1 is None:
            return prefix_ones + ([0] * len(token_ids_0)) + suffix_ones
        return prefix_ones + ([0] * len(token_ids_0)) + ([0] * len(token_ids_1)) + suffix_ones

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        '''simple docstring'''
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens

    def get_vocab(self):
        '''simple docstring'''
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        '''simple docstring'''
        state = self.__dict__.copy()
        state['sp_model'] = None
        return state

    def __setstate__(self, d):
        '''simple docstring'''
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, '''sp_model_kwargs'''):
            self.sp_model_kwargs = {}

        self.sp_model = load_spm(self.spm_file, self.sp_model_kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix=None):
        '''simple docstring'''
        save_dir = Path(save_directory)
        if not save_dir.is_dir():
            raise OSError(F'{save_directory} should be a directory')
        vocab_save_path = save_dir / (
            (filename_prefix + '''-''' if filename_prefix else '''''') + self.vocab_files_names['''vocab_file''']
        )
        spm_save_path = save_dir / (
            (filename_prefix + '''-''' if filename_prefix else '''''') + self.vocab_files_names['''spm_file''']
        )

        save_json(self.encoder, vocab_save_path)

        if os.path.abspath(self.spm_file) != os.path.abspath(spm_save_path) and os.path.isfile(self.spm_file):
            copyfile(self.spm_file, spm_save_path)
        elif not os.path.isfile(self.spm_file):
            with open(spm_save_path, '''wb''') as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (str(vocab_save_path), str(spm_save_path))

    def prepare_seq2seq_batch(self, src_texts, src_lang='en', tgt_texts=None, tgt_lang='ro', **kwargs):
        '''simple docstring'''
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self.src_lang)
        return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)

    def _build_translation_inputs(self, raw_inputs, src_lang, tgt_lang, **extra_kwargs):
        '''simple docstring'''
        if src_lang is None or tgt_lang is None:
            raise ValueError('''Translation requires a `src_lang` and a `tgt_lang` for this model''')
        self.src_lang = src_lang
        inputs = self(raw_inputs, add_special_tokens=True, **extra_kwargs)
        tgt_lang_id = self.get_lang_id(tgt_lang)
        inputs['forced_bos_token_id'] = tgt_lang_id
        return inputs
    def _switch_to_input_mode(self):
        '''simple docstring'''
        self.set_src_lang_special_tokens(self.src_lang)

    def _switch_to_target_mode(self):
        '''simple docstring'''
        self.set_tgt_lang_special_tokens(self.tgt_lang)

    def set_src_lang_special_tokens(self, src_lang: str) -> None:
        '''simple docstring'''
        lang_token = self.get_lang_token(src_lang)
        self.cur_lang_id = self.lang_token_to_id[lang_token]
        self.prefix_tokens = [self.cur_lang_id]
        self.suffix_tokens = [self.eos_token_id]

    def set_tgt_lang_special_tokens(self, tgt_lang: str) -> None:
        '''simple docstring'''
        lang_token = self.get_lang_token(tgt_lang)
        self.cur_lang_id = self.lang_token_to_id[lang_token]
        self.prefix_tokens = [self.cur_lang_id]
        self.suffix_tokens = [self.eos_token_id]

    def get_lang_token(self, lang: str) -> str:
        '''simple docstring'''
        return self.lang_code_to_token[lang]

    def get_lang_id(self, lang: str) -> int:
        '''simple docstring'''
        lang_token = self.get_lang_token(lang)
        return self.lang_token_to_id[lang_token]
def load_spm(path: str, sp_model_kwargs) -> sentencepiece.SentencePieceProcessor:
    '''simple docstring'''
    spm = sentencepiece.SentencePieceProcessor(**sp_model_kwargs)
    spm.Load(str(path))
    return spm


def load_json(path: str) -> Union[Dict, List]:
    '''simple docstring'''
    with open(path, '''r''') as f:
        return json.load(f)


def save_json(data, path: str) -> None:
    '''simple docstring'''
    with open(path, '''w''') as f:
        json.dump(data, f, indent=2)
| 209 | 1 |
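# Typical round-trip with this tokenizer, mirroring the documented M2M-100
# usage (downloads the checkpoint on first run).
from transformers import M2M100ForConditionalGeneration, M2M100Tokenizer

tokenizer = M2M100Tokenizer.from_pretrained("facebook/m2m100_418M")
model = M2M100ForConditionalGeneration.from_pretrained("facebook/m2m100_418M")

tokenizer.src_lang = "en"  # triggers set_src_lang_special_tokens via the setter
encoded = tokenizer("Life is like a box of chocolates.", return_tensors="pt")

# Force the decoder to start with the target-language token.
generated = model.generate(**encoded, forced_bos_token_id=tokenizer.get_lang_id("fr"))
print(tokenizer.batch_decode(generated, skip_special_tokens=True))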
def average_absolute_deviation(nums: list[int]) -> float:
    '''simple docstring'''
    if not nums:  # Makes sure that the list is not empty
        raise ValueError("""List is empty""" )

    average = sum(nums) / len(nums)  # Calculate the average
    return sum(abs(x - average) for x in nums) / len(nums)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 178 |
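# Worked example, assuming the function above: for [0, 0, 1] the mean is 1/3,
# the absolute deviations sum to 4/3, and the result is 4/9.
nums = [0, 0, 1]
print(average_absolute_deviation(nums))  # 0.4444...
assert abs(average_absolute_deviation(nums) - 4 / 9) < 1e-12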
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import (
AutoProcessor,
BertTokenizerFast,
BlipImageProcessor,
        GPT2Tokenizer,
InstructBlipProcessor,
PreTrainedTokenizerFast,
)
@require_vision
class InstructBlipProcessorTest(unittest.TestCase):
"""simple docstring"""
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        image_processor = BlipImageProcessor()
        tokenizer = GPT2Tokenizer.from_pretrained("""hf-internal-testing/tiny-random-GPT2Model""")
        qformer_tokenizer = BertTokenizerFast.from_pretrained("""hf-internal-testing/tiny-random-bert""")

        processor = InstructBlipProcessor(image_processor, tokenizer, qformer_tokenizer)

        processor.save_pretrained(self.tmpdirname)

    def get_tokenizer(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).tokenizer

    def get_image_processor(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor

    def get_qformer_tokenizer(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).qformer_tokenizer

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """This function prepares a list of PIL images"""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs
    def test_save_load_pretrained_additional_features(self):
        processor = InstructBlipProcessor(
            tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor(), qformer_tokenizer=self.get_qformer_tokenizer(), )
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="""(BOS)""", eos_token="""(EOS)""")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = InstructBlipProcessor.from_pretrained(
            self.tmpdirname, bos_token="""(BOS)""", eos_token="""(EOS)""", do_normalize=False, padding_value=1.0)

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, PreTrainedTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, BlipImageProcessor)
        self.assertIsInstance(processor.qformer_tokenizer, PreTrainedTokenizerFast)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        qformer_tokenizer = self.get_qformer_tokenizer()

        processor = InstructBlipProcessor(
            tokenizer=tokenizer, image_processor=image_processor, qformer_tokenizer=qformer_tokenizer)

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors="""np""")
        input_processor = processor(images=image_input, return_tensors="""np""")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        qformer_tokenizer = self.get_qformer_tokenizer()

        processor = InstructBlipProcessor(
            tokenizer=tokenizer, image_processor=image_processor, qformer_tokenizer=qformer_tokenizer)

        input_str = """lower newer"""

        encoded_processor = processor(text=input_str)

        encoded_tokens = tokenizer(input_str, return_token_type_ids=False)
        encoded_tokens_qformer = qformer_tokenizer(input_str, return_token_type_ids=False)

        for key in encoded_tokens.keys():
            self.assertListEqual(encoded_tokens[key], encoded_processor[key])

        for key in encoded_tokens_qformer.keys():
            self.assertListEqual(encoded_tokens_qformer[key], encoded_processor["""qformer_""" + key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        qformer_tokenizer = self.get_qformer_tokenizer()

        processor = InstructBlipProcessor(
            tokenizer=tokenizer, image_processor=image_processor, qformer_tokenizer=qformer_tokenizer)

        input_str = """lower newer"""
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(
            list(inputs.keys()), ["""input_ids""", """attention_mask""", """qformer_input_ids""", """qformer_attention_mask""", """pixel_values"""], )

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        qformer_tokenizer = self.get_qformer_tokenizer()

        processor = InstructBlipProcessor(
            tokenizer=tokenizer, image_processor=image_processor, qformer_tokenizer=qformer_tokenizer)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        qformer_tokenizer = self.get_qformer_tokenizer()

        processor = InstructBlipProcessor(
            tokenizer=tokenizer, image_processor=image_processor, qformer_tokenizer=qformer_tokenizer)

        input_str = """lower newer"""
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(
            list(inputs.keys()), ["""input_ids""", """attention_mask""", """qformer_input_ids""", """qformer_attention_mask""", """pixel_values"""], )
| 178 | 1 |
"""simple docstring"""
import tempfile
import unittest
from make_student import create_student_by_copying_alternating_layers
from transformers import AutoConfig
from transformers.file_utils import cached_property
from transformers.testing_utils import require_torch
TINY_BART = '''sshleifer/bart-tiny-random'''
TINY_T5 = '''patrickvonplaten/t5-tiny-random'''
@require_torch
class MakeStudentTester(unittest.TestCase):
    @cached_property
    def teacher_config(self):
        """simple docstring"""
        return AutoConfig.from_pretrained(TINY_BART)

    def test_valid_t5(self):
        """simple docstring"""
        student, *_ = create_student_by_copying_alternating_layers(TINY_T5, tempfile.mkdtemp(), e=1, d=1)
        self.assertEqual(student.config.num_hidden_layers, 1)

    def test_asymmetric_t5(self):
        """simple docstring"""
        student, *_ = create_student_by_copying_alternating_layers(TINY_T5, tempfile.mkdtemp(), e=1, d=None)

    def test_same_decoder_small_encoder(self):
        """simple docstring"""
        student, *_ = create_student_by_copying_alternating_layers(TINY_BART, tempfile.mkdtemp(), e=1, d=None)
        self.assertEqual(student.config.encoder_layers, 1)
        self.assertEqual(student.config.decoder_layers, self.teacher_config.encoder_layers)

    def test_small_enc_small_dec(self):
        """simple docstring"""
        student, *_ = create_student_by_copying_alternating_layers(TINY_BART, tempfile.mkdtemp(), e=1, d=1)
        self.assertEqual(student.config.encoder_layers, 1)
        self.assertEqual(student.config.decoder_layers, 1)

    def test_raises_assert(self):
        """simple docstring"""
        with self.assertRaises(AssertionError):
            create_student_by_copying_alternating_layers(TINY_BART, tempfile.mkdtemp(), e=None, d=None)
| 72 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {'''processing_layoutxlm''': ['''LayoutXLMProcessor''']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''tokenization_layoutxlm'''] = ['''LayoutXLMTokenizer''']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''tokenization_layoutxlm_fast'''] = ['''LayoutXLMTokenizerFast''']
if TYPE_CHECKING:
from .processing_layoutxlm import LayoutXLMProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutxlm import LayoutXLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutxlm_fast import LayoutXLMTokenizerFast
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__) | 331 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tensorflow_text_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    'configuration_bert': ['BERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'BertConfig', 'BertOnnxConfig'],
    'tokenization_bert': ['BasicTokenizer', 'BertTokenizer', 'WordpieceTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_bert_fast'] = ['BertTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_bert'] = [
'BERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'BertForMaskedLM',
'BertForMultipleChoice',
'BertForNextSentencePrediction',
'BertForPreTraining',
'BertForQuestionAnswering',
'BertForSequenceClassification',
'BertForTokenClassification',
'BertLayer',
'BertLMHeadModel',
'BertModel',
'BertPreTrainedModel',
'load_tf_weights_in_bert',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_bert'] = [
'TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFBertEmbeddings',
'TFBertForMaskedLM',
'TFBertForMultipleChoice',
'TFBertForNextSentencePrediction',
'TFBertForPreTraining',
'TFBertForQuestionAnswering',
'TFBertForSequenceClassification',
'TFBertForTokenClassification',
'TFBertLMHeadModel',
'TFBertMainLayer',
'TFBertModel',
'TFBertPreTrainedModel',
]
try:
if not is_tensorflow_text_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_bert_tf'] = ['TFBertTokenizer']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_flax_bert'] = [
'FlaxBertForCausalLM',
'FlaxBertForMaskedLM',
'FlaxBertForMultipleChoice',
'FlaxBertForNextSentencePrediction',
'FlaxBertForPreTraining',
'FlaxBertForQuestionAnswering',
'FlaxBertForSequenceClassification',
'FlaxBertForTokenClassification',
'FlaxBertModel',
'FlaxBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_bert import BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, BertConfig, BertOnnxConfig
from .tokenization_bert import BasicTokenizer, BertTokenizer, WordpieceTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bert_fast import BertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bert import (
BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
BertForMaskedLM,
BertForMultipleChoice,
BertForNextSentencePrediction,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
BertForTokenClassification,
BertLayer,
BertLMHeadModel,
BertModel,
BertPreTrainedModel,
load_tf_weights_in_bert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_bert import (
TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBertEmbeddings,
TFBertForMaskedLM,
TFBertForMultipleChoice,
TFBertForNextSentencePrediction,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertForTokenClassification,
TFBertLMHeadModel,
TFBertMainLayer,
TFBertModel,
TFBertPreTrainedModel,
)
try:
if not is_tensorflow_text_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bert_tf import TFBertTokenizer
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_bert import (
FlaxBertForCausalLM,
FlaxBertForMaskedLM,
FlaxBertForMultipleChoice,
FlaxBertForNextSentencePrediction,
FlaxBertForPreTraining,
FlaxBertForQuestionAnswering,
FlaxBertForSequenceClassification,
FlaxBertForTokenClassification,
FlaxBertModel,
FlaxBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) | 187 |
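# Both __init__.py snippets here use the same lazy-import pattern: collect
# public names in _import_structure, then swap the module object for a
# _LazyModule. The core trick can be reproduced with PEP 562's module-level
# __getattr__; a minimal stand-in sketch (module and class names are invented):
import importlib

_import_structure = {
    "tokenization": ["Tokenizer"],
    "modeling": ["Model"],
}
_name_to_module = {name: mod for mod, names in _import_structure.items() for name in names}


def __getattr__(name):  # PEP 562: invoked only for attributes not found normally
    if name in _name_to_module:
        module = importlib.import_module(f".{_name_to_module[name]}", __name__)
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")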
'''simple docstring'''
from __future__ import annotations
solution = []


def is_safe(board: list[list[int]], row: int, column: int) -> bool:
    """simple docstring"""
    for i in range(len(board)):
        if board[row][i] == 1:
            return False
    for i in range(len(board)):
        if board[i][column] == 1:
            return False
    for i, j in zip(range(row, -1, -1), range(column, -1, -1)):
        if board[i][j] == 1:
            return False
    for i, j in zip(range(row, -1, -1), range(column, len(board))):
        if board[i][j] == 1:
            return False
    return True


def solve(board: list[list[int]], row: int) -> bool:
    """simple docstring"""
    if row >= len(board):
        solution.append(board)
        printboard(board)
        print()
        return True
    for i in range(len(board)):
        if is_safe(board, row, i):
            board[row][i] = 1
            solve(board, row + 1)
            board[row][i] = 0
    return False


def printboard(board: list[list[int]]) -> None:
    """simple docstring"""
    for i in range(len(board)):
        for j in range(len(board)):
            if board[i][j] == 1:
                print("""Q""", end=""" """)
            else:
                print(""".""", end=""" """)
        print()
# n=int(input("The no. of queens"))
n = 8
board = [[0 for i in range(n)] for j in range(n)]
solve(board, 0)
print('The total no. of solutions are :', len(solution)) | 187 | 1 |
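# Sanity check, assuming the 8x8 run above: the eight-queens puzzle has the
# well-known total of 92 solutions. Note that solve() appends the shared board
# by reference, so only the count (not the stored grids) is meaningful here.
assert len(solution) == 92, f"expected 92 solutions for n=8, got {len(solution)}"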
import argparse
import logging
import os
import datasets
import tensorflow as tf
from transformers import AutoTokenizer
lowerCAmelCase__ :Optional[int] = logging.getLogger(__name__)
def parse_args():
'''simple docstring'''
    parser = argparse.ArgumentParser(
        description='Prepare TFRecord shards from pre-tokenized samples of the wikitext dataset.' )
parser.add_argument(
'--dataset_name' , type=a__ , default='wikitext' , help='Name of the training. Explore datasets at: hf.co/datasets.' , )
parser.add_argument(
'--dataset_config' , type=a__ , default='wikitext-103-raw-v1' , help='Configuration name of the dataset.' )
parser.add_argument(
'--tokenizer_name_or_path' , type=a__ , default='sayakpaul/unigram-tokenizer-wikitext' , help='Tokenizer identifier. Can be a local filepath or a Hub identifier.' , )
parser.add_argument(
'--shard_size' , type=a__ , default=1_0_0_0 , help='Number of entries to go in a single shard.' , )
parser.add_argument('--split' , type=a__ , default='train' , choices=['train', 'test', 'validation'] )
parser.add_argument(
'--limit' , default=a__ , type=a__ , help='Limit the number of shards (used for debugging).' , )
parser.add_argument(
'--max_length' , type=a__ , default=5_1_2 , help='Maximum sequence length. For training on TPUs, it helps to have a maximum'
' sequence length that is a multiple of 8.' , )
parser.add_argument(
'--output_dir' , default='tf-tpu' , type=a__ , help='Output directory where the TFRecord shards will be saved. If the'
' path is appended with `gs://` (\'gs://tf-tpu\', for example) then the TFRecord'
' shards will be directly saved to a Google Cloud Storage bucket.' , )
    args = parser.parse_args()
return args
def tokenize_function(tokenizer):
    '''simple docstring'''
    def fn(examples):
        return tokenizer(examples['text'])

    return fn
def get_serialized_examples(tokenized_data):
    '''simple docstring'''
    records = []
    for i in range(len(tokenized_data['input_ids'])):
        feature = {
            'input_ids': tf.train.Feature(int64_list=tf.train.Int64List(value=tokenized_data['input_ids'][i])),
            'attention_mask': tf.train.Feature(
                int64_list=tf.train.Int64List(value=tokenized_data['attention_mask'][i])),
        }
        features = tf.train.Features(feature=feature)
        example = tf.train.Example(features=features)
        record_bytes = example.SerializeToString()
        records.append(record_bytes)
    return records
def main(args):
    '''simple docstring'''
    dataset = datasets.load_dataset(args.dataset_name, args.dataset_config, split=args.split)

    if args.limit is not None:
        max_samples = min(len(dataset), args.limit)
        dataset = dataset.select(range(max_samples))
        print(F'''Limiting the dataset to {args.limit} entries.''')

    tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name_or_path)

    # Handle output directory creation.
    # For serializing into a Google Cloud Storage Bucket, one needs to first
    # create a bucket.
    if "gs" not in args.output_dir:
        if not os.path.exists(args.output_dir):
            os.makedirs(args.output_dir)
        split_dir = os.path.join(args.output_dir, args.split)
        if not os.path.exists(split_dir):
            os.makedirs(split_dir)
    else:
        split_dir = os.path.join(args.output_dir, args.split)

    # Tokenize the whole dataset at once.
    tokenize_fn = tokenize_function(tokenizer)
    dataset_tokenized = dataset.map(tokenize_fn, batched=True, num_proc=4, remove_columns=['text'])

    # We need to concatenate all our texts together, and then split the result
    # into chunks of a fixed size, which we will call block_size. To do this, we
    # will use the map method again, with the option batched=True. When we use batched=True,
    # the function we pass to map() will be passed multiple inputs at once, allowing us
    # to group them into more or fewer examples than we had in the input.
    # This allows us to create our new fixed-length samples. The advantage of this
    # method is that we don't lose a whole lot of content from the dataset compared to the
    # case where we simply tokenize with a pre-defined max_length.

    def group_texts(examples):
        # Concatenate all texts.
        concatenated_examples = {k: sum(examples[k], []) for k in examples.keys()}
        total_length = len(concatenated_examples[list(examples.keys())[0]])
        # We drop the small remainder, though you could add padding instead if the model supports it
        # In this, as in all things, we advise you to follow your heart 🫀
        total_length = (total_length // args.max_length) * args.max_length
        # Split by chunks of max_len.
        result = {
            k: [t[i : i + args.max_length] for i in range(0, total_length, args.max_length)]
            for k, t in concatenated_examples.items()
        }
        return result

    grouped_dataset = dataset_tokenized.map(group_texts, batched=True, batch_size=1_0_0_0, num_proc=4)

    shard_count = 0
    total_records = 0
    for shard in range(0, len(grouped_dataset), args.shard_size):
        dataset_snapshot = grouped_dataset[shard : shard + args.shard_size]
        records_containing = len(dataset_snapshot['input_ids'])
        filename = os.path.join(split_dir, F'''dataset-{shard_count}-{records_containing}.tfrecord''')
        serialized_examples = get_serialized_examples(dataset_snapshot)

        with tf.io.TFRecordWriter(filename) as out_file:
            for i in range(len(serialized_examples)):
                example = serialized_examples[i]
                out_file.write(example)
            print('Wrote file {} containing {} records'.format(filename, records_containing))

        shard_count += 1
        total_records += records_containing

    with open(F'''split-{args.split}-records-count.txt''', 'w') as f:
        print(F'''Total {args.split} records: {total_records}''', file=f)
if __name__ == "__main__":
    args = parse_args()
main(args)
| 329 |
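# Reading the shards back with tf.data; the feature spec must mirror the writer
# above. The filename and max_length here are illustrative, not fixed outputs.
import tensorflow as tf

max_length = 512  # must match the --max_length used when writing

feature_spec = {
    "input_ids": tf.io.FixedLenFeature([max_length], tf.int64),
    "attention_mask": tf.io.FixedLenFeature([max_length], tf.int64),
}


def decode_fn(record_bytes):
    return tf.io.parse_single_example(record_bytes, feature_spec)


dataset = tf.data.TFRecordDataset(["train/dataset-0-1000.tfrecord"]).map(decode_fn)
for example in dataset.take(1):
    print(example["input_ids"].shape)  # (512,)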
import numpy as np
import torch
from torch.utils.data import Dataset, IterableDataset
from ..utils.generic import ModelOutput
class PipelineDataset(Dataset):
    def __init__(self, dataset, process, params):
        """simple docstring"""
        self.dataset = dataset
        self.process = process
        self.params = params

    def __len__(self):
        """simple docstring"""
        return len(self.dataset)

    def __getitem__(self, i):
        """simple docstring"""
        item = self.dataset[i]
        processed = self.process(item, **self.params)
        return processed
class PipelineIterator(IterableDataset):
    def __init__(self, loader, infer, params, loader_batch_size=None):
        """simple docstring"""
        self.loader = loader
        self.infer = infer
        self.params = params
        if loader_batch_size == 1:
            # Let's spare some time by deactivating altogether
            loader_batch_size = None
        self.loader_batch_size = loader_batch_size

        # Internal bookkeeping
        self._loader_batch_index = None
        self._loader_batch_data = None

    def __len__(self):
        """simple docstring"""
        return len(self.loader)

    def __iter__(self):
        """simple docstring"""
        self.iterator = iter(self.loader)
        return self
    def loader_batch_item(self):
        """simple docstring"""
        if isinstance(self._loader_batch_data, torch.Tensor):
            # Batch data is simple tensor, just fetch the slice
            result = self._loader_batch_data[self._loader_batch_index]
        else:
            # Batch data is assumed to be BaseModelOutput (or dict)
            loader_batched = {}
            for k, element in self._loader_batch_data.items():
                if isinstance(element, ModelOutput):
                    # Convert ModelOutput to tuple first
                    element = element.to_tuple()
                    if isinstance(element[0], torch.Tensor):
                        loader_batched[k] = tuple(el[self._loader_batch_index].unsqueeze(0) for el in element)
                    elif isinstance(element[0], np.ndarray):
                        loader_batched[k] = tuple(np.expand_dims(el[self._loader_batch_index], 0) for el in element)
                    continue
                if k in {"hidden_states", "past_key_values", "attentions"} and isinstance(element, tuple):
                    # Those are stored as lists of tensors so need specific unbatching.
                    if isinstance(element[0], torch.Tensor):
                        loader_batched[k] = tuple(el[self._loader_batch_index].unsqueeze(0) for el in element)
                    elif isinstance(element[0], np.ndarray):
                        loader_batched[k] = tuple(np.expand_dims(el[self._loader_batch_index], 0) for el in element)
                    continue
                if element is None:
                    # This can happen for optional data that get passed around
                    loader_batched[k] = None
                elif isinstance(element[self._loader_batch_index], torch.Tensor):
                    # Take correct batch data, but make it looked like batch_size=1
                    # For compatibility with other methods within transformers
                    loader_batched[k] = element[self._loader_batch_index].unsqueeze(0)
                elif isinstance(element[self._loader_batch_index], np.ndarray):
                    # Take correct batch data, but make it looked like batch_size=1
                    # For compatibility with other methods within transformers
                    loader_batched[k] = np.expand_dims(element[self._loader_batch_index], 0)
                else:
                    # This is typically a list, so no need to `unsqueeze`.
                    loader_batched[k] = element[self._loader_batch_index]
            # Recreate the element by reusing the original class to make it look
            # batch_size=1
            result = self._loader_batch_data.__class__(loader_batched)
        self._loader_batch_index += 1
        return result
    def __next__(self):
        """simple docstring"""
        if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
            # We are currently unrolling a batch so we just need to return
            # the current item within a batch
            return self.loader_batch_item()

        # We're out of items within a batch
        item = next(self.iterator)
        processed = self.infer(item, **self.params)
        # We now have a batch of "inferred things".
        if self.loader_batch_size is not None:
            # Try to infer the size of the batch
            if isinstance(processed, torch.Tensor):
                first_tensor = processed
            else:
                key = list(processed.keys())[0]
                first_tensor = processed[key]
            if isinstance(first_tensor, list):
                observed_batch_size = len(first_tensor)
            else:
                observed_batch_size = first_tensor.shape[0]
            if 0 < observed_batch_size < self.loader_batch_size:
                # could be last batch so we can't unroll as many
                # elements.
                self.loader_batch_size = observed_batch_size
            # Setting internal index to unwrap the batch
            self._loader_batch_data = processed
            self._loader_batch_index = 0
            return self.loader_batch_item()
        else:
            # We're not unrolling batches
            return processed
class PipelineChunkIterator(PipelineIterator):
    def __init__(self, loader, infer, params, loader_batch_size=None):
        """simple docstring"""
        super().__init__(loader, infer, params)

    def __iter__(self):
        """simple docstring"""
        self.iterator = iter(self.loader)
        self.subiterator = None
        return self

    def __next__(self):
        """simple docstring"""
        if self.subiterator is None:
            self.subiterator = self.infer(next(self.iterator), **self.params)
        try:
            # Try to return next item
            processed = next(self.subiterator)
        except StopIteration:
            # When a preprocess iterator ends, we can start looking at the next item
            # ChunkIterator will keep feeding until ALL elements of iterator
            # all have created their subiterator and have been iterating against.
            #
            # Another way to look at it, is we're basically flattening lists of lists
            # into a single list, but with generators
            self.subiterator = self.infer(next(self.iterator), **self.params)
            processed = next(self.subiterator)
        return processed
class __a ( UpperCAmelCase ):
    def __iter__( self ):
        """simple docstring"""
        self.iterator = iter(self.loader )
        return self
    def __next__( self ):
        """simple docstring"""
        is_last = False
        accumulator = []
        if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
            while self._loader_batch_index < self.loader_batch_size:
                item = self.loader_batch_item()
                is_last = item.pop('is_last' )
                accumulator.append(item )
                if is_last:
                    return accumulator
        while not is_last:
            processed = self.infer(next(self.iterator ) , **self.params )
            if self.loader_batch_size is not None:
                if isinstance(processed , torch.Tensor ):
                    first_tensor = processed
                else:
                    key = list(processed.keys() )[0]
                    first_tensor = processed[key]
                if isinstance(first_tensor , list ):
                    observed_batch_size = len(first_tensor )
                else:
                    observed_batch_size = first_tensor.shape[0]
                if 0 < observed_batch_size < self.loader_batch_size:
                    # could be last batch so we can't unroll as many
                    # elements.
                    self.loader_batch_size = observed_batch_size
                self._loader_batch_data = processed
                self._loader_batch_index = 0
                while self._loader_batch_index < self.loader_batch_size:
                    item = self.loader_batch_item()
                    is_last = item.pop('is_last' )
                    accumulator.append(item )
                    if is_last:
                        return accumulator
            else:
                item = processed
                is_last = item.pop('is_last' )
                accumulator.append(item )
        return accumulator
class __a ( UpperCAmelCase ):
    def __init__( self , dataset , key ):
        """simple docstring"""
        self.dataset = dataset
        self.key = key
    def __len__( self ):
        """simple docstring"""
        return len(self.dataset )
    def __getitem__( self , i ):
        """simple docstring"""
        return self.dataset[i][self.key]
class __a ( UpperCAmelCase ):
    def __init__( self , dataset , key1 , key2 ):
        """simple docstring"""
        self.dataset = dataset
        self.key1 = key1
        self.key2 = key2
    def __len__( self ):
        """simple docstring"""
        return len(self.dataset )
    def __getitem__( self , i ):
        """simple docstring"""
        return {"text": self.dataset[i][self.key1], "text_pair": self.dataset[i][self.key2]}
| 329 | 1 |
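# Editor's note: a minimal usage sketch, not part of the original row. The key-extracting
# Dataset wrappers above are exposed in the released library as
# transformers.pipelines.pt_utils.KeyDataset; the dataset and task below are illustrative
# assumptions, not taken from the source.
from datasets import load_dataset
from transformers import pipeline
from transformers.pipelines.pt_utils import KeyDataset

pipe = pipeline("text-classification")  # downloads a default model
ds = load_dataset("imdb", split="test[:8]")  # tiny slice keeps the demo fast
# KeyDataset(ds, "text") yields ds[i]["text"], so the pipeline streams a single
# column and can batch under the hood.
for out in pipe(KeyDataset(ds, "text"), batch_size=4):
    print(out)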
def molarity_to_normality(nfactor: int , moles: float , volume: float ):
    """Convert molarity to normality: N = molarity * n-factor."""
    return round(float(moles / volume ) * nfactor )
def moles_to_pressure(volume: float , moles: float , temperature: float ):
    """Ideal gas law solved for pressure, P = nRT / V, with R = 0.0821 L·atm/(mol·K)."""
    return round(float((moles * 0.0821 * temperature) / (volume) ) )
def moles_to_volume(pressure: float , moles: float , temperature: float ):
    """Ideal gas law solved for volume, V = nRT / P."""
    return round(float((moles * 0.0821 * temperature) / (pressure) ) )
def pressure_and_volume_to_temperature(pressure: float , moles: float , volume: float ):
    """Ideal gas law solved for temperature, T = PV / (nR)."""
    return round(float((pressure * volume) / (0.0821 * moles) ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 82 |
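# Editor's worked example, not part of the original row; the numbers are illustrative.
# With R = 0.0821 L·atm/(mol·K): P = nRT/V, so 3 mol at 300 K in 0.82 L gives
# 3 * 0.0821 * 300 / 0.82 ≈ 90.1, rounded to 90 atm.
assert moles_to_pressure(volume=0.82, moles=3, temperature=300) == 90
# T = PV/(nR): 0.82 atm * 100 L / (0.0821 * 1 mol) ≈ 998.8, rounded to 999 K.
assert pressure_and_volume_to_temperature(pressure=0.82, moles=1, volume=100) == 999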
import math
from typing import Optional
import numpy as np
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a : Tuple = logging.get_logger(__name__)
a : Optional[Any] = {
'facebook/encodec_24khz': 'https://huggingface.co/facebook/encodec_24khz/resolve/main/config.json',
'facebook/encodec_48khz': 'https://huggingface.co/facebook/encodec_48khz/resolve/main/config.json',
}
class _a ( PretrainedConfig ):
    model_type = '''encodec'''
    def __init__(self, target_bandwidths=[1.5, 3.0, 6.0, 12.0, 24.0], sampling_rate=24000, audio_channels=1, normalize=False, chunk_length_s=None, overlap=None, hidden_size=128, num_filters=32, num_residual_layers=1, upsampling_ratios=[8, 5, 4, 2], norm_type="weight_norm", kernel_size=7, last_kernel_size=7, residual_kernel_size=3, dilation_growth_rate=2, use_causal_conv=True, pad_mode="reflect", compress=2, num_lstm_layers=2, trim_right_ratio=1.0, codebook_size=1024, codebook_dim=None, use_conv_shortcut=True, **kwargs ) -> Optional[int]:
        self.target_bandwidths = target_bandwidths
        self.sampling_rate = sampling_rate
        self.audio_channels = audio_channels
        self.normalize = normalize
        self.chunk_length_s = chunk_length_s
        self.overlap = overlap
        self.hidden_size = hidden_size
        self.num_filters = num_filters
        self.num_residual_layers = num_residual_layers
        self.upsampling_ratios = upsampling_ratios
        self.norm_type = norm_type
        self.kernel_size = kernel_size
        self.last_kernel_size = last_kernel_size
        self.residual_kernel_size = residual_kernel_size
        self.dilation_growth_rate = dilation_growth_rate
        self.use_causal_conv = use_causal_conv
        self.pad_mode = pad_mode
        self.compress = compress
        self.num_lstm_layers = num_lstm_layers
        self.trim_right_ratio = trim_right_ratio
        self.codebook_size = codebook_size
        self.codebook_dim = codebook_dim if codebook_dim is not None else hidden_size
        self.use_conv_shortcut = use_conv_shortcut
        if self.norm_type not in ["weight_norm", "time_group_norm"]:
            raise ValueError(
                f'self.norm_type must be one of `"weight_norm"`, `"time_group_norm"`, got {self.norm_type}' )
        super().__init__(**kwargs )
    @property
    def chunk_length(self ) -> Optional[int]:
        if self.chunk_length_s is None:
            return None
        else:
            return int(self.chunk_length_s * self.sampling_rate )
    @property
    def chunk_stride(self ) -> Optional[int]:
        if self.chunk_length_s is None or self.overlap is None:
            return None
        else:
            return max(1, int((1.0 - self.overlap) * self.chunk_length ) )
    @property
    def frame_rate(self ) -> int:
        hop_length = np.prod(self.upsampling_ratios )
        return math.ceil(self.sampling_rate / hop_length )
    @property
    def num_quantizers(self ) -> int:
        return int(1000 * self.target_bandwidths[-1] // (self.frame_rate * 10) )
| 82 | 1 |
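# Editor's sketch, not part of the original row. It assumes the config class above is the
# one shipped as transformers.EncodecConfig. With the defaults, the encoder hop length is
# prod([8, 5, 4, 2]) = 320 samples, so frame_rate = ceil(24000 / 320) = 75 frames/second.
from transformers import EncodecConfig

cfg = EncodecConfig()  # 24 kHz defaults
print(cfg.frame_rate)  # 75
print(cfg.chunk_length)  # None until chunk_length_s is set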
"""simple docstring"""
DIGITS_FIFTH_POWER = {str(digit): digit**5 for digit in range(10)}
def digits_fifth_powers_sum ( number : int ):
    return sum(DIGITS_FIFTH_POWER[digit] for digit in str(number ) )
def solution ( ):
    return sum(
        number
        for number in range(1000 , 1000000 )
        if number == digits_fifth_powers_sum(number ) )
if __name__ == "__main__":
print(solution())
| 60 |
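# Editor's worked check, not part of the original row: 4150 is one of the numbers that
# solution() counts, since 4**5 + 1**5 + 5**5 + 0**5 = 1024 + 1 + 3125 + 0 = 4150.
assert digits_fifth_powers_sum(4150) == 4150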
"""simple docstring"""
import tempfile
import torch
from diffusers import IPNDMScheduler
from .test_schedulers import SchedulerCommonTest
class UpperCamelCase ( SchedulerCommonTest ):
UpperCamelCase : int = (IPNDMScheduler,)
UpperCamelCase : int = (('''num_inference_steps''', 50),)
def _lowercase ( self : Union[str, Any] , **UpperCAmelCase__ : Tuple ) -> int:
        config = {"""num_train_timesteps""": 1000}
config.update(**UpperCAmelCase__ )
return config
    def _lowercase ( self : Dict , time_step : Any=0 , **UpperCAmelCase__ : Optional[Any] ) -> Union[str, Any]:
_a : Optional[int] = dict(self.forward_default_kwargs )
_a : Dict = kwargs.pop("""num_inference_steps""" , UpperCAmelCase__ )
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
_a : Optional[int] = self.get_scheduler_config(**UpperCAmelCase__ )
_a : Union[str, Any] = scheduler_class(**UpperCAmelCase__ )
scheduler.set_timesteps(UpperCAmelCase__ )
# copy over dummy past residuals
_a : Any = dummy_past_residuals[:]
if time_step is None:
_a : str = scheduler.timesteps[len(scheduler.timesteps ) // 2]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(UpperCAmelCase__ )
_a : Union[str, Any] = scheduler_class.from_pretrained(UpperCAmelCase__ )
new_scheduler.set_timesteps(UpperCAmelCase__ )
# copy over dummy past residuals
_a : Optional[Any] = dummy_past_residuals[:]
_a : List[Any] = scheduler.step(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , **UpperCAmelCase__ ).prev_sample
_a : str = new_scheduler.step(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , **UpperCAmelCase__ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
_a : Optional[int] = scheduler.step(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , **UpperCAmelCase__ ).prev_sample
_a : Tuple = new_scheduler.step(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , **UpperCAmelCase__ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def _lowercase ( self : Tuple ) -> List[str]:
pass
    def _lowercase ( self : Optional[int] , time_step : List[str]=0 , **UpperCAmelCase__ : Optional[Any] ) -> List[Any]:
_a : Optional[Any] = dict(self.forward_default_kwargs )
_a : Optional[Any] = kwargs.pop("""num_inference_steps""" , UpperCAmelCase__ )
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
_a : Union[str, Any] = self.get_scheduler_config()
_a : Optional[Any] = scheduler_class(**UpperCAmelCase__ )
scheduler.set_timesteps(UpperCAmelCase__ )
# copy over dummy past residuals (must be after setting timesteps)
_a : Any = dummy_past_residuals[:]
if time_step is None:
_a : List[Any] = scheduler.timesteps[len(scheduler.timesteps ) // 2]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(UpperCAmelCase__ )
_a : Any = scheduler_class.from_pretrained(UpperCAmelCase__ )
# copy over dummy past residuals
new_scheduler.set_timesteps(UpperCAmelCase__ )
# copy over dummy past residual (must be after setting timesteps)
_a : Optional[Any] = dummy_past_residuals[:]
_a : List[str] = scheduler.step(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , **UpperCAmelCase__ ).prev_sample
_a : Tuple = new_scheduler.step(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , **UpperCAmelCase__ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
_a : Union[str, Any] = scheduler.step(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , **UpperCAmelCase__ ).prev_sample
_a : int = new_scheduler.step(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , **UpperCAmelCase__ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def _lowercase ( self : str , **UpperCAmelCase__ : Any ) -> List[str]:
_a : Optional[int] = self.scheduler_classes[0]
_a : Optional[Any] = self.get_scheduler_config(**UpperCAmelCase__ )
_a : Union[str, Any] = scheduler_class(**UpperCAmelCase__ )
_a : int = 10
_a : List[Any] = self.dummy_model()
_a : str = self.dummy_sample_deter
scheduler.set_timesteps(UpperCAmelCase__ )
for i, t in enumerate(scheduler.timesteps ):
_a : str = model(UpperCAmelCase__ , UpperCAmelCase__ )
_a : List[Any] = scheduler.step(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ).prev_sample
for i, t in enumerate(scheduler.timesteps ):
_a : Union[str, Any] = model(UpperCAmelCase__ , UpperCAmelCase__ )
_a : Any = scheduler.step(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ).prev_sample
return sample
def _lowercase ( self : int ) -> str:
_a : Dict = dict(self.forward_default_kwargs )
_a : int = kwargs.pop("""num_inference_steps""" , UpperCAmelCase__ )
for scheduler_class in self.scheduler_classes:
_a : Optional[int] = self.get_scheduler_config()
_a : Tuple = scheduler_class(**UpperCAmelCase__ )
            sample = self.dummy_sample
            residual = 0.1 * sample
if num_inference_steps is not None and hasattr(UpperCAmelCase__ , """set_timesteps""" ):
scheduler.set_timesteps(UpperCAmelCase__ )
elif num_inference_steps is not None and not hasattr(UpperCAmelCase__ , """set_timesteps""" ):
_a : List[str] = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
            dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
_a : Optional[Any] = dummy_past_residuals[:]
_a : Optional[Any] = scheduler.timesteps[5]
_a : str = scheduler.timesteps[6]
_a : Optional[int] = scheduler.step(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , **UpperCAmelCase__ ).prev_sample
_a : Union[str, Any] = scheduler.step(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , **UpperCAmelCase__ ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
_a : Tuple = scheduler.step(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , **UpperCAmelCase__ ).prev_sample
_a : List[str] = scheduler.step(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , **UpperCAmelCase__ ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
def _lowercase ( self : List[str] ) -> List[str]:
for timesteps in [100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps , time_step=None )
def _lowercase ( self : List[str] ) -> List[str]:
for t, num_inference_steps in zip([1, 5, 10] , [10, 50, 100] ):
            self.check_over_forward(num_inference_steps=num_inference_steps , time_step=None )
def _lowercase ( self : int ) -> List[Any]:
        sample = self.full_loop()
        result_mean = torch.mean(torch.abs(sample ) )
        assert abs(result_mean.item() - 2540529 ) < 10
| 294 | 0 |
from __future__ import annotations
from collections.abc import Callable
from typing import Any, Generic, TypeVar
A_ = TypeVar('''T''')
class SegmentTree ( Generic[T] ):
    def __init__( self , arr : list[T] , fnc : Callable[[T, T], T] ):
        '''simple docstring'''
        any_type : Any | T = None
        self.N : int = len(arr )
        self.st : list[T] = [any_type for _ in range(self.N )] + arr
        self.fn = fnc
        self.build()
    def build( self ):
        '''simple docstring'''
        for p in range(self.N - 1 , 0 , -1 ):
            self.st[p] = self.fn(self.st[p * 2] , self.st[p * 2 + 1] )
    def update( self , p : int , v : T ):
        '''simple docstring'''
        p += self.N
        self.st[p] = v
        while p > 1:
            p = p // 2
            self.st[p] = self.fn(self.st[p * 2] , self.st[p * 2 + 1] )
    def query( self , l : int , r : int ) -> T | None:  # noqa: E741
        '''simple docstring'''
        l, r = l + self.N, r + self.N
        res : T | None = None
        while l <= r:
            if l % 2 == 1:
                res = self.st[l] if res is None else self.fn(res , self.st[l] )
            if r % 2 == 0:
                res = self.st[r] if res is None else self.fn(res , self.st[r] )
            l, r = (l + 1) // 2, (r - 1) // 2
        return res
if __name__ == "__main__":
from functools import reduce
A_ = [1, 10, -2, 9, -3, 8, 4, -7, 5, 6, 11, -12]
A_ = {
0: 7,
1: 2,
2: 6,
3: -14,
4: 5,
5: 4,
6: 7,
7: -10,
8: 9,
9: 10,
10: 12,
11: 1,
}
A_ = SegmentTree(test_array, min)
A_ = SegmentTree(test_array, max)
A_ = SegmentTree(test_array, lambda a, b: a + b)
def test_all_segments() ->None:
    for i in range(len(test_array ) ):
        for j in range(i, len(test_array ) ):
            min_range = reduce(min, test_array[i : j + 1] )
            max_range = reduce(max, test_array[i : j + 1] )
            sum_range = reduce(lambda a, b : a + b, test_array[i : j + 1] )
            assert min_range == min_segment_tree.query(i, j )
            assert max_range == max_segment_tree.query(i, j )
            assert sum_range == sum_segment_tree.query(i, j )
test_all_segments()
for index, value in test_updates.items():
A_ = value
min_segment_tree.update(index, value)
max_segment_tree.update(index, value)
sum_segment_tree.update(index, value)
test_all_segments()
| 368 |
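# Editor's sketch, not part of the original row: the SegmentTree above works with any
# associative combiner, e.g. string concatenation, not just min/max/sum.
words = SegmentTree(["seg", "ment", "tree"], lambda a, b: a + b)
assert words.query(0, 2) == "segmenttree"
words.update(1, "-")
assert words.query(0, 2) == "seg-tree"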
"""simple docstring"""
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers import is_speech_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_speech_available():
from transformers import WhisperFeatureExtractor
if is_torch_available():
import torch
A_ = random.Random()
def floats_list(shape , scale=1.0 , rng=None , name=None ):
    """Create a nested list of random floats with the given shape (used by the tests below)."""
    if rng is None:
        rng = global_rng
    values = []
    for batch_idx in range(shape[0] ):
        values.append([] )
        for _ in range(shape[1] ):
            values[-1].append(rng.random() * scale )
    return values
@require_torch
@require_torchaudio
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
    def __init__( self , parent , batch_size=7 , min_seq_length=400 , max_seq_length=2000 , feature_size=10 , hop_length=160 , chunk_length=8 , padding_value=0.0 , sampling_rate=4000 , return_attention_mask=False , do_normalize=True , ):
'''simple docstring'''
A__ : Any = parent
A__ : str = batch_size
A__ : List[str] = min_seq_length
A__ : Dict = max_seq_length
A__ : str = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
A__ : Dict = padding_value
A__ : Optional[Any] = sampling_rate
A__ : Any = return_attention_mask
A__ : Optional[int] = do_normalize
A__ : Tuple = feature_size
A__ : Optional[Any] = chunk_length
A__ : Union[str, Any] = hop_length
def _UpperCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
return {
"feature_size": self.feature_size,
"hop_length": self.hop_length,
"chunk_length": self.chunk_length,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
    def _UpperCamelCase ( self : Union[str, Any] , equal_length=False , numpify=False ):
'''simple docstring'''
def _flatten(snake_case : Dict ):
return list(itertools.chain(*snake_case ) )
if equal_length:
A__ : Dict = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )]
else:
# make sure that inputs increase in size
A__ : Optional[int] = [
floats_list((x, self.feature_size) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
A__ : List[str] = [np.asarray(snake_case ) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class __SCREAMING_SNAKE_CASE ( SequenceFeatureExtractionTestMixin , unittest.TestCase ):
snake_case_ = WhisperFeatureExtractor if is_speech_available() else None
def _UpperCamelCase ( self : Dict ):
'''simple docstring'''
A__ : str = WhisperFeatureExtractionTester(self )
def _UpperCamelCase ( self : int ):
'''simple docstring'''
A__ : Any = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
A__ : List[Any] = feat_extract_first.save_pretrained(snake_case )[0]
check_json_file_has_correct_format(snake_case )
A__ : Union[str, Any] = self.feature_extraction_class.from_pretrained(snake_case )
A__ : str = feat_extract_first.to_dict()
A__ : Union[str, Any] = feat_extract_second.to_dict()
A__ : List[Any] = feat_extract_first.mel_filters
A__ : Optional[Any] = feat_extract_second.mel_filters
self.assertTrue(np.allclose(snake_case , snake_case ) )
self.assertEqual(snake_case , snake_case )
def _UpperCamelCase ( self : Tuple ):
'''simple docstring'''
A__ : Any = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
A__ : Any = os.path.join(snake_case , """feat_extract.json""" )
feat_extract_first.to_json_file(snake_case )
A__ : int = self.feature_extraction_class.from_json_file(snake_case )
A__ : Dict = feat_extract_first.to_dict()
A__ : str = feat_extract_second.to_dict()
A__ : str = feat_extract_first.mel_filters
A__ : Dict = feat_extract_second.mel_filters
self.assertTrue(np.allclose(snake_case , snake_case ) )
self.assertEqual(snake_case , snake_case )
def _UpperCamelCase ( self : Any ):
'''simple docstring'''
A__ : Any = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
A__ : str = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
A__ : Union[str, Any] = [np.asarray(snake_case ) for speech_input in speech_inputs]
# Test feature size
A__ : Dict = feature_extractor(snake_case , padding="""max_length""" , return_tensors="""np""" ).input_features
self.assertTrue(input_features.ndim == 3 )
self.assertTrue(input_features.shape[-1] == feature_extractor.nb_max_frames )
self.assertTrue(input_features.shape[-2] == feature_extractor.feature_size )
# Test not batched input
A__ : str = feature_extractor(speech_inputs[0] , return_tensors="""np""" ).input_features
A__ : Optional[int] = feature_extractor(np_speech_inputs[0] , return_tensors="""np""" ).input_features
self.assertTrue(np.allclose(snake_case , snake_case , atol=1e-3 ) )
# Test batched
A__ : List[str] = feature_extractor(snake_case , return_tensors="""np""" ).input_features
A__ : List[str] = feature_extractor(snake_case , return_tensors="""np""" ).input_features
for enc_seq_a, enc_seq_a in zip(snake_case , snake_case ):
self.assertTrue(np.allclose(snake_case , snake_case , atol=1e-3 ) )
# Test 2-D numpy arrays are batched.
A__ : Tuple = [floats_list((1, x) )[0] for x in (800, 800, 800)]
A__ : str = np.asarray(snake_case )
A__ : List[str] = feature_extractor(snake_case , return_tensors="""np""" ).input_features
A__ : Optional[int] = feature_extractor(snake_case , return_tensors="""np""" ).input_features
for enc_seq_a, enc_seq_a in zip(snake_case , snake_case ):
self.assertTrue(np.allclose(snake_case , snake_case , atol=1e-3 ) )
# Test truncation required
A__ : Optional[Any] = [floats_list((1, x) )[0] for x in range(200 , (feature_extractor.n_samples + 500) , 200 )]
A__ : Union[str, Any] = [np.asarray(snake_case ) for speech_input in speech_inputs]
A__ : Union[str, Any] = [x[: feature_extractor.n_samples] for x in speech_inputs]
A__ : str = [np.asarray(snake_case ) for speech_input in speech_inputs_truncated]
A__ : Optional[int] = feature_extractor(snake_case , return_tensors="""np""" ).input_features
A__ : str = feature_extractor(snake_case , return_tensors="""np""" ).input_features
for enc_seq_a, enc_seq_a in zip(snake_case , snake_case ):
self.assertTrue(np.allclose(snake_case , snake_case , atol=1e-3 ) )
def _UpperCamelCase ( self : str ):
'''simple docstring'''
import torch
A__ : Tuple = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
A__ : List[str] = np.random.rand(100 , 32 ).astype(np.floataa )
A__ : Tuple = np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
A__ : Optional[Any] = feature_extractor.pad([{"""input_features""": inputs}] , return_tensors="""np""" )
self.assertTrue(np_processed.input_features.dtype == np.floataa )
A__ : Optional[int] = feature_extractor.pad([{"""input_features""": inputs}] , return_tensors="""pt""" )
self.assertTrue(pt_processed.input_features.dtype == torch.floataa )
    def _load_datasamples( self , num_samples ):
        '''simple docstring'''
        ds = load_dataset("""hf-internal-testing/librispeech_asr_dummy""" , """clean""" , split="""validation""" )
        # automatic decoding with librispeech
        speech_samples = ds.sort("""id""" ).select(range(num_samples ) )[:num_samples]["""audio"""]
        return [x["array"] for x in speech_samples]
def _UpperCamelCase ( self : List[Any] ):
'''simple docstring'''
A__ : str = torch.tensor(
[
0.1193, -0.0946, -0.1098, -0.0196, 0.0225, -0.0690, -0.1736, 0.0951,
0.0971, -0.0817, -0.0702, 0.0162, 0.0260, 0.0017, -0.0192, -0.1678,
0.0709, -0.1867, -0.0655, -0.0274, -0.0234, -0.1884, -0.0516, -0.0554,
-0.0274, -0.1425, -0.1423, 0.0837, 0.0377, -0.0854
] )
# fmt: on
A__ : Optional[Any] = self._load_datasamples(1 )
A__ : Union[str, Any] = WhisperFeatureExtractor()
A__ : List[str] = feature_extractor(snake_case , return_tensors="""pt""" ).input_features
self.assertEqual(input_features.shape , (1, 80, 3000) )
self.assertTrue(torch.allclose(input_features[0, 0, :30] , snake_case , atol=1e-4 ) )
def _UpperCamelCase ( self : Tuple ):
'''simple docstring'''
A__ : Union[str, Any] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
A__ : Union[str, Any] = self._load_datasamples(1 )[0]
A__ : Any = ((audio - audio.min()) / (audio.max() - audio.min())) * 6_5535 # Rescale to [0, 65535] to show issue
A__ : str = feat_extract.zero_mean_unit_var_norm([audio] , attention_mask=snake_case )[0]
self.assertTrue(np.all(np.mean(snake_case ) < 1e-3 ) )
self.assertTrue(np.all(np.abs(np.var(snake_case ) - 1 ) < 1e-3 ) )
| 296 | 0 |
from __future__ import annotations
import unittest
import numpy as np
from transformers import LayoutLMConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.layoutlm.modeling_tf_layoutlm import (
TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLayoutLMForMaskedLM,
TFLayoutLMForQuestionAnswering,
TFLayoutLMForSequenceClassification,
TFLayoutLMForTokenClassification,
TFLayoutLMModel,
)
class UpperCamelCase_ :
'''simple docstring'''
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_input_mask=True , use_token_type_ids=True , use_labels=True , vocab_size=99 , hidden_size=32 , num_hidden_layers=2 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , num_labels=3 , num_choices=4 , scope=None , range_bbox=1000 , ) -> List[Any]:
snake_case_ = parent
snake_case_ = batch_size
snake_case_ = seq_length
snake_case_ = is_training
snake_case_ = use_input_mask
snake_case_ = use_token_type_ids
snake_case_ = use_labels
snake_case_ = vocab_size
snake_case_ = hidden_size
snake_case_ = num_hidden_layers
snake_case_ = num_attention_heads
snake_case_ = intermediate_size
snake_case_ = hidden_act
snake_case_ = hidden_dropout_prob
snake_case_ = attention_probs_dropout_prob
snake_case_ = max_position_embeddings
snake_case_ = type_vocab_size
snake_case_ = type_sequence_label_size
snake_case_ = initializer_range
snake_case_ = num_labels
snake_case_ = num_choices
snake_case_ = scope
snake_case_ = range_bbox
def _UpperCamelCase ( self ) -> Optional[int]:
snake_case_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
# convert bbox to numpy since TF does not support item assignment
        bbox = ids_tensor([self.batch_size, self.seq_length, 4] , self.range_bbox ).numpy()
        # Ensure that bbox is legal
        for i in range(bbox.shape[0] ):
            for j in range(bbox.shape[1] ):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    t = bbox[i, j, 3]
                    bbox[i, j, 3] = bbox[i, j, 1]
                    bbox[i, j, 1] = t
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    t = bbox[i, j, 2]
                    bbox[i, j, 2] = bbox[i, j, 0]
                    bbox[i, j, 0] = t
        bbox = tf.convert_to_tensor(bbox )
snake_case_ = None
if self.use_input_mask:
snake_case_ = random_attention_mask([self.batch_size, self.seq_length] )
snake_case_ = None
if self.use_token_type_ids:
snake_case_ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
snake_case_ = None
snake_case_ = None
snake_case_ = None
if self.use_labels:
snake_case_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
snake_case_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
snake_case_ = ids_tensor([self.batch_size] , self.num_choices )
snake_case_ = LayoutLMConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def _UpperCamelCase ( self , a , a , a , a , a , a , a , a ) -> Dict:
snake_case_ = TFLayoutLMModel(config=a )
snake_case_ = model(a , a , attention_mask=a , token_type_ids=a )
snake_case_ = model(a , a , token_type_ids=a )
snake_case_ = model(a , a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def _UpperCamelCase ( self , a , a , a , a , a , a , a , a ) -> int:
snake_case_ = TFLayoutLMForMaskedLM(config=a )
snake_case_ = model(a , a , attention_mask=a , token_type_ids=a , labels=a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _UpperCamelCase ( self , a , a , a , a , a , a , a , a ) -> Dict:
snake_case_ = self.num_labels
snake_case_ = TFLayoutLMForSequenceClassification(config=a )
snake_case_ = model(a , a , attention_mask=a , token_type_ids=a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _UpperCamelCase ( self , a , a , a , a , a , a , a , a ) -> str:
snake_case_ = self.num_labels
snake_case_ = TFLayoutLMForTokenClassification(config=a )
snake_case_ = model(a , a , attention_mask=a , token_type_ids=a , labels=a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _UpperCamelCase ( self , a , a , a , a , a , a , a , a ) -> List[Any]:
snake_case_ = TFLayoutLMForQuestionAnswering(config=a )
snake_case_ = model(a , a , attention_mask=a , token_type_ids=a )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _UpperCamelCase ( self ) -> Dict:
snake_case_ = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
snake_case_ = {
'input_ids': input_ids,
'bbox': bbox,
'token_type_ids': token_type_ids,
'attention_mask': input_mask,
}
return config, inputs_dict
@require_tf
class UpperCamelCase_ ( TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase = (
(
TFLayoutLMModel,
TFLayoutLMForMaskedLM,
TFLayoutLMForTokenClassification,
TFLayoutLMForSequenceClassification,
TFLayoutLMForQuestionAnswering,
)
if is_tf_available()
else ()
)
lowerCAmelCase = (
{
'''feature-extraction''': TFLayoutLMModel,
'''fill-mask''': TFLayoutLMForMaskedLM,
'''text-classification''': TFLayoutLMForSequenceClassification,
'''token-classification''': TFLayoutLMForTokenClassification,
'''zero-shot''': TFLayoutLMForSequenceClassification,
}
if is_tf_available()
else {}
)
lowerCAmelCase = False
lowerCAmelCase = True
lowerCAmelCase = 1_0
def _UpperCamelCase ( self ) -> List[Any]:
snake_case_ = TFLayoutLMModelTester(self )
snake_case_ = ConfigTester(self , config_class=a , hidden_size=37 )
def _UpperCamelCase ( self ) -> Optional[int]:
self.config_tester.run_common_tests()
def _UpperCamelCase ( self ) -> Dict:
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*a )
def _UpperCamelCase ( self ) -> int:
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*a )
def _UpperCamelCase ( self ) -> Dict:
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*a )
def _UpperCamelCase ( self ) -> List[str]:
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*a )
def _UpperCamelCase ( self ) -> Optional[Any]:
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*a )
@slow
def _UpperCamelCase ( self ) -> Dict:
for model_name in TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
snake_case_ = TFLayoutLMModel.from_pretrained(a )
self.assertIsNotNone(a )
@unittest.skip('Onnx compliancy broke with TF 2.10' )
def _UpperCamelCase ( self ) -> str:
pass
def __UpperCAmelCase ( ):
# Here we prepare a batch of 2 sequences to test a LayoutLM forward pass on:
# fmt: off
snake_case_ = tf.convert_to_tensor([[1_01,10_19,10_14,10_16,10_37,1_28_49,47_47,10_04,1_42_46,22_78,54_39,45_24,50_02,29_30,21_93,29_30,43_41,32_08,10_05,10_55,21_71,28_48,1_13_00,35_31,1_02],[1_01,40_70,40_34,70_20,10_24,30_58,10_15,10_13,28_61,10_13,60_70,1_92_74,27_72,62_05,2_78_14,1_61_47,1_61_47,43_43,20_47,1_02_83,1_09_69,1_43_89,10_12,23_38,1_02]]) # noqa: E231
snake_case_ = tf.convert_to_tensor([[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],]) # noqa: E231
snake_case_ = tf.convert_to_tensor([[[0,0,0,0],[4_23,2_37,4_40,2_51],[4_27,2_72,4_41,2_87],[4_19,1_15,4_37,1_29],[9_61,8_85,9_92,9_12],[2_56,38,3_30,58],[2_56,38,3_30,58],[3_36,42,3_53,57],[3_60,39,4_01,56],[3_60,39,4_01,56],[4_11,39,4_71,59],[4_79,41,5_28,59],[5_33,39,6_30,60],[67,1_13,1_34,1_31],[1_41,1_15,2_09,1_32],[68,1_49,1_33,1_66],[1_41,1_49,1_87,1_64],[1_95,1_48,2_87,1_65],[1_95,1_48,2_87,1_65],[1_95,1_48,2_87,1_65],[2_95,1_48,3_49,1_65],[4_41,1_49,4_92,1_66],[4_97,1_49,5_46,1_64],[64,2_01,1_25,2_18],[10_00,10_00,10_00,10_00]],[[0,0,0,0],[6_62,1_50,7_54,1_66],[6_65,1_99,7_42,2_11],[5_19,2_13,5_54,2_28],[5_19,2_13,5_54,2_28],[1_34,4_33,1_87,4_54],[1_30,4_67,2_04,4_80],[1_30,4_67,2_04,4_80],[1_30,4_67,2_04,4_80],[1_30,4_67,2_04,4_80],[1_30,4_67,2_04,4_80],[3_14,4_69,3_76,4_82],[5_04,6_84,5_82,7_06],[9_41,8_25,9_73,9_00],[9_41,8_25,9_73,9_00],[9_41,8_25,9_73,9_00],[9_41,8_25,9_73,9_00],[6_10,7_49,6_52,7_65],[1_30,6_59,1_68,6_72],[1_76,6_57,2_37,6_72],[2_38,6_57,3_12,6_72],[4_43,6_53,6_28,6_72],[4_43,6_53,6_28,6_72],[7_16,3_01,8_25,3_17],[10_00,10_00,10_00,10_00]]]) # noqa: E231
snake_case_ = tf.convert_to_tensor([[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]]) # noqa: E231
# these are sequence labels (i.e. at the token level)
snake_case_ = tf.convert_to_tensor([[-1_00,10,10,10,9,1,-1_00,7,7,-1_00,7,7,4,2,5,2,8,8,-1_00,-1_00,5,0,3,2,-1_00],[-1_00,12,12,12,-1_00,12,10,-1_00,-1_00,-1_00,-1_00,10,12,9,-1_00,-1_00,-1_00,10,10,10,9,12,-1_00,10,-1_00]]) # noqa: E231
# fmt: on
return input_ids, attention_mask, bbox, token_type_ids, labels
@require_tf
class UpperCamelCase_ ( unittest.TestCase ):
'''simple docstring'''
@slow
def _UpperCamelCase ( self ) -> Tuple:
snake_case_ = TFLayoutLMModel.from_pretrained('microsoft/layoutlm-base-uncased' )
snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ = prepare_layoutlm_batch_inputs()
# forward pass
snake_case_ = model(input_ids=a , bbox=a , attention_mask=a , token_type_ids=a )
# test the sequence output on [0, :3, :3]
snake_case_ = tf.convert_to_tensor(
[[0.1_785, -0.1_947, -0.0_425], [-0.3_254, -0.2_807, 0.2_553], [-0.5_391, -0.3_322, 0.3_364]] , )
self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3] , a , atol=1E-3 ) )
# test the pooled output on [1, :3]
snake_case_ = tf.convert_to_tensor([-0.6_580, -0.0_214, 0.8_552] )
self.assertTrue(np.allclose(outputs.pooler_output[1, :3] , a , atol=1E-3 ) )
@slow
def _UpperCamelCase ( self ) -> Union[str, Any]:
# initialize model with randomly initialized sequence classification head
snake_case_ = TFLayoutLMForSequenceClassification.from_pretrained('microsoft/layoutlm-base-uncased' , num_labels=2 )
snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ = prepare_layoutlm_batch_inputs()
# forward pass
snake_case_ = model(
input_ids=a , bbox=a , attention_mask=a , token_type_ids=a , labels=tf.convert_to_tensor([1, 1] ) , )
# test whether we get a loss as a scalar
snake_case_ = outputs.loss
snake_case_ = (2,)
self.assertEqual(loss.shape , a )
# test the shape of the logits
snake_case_ = outputs.logits
snake_case_ = (2, 2)
self.assertEqual(logits.shape , a )
@slow
def _UpperCamelCase ( self ) -> int:
# initialize model with randomly initialized token classification head
snake_case_ = TFLayoutLMForTokenClassification.from_pretrained('microsoft/layoutlm-base-uncased' , num_labels=13 )
snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ = prepare_layoutlm_batch_inputs()
# forward pass
snake_case_ = model(
input_ids=a , bbox=a , attention_mask=a , token_type_ids=a , labels=a )
# test the shape of the logits
snake_case_ = outputs.logits
snake_case_ = tf.convert_to_tensor((2, 25, 13) )
self.assertEqual(logits.shape , a )
@slow
def _UpperCamelCase ( self ) -> Union[str, Any]:
# initialize model with randomly initialized token classification head
snake_case_ = TFLayoutLMForQuestionAnswering.from_pretrained('microsoft/layoutlm-base-uncased' )
snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ = prepare_layoutlm_batch_inputs()
# forward pass
snake_case_ = model(input_ids=a , bbox=a , attention_mask=a , token_type_ids=a )
# test the shape of the logits
snake_case_ = tf.convert_to_tensor((2, 25) )
self.assertEqual(outputs.start_logits.shape , a )
self.assertEqual(outputs.end_logits.shape , a )
| 178 |
import unittest
from transformers import GPTNeoXJapaneseConfig, is_torch_available
from transformers.models.gpt_neox_japanese.tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import GPTNeoXJapaneseForCausalLM, GPTNeoXJapaneseModel
class UpperCamelCase_ :
'''simple docstring'''
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_input_mask=True , use_token_type_ids=True , use_labels=True , vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_multiple_size=4 , hidden_act="gelu" , hidden_dropout=0.0 , attention_dropout=0.1 , weight_tying=True , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , num_labels=3 , num_choices=4 , scope=None , ) -> Optional[Any]:
snake_case_ = parent
snake_case_ = batch_size
snake_case_ = seq_length
snake_case_ = is_training
snake_case_ = use_input_mask
snake_case_ = use_token_type_ids
snake_case_ = use_labels
snake_case_ = vocab_size
snake_case_ = hidden_size
snake_case_ = num_hidden_layers
snake_case_ = num_attention_heads
snake_case_ = intermediate_multiple_size
snake_case_ = hidden_act
snake_case_ = hidden_dropout
snake_case_ = attention_dropout
snake_case_ = weight_tying
snake_case_ = max_position_embeddings
snake_case_ = type_vocab_size
snake_case_ = type_sequence_label_size
snake_case_ = initializer_range
snake_case_ = num_labels
snake_case_ = num_choices
snake_case_ = scope
def _UpperCamelCase ( self ) -> List[Any]:
snake_case_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
snake_case_ = None
if self.use_input_mask:
snake_case_ = random_attention_mask([self.batch_size, self.seq_length] )
snake_case_ = None
if self.use_labels:
snake_case_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
snake_case_ = self.get_config()
return config, input_ids, input_mask, token_labels
def _UpperCamelCase ( self ) -> Dict:
return GPTNeoXJapaneseConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_multiple_size=self.intermediate_multiple_size , hidden_act=self.hidden_act , hidden_dropout=self.hidden_dropout , attention_dropout=self.attention_dropout , weight_tying=self.weight_tying , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=a , initializer_range=self.initializer_range , )
def _UpperCamelCase ( self ) -> int:
snake_case_ , snake_case_ , snake_case_ , snake_case_ = self.prepare_config_and_inputs()
snake_case_ = True
return config, input_ids, input_mask, token_labels
def _UpperCamelCase ( self , a , a , a ) -> Any:
snake_case_ = GPTNeoXJapaneseModel(config=a )
model.to(a )
model.eval()
snake_case_ = model(a , attention_mask=a )
snake_case_ = model(a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _UpperCamelCase ( self , a , a , a ) -> Union[str, Any]:
snake_case_ = True
snake_case_ = GPTNeoXJapaneseModel(a )
model.to(a )
model.eval()
snake_case_ = model(a , attention_mask=a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _UpperCamelCase ( self , a , a , a , a ) -> int:
snake_case_ = GPTNeoXJapaneseForCausalLM(config=a )
model.to(a )
model.eval()
snake_case_ = model(a , attention_mask=a , labels=a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _UpperCamelCase ( self , a , a , a ) -> Tuple:
snake_case_ = True
snake_case_ = GPTNeoXJapaneseForCausalLM(config=a )
model.to(a )
model.eval()
# first forward pass
snake_case_ = model(a , attention_mask=a , use_cache=a )
snake_case_ = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
snake_case_ = ids_tensor((self.batch_size, 3) , config.vocab_size )
snake_case_ = ids_tensor((self.batch_size, 3) , vocab_size=2 )
# append to next input_ids and
snake_case_ = torch.cat([input_ids, next_tokens] , dim=-1 )
snake_case_ = torch.cat([input_mask, next_mask] , dim=-1 )
snake_case_ = model(a , attention_mask=a , output_hidden_states=a )
snake_case_ = output_from_no_past['hidden_states'][0]
snake_case_ = model(
a , attention_mask=a , past_key_values=a , output_hidden_states=a , )['hidden_states'][0]
# select random slice
snake_case_ = ids_tensor((1,) , output_from_past.shape[-1] ).item()
snake_case_ = output_from_no_past[:, -3:, random_slice_idx].detach()
snake_case_ = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(a , a , atol=1E-3 ) )
def _UpperCamelCase ( self ) -> Dict:
snake_case_ = self.prepare_config_and_inputs()
snake_case_ , snake_case_ , snake_case_ , snake_case_ = config_and_inputs
snake_case_ = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class UpperCamelCase_ ( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase = (GPTNeoXJapaneseModel, GPTNeoXJapaneseForCausalLM) if is_torch_available() else ()
lowerCAmelCase = (GPTNeoXJapaneseForCausalLM,) if is_torch_available() else ()
lowerCAmelCase = (
{'''feature-extraction''': GPTNeoXJapaneseModel, '''text-generation''': GPTNeoXJapaneseForCausalLM}
if is_torch_available()
else {}
)
lowerCAmelCase = False
lowerCAmelCase = False
lowerCAmelCase = False
lowerCAmelCase = False
def _UpperCamelCase ( self ) -> List[Any]:
snake_case_ = GPTNeoXJapaneseModelTester(self )
snake_case_ = ConfigTester(self , config_class=a , hidden_size=37 )
def _UpperCamelCase ( self ) -> str:
self.config_tester.run_common_tests()
def _UpperCamelCase ( self ) -> Optional[Any]:
snake_case_ , snake_case_ , snake_case_ , snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(a , a , a )
def _UpperCamelCase ( self ) -> Union[str, Any]:
snake_case_ , snake_case_ , snake_case_ , snake_case_ = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(a , a , a )
def _UpperCamelCase ( self ) -> Optional[int]:
# This regression test was failing with PyTorch < 1.3
snake_case_ , snake_case_ , snake_case_ , snake_case_ = self.model_tester.prepare_config_and_inputs_for_decoder()
snake_case_ = None
self.model_tester.create_and_check_model_as_decoder(a , a , a )
def _UpperCamelCase ( self ) -> Dict:
snake_case_ , snake_case_ , snake_case_ , snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_past_large_inputs(a , a , a )
def _UpperCamelCase ( self ) -> List[Any]:
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_causal_lm(*a )
@slow
def _UpperCamelCase ( self ) -> Any:
snake_case_ = 'abeja/gpt-neox-japanese-2.7b'
snake_case_ = ['データサイエンティストとは、', '100年後に必要とされる会社は、', 'フルリモートの環境で働くために必要なことは、', '国境の長いトンネルを抜けると', '美味しい日本食といえば、']
snake_case_ = [
'データサイエンティストとは、データを分析し、ビジネスに役立つ知見を導き出す専門家のことです。',
'100年後に必要とされる会社は、「人」が中心の会社です。',
'フルリモートの環境で働くために必要なことは、「自分の時間をコントロールする」ことです。',
'国境の長いトンネルを抜けると、そこは雪国だった。',
'美味しい日本食といえば、やっぱりお寿司ですよね。',
]
snake_case_ = GPTNeoXJapaneseTokenizer.from_pretrained(a )
snake_case_ = GPTNeoXJapaneseForCausalLM.from_pretrained(a )
snake_case_ = []
for prompt in prompts:
snake_case_ = tokenizer(a , return_tensors='pt' ).input_ids
snake_case_ = model.generate(a , max_length=50 )
snake_case_ = tokenizer.batch_decode(a , skip_special_tokens=a )
predicted_outputs += generated_string
self.assertListEqual(a , a )
| 178 | 1 |
from typing import List
import datasets
from datasets.tasks import AudioClassification
from ..folder_based_builder import folder_based_builder
UpperCAmelCase__ : List[Any] = datasets.utils.logging.get_logger(__name__)
class UpperCAmelCase ( folder_based_builder.FolderBasedBuilderConfig ):
'''simple docstring'''
__UpperCamelCase : bool = None
__UpperCamelCase : bool = None
class UpperCAmelCase ( folder_based_builder.FolderBasedBuilder ):
'''simple docstring'''
__UpperCamelCase : Dict = datasets.Audio()
__UpperCamelCase : List[str] = """audio"""
__UpperCamelCase : str = AudioFolderConfig
    EXTENSIONS : List[str] # definition at the bottom of the script
__UpperCamelCase : Tuple = AudioClassification(audio_column='''audio''' , label_column='''label''' )
AUDIO_EXTENSIONS = [
""".aiff""",
""".au""",
""".avr""",
""".caf""",
""".flac""",
""".htk""",
""".svx""",
""".mat4""",
""".mat5""",
""".mpc2k""",
""".ogg""",
""".paf""",
""".pvf""",
""".raw""",
""".rf64""",
""".sd2""",
""".sds""",
""".ircam""",
""".voc""",
""".w64""",
""".wav""",
""".nist""",
""".wavex""",
""".wve""",
""".xi""",
""".mp3""",
""".opus""",
]
UpperCAmelCase.EXTENSIONS = AUDIO_EXTENSIONS
| 366 |
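# Editor's sketch, not part of the original row: the builder above backs the
# "audiofolder" loader in the datasets library. The directory path is an illustrative
# assumption; a layout of <data_dir>/<label>/<clip>.wav is enough to infer labels.
from datasets import load_dataset

ds = load_dataset("audiofolder", data_dir="./my_clips", split="train")
print(ds[0]["audio"]["sampling_rate"], ds[0]["label"])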
from typing import TYPE_CHECKING
from ..utils import _LazyModule
UpperCAmelCase__ : Tuple = {
'config': [
'EXTERNAL_DATA_FORMAT_SIZE_LIMIT',
'OnnxConfig',
'OnnxConfigWithPast',
'OnnxSeq2SeqConfigWithPast',
'PatchingSpec',
],
'convert': ['export', 'validate_model_outputs'],
'features': ['FeaturesManager'],
'utils': ['ParameterFormat', 'compute_serialized_parameters_size'],
}
if TYPE_CHECKING:
from .config import (
EXTERNAL_DATA_FORMAT_SIZE_LIMIT,
OnnxConfig,
OnnxConfigWithPast,
        OnnxSeq2SeqConfigWithPast,
PatchingSpec,
)
from .convert import export, validate_model_outputs
from .features import FeaturesManager
from .utils import ParameterFormat, compute_serialized_parameters_size
else:
import sys
UpperCAmelCase__ : Union[str, Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 301 | 0 |
def set_bit(number: int , position: int ):
    '''Set the bit at `position` of `number` to 1.'''
    return number | (1 << position)
def clear_bit(number: int , position: int ):
    '''Set the bit at `position` of `number` to 0.'''
    return number & ~(1 << position)
def flip_bit(number: int , position: int ):
    '''Flip the bit at `position` of `number`.'''
    return number ^ (1 << position)
def is_bit_set(number: int , position: int ):
    '''Return True if the bit at `position` of `number` is set.'''
    return ((number >> position) & 1) == 1
def get_bit(number: int , position: int ):
    '''Return the bit (0 or 1) at `position` of `number`.'''
    return int((number & (1 << position)) != 0 )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 187 |
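# Editor's worked example, not part of the original row, using 0b1010 (decimal 10).
assert set_bit(0b1010, 0) == 0b1011    # 10 -> 11
assert clear_bit(0b1010, 1) == 0b1000  # 10 -> 8
assert flip_bit(0b1010, 3) == 0b0010   # 10 -> 2
assert is_bit_set(0b1010, 1) is True
assert get_bit(0b1010, 0) == 0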
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from argparse import ArgumentParser
from accelerate.commands.config import get_config_parser
from accelerate.commands.env import env_command_parser
from accelerate.commands.launch import launch_command_parser
from accelerate.commands.test import test_command_parser
from accelerate.commands.tpu import tpu_command_parser
def main():
    '''simple docstring'''
    parser = ArgumentParser("Accelerate CLI tool" , usage="accelerate <command> [<args>]" , allow_abbrev=False )
    subparsers = parser.add_subparsers(help="accelerate command helpers" )
    # Register commands
    get_config_parser(subparsers=subparsers )
    env_command_parser(subparsers=subparsers )
    launch_command_parser(subparsers=subparsers )
    tpu_command_parser(subparsers=subparsers )
    test_command_parser(subparsers=subparsers )
    # Let's go
    args = parser.parse_args()
    if not hasattr(args , "func" ):
        parser.print_help()
        exit(1 )
    # Run
    args.func(args )
if __name__ == "__main__":
main()
| 187 | 1 |
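# Editor's sketch, not part of the original row: the same subparser-registration pattern
# in miniature. The "hello" subcommand and its flag are hypothetical.
from argparse import ArgumentParser

def hello_command_parser(subparsers):
    sub = subparsers.add_parser("hello", help="print a greeting")
    sub.add_argument("--name", default="world")
    sub.set_defaults(func=lambda args: print(f"hello {args.name}"))
    return sub

parser = ArgumentParser("demo", usage="demo <command> [<args>]", allow_abbrev=False)
subparsers = parser.add_subparsers(help="demo command helpers")
hello_command_parser(subparsers)
args = parser.parse_args(["hello", "--name", "accelerate"])
args.func(args)  # prints: hello accelerate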
import unittest
import numpy as np
from transformers import DistilBertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.distilbert.modeling_flax_distilbert import (
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertModel,
)
class UpperCAmelCase (unittest.TestCase ):
"""simple docstring"""
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_attention_mask=True , use_token_type_ids=True , use_labels=True , vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , num_choices=4 , ):
lowercase__: int = parent
lowercase__: List[Any] = batch_size
lowercase__: List[Any] = seq_length
lowercase__: Optional[int] = is_training
lowercase__: List[Any] = use_attention_mask
lowercase__: List[Any] = use_token_type_ids
lowercase__: Optional[Any] = use_labels
lowercase__: Any = vocab_size
lowercase__: Optional[int] = hidden_size
lowercase__: int = num_hidden_layers
lowercase__: Any = num_attention_heads
lowercase__: Optional[Any] = intermediate_size
lowercase__: int = hidden_act
lowercase__: int = hidden_dropout_prob
lowercase__: Optional[Any] = attention_probs_dropout_prob
lowercase__: Optional[Any] = max_position_embeddings
lowercase__: List[Any] = type_vocab_size
lowercase__: List[Any] = type_sequence_label_size
lowercase__: Any = initializer_range
lowercase__: Optional[int] = num_choices
def _snake_case ( self ):
lowercase__: Tuple = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowercase__: Optional[int] = None
if self.use_attention_mask:
lowercase__: List[str] = random_attention_mask([self.batch_size, self.seq_length] )
lowercase__: Dict = DistilBertConfig(
vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , tie_weights_=_UpperCAmelCase , )
return config, input_ids, attention_mask
def _snake_case ( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, attention_mask = config_and_inputs
        inputs_dict = {'''input_ids''': input_ids, '''attention_mask''': attention_mask}
return config, inputs_dict
@require_flax
class UpperCAmelCase (FlaxModelTesterMixin , unittest.TestCase ):
"""simple docstring"""
_UpperCAmelCase :Tuple = (
(
FlaxDistilBertModel,
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
)
if is_flax_available()
else ()
)
def _snake_case ( self ):
lowercase__: Optional[Any] = FlaxDistilBertModelTester(self )
@slow
def _snake_case ( self ):
for model_class_name in self.all_model_classes:
lowercase__: List[str] = model_class_name.from_pretrained('''distilbert-base-uncased''' )
lowercase__: Dict = model(np.ones((1, 1) ) )
self.assertIsNotNone(_UpperCAmelCase )
@require_flax
class UpperCAmelCase (unittest.TestCase ):
"""simple docstring"""
@slow
def _snake_case ( self ):
lowercase__: List[Any] = FlaxDistilBertModel.from_pretrained('''distilbert-base-uncased''' )
lowercase__: Optional[Any] = np.array([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] )
lowercase__: Dict = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
lowercase__: Union[str, Any] = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase )[0]
lowercase__: int = (1, 11, 768)
self.assertEqual(output.shape , _UpperCAmelCase )
lowercase__: List[str] = np.array([[[-0.1_639, 0.3_299, 0.1_648], [-0.1_746, 0.3_289, 0.1_710], [-0.1_884, 0.3_357, 0.1_810]]] )
self.assertTrue(jnp.allclose(output[:, 1:4, 1:4] , _UpperCAmelCase , atol=1e-4 ) )
| 367 | """simple docstring"""
import math
from typing import Any, Callable, List, Optional, Tuple, Union
import numpy as np
import torch
from ...models import TaFilmDecoder
from ...schedulers import DDPMScheduler
from ...utils import is_onnx_available, logging, randn_tensor
if is_onnx_available():
from ..onnx_utils import OnnxRuntimeModel
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
from .continous_encoder import SpectrogramContEncoder
from .notes_encoder import SpectrogramNotesEncoder
__A = logging.get_logger(__name__) # pylint: disable=invalid-name
__A = 2_5_6
class UpperCAmelCase (DiffusionPipeline ):
"""simple docstring"""
_UpperCAmelCase :int = ["melgan"]
def __init__( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , ):
super().__init__()
# From MELGAN
lowercase__: Union[str, Any] = math.log(1e-5 ) # Matches MelGAN training.
lowercase__: Union[str, Any] = 4.0 # Largest value for most examples
lowercase__: Union[str, Any] = 128
self.register_modules(
notes_encoder=_UpperCAmelCase , continuous_encoder=_UpperCAmelCase , decoder=_UpperCAmelCase , scheduler=_UpperCAmelCase , melgan=_UpperCAmelCase , )
    def scale_features( self , features , output_range=(-1.0, 1.0) , clip=False ):
        min_out, max_out = output_range
        if clip:
            features = torch.clip(features , self.min_value , self.max_value )
        # Scale to [0, 1].
        zero_one = (features - self.min_value) / (self.max_value - self.min_value)
        # Scale to [min_out, max_out].
        return zero_one * (max_out - min_out) + min_out
    def scale_to_features( self , outputs , input_range=(-1.0, 1.0) , clip=False ):
        min_out, max_out = input_range
        outputs = torch.clip(outputs , min_out , max_out ) if clip else outputs
        # Scale to [0, 1].
        zero_one = (outputs - min_out) / (max_out - min_out)
        # Scale to [self.min_value, self.max_value].
        return zero_one * (self.max_value - self.min_value) + self.min_value
    def encode( self , input_tokens , continuous_inputs , continuous_mask ):
        tokens_mask = input_tokens > 0
        tokens_encoded, tokens_mask = self.notes_encoder(
            encoder_input_tokens=input_tokens , encoder_inputs_mask=tokens_mask )
        continuous_encoded, continuous_mask = self.continuous_encoder(
            encoder_inputs=continuous_inputs , encoder_inputs_mask=continuous_mask )
        return [(tokens_encoded, tokens_mask), (continuous_encoded, continuous_mask)]
    def decode( self , encodings_and_masks , input_tokens , noise_time ):
        timesteps = noise_time
        if not torch.is_tensor(timesteps ):
            timesteps = torch.tensor([timesteps] , dtype=torch.long , device=input_tokens.device )
        elif torch.is_tensor(timesteps ) and len(timesteps.shape ) == 0:
            timesteps = timesteps[None].to(input_tokens.device )
        # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
        timesteps = timesteps * torch.ones(input_tokens.shape[0] , dtype=timesteps.dtype , device=timesteps.device )
        logits = self.decoder(
            encodings_and_masks=encodings_and_masks , decoder_input_tokens=input_tokens , decoder_noise_time=timesteps )
        return logits
    @torch.no_grad()
    def __call__(
        self,
        input_tokens,
        generator=None,
        num_inference_steps=100,
        return_dict=True,
        output_type="numpy",
        callback=None,
        callback_steps=1,
    ):
        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
        ):
            raise ValueError(
                f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
                f" {type(callback_steps)}."
            )
        pred_mel = np.zeros([1, TARGET_FEATURE_LENGTH, self.n_dims], dtype=np.float32)
        full_pred_mel = np.zeros([1, 0, self.n_dims], np.float32)
        ones = torch.ones((1, TARGET_FEATURE_LENGTH), dtype=bool, device=self.device)

        for i, encoder_input_tokens in enumerate(input_tokens):
            if i == 0:
                encoder_continuous_inputs = torch.from_numpy(pred_mel[:1].copy()).to(
                    device=self.device, dtype=self.decoder.dtype
                )
                # The first chunk has no previous context.
                encoder_continuous_mask = torch.zeros((1, TARGET_FEATURE_LENGTH), dtype=bool, device=self.device)
            else:
                # The full song pipeline does not feed in a context feature, so the mask
                # will be all 0s after the feature converter. Because we know we're
                # feeding in a full context chunk from the previous prediction, set it
                # to all 1s.
                encoder_continuous_mask = ones

            encoder_continuous_inputs = self.scale_features(
                encoder_continuous_inputs, output_range=[-1.0, 1.0], clip=True
            )
            encodings_and_masks = self.encode(
                input_tokens=torch.IntTensor([encoder_input_tokens]).to(device=self.device),
                continuous_inputs=encoder_continuous_inputs,
                continuous_mask=encoder_continuous_mask,
            )

            # Sample encoder_continuous_inputs shaped gaussian noise to begin loop
            x = randn_tensor(
                shape=encoder_continuous_inputs.shape,
                generator=generator,
                device=self.device,
                dtype=self.decoder.dtype,
            )

            # set step values
            self.scheduler.set_timesteps(num_inference_steps)

            # Denoising diffusion loop
            for j, t in enumerate(self.progress_bar(self.scheduler.timesteps)):
                output = self.decode(
                    encodings_and_masks=encodings_and_masks,
                    input_tokens=x,
                    noise_time=t / self.scheduler.config.num_train_timesteps,
                )
                # Compute previous output: x_t -> x_t-1
                x = self.scheduler.step(output, t, x, generator=generator).prev_sample

            mel = self.scale_to_features(x, input_range=[-1.0, 1.0])
            encoder_continuous_inputs = mel[:1]
            pred_mel = mel.cpu().float().numpy()
            full_pred_mel = np.concatenate([full_pred_mel, pred_mel[:1]], axis=1)

            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                callback(i, full_pred_mel)

            logger.info("Generated segment", i)

        if output_type == "numpy" and not is_onnx_available():
            raise ValueError(
                "Cannot return output in 'np' format if ONNX is not available."
                " Make sure to have ONNX installed or set 'output_type' to 'mel'."
            )
        elif output_type == "numpy" and self.melgan is None:
            raise ValueError(
                "Cannot return output in 'np' format if melgan component is not defined."
                " Make sure to define `self.melgan` or set 'output_type' to 'mel'."
            )

        if output_type == "numpy":
            output = self.melgan(input_features=full_pred_mel.astype(np.float32))
        else:
            output = full_pred_mel

        if not return_dict:
            return (output,)

        return AudioPipelineOutput(audios=output)
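# Hedged usage sketch for the pipeline above, following the diffusers docs for
# spectrogram diffusion. The checkpoint id and the MidiProcessor helper (which
# needs the `note_seq` package) are assumptions of this sketch, and the MIDI
# file path is a placeholder.
#   from diffusers import SpectrogramDiffusionPipeline, MidiProcessor
#   pipe = SpectrogramDiffusionPipeline.from_pretrained("google/music-spectrogram-diffusion")
#   processor = MidiProcessor()
#   output = pipe(processor("some_song.mid"))
#   audio = output.audios[0]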
import logging
import os
import sys
from dataclasses import dataclass, field
from itertools import chain
from typing import Optional, Union
import datasets
import numpy as np
import torch
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import PaddingStrategy, check_min_version, send_example_telemetry
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("""4.31.0""")
logger = logging.getLogger(__name__)
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
    """

    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    use_fast_tokenizer: bool = field(
        default=True,
        metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."},
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    train_file: Optional[str] = field(default=None, metadata={"help": "The input training data file (a text file)."})
    validation_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."},
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
    preprocessing_num_workers: Optional[int] = field(
        default=None,
        metadata={"help": "The number of processes to use for the preprocessing."},
    )
    max_seq_length: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. If passed, sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    pad_to_max_length: bool = field(
        default=False,
        metadata={
            "help": (
                "Whether to pad all samples to the maximum sentence length. "
                "If False, will pad the samples dynamically when batching to the maximum length in the batch. More "
                "efficient on GPU but very bad for TPU."
            )
        },
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )

    def __post_init__(self):
        if self.train_file is not None:
            extension = self.train_file.split(".")[-1]
            assert extension in ["csv", "json"], "`train_file` should be a csv or a json file."
        if self.validation_file is not None:
            extension = self.validation_file.split(".")[-1]
            assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file."
@dataclass
class DataCollatorForMultipleChoice:
    """
    Data collator that dynamically pads the inputs for multiple choice.
    """

    tokenizer: PreTrainedTokenizerBase
    padding: Union[bool, str, PaddingStrategy] = True
    max_length: Optional[int] = None
    pad_to_multiple_of: Optional[int] = None

    def __call__(self, features):
        label_name = "label" if "label" in features[0].keys() else "labels"
        labels = [feature.pop(label_name) for feature in features]
        batch_size = len(features)
        num_choices = len(features[0]["input_ids"])
        flattened_features = [
            [{k: v[i] for k, v in feature.items()} for i in range(num_choices)] for feature in features
        ]
        flattened_features = list(chain(*flattened_features))
        batch = self.tokenizer.pad(
            flattened_features,
            padding=self.padding,
            max_length=self.max_length,
            pad_to_multiple_of=self.pad_to_multiple_of,
            return_tensors="pt",
        )
        # Un-flatten
        batch = {k: v.view(batch_size, num_choices, -1) for k, v in batch.items()}
        # Add back labels
        batch["labels"] = torch.tensor(labels, dtype=torch.int64)
        return batch
def main():
_lowerCAmelCase = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith(""".json""" ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry("""run_swag""" , snake_case , snake_case )
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
_lowerCAmelCase = training_args.get_process_log_level()
logger.setLevel(snake_case )
datasets.utils.logging.set_verbosity(snake_case )
transformers.utils.logging.set_verbosity(snake_case )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F'Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'
+ F'distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}' )
logger.info(F'Training/evaluation parameters {training_args}' )
# Detecting last checkpoint.
_lowerCAmelCase = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
_lowerCAmelCase = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F'Output directory ({training_args.output_dir}) already exists and is not empty. '
"""Use --overwrite_output_dir to overcome.""" )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F'Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '
"""the `--output_dir` or add `--overwrite_output_dir` to train from scratch.""" )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
if data_args.train_file is not None or data_args.validation_file is not None:
_lowerCAmelCase = {}
if data_args.train_file is not None:
_lowerCAmelCase = data_args.train_file
if data_args.validation_file is not None:
_lowerCAmelCase = data_args.validation_file
_lowerCAmelCase = data_args.train_file.split(""".""" )[-1]
_lowerCAmelCase = load_dataset(
snake_case , data_files=snake_case , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
else:
# Downloading and loading the swag dataset from the hub.
_lowerCAmelCase = load_dataset(
"""swag""" , """regular""" , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
_lowerCAmelCase = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
_lowerCAmelCase = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
_lowerCAmelCase = AutoModelForMultipleChoice.from_pretrained(
model_args.model_name_or_path , from_tf=bool(""".ckpt""" in model_args.model_name_or_path ) , config=snake_case , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# When using your own dataset or a different dataset from swag, you will probably need to change this.
_lowerCAmelCase = [F'ending{i}' for i in range(4 )]
_lowerCAmelCase = """sent1"""
_lowerCAmelCase = """sent2"""
    if data_args.max_seq_length is None:
        max_seq_length = tokenizer.model_max_length
        if max_seq_length > 1024:
            logger.warning(
                "The chosen tokenizer supports a `model_max_length` that is longer than the default"
                " `max_seq_length` value of 1024. Using max_seq_length=1024; you can override this default with"
                " `--max_seq_length xxx`."
            )
            max_seq_length = 1024
    else:
        if data_args.max_seq_length > tokenizer.model_max_length:
            logger.warning(
                f"The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the"
                f" model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}."
            )
        max_seq_length = min(data_args.max_seq_length, tokenizer.model_max_length)
# Preprocessing the datasets.
def preprocess_function(snake_case ):
_lowerCAmelCase = [[context] * 4 for context in examples[context_name]]
_lowerCAmelCase = examples[question_header_name]
_lowerCAmelCase = [
[F'{header} {examples[end][i]}' for end in ending_names] for i, header in enumerate(snake_case )
]
# Flatten out
_lowerCAmelCase = list(chain(*snake_case ) )
_lowerCAmelCase = list(chain(*snake_case ) )
# Tokenize
_lowerCAmelCase = tokenizer(
snake_case , snake_case , truncation=snake_case , max_length=snake_case , padding="""max_length""" if data_args.pad_to_max_length else False , )
# Un-flatten
return {k: [v[i : i + 4] for i in range(0 , len(snake_case ) , 4 )] for k, v in tokenized_examples.items()}
if training_args.do_train:
if "train" not in raw_datasets:
raise ValueError("""--do_train requires a train dataset""" )
_lowerCAmelCase = raw_datasets["""train"""]
if data_args.max_train_samples is not None:
_lowerCAmelCase = min(len(snake_case ) , data_args.max_train_samples )
_lowerCAmelCase = train_dataset.select(range(snake_case ) )
with training_args.main_process_first(desc="""train dataset map pre-processing""" ):
_lowerCAmelCase = train_dataset.map(
snake_case , batched=snake_case , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , )
if training_args.do_eval:
if "validation" not in raw_datasets:
raise ValueError("""--do_eval requires a validation dataset""" )
_lowerCAmelCase = raw_datasets["""validation"""]
if data_args.max_eval_samples is not None:
_lowerCAmelCase = min(len(snake_case ) , data_args.max_eval_samples )
_lowerCAmelCase = eval_dataset.select(range(snake_case ) )
with training_args.main_process_first(desc="""validation dataset map pre-processing""" ):
_lowerCAmelCase = eval_dataset.map(
snake_case , batched=snake_case , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , )
# Data collator
_lowerCAmelCase = (
default_data_collator
if data_args.pad_to_max_length
else DataCollatorForMultipleChoice(tokenizer=snake_case , pad_to_multiple_of=8 if training_args.fpaa else None )
)
# Metric
def compute_metrics(snake_case ):
_lowerCAmelCase , _lowerCAmelCase = eval_predictions
_lowerCAmelCase = np.argmax(snake_case , axis=1 )
return {"accuracy": (preds == label_ids).astype(np.floataa ).mean().item()}
# Initialize our Trainer
_lowerCAmelCase = Trainer(
model=snake_case , args=snake_case , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , tokenizer=snake_case , data_collator=snake_case , compute_metrics=snake_case , )
# Training
if training_args.do_train:
_lowerCAmelCase = None
if training_args.resume_from_checkpoint is not None:
_lowerCAmelCase = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
_lowerCAmelCase = last_checkpoint
_lowerCAmelCase = trainer.train(resume_from_checkpoint=snake_case )
trainer.save_model() # Saves the tokenizer too for easy upload
_lowerCAmelCase = train_result.metrics
_lowerCAmelCase = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(snake_case )
)
_lowerCAmelCase = min(snake_case , len(snake_case ) )
trainer.log_metrics("""train""" , snake_case )
trainer.save_metrics("""train""" , snake_case )
trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info("""*** Evaluate ***""" )
_lowerCAmelCase = trainer.evaluate()
_lowerCAmelCase = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(snake_case )
_lowerCAmelCase = min(snake_case , len(snake_case ) )
trainer.log_metrics("""eval""" , snake_case )
trainer.save_metrics("""eval""" , snake_case )
_lowerCAmelCase = {
"""finetuned_from""": model_args.model_name_or_path,
"""tasks""": """multiple-choice""",
"""dataset_tags""": """swag""",
"""dataset_args""": """regular""",
"""dataset""": """SWAG""",
"""language""": """en""",
}
if training_args.push_to_hub:
trainer.push_to_hub(**snake_case )
else:
trainer.create_model_card(**snake_case )
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
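# Hedged CLI sketch for the multiple-choice script above. Hyperparameters are
# illustrative, not tuned; with no --train_file the script downloads the SWAG
# dataset from the Hub.
#   python run_swag.py \
#       --model_name_or_path bert-base-uncased \
#       --do_train --do_eval \
#       --per_device_train_batch_size 16 \
#       --learning_rate 5e-5 \
#       --num_train_epochs 3 \
#       --output_dir /tmp/swag_output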
from __future__ import annotations
import math
def is_prime(number: int) -> bool:
    """Return True if `number` is prime."""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers and all multiples of 3 are not primes
        return False
    # All primes greater than 3 are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def list_truncated_nums(n: int) -> list[int]:
    """Return `n` together with every left and right truncation of its digits."""
    str_num = str(n)
    list_nums = [n]
    for i in range(1, len(str_num)):
        list_nums.append(int(str_num[i:]))
        list_nums.append(int(str_num[:-i]))
    return list_nums


def validate(n: int) -> bool:
    """Cheap pre-filter: the leading and trailing three digits must themselves be prime."""
    if len(str(n)) > 3:
        if not is_prime(int(str(n)[-3:])) or not is_prime(int(str(n)[:3])):
            return False
    return True


def compute_truncated_primes(count: int = 11) -> list[int]:
    """Return the first `count` primes that stay prime under truncation from both sides."""
    list_truncated_primes: list[int] = []
    num = 13
    while len(list_truncated_primes) != count:
        if validate(num):
            list_nums = list_truncated_nums(num)
            if all(is_prime(i) for i in list_nums):
                list_truncated_primes.append(num)
        num += 2
    return list_truncated_primes


def solution() -> int:
    """Sum of the only eleven two-sided truncatable primes (Project Euler 37)."""
    return sum(compute_truncated_primes(11))
if __name__ == "__main__":
print(f"{sum(compute_truncated_primes(11)) = }")
from __future__ import annotations
def prime_factors(n: int) -> list[int]:
    """Return the prime factorisation of `n` in ascending order (trial division)."""
    i = 2
    factors = []
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.append(i)
    if n > 1:
        factors.append(n)
    return factors
if __name__ == "__main__":
import doctest
doctest.testmod()
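    # Usage example (hedged; `prime_factors` is the reconstructed name of the
    # function above): 360 = 2**3 * 3**2 * 5.
    assert prime_factors(360) == [2, 2, 2, 3, 3, 5]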
import argparse
import shutil
from pathlib import Path
from tqdm import tqdm
from transformers import AutoTokenizer
def pack_examples(tok, src_examples, tgt_examples, max_tokens=1024):
    finished_src, finished_tgt = [], []
    sorted_examples = list(zip(src_examples, tgt_examples))
    new_src, new_tgt = sorted_examples[0]

    def is_too_big(strang):
        return tok(strang, return_tensors="pt").input_ids.shape[1] > max_tokens

    for src, tgt in tqdm(sorted_examples[1:]):
        cand_src = new_src + " " + src
        cand_tgt = new_tgt + " " + tgt
        if is_too_big(cand_src) or is_too_big(cand_tgt):  # can't fit, finalize example
            finished_src.append(new_src)
            finished_tgt.append(new_tgt)
            new_src, new_tgt = src, tgt
        else:  # can fit, keep adding
            new_src, new_tgt = cand_src, cand_tgt

    # cleanup
    if new_src:
        assert new_tgt
        finished_src.append(new_src)
        finished_tgt.append(new_tgt)
    return finished_src, finished_tgt


def pack_data_dir(tok, data_dir: Path, max_tokens, save_path):
    save_path = Path(save_path)
    save_path.mkdir(exist_ok=True)
    for split in ["train"]:
        src_path, tgt_path = data_dir / f"{split}.source", data_dir / f"{split}.target"
        src_docs = [x.rstrip() for x in Path(src_path).open().readlines()]
        tgt_docs = [x.rstrip() for x in Path(tgt_path).open().readlines()]
        packed_src, packed_tgt = pack_examples(tok, src_docs, tgt_docs, max_tokens)
        print(f"packed {split} split from {len(src_docs)} examples -> {len(packed_src)}.")
        Path(save_path / f"{split}.source").open("w").write("\n".join(packed_src))
        Path(save_path / f"{split}.target").open("w").write("\n".join(packed_tgt))
    for split in ["val", "test"]:
        src_path, tgt_path = data_dir / f"{split}.source", data_dir / f"{split}.target"
        shutil.copyfile(src_path, save_path / f"{split}.source")
        shutil.copyfile(tgt_path, save_path / f"{split}.target")


def packer_cli():
    parser = argparse.ArgumentParser()
    parser.add_argument("--tok_name", type=str, help="like facebook/bart-large-cnn,t5-base, etc.")
    parser.add_argument("--max_seq_len", type=int, default=128)
    parser.add_argument("--data_dir", type=str)
    parser.add_argument("--save_path", type=str)
    args = parser.parse_args()
    tokenizer = AutoTokenizer.from_pretrained(args.tok_name)
    return pack_data_dir(tokenizer, Path(args.data_dir), args.max_seq_len, args.save_path)
if __name__ == "__main__":
packer_cli()
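# Hedged CLI sketch (script filename, paths and tokenizer name are
# illustrative; the flags match the argparse block above):
#   python pack_dataset.py \
#       --tok_name facebook/bart-large-cnn \
#       --max_seq_len 512 \
#       --data_dir ./cnn_dm \
#       --save_path ./cnn_dm_packed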
"""simple docstring"""
import argparse
import logging
import pickle
import random
import time
import numpy as np
from transformers import BertTokenizer, GPT2Tokenizer, RobertaTokenizer
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO
)
logger = logging.getLogger(__name__)
def main():
    parser = argparse.ArgumentParser(
        description="Preprocess the data to avoid re-doing it several times by (tokenization + token_to_ids)."
    )
    parser.add_argument("--file_path", type=str, default="data/dump.txt", help="The path to the data.")
    parser.add_argument("--tokenizer_type", type=str, default="bert", choices=["bert", "roberta", "gpt2"])
    parser.add_argument("--tokenizer_name", type=str, default="bert-base-uncased", help="The tokenizer to use.")
    parser.add_argument("--dump_file", type=str, default="data/dump", help="The dump file prefix.")
    args = parser.parse_args()

    logger.info(f"Loading Tokenizer ({args.tokenizer_name})")
    if args.tokenizer_type == "bert":
        tokenizer = BertTokenizer.from_pretrained(args.tokenizer_name)
        bos = tokenizer.special_tokens_map["cls_token"]  # `[CLS]`
        sep = tokenizer.special_tokens_map["sep_token"]  # `[SEP]`
    elif args.tokenizer_type == "roberta":
        tokenizer = RobertaTokenizer.from_pretrained(args.tokenizer_name)
        bos = tokenizer.special_tokens_map["cls_token"]  # `<s>`
        sep = tokenizer.special_tokens_map["sep_token"]  # `</s>`
    elif args.tokenizer_type == "gpt2":
        tokenizer = GPT2Tokenizer.from_pretrained(args.tokenizer_name)
        bos = tokenizer.special_tokens_map["bos_token"]  # `<|endoftext|>`
        sep = tokenizer.special_tokens_map["eos_token"]  # `<|endoftext|>`

    logger.info(f"Loading text from {args.file_path}")
    with open(args.file_path, "r", encoding="utf8") as fp:
        data = fp.readlines()

    logger.info("Start encoding")
    logger.info(f"{len(data)} examples to process.")

    rslt = []
    iter = 0
    interval = 10000
    start = time.time()
    for text in data:
        text = f"{bos} {text.strip()} {sep}"
        token_ids = tokenizer.encode(text, add_special_tokens=False)
        rslt.append(token_ids)

        iter += 1
        if iter % interval == 0:
            end = time.time()
            logger.info(f"{iter} examples processed. - {(end-start):.2f}s/{interval}expl")
            start = time.time()
    logger.info("Finished binarization")
    logger.info(f"{len(rslt)} examples processed.")

    dp_file = f"{args.dump_file}.{args.tokenizer_name}.pickle"
    vocab_size = tokenizer.vocab_size
    # Use the smallest integer dtype that can hold every token id.
    if vocab_size < (1 << 16):
        rslt_ = [np.uint16(d) for d in rslt]
    else:
        rslt_ = [np.int32(d) for d in rslt]
    random.shuffle(rslt_)
    logger.info(f"Dump to {dp_file}")
    with open(dp_file, "wb") as handle:
        pickle.dump(rslt_, handle, protocol=pickle.HIGHEST_PROTOCOL)
if __name__ == "__main__":
main()
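# Hedged CLI sketch (file paths are placeholders; the flag defaults match the
# argparse block above):
#   python binarized_data.py \
#       --file_path data/dump.txt \
#       --tokenizer_type bert \
#       --tokenizer_name bert-base-uncased \
#       --dump_file data/binarized_text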
import argparse
import json
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import AutoImageProcessor, SwinConfig, SwinForImageClassification
def __lowercase ( _SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = SwinConfig()
SCREAMING_SNAKE_CASE = swin_name.split("""_""" )
SCREAMING_SNAKE_CASE = name_split[1]
SCREAMING_SNAKE_CASE = int(name_split[4] )
SCREAMING_SNAKE_CASE = int(name_split[3][-1] )
if model_size == "tiny":
SCREAMING_SNAKE_CASE = 96
SCREAMING_SNAKE_CASE = (2, 2, 6, 2)
SCREAMING_SNAKE_CASE = (3, 6, 12, 24)
elif model_size == "small":
SCREAMING_SNAKE_CASE = 96
SCREAMING_SNAKE_CASE = (2, 2, 18, 2)
SCREAMING_SNAKE_CASE = (3, 6, 12, 24)
elif model_size == "base":
SCREAMING_SNAKE_CASE = 1_28
SCREAMING_SNAKE_CASE = (2, 2, 18, 2)
SCREAMING_SNAKE_CASE = (4, 8, 16, 32)
else:
SCREAMING_SNAKE_CASE = 1_92
SCREAMING_SNAKE_CASE = (2, 2, 18, 2)
SCREAMING_SNAKE_CASE = (6, 12, 24, 48)
if "in22k" in swin_name:
SCREAMING_SNAKE_CASE = 2_18_41
else:
SCREAMING_SNAKE_CASE = 10_00
SCREAMING_SNAKE_CASE = """huggingface/label-files"""
SCREAMING_SNAKE_CASE = """imagenet-1k-id2label.json"""
SCREAMING_SNAKE_CASE = json.load(open(hf_hub_download(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , repo_type="""dataset""" ) , """r""" ) )
SCREAMING_SNAKE_CASE = {int(_SCREAMING_SNAKE_CASE ): v for k, v in idalabel.items()}
SCREAMING_SNAKE_CASE = idalabel
SCREAMING_SNAKE_CASE = {v: k for k, v in idalabel.items()}
SCREAMING_SNAKE_CASE = img_size
SCREAMING_SNAKE_CASE = num_classes
SCREAMING_SNAKE_CASE = embed_dim
SCREAMING_SNAKE_CASE = depths
SCREAMING_SNAKE_CASE = num_heads
SCREAMING_SNAKE_CASE = window_size
return config
def __lowercase ( _SCREAMING_SNAKE_CASE ) -> Dict:
'''simple docstring'''
if "patch_embed.proj" in name:
SCREAMING_SNAKE_CASE = name.replace("""patch_embed.proj""" , """embeddings.patch_embeddings.projection""" )
if "patch_embed.norm" in name:
SCREAMING_SNAKE_CASE = name.replace("""patch_embed.norm""" , """embeddings.norm""" )
if "layers" in name:
SCREAMING_SNAKE_CASE = """encoder.""" + name
if "attn.proj" in name:
SCREAMING_SNAKE_CASE = name.replace("""attn.proj""" , """attention.output.dense""" )
if "attn" in name:
SCREAMING_SNAKE_CASE = name.replace("""attn""" , """attention.self""" )
if "norm1" in name:
SCREAMING_SNAKE_CASE = name.replace("""norm1""" , """layernorm_before""" )
if "norm2" in name:
SCREAMING_SNAKE_CASE = name.replace("""norm2""" , """layernorm_after""" )
if "mlp.fc1" in name:
SCREAMING_SNAKE_CASE = name.replace("""mlp.fc1""" , """intermediate.dense""" )
if "mlp.fc2" in name:
SCREAMING_SNAKE_CASE = name.replace("""mlp.fc2""" , """output.dense""" )
if name == "norm.weight":
SCREAMING_SNAKE_CASE = """layernorm.weight"""
if name == "norm.bias":
SCREAMING_SNAKE_CASE = """layernorm.bias"""
if "head" in name:
SCREAMING_SNAKE_CASE = name.replace("""head""" , """classifier""" )
else:
SCREAMING_SNAKE_CASE = """swin.""" + name
return name
def __lowercase ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Any:
'''simple docstring'''
for key in orig_state_dict.copy().keys():
SCREAMING_SNAKE_CASE = orig_state_dict.pop(_SCREAMING_SNAKE_CASE )
if "mask" in key:
continue
elif "qkv" in key:
SCREAMING_SNAKE_CASE = key.split(""".""" )
SCREAMING_SNAKE_CASE = int(key_split[1] )
SCREAMING_SNAKE_CASE = int(key_split[3] )
SCREAMING_SNAKE_CASE = model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
if "weight" in key:
SCREAMING_SNAKE_CASE = val[:dim, :]
SCREAMING_SNAKE_CASE = val[
dim : dim * 2, :
]
SCREAMING_SNAKE_CASE = val[-dim:, :]
else:
SCREAMING_SNAKE_CASE = val[
:dim
]
SCREAMING_SNAKE_CASE = val[
dim : dim * 2
]
SCREAMING_SNAKE_CASE = val[
-dim:
]
else:
SCREAMING_SNAKE_CASE = val
return orig_state_dict
def __lowercase ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = timm.create_model(_SCREAMING_SNAKE_CASE , pretrained=_SCREAMING_SNAKE_CASE )
timm_model.eval()
SCREAMING_SNAKE_CASE = get_swin_config(_SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE = SwinForImageClassification(_SCREAMING_SNAKE_CASE )
model.eval()
SCREAMING_SNAKE_CASE = convert_state_dict(timm_model.state_dict() , _SCREAMING_SNAKE_CASE )
model.load_state_dict(_SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE = """http://images.cocodataset.org/val2017/000000039769.jpg"""
SCREAMING_SNAKE_CASE = AutoImageProcessor.from_pretrained("""microsoft/{}""".format(swin_name.replace("""_""" , """-""" ) ) )
SCREAMING_SNAKE_CASE = Image.open(requests.get(_SCREAMING_SNAKE_CASE , stream=_SCREAMING_SNAKE_CASE ).raw )
SCREAMING_SNAKE_CASE = image_processor(images=_SCREAMING_SNAKE_CASE , return_tensors="""pt""" )
SCREAMING_SNAKE_CASE = timm_model(inputs["""pixel_values"""] )
SCREAMING_SNAKE_CASE = model(**_SCREAMING_SNAKE_CASE ).logits
assert torch.allclose(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , atol=1E-3 )
print(F"""Saving model {swin_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(_SCREAMING_SNAKE_CASE )
print(F"""Saving image processor to {pytorch_dump_folder_path}""" )
image_processor.save_pretrained(_SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--swin_name""",
default="""swin_tiny_patch4_window7_224""",
type=str,
help="""Name of the Swin timm model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
    args = parser.parse_args()
convert_swin_checkpoint(args.swin_name, args.pytorch_dump_folder_path)
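# Hedged CLI sketch for the Swin converter above. The flags are the ones
# defined by the argparse block; the script filename and the output path are
# illustrative.
#   python convert_swin_timm_to_pytorch.py \
#       --swin_name swin_tiny_patch4_window7_224 \
#       --pytorch_dump_folder_path ./swin-tiny-patch4-window7-224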
import argparse
import collections
import json
from pathlib import Path
import requests
import torch
import yaml
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileViTImageProcessor,
MobileViTVaConfig,
MobileViTVaForImageClassification,
MobileViTVaForSemanticSegmentation,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def _a ( UpperCAmelCase ) -> List[str]:
"""simple docstring"""
print('''Loading config file...''' )
def flatten_yaml_as_dict(UpperCAmelCase , UpperCAmelCase="" , UpperCAmelCase="." ):
lowerCamelCase__ : List[Any] = []
for k, v in d.items():
lowerCamelCase__ : Dict = parent_key + sep + k if parent_key else k
if isinstance(UpperCAmelCase , collections.abc.MutableMapping ):
items.extend(flatten_yaml_as_dict(UpperCAmelCase , UpperCAmelCase , sep=UpperCAmelCase ).items() )
else:
items.append((new_key, v) )
return dict(UpperCAmelCase )
lowerCamelCase__ : Optional[Any] = argparse.Namespace()
with open(UpperCAmelCase , '''r''' ) as yaml_file:
try:
lowerCamelCase__ : Union[str, Any] = yaml.load(UpperCAmelCase , Loader=yaml.FullLoader )
lowerCamelCase__ : Tuple = flatten_yaml_as_dict(UpperCAmelCase )
for k, v in flat_cfg.items():
setattr(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
except yaml.YAMLError as exc:
logger.error('''Error while loading config file: {}. Error message: {}'''.format(UpperCAmelCase , str(UpperCAmelCase ) ) )
return config
def _a ( UpperCAmelCase , UpperCAmelCase ) -> Optional[int]:
"""simple docstring"""
lowerCamelCase__ : Any = MobileViTVaConfig()
lowerCamelCase__ : Optional[int] = False
# dataset
if task_name.startswith('''imagenet1k_''' ):
lowerCamelCase__ : Optional[Any] = 1000
if int(task_name.strip().split('''_''' )[-1] ) == 384:
lowerCamelCase__ : Union[str, Any] = 384
else:
lowerCamelCase__ : int = 256
lowerCamelCase__ : Dict = '''imagenet-1k-id2label.json'''
elif task_name.startswith('''imagenet21k_to_1k_''' ):
lowerCamelCase__ : Dict = 21000
if int(task_name.strip().split('''_''' )[-1] ) == 384:
lowerCamelCase__ : Union[str, Any] = 384
else:
lowerCamelCase__ : int = 256
lowerCamelCase__ : Any = '''imagenet-22k-id2label.json'''
elif task_name.startswith('''ade20k_''' ):
lowerCamelCase__ : Any = 151
lowerCamelCase__ : List[str] = 512
lowerCamelCase__ : List[str] = '''ade20k-id2label.json'''
lowerCamelCase__ : List[Any] = True
elif task_name.startswith('''voc_''' ):
lowerCamelCase__ : Optional[Any] = 21
lowerCamelCase__ : Optional[Any] = 512
lowerCamelCase__ : List[str] = '''pascal-voc-id2label.json'''
lowerCamelCase__ : str = True
# orig_config
lowerCamelCase__ : Dict = load_orig_config_file(UpperCAmelCase )
assert getattr(UpperCAmelCase , '''model.classification.name''' , -1 ) == "mobilevit_v2", "Invalid model"
lowerCamelCase__ : Union[str, Any] = getattr(UpperCAmelCase , '''model.classification.mitv2.width_multiplier''' , 1.0 )
assert (
getattr(UpperCAmelCase , '''model.classification.mitv2.attn_norm_layer''' , -1 ) == "layer_norm_2d"
), "Norm layers other than layer_norm_2d is not supported"
lowerCamelCase__ : Tuple = getattr(UpperCAmelCase , '''model.classification.activation.name''' , '''swish''' )
# config.image_size == getattr(orig_config, 'sampler.bs.crop_size_width', 256)
if is_segmentation_model:
lowerCamelCase__ : str = getattr(UpperCAmelCase , '''model.segmentation.output_stride''' , 16 )
if "_deeplabv3" in task_name:
lowerCamelCase__ : List[Any] = getattr(UpperCAmelCase , '''model.segmentation.deeplabv3.aspp_rates''' , [12, 24, 36] )
lowerCamelCase__ : Dict = getattr(UpperCAmelCase , '''model.segmentation.deeplabv3.aspp_out_channels''' , 512 )
lowerCamelCase__ : int = getattr(UpperCAmelCase , '''model.segmentation.deeplabv3.aspp_dropout''' , 0.1 )
# id2label
lowerCamelCase__ : List[str] = '''huggingface/label-files'''
lowerCamelCase__ : Any = json.load(open(hf_hub_download(UpperCAmelCase , UpperCAmelCase , repo_type='''dataset''' ) , '''r''' ) )
lowerCamelCase__ : str = {int(UpperCAmelCase ): v for k, v in idalabel.items()}
lowerCamelCase__ : Any = idalabel
lowerCamelCase__ : List[str] = {v: k for k, v in idalabel.items()}
return config
def _a ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> int:
"""simple docstring"""
lowerCamelCase__ : List[Any] = dct.pop(UpperCAmelCase )
lowerCamelCase__ : str = val
def _a ( UpperCAmelCase , UpperCAmelCase=False ) -> Optional[Any]:
"""simple docstring"""
if base_model:
lowerCamelCase__ : int = ''''''
else:
lowerCamelCase__ : Dict = '''mobilevitv2.'''
lowerCamelCase__ : int = []
for k in state_dict.keys():
if k[:8] == "encoder.":
lowerCamelCase__ : Any = k[8:]
else:
lowerCamelCase__ : int = k
if ".block." in k:
lowerCamelCase__ : Tuple = k_new.replace('''.block.''' , '''.''' )
if ".conv." in k:
lowerCamelCase__ : str = k_new.replace('''.conv.''' , '''.convolution.''' )
if ".norm." in k:
lowerCamelCase__ : List[Any] = k_new.replace('''.norm.''' , '''.normalization.''' )
if "conv_1." in k:
lowerCamelCase__ : Optional[Any] = k_new.replace('''conv_1.''' , f"{model_prefix}conv_stem." )
for i in [1, 2]:
if f"layer_{i}." in k:
lowerCamelCase__ : int = k_new.replace(f"layer_{i}." , f"{model_prefix}encoder.layer.{i-1}.layer." )
if ".exp_1x1." in k:
lowerCamelCase__ : int = k_new.replace('''.exp_1x1.''' , '''.expand_1x1.''' )
if ".red_1x1." in k:
lowerCamelCase__ : Optional[Any] = k_new.replace('''.red_1x1.''' , '''.reduce_1x1.''' )
for i in [3, 4, 5]:
if f"layer_{i}.0." in k:
lowerCamelCase__ : Union[str, Any] = k_new.replace(f"layer_{i}.0." , f"{model_prefix}encoder.layer.{i-1}.downsampling_layer." )
if f"layer_{i}.1.local_rep.0." in k:
lowerCamelCase__ : Dict = k_new.replace(f"layer_{i}.1.local_rep.0." , f"{model_prefix}encoder.layer.{i-1}.conv_kxk." )
if f"layer_{i}.1.local_rep.1." in k:
lowerCamelCase__ : int = k_new.replace(f"layer_{i}.1.local_rep.1." , f"{model_prefix}encoder.layer.{i-1}.conv_1x1." )
for i in [3, 4, 5]:
if i == 3:
lowerCamelCase__ : Union[str, Any] = [0, 1]
elif i == 4:
lowerCamelCase__ : int = [0, 1, 2, 3]
elif i == 5:
lowerCamelCase__ : Any = [0, 1, 2]
for j in j_in:
if f"layer_{i}.1.global_rep.{j}." in k:
lowerCamelCase__ : Tuple = k_new.replace(
f"layer_{i}.1.global_rep.{j}." , f"{model_prefix}encoder.layer.{i-1}.transformer.layer.{j}." )
if f"layer_{i}.1.global_rep.{j+1}." in k:
lowerCamelCase__ : Tuple = k_new.replace(
f"layer_{i}.1.global_rep.{j+1}." , f"{model_prefix}encoder.layer.{i-1}.layernorm." )
if f"layer_{i}.1.conv_proj." in k:
lowerCamelCase__ : Union[str, Any] = k_new.replace(f"layer_{i}.1.conv_proj." , f"{model_prefix}encoder.layer.{i-1}.conv_projection." )
if "pre_norm_attn.0." in k:
lowerCamelCase__ : Dict = k_new.replace('''pre_norm_attn.0.''' , '''layernorm_before.''' )
if "pre_norm_attn.1." in k:
lowerCamelCase__ : List[Any] = k_new.replace('''pre_norm_attn.1.''' , '''attention.''' )
if "pre_norm_ffn.0." in k:
lowerCamelCase__ : List[str] = k_new.replace('''pre_norm_ffn.0.''' , '''layernorm_after.''' )
if "pre_norm_ffn.1." in k:
lowerCamelCase__ : Optional[Any] = k_new.replace('''pre_norm_ffn.1.''' , '''ffn.conv1.''' )
if "pre_norm_ffn.3." in k:
lowerCamelCase__ : List[Any] = k_new.replace('''pre_norm_ffn.3.''' , '''ffn.conv2.''' )
if "classifier.1." in k:
lowerCamelCase__ : Dict = k_new.replace('''classifier.1.''' , '''classifier.''' )
if "seg_head." in k:
lowerCamelCase__ : List[str] = k_new.replace('''seg_head.''' , '''segmentation_head.''' )
if ".aspp_layer." in k:
lowerCamelCase__ : Optional[Any] = k_new.replace('''.aspp_layer.''' , '''.''' )
if ".aspp_pool." in k:
lowerCamelCase__ : str = k_new.replace('''.aspp_pool.''' , '''.''' )
rename_keys.append((k, k_new) )
return rename_keys
def _a ( UpperCAmelCase ) -> int:
"""simple docstring"""
lowerCamelCase__ : List[Any] = []
for k in state_dict.keys():
if k.startswith('''seg_head.aux_head.''' ):
keys_to_ignore.append(UpperCAmelCase )
for k in keys_to_ignore:
state_dict.pop(UpperCAmelCase , UpperCAmelCase )
def _a ( ) -> Union[str, Any]:
"""simple docstring"""
lowerCamelCase__ : Optional[int] = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
# url = "https://cdn.britannica.com/86/141086-050-9D7C75EE/Gulfstream-G450-business-jet-passengers.jpg"
lowerCamelCase__ : Tuple = Image.open(requests.get(UpperCAmelCase , stream=UpperCAmelCase ).raw )
return im
@torch.no_grad()
def _a ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> str:
"""simple docstring"""
lowerCamelCase__ : Tuple = get_mobilevitva_config(UpperCAmelCase , UpperCAmelCase )
# load original state_dict
lowerCamelCase__ : Optional[int] = torch.load(UpperCAmelCase , map_location='''cpu''' )
# load huggingface model
if task_name.startswith('''ade20k_''' ) or task_name.startswith('''voc_''' ):
lowerCamelCase__ : Tuple = MobileViTVaForSemanticSegmentation(UpperCAmelCase ).eval()
lowerCamelCase__ : Union[str, Any] = False
else:
lowerCamelCase__ : Optional[int] = MobileViTVaForImageClassification(UpperCAmelCase ).eval()
lowerCamelCase__ : Optional[Any] = False
# remove and rename some keys of load the original model
lowerCamelCase__ : Dict = checkpoint
remove_unused_keys(UpperCAmelCase )
lowerCamelCase__ : Tuple = create_rename_keys(UpperCAmelCase , base_model=UpperCAmelCase )
for rename_key_src, rename_key_dest in rename_keys:
rename_key(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
# load modified state_dict
model.load_state_dict(UpperCAmelCase )
# Check outputs on an image, prepared by MobileViTImageProcessor
lowerCamelCase__ : int = MobileViTImageProcessor(crop_size=config.image_size , size=config.image_size + 32 )
lowerCamelCase__ : Any = image_processor(images=prepare_img() , return_tensors='''pt''' )
lowerCamelCase__ : List[Any] = model(**UpperCAmelCase )
# verify classification model
if task_name.startswith('''imagenet''' ):
lowerCamelCase__ : Optional[int] = outputs.logits
lowerCamelCase__ : Tuple = logits.argmax(-1 ).item()
print('''Predicted class:''' , model.config.idalabel[predicted_class_idx] )
if task_name.startswith('''imagenet1k_256''' ) and config.width_multiplier == 1.0:
# expected_logits for base variant
lowerCamelCase__ : Dict = torch.tensor([-1.6_3_3_6E0_0, -7.3_2_0_4E-0_2, -5.1_8_8_3E-0_1] )
assert torch.allclose(logits[0, :3] , UpperCAmelCase , atol=1E-4 )
Path(UpperCAmelCase ).mkdir(exist_ok=UpperCAmelCase )
print(f"Saving model {task_name} to {pytorch_dump_folder_path}" )
model.save_pretrained(UpperCAmelCase )
print(f"Saving image processor to {pytorch_dump_folder_path}" )
image_processor.save_pretrained(UpperCAmelCase )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--task',
default='imagenet1k_256',
type=str,
help=(
'Name of the task for which the MobileViTV2 model you\'d like to convert is trained on . '
'\n Classification (ImageNet-1k)\n - MobileViTV2 (256x256) : imagenet1k_256\n - MobileViTV2 (Trained on 256x256 and Finetuned on 384x384) : imagenet1k_384\n - MobileViTV2 (Trained on ImageNet-21k and Finetuned on ImageNet-1k 256x256) :\n imagenet21k_to_1k_256\n - MobileViTV2 (Trained on ImageNet-21k, Finetuned on ImageNet-1k 256x256, and Finetuned on\n ImageNet-1k 384x384) : imagenet21k_to_1k_384\n Segmentation\n - ADE20K Dataset : ade20k_deeplabv3\n - Pascal VOC 2012 Dataset: voc_deeplabv3\n '
),
choices=[
'imagenet1k_256',
'imagenet1k_384',
'imagenet21k_to_1k_256',
'imagenet21k_to_1k_384',
'ade20k_deeplabv3',
'voc_deeplabv3',
],
)
parser.add_argument(
'--orig_checkpoint_path', required=True, type=str, help='Path to the original state dict (.pt file).'
)
parser.add_argument('--orig_config_path', required=True, type=str, help='Path to the original config file.')
parser.add_argument(
'--pytorch_dump_folder_path', required=True, type=str, help='Path to the output PyTorch model directory.'
)
    args = parser.parse_args()
convert_mobilevitva_checkpoint(
args.task, args.orig_checkpoint_path, args.orig_config_path, args.pytorch_dump_folder_path
)
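# Hedged CLI sketch for the MobileViTV2 converter above. The flags are the
# ones defined by the argparse block; the script filename and all paths are
# placeholders, not real files.
#   python convert_mobilevitv2_to_pytorch.py \
#       --task imagenet1k_256 \
#       --orig_checkpoint_path ./mobilevitv2-1.0.pt \
#       --orig_config_path ./mobilevitv2.yaml \
#       --pytorch_dump_folder_path ./mobilevitv2-1.0-imagenet1k-256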
import random
import timeit
from functools import wraps
from typing import Callable, Optional
from ..configuration_utils import PretrainedConfig
from ..models.auto.modeling_tf_auto import TF_MODEL_MAPPING, TF_MODEL_WITH_LM_HEAD_MAPPING
from ..utils import is_pyanvml_available, is_tf_available, logging
from .benchmark_utils import (
Benchmark,
Memory,
MemorySummary,
measure_peak_memory_cpu,
start_memory_tracing,
stop_memory_tracing,
)
if is_tf_available():
import tensorflow as tf
from tensorflow.python.framework.errors_impl import ResourceExhaustedError
from .benchmark_args_tf import TensorFlowBenchmarkArguments
if is_pyanvml_available():
import pyanvml.pyanvml as nvml
logger = logging.get_logger(__name__)
def run_with_tf_optimizations(do_eager_mode, use_xla):
    def run_func(func):
        @wraps(func)
        def run_in_eager_mode(*args, **kwargs):
            return func(*args, **kwargs)

        @wraps(func)
        @tf.function(experimental_compile=use_xla)
        def run_in_graph_mode(*args, **kwargs):
            return func(*args, **kwargs)

        if do_eager_mode is True:
            if use_xla is not False:
                raise ValueError(
                    "Cannot run model in XLA, if `args.eager_mode` is set to `True`. Please set `args.eager_mode=False`."
                )
            return run_in_eager_mode
        else:
            return run_in_graph_mode

    return run_func
def random_input_ids(batch_size, sequence_length, vocab_size) -> ["tf.Tensor"]:
    rng = random.Random()
    values = [rng.randint(0, vocab_size - 1) for i in range(batch_size * sequence_length)]
    return tf.constant(values, shape=(batch_size, sequence_length), dtype=tf.int32)
class TensorFlowBenchmark(Benchmark):
    args: TensorFlowBenchmarkArguments
    configs: PretrainedConfig
    framework: str = "TensorFlow"

    @property
    def framework_version(self):
        return tf.__version__
def __lowerCamelCase ( self : Optional[int] , A : str , A : int , A : int ) ->float:
# initialize GPU on separate process
lowerCamelCase__ : Dict = self.args.strategy
if strategy is None:
raise ValueError('''A device strategy has to be initialized before using TensorFlow.''' )
lowerCamelCase__ : int = self._prepare_inference_func(A , A , A )
return self._measure_speed(_inference )
def __lowerCamelCase ( self : str , A : str , A : int , A : int ) ->float:
lowerCamelCase__ : Optional[int] = self.args.strategy
if strategy is None:
raise ValueError('''A device strategy has to be initialized before using TensorFlow.''' )
lowerCamelCase__ : List[Any] = self._prepare_train_func(A , A , A )
return self._measure_speed(_train )
def __lowerCamelCase ( self : int , A : str , A : int , A : int ) ->[Memory, Optional[MemorySummary]]:
# initialize GPU on separate process
if self.args.is_gpu:
tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx] , A )
lowerCamelCase__ : int = self.args.strategy
if strategy is None:
raise ValueError('''A device strategy has to be initialized before using TensorFlow.''' )
lowerCamelCase__ : str = self._prepare_inference_func(A , A , A )
return self._measure_memory(_inference )
def __lowerCamelCase ( self : List[str] , A : str , A : int , A : int ) ->[Memory, Optional[MemorySummary]]:
if self.args.is_gpu:
tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx] , A )
lowerCamelCase__ : List[Any] = self.args.strategy
if strategy is None:
raise ValueError('''A device strategy has to be initialized before using TensorFlow.''' )
lowerCamelCase__ : str = self._prepare_train_func(A , A , A )
return self._measure_memory(_train )
def __lowerCamelCase ( self : Dict , A : str , A : int , A : int ) ->Callable[[], None]:
lowerCamelCase__ : Tuple = self.config_dict[model_name]
if self.args.fpaa:
raise NotImplementedError('''Mixed precision is currently not supported.''' )
lowerCamelCase__ : Tuple = (
hasattr(A , '''architectures''' )
and isinstance(config.architectures , A )
and len(config.architectures ) > 0
)
if not self.args.only_pretrain_model and has_model_class_in_config:
try:
lowerCamelCase__ : Any = '''TF''' + config.architectures[0] # prepend 'TF' for tensorflow model
lowerCamelCase__ : List[Any] = __import__('''transformers''' , fromlist=[model_class] )
lowerCamelCase__ : int = getattr(A , A )
lowerCamelCase__ : int = model_cls(A )
except ImportError:
raise ImportError(
F"{model_class} does not exist. If you just want to test the pretrained model, you might want to"
''' set `--only_pretrain_model` or `args.only_pretrain_model=True`.''' )
else:
lowerCamelCase__ : Union[str, Any] = TF_MODEL_MAPPING[config.__class__](A )
# encoder-decoder has vocab size saved differently
lowerCamelCase__ : Tuple = config.vocab_size if hasattr(A , '''vocab_size''' ) else config.encoder.vocab_size
lowerCamelCase__ : Optional[Any] = random_input_ids(A , A , A )
@run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla )
def encoder_decoder_forward():
return model(A , decoder_input_ids=A , training=A )
@run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla )
def encoder_forward():
return model(A , training=A )
lowerCamelCase__ : int = encoder_decoder_forward if config.is_encoder_decoder else encoder_forward
return _inference
def __lowerCamelCase ( self : List[str] , A : str , A : int , A : int ) ->Callable[[], None]:
lowerCamelCase__ : Tuple = self.config_dict[model_name]
if self.args.eager_mode is not False:
raise ValueError('''Training cannot be done in eager mode. Please make sure that `args.eager_mode = False`.''' )
if self.args.fpaa:
raise NotImplementedError('''Mixed precision is currently not supported.''' )
lowerCamelCase__ : Optional[int] = (
hasattr(A , '''architectures''' )
and isinstance(config.architectures , A )
and len(config.architectures ) > 0
)
if not self.args.only_pretrain_model and has_model_class_in_config:
try:
lowerCamelCase__ : Any = '''TF''' + config.architectures[0] # prepend 'TF' for tensorflow model
lowerCamelCase__ : List[str] = __import__('''transformers''' , fromlist=[model_class] )
lowerCamelCase__ : Optional[int] = getattr(A , A )
lowerCamelCase__ : Optional[Any] = model_cls(A )
except ImportError:
raise ImportError(
F"{model_class} does not exist. If you just want to test the pretrained model, you might want to"
''' set `--only_pretrain_model` or `args.only_pretrain_model=True`.''' )
else:
lowerCamelCase__ : List[str] = TF_MODEL_WITH_LM_HEAD_MAPPING[config.__class__](A )
# encoder-decoder has vocab size saved differently
lowerCamelCase__ : Optional[int] = config.vocab_size if hasattr(A , '''vocab_size''' ) else config.encoder.vocab_size
lowerCamelCase__ : Dict = random_input_ids(A , A , A )
@run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla )
def encoder_decoder_train():
lowerCamelCase__ : int = model(A , decoder_input_ids=A , labels=A , training=A )[0]
lowerCamelCase__ : List[Any] = tf.gradients(A , model.trainable_variables )
return gradients
@run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla )
def encoder_train():
lowerCamelCase__ : Optional[int] = model(A , labels=A , training=A )[0]
lowerCamelCase__ : List[str] = tf.gradients(A , model.trainable_variables )
return gradients
lowerCamelCase__ : Tuple = encoder_decoder_train if config.is_encoder_decoder else encoder_train
return _train
def __lowerCamelCase ( self : Tuple , A : Any ) ->float:
with self.args.strategy.scope():
try:
if self.args.is_tpu or self.args.use_xla:
# run additional 10 times to stabilize compilation for tpu
logger.info('''Do inference on TPU. Running model 5 times to stabilize compilation''' )
timeit.repeat(A , repeat=1 , number=5 )
# as written in https://docs.python.org/2/library/timeit.html#timeit.Timer.repeat, min should be taken rather than the average
lowerCamelCase__ : Optional[Any] = timeit.repeat(
A , repeat=self.args.repeat , number=1_0 , )
return min(A ) / 10.0
except ResourceExhaustedError as e:
self.print_fn(F"Doesn't fit on GPU. {e}" )
def __lowerCamelCase ( self : List[Any] , A : Callable[[], None] ) ->[Memory, MemorySummary]:
logger.info(
'''Note that TensorFlow allocates more memory than '''
'''it might need to speed up computation. '''
'''The memory reported here corresponds to the memory '''
'''reported by `nvidia-smi`, which can vary depending '''
'''on total available memory on the GPU that is used.''' )
with self.args.strategy.scope():
try:
if self.args.trace_memory_line_by_line:
if not self.args.eager_mode:
raise ValueError(
'''`args.eager_mode` is set to `False`. Make sure to run model in eager mode to measure memory'''
''' consumption line by line.''' )
lowerCamelCase__ : Union[str, Any] = start_memory_tracing('''transformers''' )
if self.args.is_tpu:
# tpu
raise NotImplementedError(
'''Memory Benchmarking is currently not implemented for TPU. Please disable memory benchmarking'''
''' with `args.memory=False`''' )
elif self.args.is_gpu:
# gpu
if not is_pyanvml_available():
logger.warning(
'''py3nvml not installed, we won\'t log GPU memory usage. '''
'''Install py3nvml (pip install py3nvml) to log information about GPU.''' )
lowerCamelCase__ : Union[str, Any] = '''N/A'''
else:
logger.info(
'''Measuring total GPU usage on GPU device. Make sure to not have additional processes'''
''' running on the same GPU.''' )
# init nvml
nvml.nvmlInit()
func()
lowerCamelCase__ : Any = nvml.nvmlDeviceGetHandleByIndex(self.args.device_idx )
lowerCamelCase__ : Optional[int] = nvml.nvmlDeviceGetMemoryInfo(A )
lowerCamelCase__ : List[Any] = meminfo.used
lowerCamelCase__ : Union[str, Any] = Memory(A )
# shutdown nvml
nvml.nvmlShutdown()
else:
# cpu
if self.args.trace_memory_line_by_line:
logger.info(
'''When enabling line by line tracing, the max peak memory for CPU is inaccurate in'''
''' TensorFlow.''' )
lowerCamelCase__ : Tuple = None
else:
lowerCamelCase__ : Dict = measure_peak_memory_cpu(A )
lowerCamelCase__ : Optional[Any] = Memory(A ) if isinstance(A , A ) else memory_bytes
if self.args.trace_memory_line_by_line:
lowerCamelCase__ : Union[str, Any] = stop_memory_tracing(A )
if memory is None:
lowerCamelCase__ : Dict = summary.total
else:
lowerCamelCase__ : Optional[int] = None
return memory, summary
except ResourceExhaustedError as e:
self.print_fn(F"Doesn't fit on GPU. {e}" )
return "N/A", None
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_beit import BeitImageProcessor
logger = logging.get_logger(__name__)


class BeitFeatureExtractor(BeitImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class BeitFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use BeitImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
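# Migration sketch (hedged): call sites can switch one-for-one to the new
# image processor; the checkpoint name is illustrative.
#   from transformers import BeitImageProcessor
#   image_processor = BeitImageProcessor.from_pretrained("microsoft/beit-base-patch16-224")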
"""simple docstring"""
import importlib.util
import json
import os
import warnings
from dataclasses import dataclass, field
import torch
from ..training_args import TrainingArguments
from ..utils import cached_property, is_sagemaker_dp_enabled, logging
SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__)
def is_sagemaker_model_parallel_available():
    # Get the sagemaker specific mp parameters from smp_options variable.
    smp_options = os.getenv("SM_HP_MP_PARAMETERS", "{}")
    try:
        # Parse it and check the field "partitions" is included, it is required for model parallel.
        smp_options = json.loads(smp_options)
        if "partitions" not in smp_options:
            return False
    except json.JSONDecodeError:
        return False

    # Get the sagemaker specific framework parameters from mpi_options variable.
    mpi_options = os.getenv("SM_FRAMEWORK_PARAMS", "{}")
    try:
        # Parse it and check the field "sagemaker_distributed_dataparallel_enabled".
        mpi_options = json.loads(mpi_options)
        if not mpi_options.get("sagemaker_mpi_enabled", False):
            return False
    except json.JSONDecodeError:
        return False
    # Lastly, check if the `smdistributed` module is present.
    return importlib.util.find_spec("smdistributed") is not None


if is_sagemaker_model_parallel_available():
    import smdistributed.modelparallel.torch as smp

    smp.init()
@dataclass
class SageMakerTrainingArguments(TrainingArguments):
    mp_parameters: str = field(
        default="",
        metadata={"help": "Used by the SageMaker launcher to send mp-specific args. Ignored in SageMakerTrainer"},
    )

    def __post_init__(self):
        super().__post_init__()
        warnings.warn(
            "`SageMakerTrainingArguments` is deprecated and will be removed in v5 of Transformers. You can use "
            "`TrainingArguments` instead.",
            FutureWarning,
        )

    @cached_property
    def _setup_devices(self) -> "torch.device":
        logger.info("PyTorch: setting up devices")
        if torch.distributed.is_available() and torch.distributed.is_initialized() and self.local_rank == -1:
            logger.warning(
                "torch.distributed process group is initialized, but local_rank == -1. "
                "In order to use Torch DDP, launch your script with `python -m torch.distributed.launch"
            )
        if self.no_cuda:
            device = torch.device("cpu")
            self._n_gpu = 0
        elif is_sagemaker_model_parallel_available():
            local_rank = smp.local_rank()
            device = torch.device("cuda", local_rank)
            self._n_gpu = 1
        elif is_sagemaker_dp_enabled():
            import smdistributed.dataparallel.torch.torch_smddp  # noqa: F401

            torch.distributed.init_process_group(backend="smddp", timeout=self.ddp_timeout_delta)
            self.local_rank = int(os.getenv("SMDATAPARALLEL_LOCAL_RANK"))
            device = torch.device("cuda", self.local_rank)
            self._n_gpu = 1
        elif self.local_rank == -1:
            # if n_gpu is > 1 we'll use nn.DataParallel.
            # If you only want to use a specific subset of GPUs use `CUDA_VISIBLE_DEVICES=0`
            # Explicitly set CUDA to the first (index 0) CUDA device, otherwise `set_device` will
            # trigger an error that a device index is missing. Index 0 takes into account the
            # GPUs available in the environment, so `CUDA_VISIBLE_DEVICES=1,2` with `cuda:0`
            # will use the first GPU in that env, i.e. GPU#1
            device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
            # Sometimes the line in the postinit has not been run before we end up here, so just checking we're not at
            # the default value.
            self._n_gpu = torch.cuda.device_count()
        else:
            # Here, we'll use torch.distributed.
            # Initializes the distributed backend which will take care of synchronizing nodes/GPUs
            if not torch.distributed.is_initialized():
                torch.distributed.init_process_group(backend="nccl", timeout=self.ddp_timeout_delta)
            device = torch.device("cuda", self.local_rank)
            self._n_gpu = 1

        if device.type == "cuda":
            torch.cuda.set_device(device)

        return device

    @property
    def world_size(self):
        if is_sagemaker_model_parallel_available():
            return smp.dp_size()
        return super().world_size

    @property
    def place_model_on_device(self):
        return not is_sagemaker_model_parallel_available()

    @property
    def _no_sync_in_gradient_accumulation(self):
        return False
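
# --- Illustrative aside ---
# A hedged sketch of the payloads the detection helpers above parse. The JSON
# bodies are hypothetical examples of what the SageMaker launcher might
# inject; real values depend on the training job configuration.
_EXAMPLE_SMP_OPTIONS = '{"partitions": 2, "microbatches": 4, "placement_strategy": "spread"}'
_EXAMPLE_MPI_OPTIONS = '{"sagemaker_mpi_enabled": true, "sagemaker_distributed_dataparallel_enabled": false}'

# "partitions" must be present for model parallelism to be considered enabled:
assert "partitions" in json.loads(_EXAMPLE_SMP_OPTIONS)
assert json.loads(_EXAMPLE_MPI_OPTIONS).get("sagemaker_mpi_enabled") is True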
| 301 | 0 |
import re
import string
import numpy as np
import datasets
_SCREAMING_SNAKE_CASE = """
Returns the rate at which the input predicted strings exactly match their references, ignoring any strings input as part of the regexes_to_ignore list.
"""
_SCREAMING_SNAKE_CASE = """
Args:
predictions: List of predicted texts.
references: List of reference texts.
regexes_to_ignore: List, defaults to None. Regex expressions of characters to
ignore when calculating the exact matches. Note: these regexes are removed
from the input data before the changes based on the options below (e.g. ignore_case,
ignore_punctuation, ignore_numbers) are applied.
ignore_case: Boolean, defaults to False. If true, turns everything
to lowercase so that capitalization differences are ignored.
ignore_punctuation: Boolean, defaults to False. If true, removes all punctuation before
comparing predictions and references.
    ignore_numbers: Boolean, defaults to False. If true, removes all digits before
        comparing predictions and references.
Returns:
exact_match: Dictionary containing exact_match rate. Possible values are between 0.0 and 100.0, inclusive.
Examples:
>>> exact_match = datasets.load_metric(\"exact_match\")
>>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]
>>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]
>>> results = exact_match.compute(references=refs, predictions=preds)
>>> print(round(results[\"exact_match\"], 1))
25.0
>>> exact_match = datasets.load_metric(\"exact_match\")
>>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]
>>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]
>>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=[\"the \", \"yell\"], ignore_case=True, ignore_punctuation=True)
>>> print(round(results[\"exact_match\"], 1))
50.0
>>> exact_match = datasets.load_metric(\"exact_match\")
>>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]
>>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]
>>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=[\"the \", \"yell\", \"YELL\"], ignore_case=True, ignore_punctuation=True)
>>> print(round(results[\"exact_match\"], 1))
75.0
>>> exact_match = datasets.load_metric(\"exact_match\")
>>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]
>>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]
>>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=[\"the \", \"yell\", \"YELL\"], ignore_case=True, ignore_punctuation=True, ignore_numbers=True)
>>> print(round(results[\"exact_match\"], 1))
100.0
>>> exact_match = datasets.load_metric(\"exact_match\")
>>> refs = [\"The cat sat on the mat.\", \"Theaters are great.\", \"It's like comparing oranges and apples.\"]
>>> preds = [\"The cat sat on the mat?\", \"Theaters are great.\", \"It's like comparing apples and oranges.\"]
>>> results = exact_match.compute(references=refs, predictions=preds)
>>> print(round(results[\"exact_match\"], 1))
33.3
"""
_SCREAMING_SNAKE_CASE = """
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class ExactMatch(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            reference_urls=[],
        )

    def _compute(
        self,
        predictions,
        references,
        regexes_to_ignore=None,
        ignore_case=False,
        ignore_punctuation=False,
        ignore_numbers=False,
    ):
        if regexes_to_ignore is not None:
            for s in regexes_to_ignore:
                predictions = np.array([re.sub(s, "", x) for x in predictions])
                references = np.array([re.sub(s, "", x) for x in references])
        else:
            predictions = np.asarray(predictions)
            references = np.asarray(references)

        if ignore_case:
            predictions = np.char.lower(predictions)
            references = np.char.lower(references)

        if ignore_punctuation:
            repl_table = string.punctuation.maketrans("", "", string.punctuation)
            predictions = np.char.translate(predictions, table=repl_table)
            references = np.char.translate(references, table=repl_table)

        if ignore_numbers:
            repl_table = string.digits.maketrans("", "", string.digits)
            predictions = np.char.translate(predictions, table=repl_table)
            references = np.char.translate(references, table=repl_table)

        score_list = predictions == references

        return {"exact_match": np.mean(score_list) * 100}
| 88 |
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class LayoutLMv3Processor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "LayoutLMv3ImageProcessor"
    tokenizer_class = ("LayoutLMv3Tokenizer", "LayoutLMv3TokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(
        self,
        images,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        text_pair: Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None,
        boxes: Union[List[List[int]], List[List[List[int]]]] = None,
        word_labels: Optional[Union[List[int], List[List[int]]]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
        # verify input
        if self.image_processor.apply_ocr and (boxes is not None):
            raise ValueError(
                "You cannot provide bounding boxes if you initialized the image processor with apply_ocr set to True."
            )
        if self.image_processor.apply_ocr and (word_labels is not None):
            raise ValueError(
                "You cannot provide word labels if you initialized the image processor with apply_ocr set to True."
            )

        # first, apply the image processor
        features = self.image_processor(images=images, return_tensors=return_tensors)

        # second, apply the tokenizer
        if text is not None and self.image_processor.apply_ocr and text_pair is None:
            if isinstance(text, str):
                text = [text]  # add batch dimension (as the image processor always adds a batch dimension)
            text_pair = features["words"]

        encoded_inputs = self.tokenizer(
            text=text if text is not None else features["words"],
            text_pair=text_pair if text_pair is not None else None,
            boxes=boxes if boxes is not None else features["boxes"],
            word_labels=word_labels,
            add_special_tokens=add_special_tokens,
            padding=padding,
            truncation=truncation,
            max_length=max_length,
            stride=stride,
            pad_to_multiple_of=pad_to_multiple_of,
            return_token_type_ids=return_token_type_ids,
            return_attention_mask=return_attention_mask,
            return_overflowing_tokens=return_overflowing_tokens,
            return_special_tokens_mask=return_special_tokens_mask,
            return_offsets_mapping=return_offsets_mapping,
            return_length=return_length,
            verbose=verbose,
            return_tensors=return_tensors,
            **kwargs,
        )

        # add pixel values
        images = features.pop("pixel_values")
        if return_overflowing_tokens is True:
            images = self.get_overflowing_images(images, encoded_inputs["overflow_to_sample_mapping"])
        encoded_inputs["pixel_values"] = images

        return encoded_inputs

    def get_overflowing_images(self, images, overflow_to_sample_mapping):
        # in case there's an overflow, ensure each `input_ids` sample is mapped to its corresponding image
        images_with_overflow = []
        for sample_idx in overflow_to_sample_mapping:
            images_with_overflow.append(images[sample_idx])

        if len(images_with_overflow) != len(overflow_to_sample_mapping):
            raise ValueError(
                "Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got"
                f" {len(images_with_overflow)} and {len(overflow_to_sample_mapping)}"
            )

        return images_with_overflow

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        return ["input_ids", "bbox", "attention_mask", "pixel_values"]

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
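
# --- Illustrative aside ---
# A hedged sketch of driving the processor above end to end. The checkpoint
# name is the public LayoutLMv3 base model; the image path is a placeholder.
# With `apply_ocr=True` (the image processor default), pytesseract/Tesseract
# must be installed so words and boxes can be extracted automatically.
if __name__ == "__main__":
    from PIL import Image

    from transformers import LayoutLMv3Processor

    processor = LayoutLMv3Processor.from_pretrained("microsoft/layoutlmv3-base")
    image = Image.open("document.png").convert("RGB")  # placeholder path
    encoding = processor(image, return_tensors="pt")
    print(list(encoding.keys()))  # input_ids, attention_mask, bbox, pixel_values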
| 88 | 1 |
'''simple docstring'''
import json
import os
from pathlib import Path
import pytest
from datasets.download.download_config import DownloadConfig
from datasets.download.download_manager import DownloadManager
from datasets.utils.file_utils import hash_url_to_filename
URL = "http://www.mocksite.com/file1.txt"
CONTENT = '"text": ["foo", "foo"]'
HASH = "6d8ce9aa78a471c7477201efbeabd3bb01ac2e7d100a6dc024ba1608361f90a8"


class MockResponse:
    status_code = 200
    headers = {"Content-Length": "100"}
    cookies = {}

    def iter_content(self, **kwargs):
        return [bytes(CONTENT, "utf-8")]


def mock_request(*args, **kwargs):
    return MockResponse()


@pytest.mark.parametrize("urls_type", [str, list, dict])
def test_download_manager_download(urls_type, tmp_path, monkeypatch):
    import requests

    monkeypatch.setattr(requests, "request", mock_request)

    url = URL
    if issubclass(urls_type, str):
        urls = url
    elif issubclass(urls_type, list):
        urls = [url]
    elif issubclass(urls_type, dict):
        urls = {"train": url}
    dataset_name = "dummy"
    cache_subdir = "downloads"
    cache_dir_root = tmp_path
    download_config = DownloadConfig(
        cache_dir=os.path.join(cache_dir_root, cache_subdir),
        use_etag=False,
    )
    dl_manager = DownloadManager(dataset_name=dataset_name, download_config=download_config)
    downloaded_paths = dl_manager.download(urls)
    input_urls = urls
    for downloaded_paths in [downloaded_paths]:
        if isinstance(urls, str):
            downloaded_paths = [downloaded_paths]
            input_urls = [urls]
        elif isinstance(urls, dict):
            assert "train" in downloaded_paths.keys()
            downloaded_paths = downloaded_paths.values()
            input_urls = urls.values()
        assert downloaded_paths
        for downloaded_path, input_url in zip(downloaded_paths, input_urls):
            assert downloaded_path == dl_manager.downloaded_paths[input_url]
            downloaded_path = Path(downloaded_path)
            parts = downloaded_path.parts
            assert parts[-1] == HASH
            assert parts[-2] == cache_subdir
            assert downloaded_path.exists()
            downloaded_path_content = downloaded_path.read_text()
            assert downloaded_path_content == CONTENT
            metadata_downloaded_path = downloaded_path.with_suffix(".json")
            assert metadata_downloaded_path.exists()
            metadata_content = json.loads(metadata_downloaded_path.read_text())
            assert metadata_content == {"url": URL, "etag": None}


@pytest.mark.parametrize("paths_type", [str, list, dict])
def test_download_manager_extract(paths_type, xz_file, text_file):
    filename = str(xz_file)
    if issubclass(paths_type, str):
        paths = filename
    elif issubclass(paths_type, list):
        paths = [filename]
    elif issubclass(paths_type, dict):
        paths = {"train": filename}
    dataset_name = "dummy"
    cache_dir = xz_file.parent
    extracted_subdir = "extracted"
    download_config = DownloadConfig(
        cache_dir=cache_dir,
        use_etag=False,
    )
    dl_manager = DownloadManager(dataset_name=dataset_name, download_config=download_config)
    extracted_paths = dl_manager.extract(paths)
    input_paths = paths
    for extracted_paths in [extracted_paths]:
        if isinstance(paths, str):
            extracted_paths = [extracted_paths]
            input_paths = [paths]
        elif isinstance(paths, dict):
            assert "train" in extracted_paths.keys()
            extracted_paths = extracted_paths.values()
            input_paths = paths.values()
        assert extracted_paths
        for extracted_path, input_path in zip(extracted_paths, input_paths):
            assert extracted_path == dl_manager.extracted_paths[input_path]
            extracted_path = Path(extracted_path)
            parts = extracted_path.parts
            assert parts[-1] == hash_url_to_filename(input_path, etag=None)
            assert parts[-2] == extracted_subdir
            assert extracted_path.exists()
            extracted_file_content = extracted_path.read_text()
            expected_file_content = text_file.read_text()
            assert extracted_file_content == expected_file_content


def _test_jsonl(path, file):
    assert path.endswith(".jsonl")
    for num_items, line in enumerate(file, start=1):
        item = json.loads(line.decode("utf-8"))
        assert item.keys() == {"col_1", "col_2", "col_3"}
    assert num_items == 4


@pytest.mark.parametrize("archive_jsonl", ["tar_jsonl_path", "zip_jsonl_path"])
def test_iter_archive_path(archive_jsonl, request):
    archive_jsonl_path = request.getfixturevalue(archive_jsonl)
    dl_manager = DownloadManager()
    for num_jsonl, (path, file) in enumerate(dl_manager.iter_archive(archive_jsonl_path), start=1):
        _test_jsonl(path, file)
    assert num_jsonl == 2


@pytest.mark.parametrize("archive_nested_jsonl", ["tar_nested_jsonl_path", "zip_nested_jsonl_path"])
def test_iter_archive_file(archive_nested_jsonl, request):
    archive_nested_jsonl_path = request.getfixturevalue(archive_nested_jsonl)
    dl_manager = DownloadManager()
    for num_tar, (path, file) in enumerate(dl_manager.iter_archive(archive_nested_jsonl_path), start=1):
        for num_jsonl, (subpath, subfile) in enumerate(dl_manager.iter_archive(file), start=1):
            _test_jsonl(subpath, subfile)
    assert num_tar == 1
    assert num_jsonl == 2


def test_iter_files(data_dir_with_hidden_files):
    dl_manager = DownloadManager()
    for num_file, file in enumerate(dl_manager.iter_files(data_dir_with_hidden_files), start=1):
        assert os.path.basename(file) == ("test.txt" if num_file == 1 else "train.txt")
    assert num_file == 2
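
# --- Illustrative aside ---
# A sketch of the cache-naming convention the assertions above rely on:
# downloaded files are stored under a hash of their URL, with a side-car
# ".json" file holding {"url": ..., "etag": ...}. With `use_etag=False` the
# filename is derived from the URL alone, which is why it matches the HASH
# constant at the top of this file.
if __name__ == "__main__":
    assert hash_url_to_filename(URL, etag=None) == HASH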
| 141 |
'''simple docstring'''
import unittest
from transformers import DonutProcessor
DONUT_PRETRAINED_MODEL_NAME = "naver-clova-ix/donut-base"


class DonutProcessorTest(unittest.TestCase):
    def setUp(self):
        # unittest only calls this hook if it is named exactly `setUp`
        self.processor = DonutProcessor.from_pretrained(DONUT_PRETRAINED_MODEL_NAME)

    def test_token2json(self):
        expected_json = {
            "name": "John Doe",
            "age": "99",
            "city": "Atlanta",
            "state": "GA",
            "zip": "30301",
            "phone": "123-4567",
            "nicknames": [{"nickname": "Johnny"}, {"nickname": "JD"}],
        }

        sequence = (
            "<s_name>John Doe</s_name><s_age>99</s_age><s_city>Atlanta</s_city>"
            "<s_state>GA</s_state><s_zip>30301</s_zip><s_phone>123-4567</s_phone>"
            "<s_nicknames><s_nickname>Johnny</s_nickname>"
            "<sep/><s_nickname>JD</s_nickname></s_nicknames>"
        )
        actual_json = self.processor.token2json(sequence)

        self.assertDictEqual(actual_json, expected_json)
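
# --- Illustrative aside ---
# The conversion exercised above turns Donut's XML-like token stream into
# nested JSON: <s_key>value</s_key> becomes {"key": "value"} and <sep/>
# separates list items. Below is a minimal regex-based sketch for flat,
# non-nested fields only; the real `token2json` also handles nesting and
# lists.
if __name__ == "__main__":
    import re

    def flat_token2json(tokens: str) -> dict:
        return {m.group(1): m.group(2) for m in re.finditer(r"<s_(\w+)>(.*?)</s_\1>", tokens)}

    print(flat_token2json("<s_name>John Doe</s_name><s_age>99</s_age>"))
    # -> {'name': 'John Doe', 'age': '99'}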
| 2 | 0 |
'''simple docstring'''
from __future__ import annotations
from decimal import Decimal
from math import * # noqa: F403
from sympy import diff
def newton_raphson(func: str, a: float | Decimal, precision: float = 10**-10) -> float:
    """Finds the root of `func` starting from the point `a` by the
    Newton-Raphson iteration x_{n+1} = x_n - f(x_n) / f'(x_n)."""
    x = a
    while True:
        x = Decimal(x) - (
            Decimal(eval(func)) / Decimal(eval(str(diff(func))))  # noqa: S307
        )
        # This number dictates the accuracy of the answer
        if abs(eval(func)) < precision:  # noqa: S307
            return float(x)
# Let's Execute
if __name__ == "__main__":
# Find root of trigonometric function
# Find value of pi
print(f"""The root of sin(x) = 0 is {newton_raphson('sin(x)', 2)}""")
# Find root of polynomial
print(f"""The root of x**2 - 5*x + 2 = 0 is {newton_raphson('x**2 - 5*x + 2', 0.4)}""")
    # Find value of e
print(f"""The root of log(x) - 1 = 0 is {newton_raphson('log(x) - 1', 2)}""")
# Exponential Roots
print(f"""The root of exp(x) - 1 = 0 is {newton_raphson('exp(x) - 1', 0)}""")
| 364 |
'''simple docstring'''
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
# Register SEW's fairseq modules
from sew_asapp import tasks # noqa: F401
from transformers import (
    SEWConfig,
    SEWForCTC,
    SEWModel,
    Wav2Vec2CTCTokenizer,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2Processor,
    logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

MAPPING = {
"post_extract_proj": "feature_projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
"fc2": "encoder.layers.*.feed_forward.output_dense",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.upsample.0": "encoder.upsample.projection",
"encoder.layer_norm": "encoder.layer_norm",
"w2v_model.layer_norm": "layer_norm",
"w2v_encoder.proj": "lm_head",
"mask_emb": "masked_spec_embed",
}
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
        f" {value.shape} for {full_name}"
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.')
def recursively_load_weights(fairseq_model, hf_model, is_finetuned):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.sew.feature_extractor if is_finetuned else hf_model.feature_extractor
    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = "sew." + mapped_key if (is_finetuned and mapped_key != "lm_head") else mapped_key

                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "weight" in name:
                        weight_type = "weight"
                    elif "bias" in name:
                        weight_type = "bias"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"
                " found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
def convert_config(model, is_finetuned):
    config = SEWConfig()
    if is_finetuned:
        fs_config = model.w2v_encoder.w2v_model.cfg
    else:
        fs_config = model.cfg

    config.conv_bias = fs_config.conv_bias
    conv_layers = eval(fs_config.conv_feature_layers)
    config.conv_dim = [x[0] for x in conv_layers]
    config.conv_kernel = [x[1] for x in conv_layers]
    config.conv_stride = [x[2] for x in conv_layers]
    config.feat_extract_activation = "gelu"
    config.feat_extract_norm = "layer" if fs_config.extractor_mode == "layer_norm" else "group"
    config.final_dropout = 0.0
    config.hidden_act = fs_config.activation_fn.name
    config.hidden_size = fs_config.encoder_embed_dim
    config.initializer_range = 0.02
    config.intermediate_size = fs_config.encoder_ffn_embed_dim
    config.layer_norm_eps = 1e-5
    config.layerdrop = fs_config.encoder_layerdrop
    config.num_attention_heads = fs_config.encoder_attention_heads
    config.num_conv_pos_embedding_groups = fs_config.conv_pos_groups
    config.num_conv_pos_embeddings = fs_config.conv_pos
    config.num_feat_extract_layers = len(conv_layers)
    config.num_hidden_layers = fs_config.encoder_layers
    config.squeeze_factor = fs_config.squeeze_factor

    # take care of any params that are overridden by the Wav2VecCtc model
    if is_finetuned:
        fs_config = model.cfg
        config.final_dropout = fs_config.final_dropout
        config.layerdrop = fs_config.layerdrop
        config.activation_dropout = fs_config.activation_dropout
        config.apply_spec_augment = fs_config.mask_prob > 0 or fs_config.mask_channel_prob > 0
        config.attention_dropout = fs_config.attention_dropout
        config.feat_proj_dropout = fs_config.dropout_input
        config.hidden_dropout = fs_config.dropout
        config.mask_feature_length = fs_config.mask_channel_length
        config.mask_feature_prob = fs_config.mask_channel_prob
        config.mask_time_length = fs_config.mask_length
        config.mask_time_prob = fs_config.mask_prob

    config.feature_extractor_type = "Wav2Vec2FeatureExtractor"
    config.tokenizer_class = "Wav2Vec2CTCTokenizer"

    return config
@torch.no_grad()
def convert_sew_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True
):
    if is_finetuned:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
        )
    else:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path])

    if config_path is not None:
        config = SEWConfig.from_pretrained(config_path)
    else:
        config = convert_config(model[0], is_finetuned)
    model = model[0].eval()

    return_attention_mask = True if config.feat_extract_norm == "layer" else False
    feature_extractor = Wav2Vec2FeatureExtractor(
        feature_size=1,
        sampling_rate=16000,
        padding_value=0,
        do_normalize=True,
        return_attention_mask=return_attention_mask,
    )

    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path)

            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            target_dict.indices[target_dict.bos_word] = target_dict.pad_index
            target_dict.indices[target_dict.pad_word] = target_dict.bos_index
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, "vocab.json")
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            with open(vocab_path, "w", encoding="utf-8") as vocab_handle:
                json.dump(target_dict.indices, vocab_handle)
            tokenizer = Wav2Vec2CTCTokenizer(
                vocab_path,
                unk_token=target_dict.unk_word,
                pad_token=target_dict.pad_word,
                bos_token=target_dict.bos_word,
                eos_token=target_dict.eos_word,
                word_delimiter_token="|",
                do_lower_case=False,
            )
            processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)

        hf_model = SEWForCTC(config)
    else:
        hf_model = SEWModel(config)
        feature_extractor.save_pretrained(pytorch_dump_folder_path)

    recursively_load_weights(model, hf_model, is_finetuned)

    hf_model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--is_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
)
    args = parser.parse_args()
convert_sew_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, args.is_finetuned
)
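
# --- Illustrative aside ---
# A hypothetical invocation of this conversion script; every path below is a
# placeholder, and the flags are the ones registered with argparse above:
#
#   python convert_sew_checkpoint.py \
#       --checkpoint_path /path/to/sew_checkpoint.pt \
#       --dict_path /path/to/dict.ltr.txt \
#       --pytorch_dump_folder_path ./sew-ctc-converted \
#       --is_finetuned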
| 13 | 0 |
import argparse
from pathlib import Path
import fairseq
import torch
from fairseq.models.xmod import XMODModel as FairseqXmodModel
from packaging import version
from transformers import XmodConfig, XmodForMaskedLM, XmodForSequenceClassification
from transformers.utils import logging
if version.parse(fairseq.__version__) < version.parse('''0.12.2'''):
raise Exception('''requires fairseq >= 0.12.2''')
if version.parse(fairseq.__version__) > version.parse('''2'''):
raise Exception('''requires fairseq < v2''')
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

SAMPLE_TEXT = "Hello, World!"
SAMPLE_LANGUAGE = "en_XX"
def convert_xmod_checkpoint_to_pytorch(xmod_checkpoint_path, pytorch_dump_folder_path, classification_head):
    data_dir = Path("data_bin")
    xmod = FairseqXmodModel.from_pretrained(
        model_name_or_path=str(Path(xmod_checkpoint_path).parent),
        checkpoint_file=Path(xmod_checkpoint_path).name,
        _name="xmod_base",
        arch="xmod_base",
        task="multilingual_masked_lm",
        data_name_or_path=str(data_dir),
        bpe="sentencepiece",
        sentencepiece_model=str(Path(xmod_checkpoint_path).parent / "sentencepiece.bpe.model"),
        src_dict=str(data_dir / "dict.txt"),
    )
    xmod.eval()  # disable dropout
    print(xmod)

    xmod_sent_encoder = xmod.model.encoder.sentence_encoder
    config = XmodConfig(
        vocab_size=xmod_sent_encoder.embed_tokens.num_embeddings,
        hidden_size=xmod.cfg.model.encoder_embed_dim,
        num_hidden_layers=xmod.cfg.model.encoder_layers,
        num_attention_heads=xmod.cfg.model.encoder_attention_heads,
        intermediate_size=xmod.cfg.model.encoder_ffn_embed_dim,
        max_position_embeddings=514,
        type_vocab_size=1,
        layer_norm_eps=1e-5,
        pre_norm=xmod.cfg.model.encoder_normalize_before,
        adapter_reduction_factor=getattr(xmod.cfg.model, "bottleneck", 2),
        adapter_layer_norm=xmod.cfg.model.adapter_layer_norm,
        adapter_reuse_layer_norm=xmod.cfg.model.adapter_reuse_layer_norm,
        ln_before_adapter=xmod.cfg.model.ln_before_adapter,
        languages=xmod.cfg.model.languages,
    )
    if classification_head:
        config.num_labels = xmod.model.classification_heads["mnli"].out_proj.weight.shape[0]

    print("Our X-MOD config:", config)

    model = XmodForSequenceClassification(config) if classification_head else XmodForMaskedLM(config)
    model.eval()

    # Now let's copy all the weights.
    # Embeddings
    model.roberta.embeddings.word_embeddings.weight = xmod_sent_encoder.embed_tokens.weight
    model.roberta.embeddings.position_embeddings.weight = xmod_sent_encoder.embed_positions.weight
    model.roberta.embeddings.token_type_embeddings.weight.data = torch.zeros_like(
        model.roberta.embeddings.token_type_embeddings.weight
    )  # just zero them out b/c xmod doesn't use them.

    model.roberta.embeddings.LayerNorm.weight = xmod_sent_encoder.layernorm_embedding.weight
    model.roberta.embeddings.LayerNorm.bias = xmod_sent_encoder.layernorm_embedding.bias

    for i in range(config.num_hidden_layers):
        # Encoder: start of layer
        layer = model.roberta.encoder.layer[i]
        xmod_layer = xmod_sent_encoder.layers[i]

        # self attention
        self_attn = layer.attention.self
        if not (
            xmod_layer.self_attn.k_proj.weight.data.shape
            == xmod_layer.self_attn.q_proj.weight.data.shape
            == xmod_layer.self_attn.v_proj.weight.data.shape
            == torch.Size((config.hidden_size, config.hidden_size))
        ):
            raise AssertionError("Dimensions of self-attention weights do not match.")

        self_attn.query.weight.data = xmod_layer.self_attn.q_proj.weight
        self_attn.query.bias.data = xmod_layer.self_attn.q_proj.bias
        self_attn.key.weight.data = xmod_layer.self_attn.k_proj.weight
        self_attn.key.bias.data = xmod_layer.self_attn.k_proj.bias
        self_attn.value.weight.data = xmod_layer.self_attn.v_proj.weight
        self_attn.value.bias.data = xmod_layer.self_attn.v_proj.bias

        # self-attention output
        self_output = layer.attention.output
        if self_output.dense.weight.shape != xmod_layer.self_attn.out_proj.weight.shape:
            raise AssertionError("Dimensions of self-attention output weights do not match.")
        self_output.dense.weight = xmod_layer.self_attn.out_proj.weight
        self_output.dense.bias = xmod_layer.self_attn.out_proj.bias
        self_output.LayerNorm.weight = xmod_layer.self_attn_layer_norm.weight
        self_output.LayerNorm.bias = xmod_layer.self_attn_layer_norm.bias

        # intermediate
        intermediate = layer.intermediate
        if intermediate.dense.weight.shape != xmod_layer.fc1.weight.shape:
            raise AssertionError("Dimensions of intermediate weights do not match.")
        intermediate.dense.weight = xmod_layer.fc1.weight
        intermediate.dense.bias = xmod_layer.fc1.bias

        # output
        bert_output = layer.output
        if bert_output.dense.weight.shape != xmod_layer.fc2.weight.shape:
            raise AssertionError("Dimensions of feed-forward weights do not match.")
        bert_output.dense.weight = xmod_layer.fc2.weight
        bert_output.dense.bias = xmod_layer.fc2.bias
        bert_output.LayerNorm.weight = xmod_layer.final_layer_norm.weight
        bert_output.LayerNorm.bias = xmod_layer.final_layer_norm.bias
        if bert_output.adapter_layer_norm is not None:
            bert_output.adapter_layer_norm.weight = xmod_layer.adapter_layer_norm.weight
            bert_output.adapter_layer_norm.bias = xmod_layer.adapter_layer_norm.bias

        if sorted(bert_output.adapter_modules.keys()) != sorted(xmod_layer.adapter_modules.keys()):
            raise AssertionError("Lists of language adapters do not match.")
        for lang_code, adapter in xmod_layer.adapter_modules.items():
            to_adapter = bert_output.adapter_modules[lang_code]
            from_adapter = xmod_layer.adapter_modules[lang_code]
            to_adapter.fc1.weight = from_adapter.fc1.weight
            to_adapter.fc1.bias = from_adapter.fc1.bias
            to_adapter.fc2.weight = from_adapter.fc2.weight
            to_adapter.fc2.bias = from_adapter.fc2.bias

        # end of layer

    if xmod_sent_encoder.layer_norm is not None:
        model.roberta.encoder.LayerNorm.weight = xmod_sent_encoder.layer_norm.weight
        model.roberta.encoder.LayerNorm.bias = xmod_sent_encoder.layer_norm.bias

    if classification_head:
        model.classifier.dense.weight = xmod.model.classification_heads["mnli"].dense.weight
        model.classifier.dense.bias = xmod.model.classification_heads["mnli"].dense.bias
        model.classifier.out_proj.weight = xmod.model.classification_heads["mnli"].out_proj.weight
        model.classifier.out_proj.bias = xmod.model.classification_heads["mnli"].out_proj.bias
    else:
        # LM Head
        model.lm_head.dense.weight = xmod.model.encoder.lm_head.dense.weight
        model.lm_head.dense.bias = xmod.model.encoder.lm_head.dense.bias
        model.lm_head.layer_norm.weight = xmod.model.encoder.lm_head.layer_norm.weight
        model.lm_head.layer_norm.bias = xmod.model.encoder.lm_head.layer_norm.bias
        model.lm_head.decoder.weight = xmod.model.encoder.lm_head.weight
        model.lm_head.decoder.bias = xmod.model.encoder.lm_head.bias

    # Let's check that we get the same results.
    input_ids = xmod.encode(SAMPLE_TEXT).unsqueeze(0)  # batch of size 1
    model.roberta.set_default_language(SAMPLE_LANGUAGE)

    our_output = model(input_ids)[0]
    if classification_head:
        their_output = xmod.model.classification_heads["mnli"](xmod.extract_features(input_ids))
    else:
        their_output = xmod.model(input_ids, lang_id=[SAMPLE_LANGUAGE])[0]
    print(our_output.shape, their_output.shape)
    max_absolute_diff = torch.max(torch.abs(our_output - their_output)).item()
    print(f"max_absolute_diff = {max_absolute_diff}")  # ~ 1e-7
    success = torch.allclose(our_output, their_output, atol=1e-3)
    print("Do both models output the same tensors?", "🔥" if success else "💩")
    if not success:
        raise Exception("Something went wRoNg")

    Path(pytorch_dump_folder_path).mkdir(parents=True, exist_ok=True)
    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--xmod_checkpoint_path''', default=None, type=str, required=True, help='''Path the official PyTorch dump.'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--classification_head''', action='''store_true''', help='''Whether to convert a final classification head.'''
)
    args = parser.parse_args()
convert_xmod_checkpoint_to_pytorch(
args.xmod_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
)
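
# --- Illustrative aside ---
# A hypothetical invocation of this script; the checkpoint and output paths
# are placeholders, and the flags match the argparse definitions above:
#
#   python convert_xmod_checkpoint.py \
#       --xmod_checkpoint_path /path/to/xmod_base/model.pt \
#       --pytorch_dump_folder_path ./xmod-base-converted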
| 71 |
import flax.linen as nn
import jax
import jax.numpy as jnp


class FlaxUpsample2D(nn.Module):
    out_channels: int
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        self.conv = nn.Conv(
            self.out_channels,
            kernel_size=(3, 3),
            strides=(1, 1),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

    def __call__(self, hidden_states):
        batch, height, width, channels = hidden_states.shape
        hidden_states = jax.image.resize(
            hidden_states,
            shape=(batch, height * 2, width * 2, channels),
            method="nearest",
        )
        hidden_states = self.conv(hidden_states)
        return hidden_states


class FlaxDownsample2D(nn.Module):
    out_channels: int
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        self.conv = nn.Conv(
            self.out_channels,
            kernel_size=(3, 3),
            strides=(2, 2),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

    def __call__(self, hidden_states):
        # pad = ((0, 0), (0, 1), (0, 1), (0, 0)) # pad height and width dim
        # hidden_states = jnp.pad(hidden_states, pad_width=pad)
        hidden_states = self.conv(hidden_states)
        return hidden_states


class FlaxResnetBlock2D(nn.Module):
    in_channels: int
    out_channels: int = None
    dropout_prob: float = 0.0
    use_nin_shortcut: bool = None
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        out_channels = self.in_channels if self.out_channels is None else self.out_channels

        self.norm1 = nn.GroupNorm(num_groups=32, epsilon=1e-5)
        self.conv1 = nn.Conv(
            out_channels,
            kernel_size=(3, 3),
            strides=(1, 1),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

        self.time_emb_proj = nn.Dense(out_channels, dtype=self.dtype)

        self.norm2 = nn.GroupNorm(num_groups=32, epsilon=1e-5)
        self.dropout = nn.Dropout(self.dropout_prob)
        self.conv2 = nn.Conv(
            out_channels,
            kernel_size=(3, 3),
            strides=(1, 1),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

        use_nin_shortcut = self.in_channels != out_channels if self.use_nin_shortcut is None else self.use_nin_shortcut

        self.conv_shortcut = None
        if use_nin_shortcut:
            self.conv_shortcut = nn.Conv(
                out_channels,
                kernel_size=(1, 1),
                strides=(1, 1),
                padding="VALID",
                dtype=self.dtype,
            )

    def __call__(self, hidden_states, temb, deterministic=True):
        residual = hidden_states
        hidden_states = self.norm1(hidden_states)
        hidden_states = nn.swish(hidden_states)
        hidden_states = self.conv1(hidden_states)

        temb = self.time_emb_proj(nn.swish(temb))
        temb = jnp.expand_dims(jnp.expand_dims(temb, 1), 1)
        hidden_states = hidden_states + temb

        hidden_states = self.norm2(hidden_states)
        hidden_states = nn.swish(hidden_states)
        hidden_states = self.dropout(hidden_states, deterministic)
        hidden_states = self.conv2(hidden_states)

        if self.conv_shortcut is not None:
            residual = self.conv_shortcut(residual)

        return hidden_states + residual
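
# --- Illustrative aside ---
# A minimal sketch of initializing and applying the upsample block above with
# random parameters; shapes are NHWC, as the `__call__` implementations assume.
if __name__ == "__main__":
    rng = jax.random.PRNGKey(0)
    block = FlaxUpsample2D(out_channels=8)
    x = jnp.ones((1, 16, 16, 8))
    params = block.init(rng, x)
    y = block.apply(params, x)
    print(y.shape)  # (1, 32, 32, 8): height and width doubled, channels kept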
| 50 | 0 |
"""simple docstring"""
import json
import multiprocessing as mp
import re
from collections import defaultdict
from functools import partial
from typing import Dict, List, Optional, Set, Tuple, Type
from datasets import Dataset
from datasketch import MinHash, MinHashLSH
from dpu_utils.utils.iterators import ThreadedIterator
from tqdm import tqdm
NON_ALPHA = re.compile("[^A-Za-z_0-9]")
# parameters used in DuplicationIndex
MIN_NUM_TOKENS = 10
NUM_PERM = 256
def get_min_hash(tokens: List[str]) -> Optional[MinHash]:
    """Compute the MinHash of a code snippet."""
    if len(tokens) < MIN_NUM_TOKENS:
        return None
    min_hash = MinHash(num_perm=NUM_PERM)
    for token in set(tokens):
        min_hash.update(token.encode())
    return min_hash


def get_tokens(code: str) -> Set[str]:
    """Tokenize a code snippet."""
    return {t for t in NON_ALPHA.split(code) if len(t.strip()) > 0}
class DuplicationIndex:
    def __init__(
        self,
        *,
        duplication_jaccard_threshold: float = 0.85,
    ):
        self._duplication_jaccard_threshold = duplication_jaccard_threshold
        self._num_perm = NUM_PERM
        self._index = MinHashLSH(threshold=self._duplication_jaccard_threshold, num_perm=self._num_perm)

        self._duplicate_clusters = defaultdict(set)

    def add(self, code_key: Tuple, min_hash: MinHash) -> None:
        """Add a key to the MinHashLSH index and record close duplicates as clusters."""
        close_duplicates = self._index.query(min_hash)
        if code_key in self._index.keys:
            print(f"Duplicate key {code_key}")
            return

        self._index.insert(code_key, min_hash)
        if len(close_duplicates) > 0:
            for base_duplicate in close_duplicates:
                if base_duplicate in self._duplicate_clusters:
                    self._duplicate_clusters[base_duplicate].add(code_key)
                    break
            else:
                self._duplicate_clusters[close_duplicates[0]].add(code_key)

    def get_duplicate_clusters(self) -> List[List[Dict]]:
        duplicate_clusters = []
        for base, duplicates in self._duplicate_clusters.items():
            cluster = [base] + list(duplicates)
            # reformat the cluster to be a list of dict
            cluster = [{"base_index": el[0], "repo_name": el[1], "path": el[2]} for el in cluster]
            duplicate_clusters.append(cluster)
        return duplicate_clusters

    def save(self, filepath) -> None:
        duplicate_clusters = self.get_duplicate_clusters()
        with open(filepath, "w") as f:
            json.dump(duplicate_clusters, f)
def _compute_min_hash(element):
    index, data = element
    min_hash = get_min_hash([t for t in NON_ALPHA.split(data["content"]) if len(t.strip()) > 0])
    if min_hash is not None:
        return (index, data["repo_name"], data["path"]), min_hash


def minhash_iter(dataset_iterator: Type[Dataset]):
    with mp.Pool() as pool:
        for data in pool.imap_unordered(
            _compute_min_hash,
            ThreadedIterator(dataset_iterator, max_queue_size=10000),
            chunksize=100,
        ):
            if data is not None:
                yield data


def make_duplicate_clusters(dataset_iterator: Type[Dataset], jaccard_threshold: float):
    di = DuplicationIndex(duplication_jaccard_threshold=jaccard_threshold)

    for filename, min_hash in tqdm(ThreadedIterator(minhash_iter(enumerate(dataset_iterator)), max_queue_size=100)):
        di.add(filename, min_hash)

    # Returns a List[Cluster] where Cluster is List[str] with the filenames.
    return di.get_duplicate_clusters()


def jaccard_similarity(code1: str, code2: str) -> float:
    tokens1 = get_tokens(code1)
    tokens2 = get_tokens(code2)
    return len(tokens1 & tokens2) / len(tokens1 | tokens2)


_shared_dataset = None


def _find_cluster_extremes_shared(cluster, jaccard_threshold):
    extremes = []
    for element1 in cluster:
        code1 = _shared_dataset[element1["base_index"]]["content"]
        for element2 in extremes:
            code2 = _shared_dataset[element2["base_index"]]["content"]
            if jaccard_similarity(code1, code2) >= jaccard_threshold:
                element2["copies"] += 1
                break
        else:
            element1["copies"] = 1
            extremes.append(element1)
    return extremes


def find_extremes(cluster_list, dataset, jaccard_threshold):
    global _shared_dataset
    _shared_dataset = dataset
    extremes_list = []
    f = partial(_find_cluster_extremes_shared, jaccard_threshold=jaccard_threshold)
    with mp.Pool() as pool:
        for extremes in tqdm(
            pool.imap_unordered(
                f,
                cluster_list,
            ),
            total=len(cluster_list),
        ):
            extremes_list.append(extremes)
    return extremes_list


def deduplicate_dataset(dataset, jaccard_threshold: float = 0.85) -> Tuple[Type[Dataset], List[List[Dict]]]:
    duplicate_clusters = make_duplicate_clusters(dataset, jaccard_threshold)
    duplicate_indices = {x["base_index"] for cluster in duplicate_clusters for x in cluster}
    extreme_dict = {}
    extremes_clusters = find_extremes(duplicate_clusters, dataset, jaccard_threshold)
    for extremes in extremes_clusters:
        for element in extremes:
            extreme_dict[element["base_index"]] = element
    remove_indices = duplicate_indices - set(extreme_dict.keys())
    ds_filter = dataset.filter(lambda x, idx: idx not in remove_indices, with_indices=True)

    # update duplicate_clusters
    for cluster in duplicate_clusters:
        for element in cluster:
            element["is_extreme"] = element["base_index"] in extreme_dict
            if element["is_extreme"]:
                element["copies"] = extreme_dict[element["base_index"]]["copies"]

    print(f"Original dataset size: {len(dataset)}")
    print(f"Number of duplicate clusters: {len(duplicate_clusters)}")
    print(f"Files in duplicate cluster: {len(duplicate_indices)}")
    print(f"Unique files in duplicate cluster: {len(extreme_dict)}")
    print(f"Filtered dataset size: {len(ds_filter)}")

    return ds_filter, duplicate_clusters
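
# --- Illustrative aside ---
# A hedged end-to-end sketch of the deduplication entry point above on a tiny
# in-memory dataset. The snippets are made-up examples; the first two are
# exact duplicates so one of them is dropped. Note this spawns worker pools,
# so it must run under the __main__ guard (fork-based start method assumed).
if __name__ == "__main__":
    snippet = (
        "def add_numbers(first_number, second_number):\n"
        "    total_value = first_number + second_number\n"
        "    print('the total is', total_value)\n"
        "    return total_value\n"
    )
    dummy_dataset = Dataset.from_dict(
        {
            "content": [snippet, snippet, "print('hello')"],
            "repo_name": ["repo_a", "repo_b", "repo_c"],
            "path": ["a.py", "b.py", "c.py"],
        }
    )
    ds_filter, duplicate_clusters = deduplicate_dataset(dummy_dataset, jaccard_threshold=0.85)
    print(duplicate_clusters)  # one cluster containing the two identical files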
| 313 |
"""simple docstring"""
import unittest
from transformers import BarthezTokenizer, BarthezTokenizerFast, BatchEncoding
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
@require_sentencepiece
@slow # see https://github.com/huggingface/transformers/issues/11457
class BarthezTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BarthezTokenizer
    rust_tokenizer_class = BarthezTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        tokenizer = BarthezTokenizerFast.from_pretrained("moussaKam/mbarthez")
        tokenizer.save_pretrained(self.tmpdirname)
        tokenizer.save_pretrained(self.tmpdirname, legacy_format=False)
        self.tokenizer = tokenizer

    def test_convert_token_and_id(self):
        token = "<pad>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<s>")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(vocab_keys[-1], "<mask>")
        self.assertEqual(len(vocab_keys), 101122)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 101122)

    @require_torch
    def test_prepare_batch(self):
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        expected_src_tokens = [0, 57, 3018, 70307, 91, 2]

        batch = self.tokenizer(
            src_text, max_length=len(expected_src_tokens), padding=True, truncation=True, return_tensors="pt"
        )
        self.assertIsInstance(batch, BatchEncoding)

        self.assertEqual((2, 6), batch.input_ids.shape)
        self.assertEqual((2, 6), batch.attention_mask.shape)
        result = batch.input_ids.tolist()[0]
        self.assertListEqual(expected_src_tokens, result)

    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

    @slow
    def test_tokenizer_integration(self):
        # fmt: off
lowercase_ = {"""input_ids""": [[0, 4_9_0, 1_4_3_2_8, 4_5_0_7, 3_5_4, 4_7, 4_3_6_6_9, 9_5, 2_5, 7_8_1_1_7, 2_0_2_1_5, 1_9_7_7_9, 1_9_0, 2_2, 4_0_0, 4, 3_5_3_4_3, 8_0_3_1_0, 6_0_3, 8_6, 2_4_9_3_7, 1_0_5, 3_3_4_3_8, 9_4_7_6_2, 1_9_6, 3_9_6_4_2, 7, 1_5, 1_5_9_3_3, 1_7_3, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 1_0_5_3_4, 8_7, 2_5, 6_6, 3_3_5_8, 1_9_6, 5_5_2_8_9, 8, 8_2_9_6_1, 8_1, 2_2_0_4, 7_5_2_0_3, 7, 1_5, 7_6_3, 1_2_9_5_6, 2_1_6, 1_7_8, 1_4_3_2_8, 9_5_9_5, 1_3_7_7, 6_9_6_9_3, 7, 4_4_8, 7_1_0_2_1, 1_9_6, 1_8_1_0_6, 1_4_3_7, 1_3_9_7_4, 1_0_8, 9_0_8_3, 4, 4_9_3_1_5, 7, 3_9, 8_6, 1_3_2_6, 2_7_9_3, 4_6_3_3_3, 4, 4_4_8, 1_9_6, 7_4_5_8_8, 7, 4_9_3_1_5, 7, 3_9, 2_1, 8_2_2, 3_8_4_7_0, 7_4, 2_1, 6_6_7_2_3, 6_2_4_8_0, 8, 2_2_0_5_0, 5, 2]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
        # fmt: on
        expected_encoding = lowercase_  # the token table kept verbatim above

        # moussaKam/mbarthez is a french model. So we also use french texts.
        sequences = [
            "Le transformeur est un modèle d'apprentissage profond introduit en 2017, "
            "utilisé principalement dans le domaine du traitement automatique des langues (TAL).",
            "À l'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus "
            "pour gérer des données séquentielles, telles que le langage naturel, pour des tâches "
            "telles que la traduction et la synthèse de texte.",
        ]

        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="moussaKam/mbarthez",
            revision="c2e4ecbca5e3cd2c37fe1ac285ca4fbdf1366fb6",
            sequences=sequences,
        )
| 313 | 1 |
'''simple docstring'''
import unittest
import numpy as np
from transformers import is_flax_available
from transformers.testing_utils import require_flax
from ..test_modeling_flax_common import ids_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.generation import (
FlaxForcedBOSTokenLogitsProcessor,
FlaxForcedEOSTokenLogitsProcessor,
FlaxLogitsProcessorList,
FlaxMinLengthLogitsProcessor,
FlaxTemperatureLogitsWarper,
FlaxTopKLogitsWarper,
FlaxTopPLogitsWarper,
)
@require_flax
class UpperCamelCase_ ( unittest.TestCase ):
    def _get_uniform_logits(self, batch_size, length):
UpperCAmelCase : Tuple = jnp.ones((batch_size, length) ) / length
return scores
    def test_temperature_dist_warper(self):
UpperCAmelCase : Dict = None
UpperCAmelCase : Dict = 20
UpperCAmelCase : Optional[Any] = self._get_uniform_logits(batch_size=2 , length=A )
# tweak scores to not be uniform anymore
UpperCAmelCase : Dict = scores.at[1, 5].set((1 / length) + 0.1 ) # peak, 1st batch
UpperCAmelCase : Union[str, Any] = scores.at[1, 10].set((1 / length) - 0.4 ) # valley, 1st batch
# compute softmax
UpperCAmelCase : Any = jax.nn.softmax(A , axis=-1 )
UpperCAmelCase : int = FlaxTemperatureLogitsWarper(temperature=0.5 )
UpperCAmelCase : Optional[int] = FlaxTemperatureLogitsWarper(temperature=1.3 )
UpperCAmelCase : Union[str, Any] = jax.nn.softmax(temp_dist_warper_sharper(A , scores.copy() , cur_len=A ) , axis=-1 )
UpperCAmelCase : int = jax.nn.softmax(temp_dist_warper_smoother(A , scores.copy() , cur_len=A ) , axis=-1 )
# uniform distribution stays uniform
self.assertTrue(jnp.allclose(probs[0, :] , warped_prob_sharp[0, :] , atol=1e-3 ) )
self.assertTrue(jnp.allclose(probs[0, :] , warped_prob_smooth[0, :] , atol=1e-3 ) )
# sharp peaks get higher, valleys get lower
self.assertLess(probs[1, :].max() , warped_prob_sharp[1, :].max() )
self.assertGreater(probs[1, :].min() , warped_prob_sharp[1, :].min() )
# smooth peaks get lower, valleys get higher
self.assertGreater(probs[1, :].max() , warped_prob_smooth[1, :].max() )
self.assertLess(probs[1, :].min() , warped_prob_smooth[1, :].min() )
    def test_top_k_dist_warper(self):
UpperCAmelCase : Optional[int] = None
UpperCAmelCase : int = 10
UpperCAmelCase : Optional[int] = 2
# create ramp distribution
UpperCAmelCase : Tuple = np.broadcast_to(np.arange(A )[None, :] , (batch_size, vocab_size) ).copy()
UpperCAmelCase : List[Any] = ramp_logits[1:, : vocab_size // 2] + vocab_size
UpperCAmelCase : List[Any] = FlaxTopKLogitsWarper(3 )
UpperCAmelCase : int = top_k_warp(A , A , cur_len=A )
# check that correct tokens are filtered
self.assertListEqual(jnp.isinf(scores[0] ).tolist() , 7 * [True] + 3 * [False] )
self.assertListEqual(jnp.isinf(scores[1] ).tolist() , 2 * [True] + 3 * [False] + 5 * [True] )
# check special case
UpperCAmelCase : str = 5
UpperCAmelCase : str = FlaxTopKLogitsWarper(top_k=1 , filter_value=0.0 , min_tokens_to_keep=3 )
UpperCAmelCase : Dict = np.broadcast_to(np.arange(A )[None, :] , (batch_size, length) ).copy()
UpperCAmelCase : Tuple = top_k_warp_safety_check(A , A , cur_len=A )
# min_tokens overwrites k: 3 tokens are kept => 2 tokens are nullified
self.assertListEqual((scores == 0.0).sum(axis=-1 ).tolist() , [2, 2] )
def _lowercase( self ) -> List[str]:
UpperCAmelCase : Optional[Any] = None
UpperCAmelCase : Tuple = 10
UpperCAmelCase : Dict = 2
# create distribution and take log (inverse to Softmax as taken in TopPLogitsWarper)
UpperCAmelCase : Dict = np.log(np.array([[0.3, 0.1, 0.1, 0.5], [0.1_5, 0.3, 0.3, 0.2_5]] ) )
UpperCAmelCase : Union[str, Any] = FlaxTopPLogitsWarper(0.8 )
UpperCAmelCase : Optional[Any] = np.exp(top_p_warp(A , A , cur_len=A ) )
# dist should be filtered to keep min num values so that sum is >= top_p
# exp (-inf) => 0
UpperCAmelCase : Union[str, Any] = np.array([[0.3, 0.0, 0.0, 0.5], [0.0, 0.3, 0.3, 0.2_5]] )
self.assertTrue(np.allclose(A , A , atol=1e-3 ) )
# check edge cases with negative and extreme logits
UpperCAmelCase : List[str] = np.broadcast_to(np.arange(A )[None, :] , (batch_size, vocab_size) ).copy() - (
vocab_size // 2
)
# make ramp_logits more extreme
UpperCAmelCase : Dict = ramp_logits[1] * 1_0_0.0
# make sure at least 2 tokens are kept
UpperCAmelCase : str = FlaxTopPLogitsWarper(0.9 , min_tokens_to_keep=2 , filter_value=0.0 )
UpperCAmelCase : int = top_p_warp(A , A , cur_len=A )
# first batch should keep three tokens, second batch would keep only 1, but due to `min_tokens_to_keep=2` keeps 2.
self.assertListEqual((filtered_dist != 0.0).sum(axis=-1 ).tolist() , [3, 2] )
def _lowercase( self ) -> Union[str, Any]:
UpperCAmelCase : List[str] = 20
UpperCAmelCase : str = 4
UpperCAmelCase : Any = 0
UpperCAmelCase : List[str] = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=A )
# check that min length is applied at length 5
UpperCAmelCase : Any = ids_tensor((batch_size, 20) , vocab_size=20 )
UpperCAmelCase : List[str] = 5
UpperCAmelCase : Dict = self._get_uniform_logits(A , A )
UpperCAmelCase : Dict = min_dist_processor(A , A , cur_len=A )
self.assertListEqual(scores_before_min_length[:, eos_token_id].tolist() , 4 * [-float("""inf""" )] )
# check that min length is not applied anymore at length 15
UpperCAmelCase : Optional[Any] = self._get_uniform_logits(A , A )
UpperCAmelCase : List[Any] = 15
UpperCAmelCase : int = min_dist_processor(A , A , cur_len=A )
self.assertFalse(jnp.isinf(A ).any() )
def _lowercase( self ) -> List[str]:
UpperCAmelCase : Union[str, Any] = 20
UpperCAmelCase : Tuple = 4
UpperCAmelCase : Optional[Any] = 0
UpperCAmelCase : int = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=A )
# check that all scores are -inf except the bos_token_id score
UpperCAmelCase : Dict = ids_tensor((batch_size, 1) , vocab_size=20 )
UpperCAmelCase : Union[str, Any] = 1
UpperCAmelCase : Union[str, Any] = self._get_uniform_logits(A , A )
UpperCAmelCase : Union[str, Any] = logits_processor(A , A , cur_len=A )
self.assertTrue(jnp.isneginf(scores[:, bos_token_id + 1 :] ).all() )
        self.assertListEqual(scores[:, bos_token_id].tolist() , 4 * [0] )  # score for bos_token_id should be zero
# check that bos_token_id is not forced if current length is greater than 1
UpperCAmelCase : List[Any] = 3
UpperCAmelCase : List[Any] = self._get_uniform_logits(A , A )
UpperCAmelCase : Optional[Any] = logits_processor(A , A , cur_len=A )
self.assertFalse(jnp.isinf(A ).any() )
def _lowercase( self ) -> int:
UpperCAmelCase : Optional[Any] = 20
UpperCAmelCase : int = 4
UpperCAmelCase : Union[str, Any] = 0
UpperCAmelCase : str = 5
UpperCAmelCase : List[str] = FlaxForcedEOSTokenLogitsProcessor(max_length=A , eos_token_id=A )
# check that all scores are -inf except the eos_token_id when max_length is reached
UpperCAmelCase : str = ids_tensor((batch_size, 4) , vocab_size=20 )
UpperCAmelCase : str = 4
UpperCAmelCase : Optional[Any] = self._get_uniform_logits(A , A )
UpperCAmelCase : Dict = logits_processor(A , A , cur_len=A )
self.assertTrue(jnp.isneginf(scores[:, eos_token_id + 1 :] ).all() )
self.assertListEqual(scores[:, eos_token_id].tolist() , 4 * [0] ) # score for eos_token_id should be zero
# check that eos_token_id is not forced if max_length is not reached
UpperCAmelCase : Tuple = 3
UpperCAmelCase : Union[str, Any] = self._get_uniform_logits(A , A )
UpperCAmelCase : str = logits_processor(A , A , cur_len=A )
self.assertFalse(jnp.isinf(A ).any() )
    def test_processor_list(self):
        batch_size = 4
        sequence_length = 10
        vocab_size = 15
        eos_token_id = 2
        bos_token_id = 1
        max_length = 15
        # dummy input_ids and scores
        input_ids = ids_tensor((batch_size, sequence_length), vocab_size)
        input_ids_comp = input_ids.copy()
        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores_comp = scores.copy()
        # instantiate all dist processors
        temp_dist_warp = FlaxTemperatureLogitsWarper(temperature=0.5)
        top_k_warp = FlaxTopKLogitsWarper(3)
        top_p_warp = FlaxTopPLogitsWarper(0.8)
        # instantiate all logits processors
        min_dist_proc = FlaxMinLengthLogitsProcessor(min_length=10, eos_token_id=eos_token_id)
        bos_dist_proc = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=bos_token_id)
        eos_dist_proc = FlaxForcedEOSTokenLogitsProcessor(max_length=max_length, eos_token_id=eos_token_id)
        cur_len = 10
        # no processor list
        scores = temp_dist_warp(input_ids, scores, cur_len=cur_len)
        scores = top_k_warp(input_ids, scores, cur_len=cur_len)
        scores = top_p_warp(input_ids, scores, cur_len=cur_len)
        scores = min_dist_proc(input_ids, scores, cur_len=cur_len)
        scores = bos_dist_proc(input_ids, scores, cur_len=cur_len)
        scores = eos_dist_proc(input_ids, scores, cur_len=cur_len)
        # with processor list
        processor = FlaxLogitsProcessorList(
            [temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc])
        scores_comp = processor(input_ids, scores_comp, cur_len=cur_len)
        # scores should be equal
        self.assertTrue(jnp.allclose(scores, scores_comp, atol=1e-3))
# input_ids should never be changed
self.assertListEqual(input_ids.tolist() , input_ids_comp.tolist() )
    def test_processor_list_jitted(self):
        batch_size = 4
        sequence_length = 10
        vocab_size = 15
        eos_token_id = 2
        bos_token_id = 1
        max_length = 15
        # dummy input_ids and scores
        input_ids = ids_tensor((batch_size, sequence_length), vocab_size)
        input_ids_comp = input_ids.copy()
        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores_comp = scores.copy()
        # instantiate all dist processors
        temp_dist_warp = FlaxTemperatureLogitsWarper(temperature=0.5)
        top_k_warp = FlaxTopKLogitsWarper(3)
        top_p_warp = FlaxTopPLogitsWarper(0.8)
        # instantiate all logits processors
        min_dist_proc = FlaxMinLengthLogitsProcessor(min_length=10, eos_token_id=eos_token_id)
        bos_dist_proc = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=bos_token_id)
        eos_dist_proc = FlaxForcedEOSTokenLogitsProcessor(max_length=max_length, eos_token_id=eos_token_id)
        cur_len = 10
        # no processor list
        def run_no_processor_list(input_ids, scores, cur_len):
            scores = temp_dist_warp(input_ids, scores, cur_len=cur_len)
            scores = top_k_warp(input_ids, scores, cur_len=cur_len)
            scores = top_p_warp(input_ids, scores, cur_len=cur_len)
            scores = min_dist_proc(input_ids, scores, cur_len=cur_len)
            scores = bos_dist_proc(input_ids, scores, cur_len=cur_len)
            scores = eos_dist_proc(input_ids, scores, cur_len=cur_len)
            return scores
        # with processor list
        def run_processor_list(input_ids, scores, cur_len):
            processor = FlaxLogitsProcessorList(
                [temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc])
            scores = processor(input_ids, scores, cur_len=cur_len)
            return scores
        jitted_run_no_processor_list = jax.jit(run_no_processor_list)
        jitted_run_processor_list = jax.jit(run_processor_list)
        scores = jitted_run_no_processor_list(input_ids, scores, cur_len)
        scores_comp = jitted_run_processor_list(input_ids, scores_comp, cur_len)
        # scores should be equal
        self.assertTrue(jnp.allclose(scores, scores_comp, atol=1e-3))
# input_ids should never be changed
self.assertListEqual(input_ids.tolist() , input_ids_comp.tolist() )
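# ---------------------------------------------------------------------------
# Illustrative sketch (added for clarity; not part of the original test file).
# The two tests above verify that applying warpers one by one is equivalent to
# composing them in a FlaxLogitsProcessorList. Outside a test harness the
# composition looks like this; it reuses the imports at the top of this file.
def _example_compose_processors():
    input_ids = jnp.zeros((2, 5), dtype="i4")  # dummy prompt, batch of 2, length 5
    scores = jnp.ones((2, 10)) / 10  # uniform logits over a 10-token vocabulary
    processors = FlaxLogitsProcessorList(
        [FlaxTemperatureLogitsWarper(temperature=0.7), FlaxTopKLogitsWarper(top_k=3)]
    )
    # processors are applied left to right; the Flax API also requires cur_len
    return processors(input_ids, scores, cur_len=5)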
| 265 |
'''simple docstring'''
import itertools
import json
import os
import unittest
from transformers import AddedToken, LongformerTokenizer, LongformerTokenizerFast
from transformers.models.longformer.tokenization_longformer import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class LongformerTokenizationTest(TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = LongformerTokenizer
    test_slow_tokenizer = True
    rust_tokenizer_class = LongformerTokenizerFast
    test_rust_tokenizer = True
    def setUp( self ):
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""\u0120""",
"""\u0120l""",
"""\u0120n""",
"""\u0120lo""",
"""\u0120low""",
"""er""",
"""\u0120lowest""",
"""\u0120newer""",
"""\u0120wider""",
"""<unk>""",
]
        vocab_tokens = dict(zip(vocab , range(len(vocab ) ) ) )
        merges = ["""#version: 0.2""", """\u0120 l""", """\u0120l o""", """\u0120lo w""", """e r""", """"""]
        self.special_tokens_map = {"""unk_token""": """<unk>"""}
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
        self.merges_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] )
        with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
            fp.write(json.dumps(vocab_tokens ) + """\n""" )
        with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp:
            fp.write("""\n""".join(merges ) )
    def get_tokenizer( self , **kwargs ):
        kwargs.update(self.special_tokens_map )
        return self.tokenizer_class.from_pretrained(self.tmpdirname , **kwargs )
    def get_rust_tokenizer( self , **kwargs ):
        kwargs.update(self.special_tokens_map )
        return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **kwargs )
def _lowercase( self , A ) -> Optional[int]:
UpperCAmelCase : Optional[Any] = """lower newer"""
UpperCAmelCase : Optional[int] = """lower newer"""
return input_text, output_text
def _lowercase( self ) -> Optional[Any]:
UpperCAmelCase : Tuple = self.tokenizer_class(self.vocab_file , self.merges_file , **self.special_tokens_map )
UpperCAmelCase : Dict = """lower newer"""
UpperCAmelCase : int = ["""l""", """o""", """w""", """er""", """\u0120""", """n""", """e""", """w""", """er"""]
UpperCAmelCase : Tuple = tokenizer.tokenize(A ) # , add_prefix_space=True)
self.assertListEqual(A , A )
UpperCAmelCase : Any = tokens + [tokenizer.unk_token]
UpperCAmelCase : Tuple = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(A ) , A )
def _lowercase( self ) -> Union[str, Any]:
UpperCAmelCase : str = self.get_tokenizer()
self.assertListEqual(tokenizer.encode("""Hello world!""" , add_special_tokens=A ) , [0, 31414, 232, 328, 2] )
self.assertListEqual(
tokenizer.encode("""Hello world! cécé herlolip 418""" , add_special_tokens=A ) , [0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2] , )
@slow
def _lowercase( self ) -> Optional[int]:
UpperCAmelCase : Any = self.tokenizer_class.from_pretrained("""allenai/longformer-base-4096""" )
UpperCAmelCase : List[Any] = tokenizer.encode("""sequence builders""" , add_special_tokens=A )
UpperCAmelCase : Optional[Any] = tokenizer.encode("""multi-sequence build""" , add_special_tokens=A )
UpperCAmelCase : List[str] = tokenizer.encode(
"""sequence builders""" , add_special_tokens=A , add_prefix_space=A )
UpperCAmelCase : List[str] = tokenizer.encode(
"""sequence builders""" , """multi-sequence build""" , add_special_tokens=A , add_prefix_space=A )
UpperCAmelCase : Union[str, Any] = tokenizer.build_inputs_with_special_tokens(A )
UpperCAmelCase : Any = tokenizer.build_inputs_with_special_tokens(A , A )
assert encoded_sentence == encoded_text_from_decode
assert encoded_pair == encoded_pair_from_decode
def _lowercase( self ) -> List[Any]:
UpperCAmelCase : str = self.get_tokenizer()
UpperCAmelCase : List[Any] = """Encode this sequence."""
UpperCAmelCase : List[str] = tokenizer.byte_encoder[""" """.encode("""utf-8""" )[0]]
# Testing encoder arguments
UpperCAmelCase : Union[str, Any] = tokenizer.encode(A , add_special_tokens=A , add_prefix_space=A )
UpperCAmelCase : Dict = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertNotEqual(A , A )
UpperCAmelCase : Tuple = tokenizer.encode(A , add_special_tokens=A , add_prefix_space=A )
UpperCAmelCase : int = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertEqual(A , A )
tokenizer.add_special_tokens({"""bos_token""": """<s>"""} )
UpperCAmelCase : int = tokenizer.encode(A , add_special_tokens=A )
UpperCAmelCase : List[Any] = tokenizer.convert_ids_to_tokens(encoded[1] )[0]
self.assertNotEqual(A , A )
# Testing spaces after special tokens
UpperCAmelCase : Union[str, Any] = """<mask>"""
tokenizer.add_special_tokens(
{"""mask_token""": AddedToken(A , lstrip=A , rstrip=A )} ) # mask token has a left space
UpperCAmelCase : str = tokenizer.convert_tokens_to_ids(A )
UpperCAmelCase : Union[str, Any] = """Encode <mask> sequence"""
UpperCAmelCase : Union[str, Any] = """Encode <mask>sequence"""
UpperCAmelCase : Union[str, Any] = tokenizer.encode(A )
UpperCAmelCase : Union[str, Any] = encoded.index(A )
UpperCAmelCase : List[str] = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertEqual(A , A )
UpperCAmelCase : Tuple = tokenizer.encode(A )
UpperCAmelCase : Optional[int] = encoded.index(A )
UpperCAmelCase : Optional[int] = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertNotEqual(A , A )
def _lowercase( self ) -> Optional[int]:
pass
def _lowercase( self ) -> Any:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
UpperCAmelCase : str = self.rust_tokenizer_class.from_pretrained(A , **A )
UpperCAmelCase : int = self.tokenizer_class.from_pretrained(A , **A )
UpperCAmelCase : Dict = """A, <mask> AllenNLP sentence."""
UpperCAmelCase : Dict = tokenizer_r.encode_plus(A , add_special_tokens=A , return_token_type_ids=A )
UpperCAmelCase : Tuple = tokenizer_p.encode_plus(A , add_special_tokens=A , return_token_type_ids=A )
# token_type_ids should put 0 everywhere
self.assertEqual(sum(tokens_r["""token_type_ids"""] ) , sum(tokens_p["""token_type_ids"""] ) )
# attention_mask should put 1 everywhere, so sum over length should be 1
self.assertEqual(
sum(tokens_r["""attention_mask"""] ) / len(tokens_r["""attention_mask"""] ) , sum(tokens_p["""attention_mask"""] ) / len(tokens_p["""attention_mask"""] ) , )
UpperCAmelCase : List[Any] = tokenizer_r.convert_ids_to_tokens(tokens_r["""input_ids"""] )
UpperCAmelCase : int = tokenizer_p.convert_ids_to_tokens(tokens_p["""input_ids"""] )
                # Rust correctly handles the space before the mask while Python doesn't
self.assertSequenceEqual(tokens_p["""input_ids"""] , [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2] )
self.assertSequenceEqual(tokens_r["""input_ids"""] , [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2] )
self.assertSequenceEqual(
A , ["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""] )
self.assertSequenceEqual(
A , ["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""] )
def _lowercase( self ) -> List[Any]:
for trim_offsets, add_prefix_space in itertools.product([True, False] , repeat=2 ):
UpperCAmelCase : Optional[int] = self.rust_tokenizer_class.from_pretrained(
self.tmpdirname , use_fast=A , add_prefix_space=A , trim_offsets=A )
UpperCAmelCase : Optional[int] = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__() )
UpperCAmelCase : Optional[int] = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__() )
self.assertEqual(pre_tokenizer_state["""add_prefix_space"""] , A )
self.assertEqual(post_processor_state["""add_prefix_space"""] , A )
self.assertEqual(post_processor_state["""trim_offsets"""] , A )
def _lowercase( self ) -> Optional[Any]:
# Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space` and
# `trim_offsets`
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
UpperCAmelCase : Union[str, Any] = """hello""" # `hello` is a token in the vocabulary of `pretrained_name`
UpperCAmelCase : int = f'''{text_of_1_token} {text_of_1_token}'''
UpperCAmelCase : List[Any] = self.rust_tokenizer_class.from_pretrained(
A , use_fast=A , add_prefix_space=A , trim_offsets=A )
UpperCAmelCase : str = tokenizer_r(A , return_offsets_mapping=A , add_special_tokens=A )
self.assertEqual(encoding.offset_mapping[0] , (0, len(A )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(A ) + 1, len(A ) + 1 + len(A )) , )
UpperCAmelCase : Optional[Any] = self.rust_tokenizer_class.from_pretrained(
A , use_fast=A , add_prefix_space=A , trim_offsets=A )
UpperCAmelCase : Dict = tokenizer_r(A , return_offsets_mapping=A , add_special_tokens=A )
self.assertEqual(encoding.offset_mapping[0] , (0, len(A )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(A ) + 1, len(A ) + 1 + len(A )) , )
UpperCAmelCase : int = self.rust_tokenizer_class.from_pretrained(
A , use_fast=A , add_prefix_space=A , trim_offsets=A )
UpperCAmelCase : List[Any] = tokenizer_r(A , return_offsets_mapping=A , add_special_tokens=A )
self.assertEqual(encoding.offset_mapping[0] , (0, len(A )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(A ), len(A ) + 1 + len(A )) , )
UpperCAmelCase : Any = self.rust_tokenizer_class.from_pretrained(
A , use_fast=A , add_prefix_space=A , trim_offsets=A )
UpperCAmelCase : str = tokenizer_r(A , return_offsets_mapping=A , add_special_tokens=A )
self.assertEqual(encoding.offset_mapping[0] , (0, len(A )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(A ), len(A ) + 1 + len(A )) , )
UpperCAmelCase : Optional[Any] = f''' {text}'''
# tokenizer_r = self.rust_tokenizer_class.from_pretrained(
# pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
# )
# encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
# self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
# self.assertEqual(
# encoding.offset_mapping[1],
# (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
# )
UpperCAmelCase : Any = self.rust_tokenizer_class.from_pretrained(
A , use_fast=A , add_prefix_space=A , trim_offsets=A )
UpperCAmelCase : str = tokenizer_r(A , return_offsets_mapping=A , add_special_tokens=A )
self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(A )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(A ) + 1, 1 + len(A ) + 1 + len(A )) , )
UpperCAmelCase : Optional[int] = self.rust_tokenizer_class.from_pretrained(
A , use_fast=A , add_prefix_space=A , trim_offsets=A )
UpperCAmelCase : Union[str, Any] = tokenizer_r(A , return_offsets_mapping=A , add_special_tokens=A )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(A )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(A ), 1 + len(A ) + 1 + len(A )) , )
UpperCAmelCase : Union[str, Any] = self.rust_tokenizer_class.from_pretrained(
A , use_fast=A , add_prefix_space=A , trim_offsets=A )
UpperCAmelCase : Optional[int] = tokenizer_r(A , return_offsets_mapping=A , add_special_tokens=A )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(A )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(A ), 1 + len(A ) + 1 + len(A )) , )
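# ---------------------------------------------------------------------------
# Illustrative sketch (added for clarity; not part of the original test file).
# A compact summary of what the offset assertions above encode for the input
# "hello hello": with trim_offsets=True the separating space is excluded from
# the second token's span, with trim_offsets=False it is included in it.
def _example_offset_semantics(token: str = "hello"):
    n = len(token)
    trimmed = [(0, n), (n + 1, n + 1 + n)]  # space excluded from the 2nd span
    untrimmed = [(0, n), (n, n + 1 + n)]  # space kept in the 2nd span
    return trimmed, untrimmed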
| 265 | 1 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__a :int = logging.get_logger(__name__)
__a :Optional[int] = {
"""facebook/data2vec-text-base""": """https://huggingface.co/data2vec/resolve/main/config.json""",
}
class Data2VecTextConfig(PretrainedConfig ):
    model_type = "data2vec-text"

    def __init__( self , vocab_size=30522 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1e-12 , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , position_embedding_type="absolute" , use_cache=True , classifier_dropout=None , **kwargs , ):
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
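# ---------------------------------------------------------------------------
# Illustrative usage (added for clarity; not part of the original file).
# Data2VecTextOnnxConfig is defined just below; the reference resolves when
# the function is called.
def _example_config_usage():
    config = Data2VecTextConfig(vocab_size=30522, hidden_size=768)
    onnx_config = Data2VecTextOnnxConfig(config, task="default")
    # for the default task this yields dynamic batch/sequence axes, e.g.
    # OrderedDict([("input_ids", {0: "batch", 1: "sequence"}), ...])
    return onnx_config.inputs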
class Data2VecTextOnnxConfig(OnnxConfig ):
    @property
    def inputs( self ):
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
] ) | 352 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
'configuration_biogpt': ['BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'BioGptConfig'],
'tokenization_biogpt': ['BioGptTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_biogpt"] = [
'BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST',
'BioGptForCausalLM',
'BioGptForTokenClassification',
'BioGptForSequenceClassification',
'BioGptModel',
'BioGptPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_biogpt import BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP, BioGptConfig
from .tokenization_biogpt import BioGptTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_biogpt import (
BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST,
BioGptForCausalLM,
BioGptForSequenceClassification,
BioGptForTokenClassification,
BioGptModel,
BioGptPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) | 329 | 0 |
import logging
from dataclasses import dataclass, field
from typing import Optional
from seqaseq_trainer import arg_to_scheduler
from transformers import TrainingArguments
__lowerCAmelCase : List[str] = logging.getLogger(__name__)
@dataclass
class Seq2SeqTrainingArguments(TrainingArguments ):
    """Extra command-line arguments for legacy seq2seq fine-tuning."""

    label_smoothing: Optional[float] = field(
        default=0.0 , metadata={"""help""": """The label smoothing epsilon to apply (if not zero)."""} )
    sortish_sampler: bool = field(default=False , metadata={"""help""": """Whether to use SortishSampler or not."""} )
    predict_with_generate: bool = field(
        default=False , metadata={"""help""": """Whether to use generate to calculate generative metrics (ROUGE, BLEU)."""} )
    adafactor: bool = field(default=False , metadata={"""help""": """whether to use adafactor"""} )
    encoder_layerdrop: Optional[float] = field(
        default=None , metadata={"""help""": """Encoder layer dropout probability. Goes into model.config."""} )
    decoder_layerdrop: Optional[float] = field(
        default=None , metadata={"""help""": """Decoder layer dropout probability. Goes into model.config."""} )
    dropout: Optional[float] = field(default=None , metadata={"""help""": """Dropout probability. Goes into model.config."""} )
    attention_dropout: Optional[float] = field(
        default=None , metadata={"""help""": """Attention dropout probability. Goes into model.config."""} )
    lr_scheduler: Optional[str] = field(
        default="""linear""" , metadata={"""help""": f"""Which lr scheduler to use. Selected in {sorted(arg_to_scheduler.keys() )}"""} , )
| 88 |
import string
# frequency taken from https://en.wikipedia.org/wiki/Letter_frequency
__lowerCAmelCase : Optional[int] = {
'E': 12.70,
'T': 9.06,
'A': 8.17,
'O': 7.51,
'I': 6.97,
'N': 6.75,
'S': 6.33,
'H': 6.09,
'R': 5.99,
'D': 4.25,
'L': 4.03,
'C': 2.78,
'U': 2.76,
'M': 2.41,
'W': 2.36,
'F': 2.23,
'G': 2.02,
'Y': 1.97,
'P': 1.93,
'B': 1.29,
'V': 0.98,
'K': 0.77,
'J': 0.15,
'X': 0.15,
'Q': 0.10,
'Z': 0.07,
}
__lowerCAmelCase : Optional[Any] = 'ETAOINSHRDLCUMWFGYPBVKJXQZ'
__lowerCAmelCase : Optional[Any] = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
def get_letter_count( message ):
    """Count how many times each uppercase letter appears in the message."""
    letter_count = {letter: 0 for letter in string.ascii_uppercase}
    for letter in message.upper():
        if letter in LETTERS:
            letter_count[letter] += 1
    return letter_count
def get_item_at_index_zero( x ):
    """Sort key helper: return the first element of a pair."""
    return x[0]
def get_frequency_order( message ):
    """Return the letters of the message ordered from most to least frequent."""
    letter_to_freq = get_letter_count(message )
    freq_to_letter = {
        freq: [] for letter, freq in letter_to_freq.items()
    }
    for letter in LETTERS:
        freq_to_letter[letter_to_freq[letter]].append(letter )
    freq_to_letter_str = {}
    for freq in freq_to_letter:
        freq_to_letter[freq].sort(key=ETAOIN.find, reverse=True )
        freq_to_letter_str[freq] = "".join(freq_to_letter[freq] )
    freq_pairs = list(freq_to_letter_str.items() )
    freq_pairs.sort(key=get_item_at_index_zero, reverse=True )
    freq_order = [freq_pair[1] for freq_pair in freq_pairs]
    return "".join(freq_order )
def english_freq_match_score( message ):
    """Score (0-12) how well the message's letter frequencies match English."""
    freq_order = get_frequency_order(message )
    match_score = 0
    for common_letter in ETAOIN[:6]:
        if common_letter in freq_order[:6]:
            match_score += 1
    for uncommon_letter in ETAOIN[-6:]:
        if uncommon_letter in freq_order[-6:]:
            match_score += 1
    return match_score
if __name__ == "__main__":
import doctest
doctest.testmod()
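# ---------------------------------------------------------------------------
# Illustrative usage (added for clarity; not part of the original file):
# scoring a sample text against the expected English frequency ordering.
# A score close to 12 means the six most and six least frequent letters of
# the text line up with ETAOIN, which is typical of English plaintext.
def _example_match_score():
    sample = "Alan Mathison Turing was an English mathematician and computer scientist."
    return english_freq_match_score(sample)  # an int between 0 and 12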
| 88 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
'''configuration_swiftformer''': [
'''SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''SwiftFormerConfig''',
'''SwiftFormerOnnxConfig''',
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_swiftformer"] = [
'''SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''SwiftFormerForImageClassification''',
'''SwiftFormerModel''',
'''SwiftFormerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_swiftformer import (
SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
SwiftFormerConfig,
SwiftFormerOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swiftformer import (
SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
SwiftFormerForImageClassification,
SwiftFormerModel,
SwiftFormerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
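# ---------------------------------------------------------------------------
# Illustrative sketch (added for clarity; not part of the original file).
# The pattern above defers the heavy torch imports: at import time only the
# import structure is registered, and the module in sys.modules is swapped for
# a _LazyModule that imports a submodule on first attribute access. A tiny
# self-contained imitation of the idea (toy code, not the real class):
import importlib
import types
class _TinyLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # map every exported attribute to the submodule that defines it
        self._attr_to_module = {
            attr: mod for mod, attrs in import_structure.items() for attr in attrs
        }
    def __getattr__(self, attr):
        # triggered only for attributes not found normally, i.e. on first use
        module = importlib.import_module("." + self._attr_to_module[attr], self.__name__)
        return getattr(module, attr)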
| 288 |
from __future__ import annotations
def two_pointer( nums , target ) -> list[int]:
    # assumes `nums` is sorted in ascending order
    i = 0
    j = len(nums ) - 1
    while i < j:
        if nums[i] + nums[j] == target:
            return [i, j]
        elif nums[i] + nums[j] < target:
            i = i + 1
        else:
            j = j - 1
    return []
if __name__ == "__main__":
import doctest
doctest.testmod()
print(F'''{two_pointer([2, 7, 11, 15], 9) = }''')
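# ---------------------------------------------------------------------------
# Note (added for clarity): the two-pointer scan above assumes `nums` is
# sorted ascending and runs in O(n) time with O(1) extra space. For unsorted
# input, a hash-map pass gives the same O(n) time at O(n) extra space:
def _two_sum_unsorted(nums: list[int], target: int) -> list[int]:
    seen: dict[int, int] = {}  # value -> index
    for i, value in enumerate(nums):
        if target - value in seen:
            return [seen[target - value], i]
        seen[value] = i
    return []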
| 288 | 1 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__lowerCamelCase = logging.get_logger(__name__)
__lowerCamelCase = {
"""distilbert-base-uncased""": """https://huggingface.co/distilbert-base-uncased/resolve/main/config.json""",
"""distilbert-base-uncased-distilled-squad""": (
"""https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/config.json"""
),
"""distilbert-base-cased""": """https://huggingface.co/distilbert-base-cased/resolve/main/config.json""",
"""distilbert-base-cased-distilled-squad""": (
"""https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/config.json"""
),
"""distilbert-base-german-cased""": """https://huggingface.co/distilbert-base-german-cased/resolve/main/config.json""",
"""distilbert-base-multilingual-cased""": (
"""https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/config.json"""
),
"""distilbert-base-uncased-finetuned-sst-2-english""": (
"""https://huggingface.co/distilbert-base-uncased-finetuned-sst-2-english/resolve/main/config.json"""
),
}
class DistilBertConfig( PretrainedConfig ):
    model_type = "distilbert"
    attribute_map = {
        "hidden_size": "dim",
        "num_attention_heads": "n_heads",
        "num_hidden_layers": "n_layers",
    }
    def __init__(self , vocab_size=30522 , max_position_embeddings=512 , sinusoidal_pos_embds=False , n_layers=6 , n_heads=12 , dim=768 , hidden_dim=4 * 768 , dropout=0.1 , attention_dropout=0.1 , activation="gelu" , initializer_range=0.02 , qa_dropout=0.1 , seq_classif_dropout=0.2 , pad_token_id=0 , **kwargs , ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.sinusoidal_pos_embds = sinusoidal_pos_embds
        self.n_layers = n_layers
        self.n_heads = n_heads
        self.dim = dim
        self.hidden_dim = hidden_dim
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation = activation
        self.initializer_range = initializer_range
        self.qa_dropout = qa_dropout
        self.seq_classif_dropout = seq_classif_dropout
        super().__init__(**kwargs , pad_token_id=pad_token_id )
class DistilBertOnnxConfig( OnnxConfig ):
    @property
    def inputs(self ) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
] )
| 59 |
import math
import sys
def minimum_squares_to_represent_a_number( number ):
    if number != int(number ):
        raise ValueError("the value of input must be a natural number" )
    if number < 0:
        raise ValueError("the value of input must not be a negative number" )
    if number == 0:
        return 1
    answers = [-1] * (number + 1)
    answers[0] = 0
    for i in range(1 , number + 1 ):
        answer = sys.maxsize
        root = int(math.sqrt(i ) )
        for j in range(1 , root + 1 ):
            current_answer = 1 + answers[i - (j**2)]
            answer = min(answer , current_answer )
        answers[i] = answer
    return answers[number]
if __name__ == "__main__":
import doctest
doctest.testmod()
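# ---------------------------------------------------------------------------
# Worked example (added for clarity): by Lagrange's four-square theorem every
# natural number is a sum of at most four squares, so for n >= 1 the dynamic
# program above always returns a value in {1, 2, 3, 4}.
def _example_minimum_squares():
    assert minimum_squares_to_represent_a_number(12) == 3  # 4 + 4 + 4
    assert minimum_squares_to_represent_a_number(13) == 2  # 4 + 9
    assert minimum_squares_to_represent_a_number(16) == 1  # 16 = 4**2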
| 13 | 0 |
"""simple docstring"""
import math
from enum import Enum
from typing import Optional, Union
from torch.optim import Optimizer
from torch.optim.lr_scheduler import LambdaLR
from .utils import logging
_lowerCAmelCase : Union[str, Any] = logging.get_logger(__name__)
class lowerCAmelCase__ ( __magic_name__ ):
SCREAMING_SNAKE_CASE_ ='''linear'''
SCREAMING_SNAKE_CASE_ ='''cosine'''
SCREAMING_SNAKE_CASE_ ='''cosine_with_restarts'''
SCREAMING_SNAKE_CASE_ ='''polynomial'''
SCREAMING_SNAKE_CASE_ ='''constant'''
SCREAMING_SNAKE_CASE_ ='''constant_with_warmup'''
SCREAMING_SNAKE_CASE_ ='''piecewise_constant'''
def get_constant_schedule(optimizer: Optimizer, last_epoch: int = -1):
    """Create a schedule with a constant learning rate."""
    return LambdaLR(optimizer, lambda _: 1, last_epoch=last_epoch)
def get_constant_schedule_with_warmup(optimizer: Optimizer, num_warmup_steps: int, last_epoch: int = -1):
    """Constant learning rate preceded by a linear warmup."""

    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1.0, num_warmup_steps))
        return 1.0

    return LambdaLR(optimizer, lr_lambda, last_epoch=last_epoch)
def get_piecewise_constant_schedule(optimizer: Optimizer, step_rules: str, last_epoch: int = -1):
    """Piecewise constant schedule, parsed from rules like "1:10,0.1:20,0.01"."""
    rules_dict = {}
    rule_list = step_rules.split(",")
    for rule_str in rule_list[:-1]:
        value_str, value = rule_str.split(":")
        steps = int(value_str)
        value = float(value)
        rules_dict[steps] = value
    last_lr_multiple = float(rule_list[-1])

    def create_rules_function(rules_dict, last_lr_multiple):
        def rule_func(steps: int) -> float:
            sorted_steps = sorted(rules_dict.keys())
            for i, sorted_step in enumerate(sorted_steps):
                if steps < sorted_step:
                    return rules_dict[sorted_steps[i]]
            return last_lr_multiple

        return rule_func

    rules_func = create_rules_function(rules_dict, last_lr_multiple)
    return LambdaLR(optimizer, rules_func, last_epoch=last_epoch)
def get_linear_schedule_with_warmup(optimizer, num_warmup_steps, num_training_steps, last_epoch=-1):
    """Linear warmup followed by a linear decay to 0."""

    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        return max(
            0.0, float(num_training_steps - current_step) / float(max(1, num_training_steps - num_warmup_steps))
        )

    return LambdaLR(optimizer, lr_lambda, last_epoch)
def get_cosine_schedule_with_warmup(optimizer: Optimizer, num_warmup_steps: int, num_training_steps: int, num_cycles: float = 0.5, last_epoch: int = -1):
    """Linear warmup followed by a cosine decay."""

    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps))
        return max(0.0, 0.5 * (1.0 + math.cos(math.pi * float(num_cycles) * 2.0 * progress)))

    return LambdaLR(optimizer, lr_lambda, last_epoch)
def get_cosine_with_hard_restarts_schedule_with_warmup(optimizer: Optimizer, num_warmup_steps: int, num_training_steps: int, num_cycles: int = 1, last_epoch: int = -1):
    """Linear warmup followed by a cosine schedule with `num_cycles` hard restarts."""

    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps))
        if progress >= 1.0:
            return 0.0
        return max(0.0, 0.5 * (1.0 + math.cos(math.pi * ((float(num_cycles) * progress) % 1.0))))

    return LambdaLR(optimizer, lr_lambda, last_epoch)
def get_polynomial_decay_schedule_with_warmup(optimizer, num_warmup_steps, num_training_steps, lr_end=1e-7, power=1.0, last_epoch=-1):
    """Linear warmup followed by a polynomial decay from the initial lr to `lr_end`."""
    lr_init = optimizer.defaults["lr"]
    if not (lr_init > lr_end):
        raise ValueError(f'lr_end ({lr_end}) must be smaller than initial lr ({lr_init})')

    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        elif current_step > num_training_steps:
            return lr_end / lr_init  # as LambdaLR multiplies by lr_init
        else:
            lr_range = lr_init - lr_end
            decay_steps = num_training_steps - num_warmup_steps
            pct_remaining = 1 - (current_step - num_warmup_steps) / decay_steps
            decay = lr_range * pct_remaining**power + lr_end
            return decay / lr_init  # as LambdaLR multiplies by lr_init

    return LambdaLR(optimizer, lr_lambda, last_epoch)
_lowerCAmelCase : Optional[int] = {
SchedulerType.LINEAR: get_linear_schedule_with_warmup,
SchedulerType.COSINE: get_cosine_schedule_with_warmup,
SchedulerType.COSINE_WITH_RESTARTS: get_cosine_with_hard_restarts_schedule_with_warmup,
SchedulerType.POLYNOMIAL: get_polynomial_decay_schedule_with_warmup,
SchedulerType.CONSTANT: get_constant_schedule,
SchedulerType.CONSTANT_WITH_WARMUP: get_constant_schedule_with_warmup,
SchedulerType.PIECEWISE_CONSTANT: get_piecewise_constant_schedule,
}
def get_scheduler(name: Union[str, SchedulerType], optimizer: Optimizer, step_rules: Optional[str] = None, num_warmup_steps: Optional[int] = None, num_training_steps: Optional[int] = None, num_cycles: int = 1, power: float = 1.0, last_epoch: int = -1, ):
    """Unified helper that builds any of the schedulers above from its name."""
    name = SchedulerType(name)
    schedule_func = TYPE_TO_SCHEDULER_FUNCTION[name]
    if name == SchedulerType.CONSTANT:
        return schedule_func(optimizer, last_epoch=last_epoch)
    if name == SchedulerType.PIECEWISE_CONSTANT:
        return schedule_func(optimizer, step_rules=step_rules, last_epoch=last_epoch)
    # All other schedulers require `num_warmup_steps`
    if num_warmup_steps is None:
        raise ValueError(f'{name} requires `num_warmup_steps`, please provide that argument.')
    if name == SchedulerType.CONSTANT_WITH_WARMUP:
        return schedule_func(optimizer, num_warmup_steps=num_warmup_steps, last_epoch=last_epoch)
    # All other schedulers require `num_training_steps`
    if num_training_steps is None:
        raise ValueError(f'{name} requires `num_training_steps`, please provide that argument.')
    if name == SchedulerType.COSINE_WITH_RESTARTS:
        return schedule_func(
            optimizer, num_warmup_steps=num_warmup_steps, num_training_steps=num_training_steps, num_cycles=num_cycles, last_epoch=last_epoch, )
    if name == SchedulerType.POLYNOMIAL:
        return schedule_func(
            optimizer, num_warmup_steps=num_warmup_steps, num_training_steps=num_training_steps, power=power, last_epoch=last_epoch, )
    return schedule_func(
        optimizer, num_warmup_steps=num_warmup_steps, num_training_steps=num_training_steps, last_epoch=last_epoch)
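# ---------------------------------------------------------------------------
# Illustrative usage (added for clarity; not part of the original file):
# a linear warmup + decay schedule for a torch optimizer; values are arbitrary.
def _example_get_scheduler():
    import torch

    model = torch.nn.Linear(4, 4)
    optimizer = torch.optim.AdamW(model.parameters(), lr=1e-4)
    scheduler = get_scheduler("linear", optimizer, num_warmup_steps=100, num_training_steps=1000)
    optimizer.step()
    scheduler.step()  # call once per training step, after optimizer.step()
    return scheduler.get_last_lr()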
| 298 |
"""simple docstring"""
import argparse
import re
from typing import Dict
import torch
from datasets import Audio, Dataset, load_dataset, load_metric
from transformers import AutoFeatureExtractor, pipeline
def log_results(result: Dataset, args: Dict[str, str]):
    """Compute WER/CER for the predictions and write them to result files."""
    log_outputs = args.log_outputs
    dataset_id = "_".join(args.dataset.split("/") + [args.config, args.split])
    # load metric
    wer = load_metric("wer")
    cer = load_metric("cer")
    # compute metrics
    wer_result = wer.compute(references=result["target"], predictions=result["prediction"])
    cer_result = cer.compute(references=result["target"], predictions=result["prediction"])
    # print & log results
    result_str = f"WER: {wer_result}\nCER: {cer_result}"
    print(result_str)
    with open(f"{dataset_id}_eval_results.txt", "w") as f:
        f.write(result_str)
    # log all results in text file. Possibly interesting for analysis
    if log_outputs is not None:
        pred_file = f"log_{dataset_id}_predictions.txt"
        target_file = f"log_{dataset_id}_targets.txt"
        with open(pred_file, "w") as p, open(target_file, "w") as t:
            # mapping function to write output
            def write_to_file(batch, i):
                p.write(f"{i}" + "\n")
                p.write(batch["prediction"] + "\n")
                t.write(f"{i}" + "\n")
                t.write(batch["target"] + "\n")

            result.map(write_to_file, with_indices=True)
def normalize_text(text: str) -> str:
    """Lowercase and strip characters that were ignored during training."""
    chars_to_ignore_regex = "[,?.!\-\;\:\"“%‘”�—’…–]"  # noqa: W605 IMPORTANT: this should correspond to the chars that were ignored during training
    text = re.sub(chars_to_ignore_regex, "", text.lower())
    # In addition, we can normalize the target text, e.g. removing new lines characters etc...
    # note that order is important here!
    token_sequences_to_ignore = ["\n\n", "\n", "   ", "  "]
    for t in token_sequences_to_ignore:
        text = " ".join(text.split(t))
    return text
def main(args):
    """Run ASR inference on the requested dataset split and log the metrics."""
    dataset = load_dataset(args.dataset, args.config, split=args.split, use_auth_token=True)
    # for testing: only process the first two examples as a test
    # dataset = dataset.select(range(10))
    # load processor
    feature_extractor = AutoFeatureExtractor.from_pretrained(args.model_id)
    sampling_rate = feature_extractor.sampling_rate
    # resample audio
    dataset = dataset.cast_column("audio", Audio(sampling_rate=sampling_rate))
    # load eval pipeline
    if args.device is None:
        args.device = 0 if torch.cuda.is_available() else -1
    asr = pipeline("automatic-speech-recognition", model=args.model_id, device=args.device)
    # map function to decode audio
    def map_to_pred(batch):
        prediction = asr(
            batch["audio"]["array"], chunk_length_s=args.chunk_length_s, stride_length_s=args.stride_length_s
        )
        batch["prediction"] = prediction["text"]
        batch["target"] = normalize_text(batch["sentence"])
        return batch

    # run inference on all examples
    result = dataset.map(map_to_pred, remove_columns=dataset.column_names)
    # compute and log_results
    # do not change function below
    log_results(result, args)
if __name__ == "__main__":
_lowerCAmelCase : Any = argparse.ArgumentParser()
parser.add_argument(
"""--model_id""", type=str, required=True, help="""Model identifier. Should be loadable with 🤗 Transformers"""
)
parser.add_argument(
"""--dataset""",
type=str,
required=True,
help="""Dataset name to evaluate the `model_id`. Should be loadable with 🤗 Datasets""",
)
parser.add_argument(
"""--config""", type=str, required=True, help="""Config of the dataset. *E.g.* `'en'` for Common Voice"""
)
parser.add_argument("""--split""", type=str, required=True, help="""Split of the dataset. *E.g.* `'test'`""")
parser.add_argument(
"""--chunk_length_s""", type=float, default=None, help="""Chunk length in seconds. Defaults to 5 seconds."""
)
parser.add_argument(
"""--stride_length_s""", type=float, default=None, help="""Stride of the audio chunks. Defaults to 1 second."""
)
parser.add_argument(
"""--log_outputs""", action="""store_true""", help="""If defined, write outputs to log file for analysis."""
)
parser.add_argument(
"""--device""",
type=int,
default=None,
help="""The device to run the pipeline on. -1 for CPU (default), 0 for the first GPU and so on.""",
)
_lowerCAmelCase : Tuple = parser.parse_args()
main(args)
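# ---------------------------------------------------------------------------
# Illustrative invocation (added for clarity; not part of the original file).
# The model and dataset identifiers below are placeholders:
#
#   python eval.py \
#       --model_id some-org/wav2vec2-finetuned \
#       --dataset mozilla-foundation/common_voice_8_0 \
#       --config en \
#       --split test \
#       --chunk_length_s 5.0 \
#       --stride_length_s 1.0 \
#       --log_outputs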
| 298 | 1 |
import os
import zipfile
import pytest
from datasets.utils.extract import (
BzipaExtractor,
Extractor,
GzipExtractor,
LzaExtractor,
SevenZipExtractor,
TarExtractor,
XzExtractor,
ZipExtractor,
ZstdExtractor,
)
from .utils import require_lza, require_pyazr, require_zstandard
@pytest.mark.parametrize(
'''compression_format, is_archive''' , [
('''7z''', True),
('''bz2''', False),
('''gzip''', False),
('''lz4''', False),
('''tar''', True),
('''xz''', False),
('''zip''', True),
('''zstd''', False),
] , )
def test_base_extractors( compression_format , is_archive , bza_file , gz_file , lza_file , seven_zip_file , tar_file , xz_file , zip_file , zstd_file , tmp_path , text_file , ):
    input_paths_and_base_extractors = {
        "7z": (seven_zip_file, SevenZipExtractor),
        "bz2": (bza_file, BzipaExtractor),
        "gzip": (gz_file, GzipExtractor),
        "lz4": (lza_file, LzaExtractor),
        "tar": (tar_file, TarExtractor),
        "xz": (xz_file, XzExtractor),
        "zip": (zip_file, ZipExtractor),
        "zstd": (zstd_file, ZstdExtractor),
    }
    input_path , base_extractor = input_paths_and_base_extractors[compression_format]
    if input_path is None:
        reason = f"""for '{compression_format}' compression_format, """
        if compression_format == "7z":
            reason += require_pyazr.kwargs["reason"]
        elif compression_format == "lz4":
            reason += require_lza.kwargs["reason"]
        elif compression_format == "zstd":
            reason += require_zstandard.kwargs["reason"]
        pytest.skip(reason )
    assert base_extractor.is_extractable(input_path )
    output_path = tmp_path / ("extracted" if is_archive else "extracted.txt")
    base_extractor.extract(input_path , output_path )
    if is_archive:
        assert output_path.is_dir()
        for file_path in output_path.iterdir():
            assert file_path.name == text_file.name
            extracted_file_content = file_path.read_text(encoding="utf-8" )
    else:
        extracted_file_content = output_path.read_text(encoding="utf-8" )
    expected_file_content = text_file.read_text(encoding="utf-8" )
    assert extracted_file_content == expected_file_content
@pytest.mark.parametrize(
'''compression_format, is_archive''' , [
('''7z''', True),
('''bz2''', False),
('''gzip''', False),
('''lz4''', False),
('''tar''', True),
('''xz''', False),
('''zip''', True),
('''zstd''', False),
] , )
def test_extractor( compression_format , is_archive , bza_file , gz_file , lza_file , seven_zip_file , tar_file , xz_file , zip_file , zstd_file , tmp_path , text_file , ):
    input_paths = {
        "7z": seven_zip_file,
        "bz2": bza_file,
        "gzip": gz_file,
        "lz4": lza_file,
        "tar": tar_file,
        "xz": xz_file,
        "zip": zip_file,
        "zstd": zstd_file,
    }
    input_path = input_paths[compression_format]
    if input_path is None:
        reason = f"""for '{compression_format}' compression_format, """
        if compression_format == "7z":
            reason += require_pyazr.kwargs["reason"]
        elif compression_format == "lz4":
            reason += require_lza.kwargs["reason"]
        elif compression_format == "zstd":
            reason += require_zstandard.kwargs["reason"]
        pytest.skip(reason )
    extractor_format = Extractor.infer_extractor_format(input_path )
    assert extractor_format is not None
    output_path = tmp_path / ("extracted" if is_archive else "extracted.txt")
    Extractor.extract(input_path , output_path , extractor_format )
    if is_archive:
        assert output_path.is_dir()
        for file_path in output_path.iterdir():
            assert file_path.name == text_file.name
            extracted_file_content = file_path.read_text(encoding="utf-8" )
    else:
        extracted_file_content = output_path.read_text(encoding="utf-8" )
    expected_file_content = text_file.read_text(encoding="utf-8" )
    assert extracted_file_content == expected_file_content
@pytest.fixture
def tar_file_with_dot_dot( tmp_path , text_file ):
    import tarfile

    directory = tmp_path / "data_dot_dot"
    directory.mkdir()
    path = directory / "tar_file_with_dot_dot.tar"
    with tarfile.TarFile(path , "w" ) as f:
        f.add(text_file , arcname=os.path.join(".." , text_file.name ) )
    return path
@pytest.fixture
def tar_file_with_sym_link( tmp_path ):
    import tarfile

    directory = tmp_path / "data_sym_link"
    directory.mkdir()
    path = directory / "tar_file_with_sym_link.tar"
    os.symlink(".." , directory / "subdir" , target_is_directory=True )
    with tarfile.TarFile(path , "w" ) as f:
        f.add(str(directory / "subdir" ) , arcname="subdir" )  # str required by os.readlink on Windows and Python < 3.8
    return path
@pytest.mark.parametrize(
'''insecure_tar_file, error_log''' , [('''tar_file_with_dot_dot''', '''illegal path'''), ('''tar_file_with_sym_link''', '''Symlink''')] , )
def test_tar_extract_insecure_files( insecure_tar_file , error_log , tar_file_with_dot_dot , tar_file_with_sym_link , tmp_path , caplog ):
    insecure_tar_files = {
        "tar_file_with_dot_dot": tar_file_with_dot_dot,
        "tar_file_with_sym_link": tar_file_with_sym_link,
    }
    insecure_tar_file = insecure_tar_files[insecure_tar_file]
    output_path = tmp_path / "extracted"
    TarExtractor.extract(insecure_tar_file , output_path )
    assert caplog.text
    for record in caplog.records:
        assert record.levelname == "ERROR"
        assert error_log in record.msg
def test_is_zipfile_false_positive( tmpdir ):
    # We should have less false positives than zipfile.is_zipfile
    not_a_zip_file = tmpdir / "not_a_zip_file"
    # From: https://github.com/python/cpython/pull/5053
    data = (
        b"\x89PNG\r\n\x1a\n\x00\x00\x00\rIHDR\x00\x00\x00\x01\x00\x00"
        b"\x00\x02\x08\x06\x00\x00\x00\x99\x81\xb6'\x00\x00\x00\x15I"
        b"DATx\x01\x01\n\x00\xf5\xff\x00PK\x05\x06\x00PK\x06\x06\x07"
        b"\xac\x01N\xc6|a\r\x00\x00\x00\x00IEND\xaeB`\x82"
    )
    with not_a_zip_file.open("wb" ) as f:
        f.write(data )
    assert zipfile.is_zipfile(str(not_a_zip_file ) )  # is a false positive for `zipfile`
    assert not ZipExtractor.is_extractable(not_a_zip_file )  # but we're right
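# ---------------------------------------------------------------------------
# Illustrative note (added for clarity): zipfile.is_zipfile only looks for an
# end-of-central-directory signature (b"PK\x05\x06"), which can occur inside
# arbitrary data such as the PNG bytes above. Checking the start of the file
# for the local-file-header signature avoids that particular false positive:
def _starts_like_a_zip(path) -> bool:
    with open(path, "rb") as f:
        return f.read(4) == b"PK\x03\x04"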
| 313 |
import torch
from diffusers import EulerDiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest
class EulerDiscreteSchedulerTest(SchedulerCommonTest ):
    scheduler_classes = (EulerDiscreteScheduler,)
    num_inference_steps = 10
    def get_scheduler_config( self , **kwargs ):
        config = {
            "num_train_timesteps": 1100,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
        }
        config.update(**kwargs )
        return config
def __lowerCAmelCase ( self ) ->Tuple:
for timesteps in [10, 50, 100, 1000]:
self.check_over_configs(num_train_timesteps=_lowerCamelCase )
def __lowerCAmelCase ( self ) ->Any:
for beta_start, beta_end in zip([0.0_0_0_0_1, 0.0_0_0_1, 0.0_0_1] , [0.0_0_0_2, 0.0_0_2, 0.0_2] ):
self.check_over_configs(beta_start=_lowerCamelCase , beta_end=_lowerCamelCase )
def __lowerCAmelCase ( self ) ->int:
for schedule in ["linear", "scaled_linear"]:
self.check_over_configs(beta_schedule=_lowerCamelCase )
def __lowerCAmelCase ( self ) ->Optional[Any]:
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=_lowerCamelCase )
def __lowerCAmelCase ( self ) ->List[Any]:
SCREAMING_SNAKE_CASE : str = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE : int = self.get_scheduler_config()
SCREAMING_SNAKE_CASE : List[Any] = scheduler_class(**_lowerCamelCase )
scheduler.set_timesteps(self.num_inference_steps )
SCREAMING_SNAKE_CASE : Any = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : Any = self.dummy_model()
SCREAMING_SNAKE_CASE : int = self.dummy_sample_deter * scheduler.init_noise_sigma
SCREAMING_SNAKE_CASE : Any = sample.to(_lowerCamelCase )
for i, t in enumerate(scheduler.timesteps ):
SCREAMING_SNAKE_CASE : Union[str, Any] = scheduler.scale_model_input(_lowerCamelCase , _lowerCamelCase )
SCREAMING_SNAKE_CASE : Union[str, Any] = model(_lowerCamelCase , _lowerCamelCase )
SCREAMING_SNAKE_CASE : List[str] = scheduler.step(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , generator=_lowerCamelCase )
SCREAMING_SNAKE_CASE : Union[str, Any] = output.prev_sample
SCREAMING_SNAKE_CASE : List[Any] = torch.sum(torch.abs(_lowerCamelCase ) )
SCREAMING_SNAKE_CASE : List[Any] = torch.mean(torch.abs(_lowerCamelCase ) )
assert abs(result_sum.item() - 1_0.0_8_0_7 ) < 1e-2
assert abs(result_mean.item() - 0.0_1_3_1 ) < 1e-3
def __lowerCAmelCase ( self ) ->List[str]:
SCREAMING_SNAKE_CASE : Optional[int] = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_scheduler_config(prediction_type='''v_prediction''' )
SCREAMING_SNAKE_CASE : Tuple = scheduler_class(**_lowerCamelCase )
scheduler.set_timesteps(self.num_inference_steps )
SCREAMING_SNAKE_CASE : Tuple = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : int = self.dummy_model()
SCREAMING_SNAKE_CASE : List[str] = self.dummy_sample_deter * scheduler.init_noise_sigma
SCREAMING_SNAKE_CASE : List[str] = sample.to(_lowerCamelCase )
for i, t in enumerate(scheduler.timesteps ):
SCREAMING_SNAKE_CASE : str = scheduler.scale_model_input(_lowerCamelCase , _lowerCamelCase )
SCREAMING_SNAKE_CASE : List[Any] = model(_lowerCamelCase , _lowerCamelCase )
SCREAMING_SNAKE_CASE : Union[str, Any] = scheduler.step(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , generator=_lowerCamelCase )
SCREAMING_SNAKE_CASE : Optional[int] = output.prev_sample
SCREAMING_SNAKE_CASE : str = torch.sum(torch.abs(_lowerCamelCase ) )
SCREAMING_SNAKE_CASE : int = torch.mean(torch.abs(_lowerCamelCase ) )
assert abs(result_sum.item() - 0.0_0_0_2 ) < 1e-2
assert abs(result_mean.item() - 2.2676e-06 ) < 1e-3
def __lowerCAmelCase ( self ) ->Tuple:
SCREAMING_SNAKE_CASE : Optional[int] = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE : Dict = self.get_scheduler_config()
SCREAMING_SNAKE_CASE : Tuple = scheduler_class(**_lowerCamelCase )
scheduler.set_timesteps(self.num_inference_steps , device=_lowerCamelCase )
SCREAMING_SNAKE_CASE : int = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : Optional[Any] = self.dummy_model()
SCREAMING_SNAKE_CASE : str = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
SCREAMING_SNAKE_CASE : Optional[Any] = sample.to(_lowerCamelCase )
for t in scheduler.timesteps:
SCREAMING_SNAKE_CASE : Dict = scheduler.scale_model_input(_lowerCamelCase , _lowerCamelCase )
SCREAMING_SNAKE_CASE : List[Any] = model(_lowerCamelCase , _lowerCamelCase )
SCREAMING_SNAKE_CASE : Union[str, Any] = scheduler.step(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , generator=_lowerCamelCase )
SCREAMING_SNAKE_CASE : Optional[Any] = output.prev_sample
SCREAMING_SNAKE_CASE : List[Any] = torch.sum(torch.abs(_lowerCamelCase ) )
SCREAMING_SNAKE_CASE : str = torch.mean(torch.abs(_lowerCamelCase ) )
assert abs(result_sum.item() - 1_0.0_8_0_7 ) < 1e-2
assert abs(result_mean.item() - 0.0_1_3_1 ) < 1e-3
def __lowerCAmelCase ( self ) ->Optional[int]:
SCREAMING_SNAKE_CASE : Dict = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE : Optional[Any] = self.get_scheduler_config()
SCREAMING_SNAKE_CASE : List[Any] = scheduler_class(**_lowerCamelCase , use_karras_sigmas=_lowerCamelCase )
scheduler.set_timesteps(self.num_inference_steps , device=_lowerCamelCase )
SCREAMING_SNAKE_CASE : Optional[int] = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : Optional[int] = self.dummy_model()
SCREAMING_SNAKE_CASE : Dict = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
SCREAMING_SNAKE_CASE : int = sample.to(_lowerCamelCase )
for t in scheduler.timesteps:
SCREAMING_SNAKE_CASE : List[Any] = scheduler.scale_model_input(_lowerCamelCase , _lowerCamelCase )
SCREAMING_SNAKE_CASE : Union[str, Any] = model(_lowerCamelCase , _lowerCamelCase )
SCREAMING_SNAKE_CASE : Dict = scheduler.step(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , generator=_lowerCamelCase )
SCREAMING_SNAKE_CASE : int = output.prev_sample
SCREAMING_SNAKE_CASE : Optional[Any] = torch.sum(torch.abs(_lowerCamelCase ) )
SCREAMING_SNAKE_CASE : Any = torch.mean(torch.abs(_lowerCamelCase ) )
assert abs(result_sum.item() - 1_2_4.5_2_2_9_9_4_9_9_5_1_1_7_1_9 ) < 1e-2
assert abs(result_mean.item() - 0.1_6_2_1_3_9_3_2_6_3_3_3_9_9_9_6_3 ) < 1e-3
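# ---------------------------------------------------------------------------
# Illustrative sketch (added for clarity; not part of the original test file):
# the denoising-loop pattern exercised above, written as plain usage. `model`
# stands in for any noise-prediction module (e.g. a UNet) taking (sample, t).
def _example_euler_loop(model, sample, num_inference_steps=10):
    scheduler = EulerDiscreteScheduler(beta_start=0.0001, beta_end=0.02, beta_schedule="linear")
    scheduler.set_timesteps(num_inference_steps)
    sample = sample * scheduler.init_noise_sigma  # scale the initial noise
    for t in scheduler.timesteps:
        model_input = scheduler.scale_model_input(sample, t)
        noise_pred = model(model_input, t)
        sample = scheduler.step(noise_pred, t, sample).prev_sample
    return sample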
| 313 | 1 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
a_ : Dict = logging.get_logger(__name__)
a_ : Dict = {
"sail/poolformer_s12": "https://huggingface.co/sail/poolformer_s12/resolve/main/config.json",
# See all PoolFormer models at https://huggingface.co/models?filter=poolformer
}
class PoolFormerConfig(PretrainedConfig ):
    model_type = "poolformer"

    def __init__( self , num_channels=3 , patch_size=16 , stride=16 , pool_size=3 , mlp_ratio=4.0 , depths=[2, 2, 6, 2] , hidden_sizes=[64, 128, 320, 512] , patch_sizes=[7, 3, 3, 3] , strides=[4, 2, 2, 2] , padding=[2, 1, 1, 1] , num_encoder_blocks=4 , drop_path_rate=0.0 , hidden_act="gelu" , use_layer_scale=True , layer_scale_init_value=1e-5 , initializer_range=0.02 , **kwargs , ):
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.stride = stride
        self.padding = padding
        self.pool_size = pool_size
        self.hidden_sizes = hidden_sizes
        self.mlp_ratio = mlp_ratio
        self.depths = depths
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.num_encoder_blocks = num_encoder_blocks
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_layer_scale = use_layer_scale
        self.layer_scale_init_value = layer_scale_init_value
        self.initializer_range = initializer_range
        super().__init__(**kwargs )
class PoolFormerOnnxConfig(OnnxConfig ):
    torch_onnx_minimum_version = version.parse("1.11" )

    @property
    def inputs( self ) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ] )

    @property
    def atol_for_validation( self ) -> float:
        return 2e-3
| 363 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available
from ...utils import OptionalDependencyNotAvailable
a_ : List[Any] = {"configuration_gpt_neox": ["GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTNeoXConfig"]}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ : List[str] = ["GPTNeoXTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_gpt_neox"] = [
"GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST",
"GPTNeoXForCausalLM",
"GPTNeoXForQuestionAnswering",
"GPTNeoXForSequenceClassification",
"GPTNeoXForTokenClassification",
"GPTNeoXLayer",
"GPTNeoXModel",
"GPTNeoXPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_gpt_neox import GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXConfig
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_gpt_neox_fast import GPTNeoXTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neox import (
GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
GPTNeoXLayer,
GPTNeoXModel,
GPTNeoXPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
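# --- illustrative usage (not part of the original module) ---
# With _LazyModule, nothing under modeling_gpt_neox is imported until an
# attribute is first accessed; a sketch of a downstream import:
#
#   from transformers import GPTNeoXConfig, GPTNeoXForCausalLM  # resolved lazily
#   config = GPTNeoXConfig(hidden_size=64, num_hidden_layers=2, num_attention_heads=8)
#   model = GPTNeoXForCausalLM(config)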
| 104 | 0 |
import unittest
from transformers import AutoConfig, AutoTokenizer, BertConfig, RobertaConfig, TensorType, is_flax_available
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, require_flax, slow
if is_flax_available():
import jax
from transformers.models.auto.modeling_flax_auto import FlaxAutoModel
from transformers.models.bert.modeling_flax_bert import FlaxBertModel
from transformers.models.roberta.modeling_flax_roberta import FlaxRobertaModel
@require_flax
class FlaxAutoModelTest(unittest.TestCase):
    @slow
    def test_bert_from_pretrained(self):
        for model_name in ["bert-base-cased", "bert-large-uncased"]:
            with self.subTest(model_name):
                config = AutoConfig.from_pretrained(model_name)
                self.assertIsNotNone(config)
                self.assertIsInstance(config, BertConfig)

                model = FlaxAutoModel.from_pretrained(model_name)
                self.assertIsNotNone(model)
                self.assertIsInstance(model, FlaxBertModel)

    @slow
    def test_roberta_from_pretrained(self):
        for model_name in ["roberta-base", "roberta-large"]:
            with self.subTest(model_name):
                config = AutoConfig.from_pretrained(model_name)
                self.assertIsNotNone(config)
                self.assertIsInstance(config, RobertaConfig)

                model = FlaxAutoModel.from_pretrained(model_name)
                self.assertIsNotNone(model)
                self.assertIsInstance(model, FlaxRobertaModel)

    @slow
    def test_bert_jax_jit(self):
        for model_name in ["bert-base-cased", "bert-large-uncased"]:
            tokenizer = AutoTokenizer.from_pretrained(model_name)
            model = FlaxBertModel.from_pretrained(model_name)
            tokens = tokenizer("Do you support jax jitted function?", return_tensors=TensorType.JAX)

            @jax.jit
            def eval(**kwargs):
                return model(**kwargs)

            eval(**tokens).block_until_ready()

    @slow
    def test_roberta_jax_jit(self):
        for model_name in ["roberta-base", "roberta-large"]:
            tokenizer = AutoTokenizer.from_pretrained(model_name)
            model = FlaxRobertaModel.from_pretrained(model_name)
            tokens = tokenizer("Do you support jax jitted function?", return_tensors=TensorType.JAX)

            @jax.jit
            def eval(**kwargs):
                return model(**kwargs)

            eval(**tokens).block_until_ready()

    def test_repo_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, "bert-base is not a local folder and is not a valid model identifier"
        ):
            _ = FlaxAutoModel.from_pretrained("bert-base")

    def test_revision_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)"
        ):
            _ = FlaxAutoModel.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, revision="aaaaaa")

    def test_model_file_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError,
            "hf-internal-testing/config-no-model does not appear to have a file named flax_model.msgpack",
        ):
            _ = FlaxAutoModel.from_pretrained("hf-internal-testing/config-no-model")

    def test_model_from_pt_suggestion(self):
        with self.assertRaisesRegex(EnvironmentError, "Use `from_pt=True` to load this model"):
            _ = FlaxAutoModel.from_pretrained("hf-internal-testing/tiny-bert-pt-only")
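# --- illustrative usage (not part of the original test file) ---
# The happy path the tests above exercise, assuming Hub access and a Flax
# checkpoint for the requested architecture:
#
#   tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
#   model = FlaxAutoModel.from_pretrained("bert-base-cased")
#   outputs = model(**tokenizer("Hello world", return_tensors="np"))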
| 38 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''facebook/data2vec-text-base''': '''https://huggingface.co/data2vec/resolve/main/config.json''',
}
class Data2VecTextConfig(PretrainedConfig):
    model_type = "data2vec-text"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class Data2VecTextOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
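# --- illustrative usage (not part of the original module) ---
# A minimal sketch of the ONNX input axes exposed above for the default task:
if __name__ == "__main__":
    cfg = Data2VecTextConfig()
    print(Data2VecTextOnnxConfig(cfg).inputs)
    # OrderedDict([('input_ids', {0: 'batch', 1: 'sequence'}),
    #              ('attention_mask', {0: 'batch', 1: 'sequence'})])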
| 329 | 0 |
import argparse
import json
import os
import re
import shutil
import torch
from transformers import BioGptConfig, BioGptForCausalLM
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES
from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE
from transformers.utils import WEIGHTS_NAME, logging
logging.set_verbosity_warning()
json_indent = 2
class Dictionary:
    """A mapping from symbols to consecutive integers"""

    def __init__(
        self,
        *,  # begin keyword-only arguments
        bos="<s>",
        pad="<pad>",
        eos="</s>",
        unk="<unk>",
        extra_special_symbols=None,
    ):
        self.bos_word, self.unk_word, self.pad_word, self.eos_word = bos, unk, pad, eos
        self.symbols = []
        self.count = []
        self.indices = {}
        self.bos_index = self.add_symbol(bos)
        self.pad_index = self.add_symbol(pad)
        self.eos_index = self.add_symbol(eos)
        self.unk_index = self.add_symbol(unk)
        if extra_special_symbols:
            for s in extra_special_symbols:
                self.add_symbol(s)
        self.nspecial = len(self.symbols)

    def __eq__(self, other):
        return self.indices == other.indices

    def __getitem__(self, idx):
        if idx < len(self.symbols):
            return self.symbols[idx]
        return self.unk_word

    def __len__(self):
        return len(self.symbols)

    def __contains__(self, sym):
        return sym in self.indices

    @classmethod
    def load(cls, f):
        """Loads the dictionary from a text file."""
        d = cls()
        d.add_from_file(f)
        return d

    def add_symbol(self, word, n=1, overwrite=False):
        """Adds a word to the dictionary."""
        if word in self.indices and not overwrite:
            idx = self.indices[word]
            self.count[idx] = self.count[idx] + n
            return idx
        else:
            idx = len(self.symbols)
            self.indices[word] = idx
            self.symbols.append(word)
            self.count.append(n)
            return idx

    def _load_meta(self, lines):
        return 0

    def add_from_file(self, f):
        """Loads a pre-existing dictionary from a text file and adds its symbols to this instance."""
        if isinstance(f, str):
            try:
                with open(f, "r", encoding="utf-8") as fd:
                    self.add_from_file(fd)
            except FileNotFoundError as fnfe:
                raise fnfe
            except UnicodeError:
                raise Exception("Incorrect encoding detected in {}, please rebuild the dataset".format(f))
            return

        lines = f.readlines()
        indices_start_line = self._load_meta(lines)

        for line in lines[indices_start_line:]:
            try:
                line, field = line.rstrip().rsplit(" ", 1)
                if field == "#fairseq:overwrite":
                    overwrite = True
                    line, field = line.rsplit(" ", 1)
                else:
                    overwrite = False
                count = int(field)
                word = line
                if word in self and not overwrite:
                    raise RuntimeError(
                        "Duplicate word found when loading Dictionary: '{}'. "
                        "Duplicate words can overwrite earlier ones by adding the "
                        "#fairseq:overwrite flag at the end of the corresponding row "
                        "in the dictionary file. If using the Camembert model, please "
                        "download an updated copy of the model file.".format(word)
                    )
                self.add_symbol(word, n=count, overwrite=overwrite)
            except ValueError:
                raise ValueError("Incorrect dictionary format, expected '<token> <cnt> [flags]'")
def rewrite_dict_keys(d):
    # (1) remove word breaking symbol, (2) add word ending symbol where the word is not broken up,
    # e.g.: d = {'le@@': 5, 'tt@@': 6, 'er': 7} => {'le': 5, 'tt': 6, 'er</w>': 7}
    d2 = dict((re.sub(r"@@$", "", k), v) if k.endswith("@@") else (re.sub(r"$", "</w>", k), v) for k, v in d.items())
    keep_keys = "<s> <pad> </s> <unk>".split()
    # restore the special tokens
    for k in keep_keys:
        del d2[f"{k}</w>"]
        d2[k] = d[k]  # restore
    return d2
def convert_biogpt_checkpoint_to_pytorch(biogpt_checkpoint_path, pytorch_dump_folder_path):
    # prep
    if not os.path.exists(biogpt_checkpoint_path):
        raise ValueError(f"path {biogpt_checkpoint_path} does not exist!")
    os.makedirs(pytorch_dump_folder_path, exist_ok=True)
    print(f"Writing results to {pytorch_dump_folder_path}")

    # handle various types of models
    checkpoint_file = os.path.join(biogpt_checkpoint_path, "checkpoint.pt")
    if not os.path.isfile(checkpoint_file):
        raise ValueError(f"path to the file {checkpoint_file} does not exist!")
    chkpt = torch.load(checkpoint_file, map_location="cpu")

    args = chkpt["cfg"]["model"]

    # dicts
    dict_file = os.path.join(biogpt_checkpoint_path, "dict.txt")
    if not os.path.isfile(dict_file):
        raise ValueError(f"path to the file {dict_file} does not exist!")
    src_dict = Dictionary.load(dict_file)
    src_vocab = rewrite_dict_keys(src_dict.indices)
    src_vocab_size = len(src_vocab)
    src_vocab_file = os.path.join(pytorch_dump_folder_path, VOCAB_FILES_NAMES["vocab_file"])
    print(f"Generating {src_vocab_file} of {src_vocab_size} records")
    with open(src_vocab_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(src_vocab, ensure_ascii=False, indent=json_indent))

    # merges_file (bpecodes)
    bpecodes_file = os.path.join(biogpt_checkpoint_path, "bpecodes")
    if not os.path.isfile(bpecodes_file):
        raise ValueError(f"path to the file {bpecodes_file} does not exist!")

    merges_file = os.path.join(pytorch_dump_folder_path, VOCAB_FILES_NAMES["merges_file"])
    shutil.copyfile(bpecodes_file, merges_file)

    # model config
    biogpt_model_config_file = os.path.join(pytorch_dump_folder_path, "config.json")
    model_conf = {
        "activation_dropout": args["activation_dropout"],
        "architectures": ["BioGptForCausalLM"],
        "attention_probs_dropout_prob": args["attention_dropout"],
        "bos_token_id": 0,
        "eos_token_id": 2,
        "hidden_act": args["activation_fn"],
        "hidden_dropout_prob": args["dropout"],
        "hidden_size": args["decoder_embed_dim"],
        "initializer_range": 0.02,
        "intermediate_size": args["decoder_ffn_embed_dim"],
        "layer_norm_eps": 1e-12,
        "layerdrop": args["decoder_layerdrop"],
        "max_position_embeddings": args["max_target_positions"],
        "model_type": "biogpt",
        "num_attention_heads": args["decoder_attention_heads"],
        "num_hidden_layers": args["decoder_layers"],
        "pad_token_id": 1,
        "scale_embedding": not args["no_scale_embedding"],
        "tie_word_embeddings": args["share_decoder_input_output_embed"],
        "vocab_size": src_vocab_size,
    }

    # good hparam defaults to start with
    print(f"Generating {biogpt_model_config_file}")
    with open(biogpt_model_config_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(model_conf, ensure_ascii=False, indent=json_indent))

    # tokenizer config
    biogpt_tokenizer_config_file = os.path.join(pytorch_dump_folder_path, TOKENIZER_CONFIG_FILE)
    tokenizer_conf = {
        "bos_token": "<s>",
        "eos_token": "</s>",
        "model_max_length": 1024,
        "pad_token": "<pad>",
        "special_tokens_map_file": None,
        "tokenizer_class": "BioGptTokenizer",
        "unk_token": "<unk>",
    }

    print(f"Generating {biogpt_tokenizer_config_file}")
    with open(biogpt_tokenizer_config_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(tokenizer_conf, ensure_ascii=False, indent=json_indent))

    # model
    model_state_dict = chkpt["model"]

    # remove unneeded keys
    ignore_keys = [
        "decoder.version",
    ]
    for k in ignore_keys:
        model_state_dict.pop(k, None)

    layer_names = list(model_state_dict.keys())
    for layer_name in layer_names:
        if layer_name.endswith("output_projection.weight"):
            model_state_dict["output_projection.weight"] = model_state_dict.pop(layer_name)
        else:
            model_state_dict[layer_name.replace("decoder", "biogpt")] = model_state_dict.pop(layer_name)

    config = BioGptConfig.from_pretrained(pytorch_dump_folder_path)
    model_new = BioGptForCausalLM(config)

    # check that it loads ok
    model_new.load_state_dict(model_state_dict)

    # save
    pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
    print(f"Generating {pytorch_weights_dump_path}")
    torch.save(model_state_dict, pytorch_weights_dump_path)

    print("Conversion is done!")
if __name__ == "__main__":
lowerCamelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--biogpt_checkpoint_path''',
default=None,
type=str,
required=True,
help=(
'''Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,'''
''' bpecodes, etc.'''
),
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
    args = parser.parse_args()
convert_biogpt_checkpoint_to_pytorch(args.biogpt_checkpoint_path, args.pytorch_dump_folder_path)
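# --- illustrative invocation (not part of the original script) ---
# Assuming a fairseq dump directory that contains checkpoint.pt, dict.txt and
# bpecodes; the paths and script filename below are hypothetical:
#
#   python convert_biogpt_checkpoint.py \
#       --biogpt_checkpoint_path /path/to/biogpt_fairseq_dump \
#       --pytorch_dump_folder_path /path/to/biogpt_hf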
| 352 |
import warnings
from ...utils import is_sklearn_available, requires_backends
if is_sklearn_available():
from scipy.stats import pearsonr, spearmanr
    from sklearn.metrics import f1_score, matthews_corrcoef


DEPRECATION_WARNING = (
    "This metric will be removed from the library soon, metrics should be handled with the 🤗 Evaluate "
    "library. You can have a look at this example script for pointers: "
    "https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py"
)


def simple_accuracy(preds, labels):
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(simple_accuracy, "sklearn")
    return (preds == labels).mean()


def acc_and_f1(preds, labels):
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(acc_and_f1, "sklearn")
    acc = simple_accuracy(preds, labels)
    f1 = f1_score(y_true=labels, y_pred=preds)
    return {
        "acc": acc,
        "f1": f1,
        "acc_and_f1": (acc + f1) / 2,
    }


def pearson_and_spearman(preds, labels):
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(pearson_and_spearman, "sklearn")
    pearson_corr = pearsonr(preds, labels)[0]
    spearman_corr = spearmanr(preds, labels)[0]
    return {
        "pearson": pearson_corr,
        "spearmanr": spearman_corr,
        "corr": (pearson_corr + spearman_corr) / 2,
    }


def glue_compute_metrics(task_name, preds, labels):
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(glue_compute_metrics, "sklearn")
    assert len(preds) == len(labels), f"Predictions and labels have mismatched lengths {len(preds)} and {len(labels)}"
    if task_name == "cola":
        return {"mcc": matthews_corrcoef(labels, preds)}
    elif task_name == "sst-2":
        return {"acc": simple_accuracy(preds, labels)}
    elif task_name == "mrpc":
        return acc_and_f1(preds, labels)
    elif task_name == "sts-b":
        return pearson_and_spearman(preds, labels)
    elif task_name == "qqp":
        return acc_and_f1(preds, labels)
    elif task_name == "mnli":
        return {"mnli/acc": simple_accuracy(preds, labels)}
    elif task_name == "mnli-mm":
        return {"mnli-mm/acc": simple_accuracy(preds, labels)}
    elif task_name == "qnli":
        return {"acc": simple_accuracy(preds, labels)}
    elif task_name == "rte":
        return {"acc": simple_accuracy(preds, labels)}
    elif task_name == "wnli":
        return {"acc": simple_accuracy(preds, labels)}
    elif task_name == "hans":
        return {"acc": simple_accuracy(preds, labels)}
    else:
        raise KeyError(task_name)


def xnli_compute_metrics(task_name, preds, labels):
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(xnli_compute_metrics, "sklearn")
    if len(preds) != len(labels):
        raise ValueError(f"Predictions and labels have mismatched lengths {len(preds)} and {len(labels)}")
    if task_name == "xnli":
        return {"acc": simple_accuracy(preds, labels)}
    else:
        raise KeyError(task_name)
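# --- illustrative usage (not part of the original module) ---
# A toy call, assuming scikit-learn and scipy are installed; real callers pass
# model predictions and gold labels for one of the task names handled above.
if __name__ == "__main__":
    import numpy as np

    preds = np.array([1, 0, 1, 1])
    labels = np.array([1, 0, 0, 1])
    print(glue_compute_metrics("mrpc", preds, labels))  # acc, f1 and their mean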
| 22 | 0 |
from . import __version__
# Backward compatibility imports, to make sure all those objects can be found in file_utils
from .utils import (
CLOUDFRONT_DISTRIB_PREFIX,
CONFIG_NAME,
DISABLE_TELEMETRY,
DUMMY_INPUTS,
DUMMY_MASK,
ENV_VARS_TRUE_AND_AUTO_VALUES,
ENV_VARS_TRUE_VALUES,
FEATURE_EXTRACTOR_NAME,
FLAX_WEIGHTS_NAME,
HF_MODULES_CACHE,
HUGGINGFACE_CO_PREFIX,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
MODEL_CARD_NAME,
MULTIPLE_CHOICE_DUMMY_INPUTS,
PYTORCH_PRETRAINED_BERT_CACHE,
PYTORCH_TRANSFORMERS_CACHE,
S3_BUCKET_PREFIX,
SENTENCEPIECE_UNDERLINE,
SPIECE_UNDERLINE,
TF2_WEIGHTS_NAME,
TF_WEIGHTS_NAME,
TORCH_FX_REQUIRED_VERSION,
TRANSFORMERS_CACHE,
TRANSFORMERS_DYNAMIC_MODULE_NAME,
USE_JAX,
USE_TF,
USE_TORCH,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
ContextManagers,
DummyObject,
EntryNotFoundError,
ExplicitEnum,
ModelOutput,
PaddingStrategy,
PushToHubMixin,
RepositoryNotFoundError,
RevisionNotFoundError,
TensorType,
_LazyModule,
add_code_sample_docstrings,
add_end_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
cached_property,
copy_func,
default_cache_path,
define_sagemaker_information,
get_cached_models,
get_file_from_repo,
get_full_repo_name,
get_torch_version,
has_file,
http_user_agent,
is_apex_available,
    is_bs4_available,
is_coloredlogs_available,
is_datasets_available,
    is_detectron2_available,
is_faiss_available,
is_flax_available,
is_ftfy_available,
is_in_notebook,
is_ipex_available,
is_librosa_available,
is_offline_mode,
is_onnx_available,
is_pandas_available,
is_phonemizer_available,
is_protobuf_available,
is_psutil_available,
    is_py3nvml_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytorch_quantization_available,
is_rjieba_available,
is_sagemaker_dp_enabled,
is_sagemaker_mp_enabled,
is_scipy_available,
is_sentencepiece_available,
is_seqio_available,
is_sklearn_available,
is_soundfile_availble,
is_spacy_available,
is_speech_available,
is_tensor,
is_tensorflow_probability_available,
    is_tf2onnx_available,
is_tf_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
    is_torch_bf16_available,
is_torch_cuda_available,
is_torch_fx_available,
is_torch_fx_proxy,
is_torch_mps_available,
    is_torch_tf32_available,
is_torch_tpu_available,
is_torchaudio_available,
is_training_run_on_sagemaker,
is_vision_available,
replace_return_docstrings,
requires_backends,
to_numpy,
to_py_obj,
torch_only_method,
)
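# --- illustrative usage (not part of the original module) ---
# This module exists purely so that historical import paths keep resolving:
#
#   from transformers.file_utils import is_torch_available   # old location
#   from transformers.utils import is_torch_available        # current home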
| 296 |
import json
import sys
def format_json_to_md(input_json_file, output_md_file):
    with open(input_json_file, encoding="utf-8") as f:
        results = json.load(f)

    output_md = ["<details>", "<summary>Show updated benchmarks!</summary>", " "]

    for benchmark_name in sorted(results):
        benchmark_res = results[benchmark_name]
        benchmark_file_name = benchmark_name.split("/")[-1]
        output_md.append(f"### Benchmark: {benchmark_file_name}")

        title = "| metric |"
        lines = "|--------|"
        value = "| new / old (diff) |"
        for metric_name in sorted(benchmark_res):
            metric_vals = benchmark_res[metric_name]
            new_val = metric_vals["new"]
            old_val = metric_vals.get("old", None)
            dif_val = metric_vals.get("diff", None)

            val_str = f" {new_val:f}" if isinstance(new_val, (int, float)) else "None"
            if old_val is not None:
                val_str += f" / {old_val:f}" if isinstance(old_val, (int, float)) else "None"
            if dif_val is not None:
                val_str += f" ({dif_val:f})" if isinstance(dif_val, (int, float)) else "None"

            title += " " + metric_name + " |"
            lines += "---|"
            value += val_str + " |"

        output_md += [title, lines, value, " "]

    output_md.append("</details>")

    with open(output_md_file, "w", encoding="utf-8") as f:
        f.writelines("\n".join(output_md))


if __name__ == "__main__":
    input_json_file = sys.argv[1]
    output_md_file = sys.argv[2]

    format_json_to_md(input_json_file, output_md_file)
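# --- illustrative input (not part of the original script) ---
# A hypothetical results file the converter above can render; the invocation
# and file names are assumptions:
#
#   python format_json_to_md.py results.json results.md
#
# where results.json looks like:
#
#   {"benchmarks/inference.json": {
#       "time_per_batch_s": {"new": 0.042, "old": 0.050, "diff": -0.008}}}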
| 287 | 0 |
import random
import unittest
import numpy as np
import transformers
from transformers import is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax
if is_flax_available():
import os
import jax.numpy as jnp
from jax import jit
from transformers import AutoTokenizer, FlaxAutoModelForCausalLM
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
_lowerCamelCase : Any = "0.12" # assumed parallelism: 8
if is_torch_available():
import torch
def ids_tensor(shape, vocab_size, rng=None):
    """Creates a random int32 tensor of the given shape, with values below vocab_size."""
    if rng is None:
        rng = random.Random()

    total_dims = 1
    for dim in shape:
        total_dims *= dim

    values = []
    for _ in range(total_dims):
        values.append(rng.randint(0, vocab_size - 1))

    output = np.array(values, dtype=jnp.int32).reshape(shape)

    return output
def random_attention_mask(shape, rng=None):
    attn_mask = ids_tensor(shape, vocab_size=2, rng=rng)
    # make sure that at least one token is attended to for each batch
    attn_mask[:, -1] = 1
    return attn_mask
@require_flax
class FlaxGenerationTesterMixin:
    model_tester = None
    all_generative_model_classes = ()

    def _get_input_ids_and_config(self):
        config, inputs = self.model_tester.prepare_config_and_inputs_for_common()

        # cut to half length & take max batch_size 3
        max_batch_size = 2
        sequence_length = inputs["input_ids"].shape[-1] // 2
        input_ids = inputs["input_ids"][:max_batch_size, :sequence_length]
        attention_mask = jnp.ones_like(input_ids)
        attention_mask = attention_mask[:max_batch_size, :sequence_length]

        # generate max 5 tokens
        max_length = input_ids.shape[-1] + 5
        if config.eos_token_id is not None and config.pad_token_id is None:
            # hack to allow generate for models such as GPT2 as is done in `generate()`
            config.pad_token_id = config.eos_token_id
        return config, input_ids, attention_mask, max_length
    @is_pt_flax_cross_test
    def test_greedy_generate_pt_flax(self):
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()
        config.do_sample = False
        config.max_length = max_length
        config.decoder_start_token_id = 0

        for model_class in self.all_generative_model_classes:
            flax_model = model_class(config)

            pt_model_class_name = model_class.__name__[4:]  # Skip the "Flax" at the beginning
            pt_model_class = getattr(transformers, pt_model_class_name)
            pt_model = pt_model_class(config).eval()
            pt_model = load_flax_weights_in_pytorch_model(pt_model, flax_model.params)

            flax_generation_outputs = flax_model.generate(input_ids).sequences
            pt_generation_outputs = pt_model.generate(torch.tensor(input_ids, dtype=torch.long))

            if flax_generation_outputs.shape[-1] > pt_generation_outputs.shape[-1]:
                flax_generation_outputs = flax_generation_outputs[:, : pt_generation_outputs.shape[-1]]

            self.assertListEqual(pt_generation_outputs.numpy().tolist(), flax_generation_outputs.tolist())
    def test_greedy_generate(self):
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()
        config.do_sample = False
        config.max_length = max_length

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids).sequences

            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())
    def test_sample_generate(self):
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()
        config.do_sample = True
        config.max_length = max_length

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids).sequences

            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())
    def test_beam_search_generate(self):
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()
        config.do_sample = False
        config.max_length = max_length
        config.num_beams = 2

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids).sequences

            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())
    def test_beam_search_generate_num_return_sequences(self):
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()
        config.do_sample = False
        config.max_length = max_length
        config.num_beams = 2
        config.num_return_sequences = 2

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[0], input_ids.shape[0] * config.num_return_sequences)
    def test_sample_generate_logits_warper(self):
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()
        config.do_sample = True
        config.max_length = max_length
        config.temperature = 0.8
        config.top_k = 10
        config.top_p = 0.3
        config.min_length = 1
        config.forced_bos_token_id = 8
        config.forced_eos_token_id = 9

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids).sequences

            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())
    def test_greedy_generate_logits_warper(self):
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()
        config.max_length = max_length
        config.min_length = 1
        config.forced_bos_token_id = 8
        config.forced_eos_token_id = 9

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids).sequences

            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())
    def test_beam_search_generate_logits_warper(self):
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()
        config.max_length = max_length
        config.num_beams = 2
        config.min_length = 1
        config.forced_bos_token_id = 8
        config.forced_eos_token_id = 9

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids).sequences

            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())
    def test_greedy_generate_attn_mask(self):
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()

        # pad attention mask on the left
        attention_mask = attention_mask.at[(0, 0)].set(0)

        config.do_sample = False
        config.max_length = max_length

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids, attention_mask=attention_mask).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids, attention_mask=attention_mask).sequences

            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())
    def test_sample_generate_attn_mask(self):
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()

        # pad attention mask on the left
        attention_mask = attention_mask.at[(0, 0)].set(0)

        config.do_sample = True
        config.max_length = max_length

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids, attention_mask=attention_mask).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids, attention_mask=attention_mask).sequences

            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())
    def test_beam_search_generate_attn_mask(self):
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()

        # pad attention mask on the left
        attention_mask = attention_mask.at[(0, 0)].set(0)

        config.num_beams = 2
        config.max_length = max_length

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids, attention_mask=attention_mask).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids, attention_mask=attention_mask).sequences

            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())
@require_flax
class FlaxGenerationIntegrationTests(unittest.TestCase):
    def test_validate_generation_inputs(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-bert")
        model = FlaxAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-bert-flax-only")

        encoder_input_str = "Hello world"
        input_ids = tokenizer(encoder_input_str, return_tensors="np").input_ids

        # typos are quickly detected (the correct argument is `do_sample`)
        with self.assertRaisesRegex(ValueError, "do_samples"):
            model.generate(input_ids, do_samples=True)

        # arbitrary arguments that will not be used anywhere are also not accepted
        with self.assertRaisesRegex(ValueError, "foo"):
            fake_model_kwargs = {"foo": "bar"}
            model.generate(input_ids, **fake_model_kwargs)
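# --- illustrative usage (not part of the original test file) ---
# The generate() call the mixin above exercises, assuming a Flax causal-LM
# checkpoint such as gpt2 is available:
#
#   tokenizer = AutoTokenizer.from_pretrained("gpt2")
#   model = FlaxAutoModelForCausalLM.from_pretrained("gpt2")
#   input_ids = tokenizer("Hello", return_tensors="np").input_ids
#   sequences = model.generate(input_ids, max_length=16, do_sample=False).sequences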
| 99 | 1 |
'''simple docstring'''
import argparse
from pathlib import Path
import fairseq
import torch
from fairseq.models.xmod import XMODModel as FairseqXmodModel
from packaging import version
from transformers import XmodConfig, XmodForMaskedLM, XmodForSequenceClassification
from transformers.utils import logging
if version.parse(fairseq.__version__) < version.parse('''0.12.2'''):
raise Exception('''requires fairseq >= 0.12.2''')
if version.parse(fairseq.__version__) > version.parse('''2'''):
raise Exception('''requires fairseq < v2''')
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

SAMPLE_TEXT = "Hello, World!"
SAMPLE_LANGUAGE = "en_XX"
def convert_xmod_checkpoint_to_pytorch(xmod_checkpoint_path, pytorch_dump_folder_path, classification_head):
    data_dir = Path("data_bin")
    xmod = FairseqXmodModel.from_pretrained(
        model_name_or_path=str(Path(xmod_checkpoint_path).parent),
        checkpoint_file=Path(xmod_checkpoint_path).name,
        _name="xmod_base",
        arch="xmod_base",
        task="multilingual_masked_lm",
        data_name_or_path=str(data_dir),
        bpe="sentencepiece",
        sentencepiece_model=str(Path(xmod_checkpoint_path).parent / "sentencepiece.bpe.model"),
        src_dict=str(data_dir / "dict.txt"),
    )
    xmod.eval()  # disable dropout
    print(xmod)

    xmod_sent_encoder = xmod.model.encoder.sentence_encoder
    config = XmodConfig(
        vocab_size=xmod_sent_encoder.embed_tokens.num_embeddings,
        hidden_size=xmod.cfg.model.encoder_embed_dim,
        num_hidden_layers=xmod.cfg.model.encoder_layers,
        num_attention_heads=xmod.cfg.model.encoder_attention_heads,
        intermediate_size=xmod.cfg.model.encoder_ffn_embed_dim,
        max_position_embeddings=514,
        type_vocab_size=1,
        layer_norm_eps=1e-5,
        pre_norm=xmod.cfg.model.encoder_normalize_before,
        adapter_reduction_factor=getattr(xmod.cfg.model, "bottleneck", 2),
        adapter_layer_norm=xmod.cfg.model.adapter_layer_norm,
        adapter_reuse_layer_norm=xmod.cfg.model.adapter_reuse_layer_norm,
        ln_before_adapter=xmod.cfg.model.ln_before_adapter,
        languages=xmod.cfg.model.languages,
    )
    if classification_head:
        config.num_labels = xmod.model.classification_heads["mnli"].out_proj.weight.shape[0]
    print("Our X-MOD config:", config)

    model = XmodForSequenceClassification(config) if classification_head else XmodForMaskedLM(config)
    model.eval()

    # Now let's copy all the weights.
    # Embeddings
    model.roberta.embeddings.word_embeddings.weight = xmod_sent_encoder.embed_tokens.weight
    model.roberta.embeddings.position_embeddings.weight = xmod_sent_encoder.embed_positions.weight
    model.roberta.embeddings.token_type_embeddings.weight.data = torch.zeros_like(
        model.roberta.embeddings.token_type_embeddings.weight
    )  # just zero them out b/c xmod doesn't use them.

    model.roberta.embeddings.LayerNorm.weight = xmod_sent_encoder.layernorm_embedding.weight
    model.roberta.embeddings.LayerNorm.bias = xmod_sent_encoder.layernorm_embedding.bias

    for i in range(config.num_hidden_layers):
        # Encoder: start of layer
        layer = model.roberta.encoder.layer[i]
        xmod_layer = xmod_sent_encoder.layers[i]

        # self attention
        self_attn = layer.attention.self
        if not (
            xmod_layer.self_attn.k_proj.weight.data.shape
            == xmod_layer.self_attn.q_proj.weight.data.shape
            == xmod_layer.self_attn.v_proj.weight.data.shape
            == torch.Size((config.hidden_size, config.hidden_size))
        ):
            raise AssertionError("Dimensions of self-attention weights do not match.")

        self_attn.query.weight.data = xmod_layer.self_attn.q_proj.weight
        self_attn.query.bias.data = xmod_layer.self_attn.q_proj.bias
        self_attn.key.weight.data = xmod_layer.self_attn.k_proj.weight
        self_attn.key.bias.data = xmod_layer.self_attn.k_proj.bias
        self_attn.value.weight.data = xmod_layer.self_attn.v_proj.weight
        self_attn.value.bias.data = xmod_layer.self_attn.v_proj.bias

        # self-attention output
        self_output = layer.attention.output
        if self_output.dense.weight.shape != xmod_layer.self_attn.out_proj.weight.shape:
            raise AssertionError("Dimensions of self-attention output weights do not match.")
        self_output.dense.weight = xmod_layer.self_attn.out_proj.weight
        self_output.dense.bias = xmod_layer.self_attn.out_proj.bias
        self_output.LayerNorm.weight = xmod_layer.self_attn_layer_norm.weight
        self_output.LayerNorm.bias = xmod_layer.self_attn_layer_norm.bias

        # intermediate
        intermediate = layer.intermediate
        if intermediate.dense.weight.shape != xmod_layer.fc1.weight.shape:
            raise AssertionError("Dimensions of intermediate weights do not match.")
        intermediate.dense.weight = xmod_layer.fc1.weight
        intermediate.dense.bias = xmod_layer.fc1.bias

        # output
        bert_output = layer.output
        if bert_output.dense.weight.shape != xmod_layer.fc2.weight.shape:
            raise AssertionError("Dimensions of feed-forward weights do not match.")
        bert_output.dense.weight = xmod_layer.fc2.weight
        bert_output.dense.bias = xmod_layer.fc2.bias
        bert_output.LayerNorm.weight = xmod_layer.final_layer_norm.weight
        bert_output.LayerNorm.bias = xmod_layer.final_layer_norm.bias
        if bert_output.adapter_layer_norm is not None:
            bert_output.adapter_layer_norm.weight = xmod_layer.adapter_layer_norm.weight
            bert_output.adapter_layer_norm.bias = xmod_layer.adapter_layer_norm.bias

        if sorted(bert_output.adapter_modules.keys()) != sorted(xmod_layer.adapter_modules.keys()):
            raise AssertionError("Lists of language adapters do not match.")
        for lang_code, adapter in xmod_layer.adapter_modules.items():
            to_adapter = bert_output.adapter_modules[lang_code]
            from_adapter = xmod_layer.adapter_modules[lang_code]
            to_adapter.fc1.weight = from_adapter.fc1.weight
            to_adapter.fc1.bias = from_adapter.fc1.bias
            to_adapter.fc2.weight = from_adapter.fc2.weight
            to_adapter.fc2.bias = from_adapter.fc2.bias

        # end of layer

    if xmod_sent_encoder.layer_norm is not None:
        model.roberta.encoder.LayerNorm.weight = xmod_sent_encoder.layer_norm.weight
        model.roberta.encoder.LayerNorm.bias = xmod_sent_encoder.layer_norm.bias

    if classification_head:
        model.classifier.dense.weight = xmod.model.classification_heads["mnli"].dense.weight
        model.classifier.dense.bias = xmod.model.classification_heads["mnli"].dense.bias
        model.classifier.out_proj.weight = xmod.model.classification_heads["mnli"].out_proj.weight
        model.classifier.out_proj.bias = xmod.model.classification_heads["mnli"].out_proj.bias
    else:
        # LM Head
        model.lm_head.dense.weight = xmod.model.encoder.lm_head.dense.weight
        model.lm_head.dense.bias = xmod.model.encoder.lm_head.dense.bias
        model.lm_head.layer_norm.weight = xmod.model.encoder.lm_head.layer_norm.weight
        model.lm_head.layer_norm.bias = xmod.model.encoder.lm_head.layer_norm.bias
        model.lm_head.decoder.weight = xmod.model.encoder.lm_head.weight
        model.lm_head.decoder.bias = xmod.model.encoder.lm_head.bias

    # Let's check that we get the same results.
    input_ids = xmod.encode(SAMPLE_TEXT).unsqueeze(0)  # batch of size 1
    model.roberta.set_default_language(SAMPLE_LANGUAGE)

    our_output = model(input_ids)[0]
    if classification_head:
        their_output = xmod.model.classification_heads["mnli"](xmod.extract_features(input_ids))
    else:
        their_output = xmod.model(input_ids, lang_id=[SAMPLE_LANGUAGE])[0]
    print(our_output.shape, their_output.shape)
    max_absolute_diff = torch.max(torch.abs(our_output - their_output)).item()
    print(f"max_absolute_diff = {max_absolute_diff}")  # ~ 1e-7
    success = torch.allclose(our_output, their_output, atol=1e-3)
    print("Do both models output the same tensors?", "🔥" if success else "💩")
    if not success:
        raise Exception("Something went wRoNg")

    Path(pytorch_dump_folder_path).mkdir(parents=True, exist_ok=True)
    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
_lowerCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--xmod_checkpoint_path''', default=None, type=str, required=True, help='''Path the official PyTorch dump.'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--classification_head''', action='''store_true''', help='''Whether to convert a final classification head.'''
)
    args = parser.parse_args()
convert_xmod_checkpoint_to_pytorch(
args.xmod_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
)
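# --- illustrative invocation (not part of the original script) ---
# Paths and the script filename are hypothetical; the checkpoint's parent
# directory is expected to also hold sentencepiece.bpe.model, and
# ./data_bin/dict.txt must exist as loaded above:
#
#   python convert_xmod_checkpoint.py \
#       --xmod_checkpoint_path /path/to/xmod.base/model.pt \
#       --pytorch_dump_folder_path /path/to/xmod-base-hf \
#       --classification_head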
| 298 |
'''simple docstring'''
import argparse
from pathlib import Path
import fairseq
import torch
from fairseq.models.xmod import XMODModel as FairseqXmodModel
from packaging import version
from transformers import XmodConfig, XmodForMaskedLM, XmodForSequenceClassification
from transformers.utils import logging
if version.parse(fairseq.__version__) < version.parse('''0.12.2'''):
raise Exception('''requires fairseq >= 0.12.2''')
if version.parse(fairseq.__version__) > version.parse('''2'''):
raise Exception('''requires fairseq < v2''')
logging.set_verbosity_info()
_lowerCAmelCase = logging.get_logger(__name__)
_lowerCAmelCase = '''Hello, World!'''
_lowerCAmelCase = '''en_XX'''
def __lowerCAmelCase ( snake_case__ , snake_case__ , snake_case__ ):
__UpperCamelCase : Union[str, Any] = Path("data_bin" )
__UpperCamelCase : Union[str, Any] = FairseqXmodModel.from_pretrained(
model_name_or_path=str(Path(snake_case__ ).parent ) , checkpoint_file=Path(snake_case__ ).name , _name="xmod_base" , arch="xmod_base" , task="multilingual_masked_lm" , data_name_or_path=str(snake_case__ ) , bpe="sentencepiece" , sentencepiece_model=str(Path(snake_case__ ).parent / "sentencepiece.bpe.model" ) , src_dict=str(data_dir / "dict.txt" ) , )
xmod.eval() # disable dropout
print(snake_case__ )
__UpperCamelCase : List[str] = xmod.model.encoder.sentence_encoder
__UpperCamelCase : Optional[int] = XmodConfig(
vocab_size=xmod_sent_encoder.embed_tokens.num_embeddings , hidden_size=xmod.cfg.model.encoder_embed_dim , num_hidden_layers=xmod.cfg.model.encoder_layers , num_attention_heads=xmod.cfg.model.encoder_attention_heads , intermediate_size=xmod.cfg.model.encoder_ffn_embed_dim , max_position_embeddings=514 , type_vocab_size=1 , layer_norm_eps=1E-5 , pre_norm=xmod.cfg.model.encoder_normalize_before , adapter_reduction_factor=getattr(xmod.cfg.model , "bottleneck" , 2 ) , adapter_layer_norm=xmod.cfg.model.adapter_layer_norm , adapter_reuse_layer_norm=xmod.cfg.model.adapter_reuse_layer_norm , ln_before_adapter=xmod.cfg.model.ln_before_adapter , languages=xmod.cfg.model.languages , )
if classification_head:
__UpperCamelCase : Any = xmod.model.classification_heads["mnli"].out_proj.weight.shape[0]
print("Our X-MOD config:" , snake_case__ )
__UpperCamelCase : Dict = XmodForSequenceClassification(snake_case__ ) if classification_head else XmodForMaskedLM(snake_case__ )
model.eval()
# Now let's copy all the weights.
# Embeddings
__UpperCamelCase : List[Any] = xmod_sent_encoder.embed_tokens.weight
__UpperCamelCase : List[Any] = xmod_sent_encoder.embed_positions.weight
__UpperCamelCase : str = torch.zeros_like(
model.roberta.embeddings.token_type_embeddings.weight ) # just zero them out b/c xmod doesn't use them.
__UpperCamelCase : Any = xmod_sent_encoder.layernorm_embedding.weight
__UpperCamelCase : str = xmod_sent_encoder.layernorm_embedding.bias
for i in range(config.num_hidden_layers ):
# Encoder: start of layer
__UpperCamelCase : int = model.roberta.encoder.layer[i]
__UpperCamelCase : Any = xmod_sent_encoder.layers[i]
# self attention
__UpperCamelCase : List[str] = layer.attention.self
if not (
xmod_layer.self_attn.k_proj.weight.data.shape
== xmod_layer.self_attn.q_proj.weight.data.shape
== xmod_layer.self_attn.v_proj.weight.data.shape
== torch.Size((config.hidden_size, config.hidden_size) )
):
raise AssertionError("Dimensions of self-attention weights do not match." )
__UpperCamelCase : Dict = xmod_layer.self_attn.q_proj.weight
__UpperCamelCase : Optional[Any] = xmod_layer.self_attn.q_proj.bias
__UpperCamelCase : Any = xmod_layer.self_attn.k_proj.weight
__UpperCamelCase : Tuple = xmod_layer.self_attn.k_proj.bias
__UpperCamelCase : Union[str, Any] = xmod_layer.self_attn.v_proj.weight
__UpperCamelCase : Any = xmod_layer.self_attn.v_proj.bias
# self-attention output
__UpperCamelCase : Optional[int] = layer.attention.output
if self_output.dense.weight.shape != xmod_layer.self_attn.out_proj.weight.shape:
raise AssertionError("Dimensions of self-attention output weights do not match." )
__UpperCamelCase : Union[str, Any] = xmod_layer.self_attn.out_proj.weight
__UpperCamelCase : str = xmod_layer.self_attn.out_proj.bias
__UpperCamelCase : Dict = xmod_layer.self_attn_layer_norm.weight
__UpperCamelCase : Any = xmod_layer.self_attn_layer_norm.bias
# intermediate
__UpperCamelCase : Dict = layer.intermediate
if intermediate.dense.weight.shape != xmod_layer.fca.weight.shape:
raise AssertionError("Dimensions of intermediate weights do not match." )
__UpperCamelCase : List[Any] = xmod_layer.fca.weight
__UpperCamelCase : Optional[int] = xmod_layer.fca.bias
# output
__UpperCamelCase : List[Any] = layer.output
if bert_output.dense.weight.shape != xmod_layer.fca.weight.shape:
raise AssertionError("Dimensions of feed-forward weights do not match." )
__UpperCamelCase : Tuple = xmod_layer.fca.weight
__UpperCamelCase : int = xmod_layer.fca.bias
__UpperCamelCase : Dict = xmod_layer.final_layer_norm.weight
__UpperCamelCase : int = xmod_layer.final_layer_norm.bias
if bert_output.adapter_layer_norm is not None:
__UpperCamelCase : Any = xmod_layer.adapter_layer_norm.weight
__UpperCamelCase : int = xmod_layer.adapter_layer_norm.bias
if sorted(bert_output.adapter_modules.keys() ) != sorted(xmod_layer.adapter_modules.keys() ):
raise AssertionError("Lists of language adapters do not match." )
        for lang_code, adapter in xmod_layer.adapter_modules.items():
            to_adapter = bert_output.adapter_modules[lang_code]
            from_adapter = xmod_layer.adapter_modules[lang_code]
            to_adapter.dense1.weight = from_adapter.fc1.weight
            to_adapter.dense1.bias = from_adapter.fc1.bias
            to_adapter.dense2.weight = from_adapter.fc2.weight
            to_adapter.dense2.bias = from_adapter.fc2.bias
# end of layer
if xmod_sent_encoder.layer_norm is not None:
        model.roberta.encoder.LayerNorm.weight = xmod_sent_encoder.layer_norm.weight
        model.roberta.encoder.LayerNorm.bias = xmod_sent_encoder.layer_norm.bias
if classification_head:
        model.classifier.dense.weight = xmod.model.classification_heads["mnli"].dense.weight
        model.classifier.dense.bias = xmod.model.classification_heads["mnli"].dense.bias
        model.classifier.out_proj.weight = xmod.model.classification_heads["mnli"].out_proj.weight
        model.classifier.out_proj.bias = xmod.model.classification_heads["mnli"].out_proj.bias
    else:
        # LM Head
        model.lm_head.dense.weight = xmod.model.encoder.lm_head.dense.weight
        model.lm_head.dense.bias = xmod.model.encoder.lm_head.dense.bias
        model.lm_head.layer_norm.weight = xmod.model.encoder.lm_head.layer_norm.weight
        model.lm_head.layer_norm.bias = xmod.model.encoder.lm_head.layer_norm.bias
        model.lm_head.decoder.weight = xmod.model.encoder.lm_head.weight
        model.lm_head.decoder.bias = xmod.model.encoder.lm_head.bias
# Let's check that we get the same results.
    # SAMPLE_TEXT and SAMPLE_LANGUAGE are module-level constants defined earlier in this script
    input_ids = xmod.encode(SAMPLE_TEXT).unsqueeze(0)  # batch of size 1
    model.roberta.set_default_language(SAMPLE_LANGUAGE)
    our_output = model(input_ids)[0]
if classification_head:
        their_output = xmod.model.classification_heads["mnli"](xmod.extract_features(input_ids))
else:
        their_output = xmod.model(input_ids, lang_id=[SAMPLE_LANGUAGE])[0]
print(our_output.shape , their_output.shape )
    max_absolute_diff = torch.max(torch.abs(our_output - their_output)).item()
    print(f"max_absolute_diff = {max_absolute_diff}")  # ~ 1e-7
    success = torch.allclose(our_output, their_output, atol=1e-3)
print("Do both models output the same tensors?" , "🔥" if success else "💩" )
if not success:
raise Exception("Something went wRoNg" )
    Path(pytorch_dump_folder_path).mkdir(parents=True, exist_ok=True)
    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
        '''--xmod_checkpoint_path''', default=None, type=str, required=True, help='''Path to the official PyTorch dump.'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--classification_head''', action='''store_true''', help='''Whether to convert a final classification head.'''
)
    args = parser.parse_args()
convert_xmod_checkpoint_to_pytorch(
args.xmod_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
)
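# Illustrative invocation (not part of the original script; the script name and file
# paths below are hypothetical placeholders, while the flags match the argparse setup above):
#
#   python convert_xmod_original_pytorch_checkpoint_to_pytorch.py \
#       --xmod_checkpoint_path ./xmod.base.81.1M \
#       --pytorch_dump_folder_path ./xmod-base-converted \
#       --classification_head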
| 298 | 1 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
XLM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""xlm-mlm-en-2048""": """https://huggingface.co/xlm-mlm-en-2048/resolve/main/config.json""",
"""xlm-mlm-ende-1024""": """https://huggingface.co/xlm-mlm-ende-1024/resolve/main/config.json""",
"""xlm-mlm-enfr-1024""": """https://huggingface.co/xlm-mlm-enfr-1024/resolve/main/config.json""",
"""xlm-mlm-enro-1024""": """https://huggingface.co/xlm-mlm-enro-1024/resolve/main/config.json""",
"""xlm-mlm-tlm-xnli15-1024""": """https://huggingface.co/xlm-mlm-tlm-xnli15-1024/resolve/main/config.json""",
"""xlm-mlm-xnli15-1024""": """https://huggingface.co/xlm-mlm-xnli15-1024/resolve/main/config.json""",
"""xlm-clm-enfr-1024""": """https://huggingface.co/xlm-clm-enfr-1024/resolve/main/config.json""",
"""xlm-clm-ende-1024""": """https://huggingface.co/xlm-clm-ende-1024/resolve/main/config.json""",
"""xlm-mlm-17-1280""": """https://huggingface.co/xlm-mlm-17-1280/resolve/main/config.json""",
"""xlm-mlm-100-1280""": """https://huggingface.co/xlm-mlm-100-1280/resolve/main/config.json""",
}
class XLMConfig(PretrainedConfig):
    model_type = "xlm"
    attribute_map = {
        "hidden_size": "emb_dim",
        "num_attention_heads": "n_heads",
        "num_hidden_layers": "n_layers",
        "n_words": "vocab_size",  # For backward compatibility
    }
    def __init__(
        self,
        vocab_size=30145,
        emb_dim=2048,
        n_layers=12,
        n_heads=16,
        dropout=0.1,
        attention_dropout=0.1,
        gelu_activation=True,
        sinusoidal_embeddings=False,
        causal=False,
        asm=False,
        n_langs=1,
        use_lang_emb=True,
        max_position_embeddings=512,
        embed_init_std=2048**-0.5,
        layer_norm_eps=1e-12,
        init_std=0.02,
        bos_index=0,
        eos_index=1,
        pad_index=2,
        unk_index=3,
        mask_index=5,
        is_encoder=True,
        summary_type="first",
        summary_use_proj=True,
        summary_activation=None,
        summary_proj_to_labels=True,
        summary_first_dropout=0.1,
        start_n_top=5,
        end_n_top=5,
        mask_token_id=0,
        lang_id=0,
        pad_token_id=2,
        bos_token_id=0,
        **kwargs,
    ):
        '''simple docstring'''
        self.vocab_size = vocab_size
        self.emb_dim = emb_dim
        self.n_layers = n_layers
        self.n_heads = n_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.gelu_activation = gelu_activation
        self.sinusoidal_embeddings = sinusoidal_embeddings
        self.causal = causal
        self.asm = asm
        self.n_langs = n_langs
        self.use_lang_emb = use_lang_emb
        self.layer_norm_eps = layer_norm_eps
        self.bos_index = bos_index
        self.eos_index = eos_index
        self.pad_index = pad_index
        self.unk_index = unk_index
        self.mask_index = mask_index
        self.is_encoder = is_encoder
        self.max_position_embeddings = max_position_embeddings
        self.embed_init_std = embed_init_std
        self.init_std = init_std
        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_proj_to_labels = summary_proj_to_labels
        self.summary_first_dropout = summary_first_dropout
        self.start_n_top = start_n_top
        self.end_n_top = end_n_top
        self.mask_token_id = mask_token_id
        self.lang_id = lang_id
        if "n_words" in kwargs:
            self.n_words = kwargs["n_words"]
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, **kwargs)
class XLMOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        '''simple docstring'''
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ]
        )
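# A minimal usage sketch (not part of the original module): the attribute_map above
# lets the generic config names resolve to the XLM-specific ones. The hyperparameter
# values here are arbitrary, chosen only for illustration.
if __name__ == "__main__":
    config = XLMConfig(emb_dim=1024, n_layers=6, n_heads=8)
    assert config.hidden_size == config.emb_dim == 1024
    assert config.num_hidden_layers == config.n_layers == 6
    assert config.num_attention_heads == config.n_heads == 8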
| 148 |
from math import acos, sin
from typing import List, Tuple, Union
import numpy as np
import torch
from PIL import Image
from ...models import AutoencoderKL, UNet2DConditionModel
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import AudioPipelineOutput, BaseOutput, DiffusionPipeline, ImagePipelineOutput
from .mel import Mel
class AudioDiffusionPipeline(DiffusionPipeline):
    _optional_components = ["vqvae"]

    def __init__(
        self,
        vqvae: AutoencoderKL,
        unet: UNet2DConditionModel,
        mel: Mel,
        scheduler: Union[DDIMScheduler, DDPMScheduler],
    ):
        '''simple docstring'''
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler, mel=mel, vqvae=vqvae)
    def get_default_steps(self) -> int:
        '''simple docstring'''
        return 50 if isinstance(self.scheduler, DDIMScheduler) else 1000
    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        audio_file: str = None,
        raw_audio: np.ndarray = None,
        slice: int = 0,
        start_step: int = 0,
        steps: int = None,
        generator: torch.Generator = None,
        mask_start_secs: float = 0,
        mask_end_secs: float = 0,
        step_generator: torch.Generator = None,
        eta: float = 0,
        noise: torch.Tensor = None,
        encoding: torch.Tensor = None,
        return_dict=True,
    ) -> Union[
        Union[AudioPipelineOutput, ImagePipelineOutput],
        Tuple[List[Image.Image], Tuple[int, List[np.ndarray]]],
    ]:
        '''simple docstring'''
        steps = steps or self.get_default_steps()
        self.scheduler.set_timesteps(steps)
        step_generator = step_generator or generator
        # For backwards compatibility
        if type(self.unet.config.sample_size) == int:
            self.unet.config.sample_size = (self.unet.config.sample_size, self.unet.config.sample_size)
        if noise is None:
            noise = randn_tensor(
                (
                    batch_size,
                    self.unet.config.in_channels,
                    self.unet.config.sample_size[0],
                    self.unet.config.sample_size[1],
                ),
                generator=generator,
                device=self.device,
            )
        images = noise
        mask = None

        if audio_file is not None or raw_audio is not None:
            self.mel.load_audio(audio_file, raw_audio)
            input_image = self.mel.audio_slice_to_image(slice)
            input_image = np.frombuffer(input_image.tobytes(), dtype="uint8").reshape(
                (input_image.height, input_image.width)
            )
            input_image = (input_image / 255) * 2 - 1
            input_images = torch.tensor(input_image[np.newaxis, :, :], dtype=torch.float).to(self.device)

            if self.vqvae is not None:
                input_images = self.vqvae.encode(torch.unsqueeze(input_images, 0)).latent_dist.sample(
                    generator=generator
                )[0]
                input_images = self.vqvae.config.scaling_factor * input_images

            if start_step > 0:
                images[0, 0] = self.scheduler.add_noise(input_images, noise, self.scheduler.timesteps[start_step - 1])

            pixels_per_second = (
                self.unet.config.sample_size[1] * self.mel.get_sample_rate() / self.mel.x_res / self.mel.hop_length
            )
            mask_start = int(mask_start_secs * pixels_per_second)
            mask_end = int(mask_end_secs * pixels_per_second)
            mask = self.scheduler.add_noise(input_images, noise, torch.tensor(self.scheduler.timesteps[start_step:]))

        for step, t in enumerate(self.progress_bar(self.scheduler.timesteps[start_step:])):
            if isinstance(self.unet, UNet2DConditionModel):
                model_output = self.unet(images, t, encoding)["sample"]
            else:
                model_output = self.unet(images, t)["sample"]

            if isinstance(self.scheduler, DDIMScheduler):
                images = self.scheduler.step(
                    model_output=model_output, timestep=t, sample=images, eta=eta, generator=step_generator
                )["prev_sample"]
            else:
                images = self.scheduler.step(
                    model_output=model_output, timestep=t, sample=images, generator=step_generator
                )["prev_sample"]

            if mask is not None:
                if mask_start > 0:
                    images[:, :, :, :mask_start] = mask[:, step, :, :mask_start]
                if mask_end > 0:
                    images[:, :, :, -mask_end:] = mask[:, step, :, -mask_end:]

        if self.vqvae is not None:
            # 0.18215 was scaling factor used in training to ensure unit variance
            images = 1 / self.vqvae.config.scaling_factor * images
            images = self.vqvae.decode(images)["sample"]

        images = (images / 2 + 0.5).clamp(0, 1)
        images = images.cpu().permute(0, 2, 3, 1).numpy()
        images = (images * 255).round().astype("uint8")
        images = list(
            (Image.fromarray(_[:, :, 0]) for _ in images)
            if images.shape[3] == 1
            else (Image.fromarray(_, mode="RGB").convert("L") for _ in images)
        )

        audios = [self.mel.image_to_audio(_) for _ in images]
        if not return_dict:
            return images, (self.mel.get_sample_rate(), audios)

        return BaseOutput(**AudioPipelineOutput(np.array(audios)[:, np.newaxis, :]), **ImagePipelineOutput(images))
    @torch.no_grad()
    def encode(self, images: List[Image.Image], steps: int = 50) -> np.ndarray:
        '''simple docstring'''
        # Only works with DDIM, since the reverse walk must be deterministic
        assert isinstance(self.scheduler, DDIMScheduler)
        self.scheduler.set_timesteps(steps)
        sample = np.array(
            [np.frombuffer(image.tobytes(), dtype="uint8").reshape((1, image.height, image.width)) for image in images]
        )
        sample = (sample / 255) * 2 - 1
        sample = torch.Tensor(sample).to(self.device)

        for t in self.progress_bar(torch.flip(self.scheduler.timesteps, (0,))):
            prev_timestep = t - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps
            alpha_prod_t = self.scheduler.alphas_cumprod[t]
            alpha_prod_t_prev = (
                self.scheduler.alphas_cumprod[prev_timestep]
                if prev_timestep >= 0
                else self.scheduler.final_alpha_cumprod
            )
            beta_prod_t = 1 - alpha_prod_t
            model_output = self.unet(sample, t)["sample"]
            pred_sample_direction = (1 - alpha_prod_t_prev) ** 0.5 * model_output
            sample = (sample - pred_sample_direction) * alpha_prod_t_prev ** (-0.5)
            sample = sample * alpha_prod_t**0.5 + beta_prod_t**0.5 * model_output

        return sample
    @staticmethod
    def slerp(x0: torch.Tensor, x1: torch.Tensor, alpha: float) -> torch.Tensor:
        '''simple docstring'''
        theta = acos(torch.dot(torch.flatten(x0), torch.flatten(x1)) / torch.norm(x0) / torch.norm(x1))
        return sin((1 - alpha) * theta) * x0 / sin(theta) + sin(alpha * theta) * x1 / sin(theta)
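# A minimal usage sketch (not part of the original file). The checkpoint id below is
# an assumed example; any audio-diffusion checkpoint that exposes a Mel component
# should work the same way.
if __name__ == "__main__":
    import torch
    from diffusers import DiffusionPipeline

    pipe = DiffusionPipeline.from_pretrained("teticio/audio-diffusion-256")  # assumed checkpoint id
    output = pipe(batch_size=1, generator=torch.Generator().manual_seed(42))
    mel_image = output.images[0]    # generated mel spectrogram as a PIL image
    waveform = output.audios[0, 0]  # reconstructed audio as an np.ndarray
    print(pipe.mel.get_sample_rate(), waveform.shape)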
| 148 | 1 |
import warnings
from ...utils import logging
from .image_processing_clip import CLIPImageProcessor
logger = logging.get_logger(__name__)
class CLIPFeatureExtractor(CLIPImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        '''simple docstring'''
        warnings.warn(
            "The class CLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use CLIPImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
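# Migration sketch (added note, not part of the original file): downstream code can
# switch to CLIPImageProcessor directly, since both classes read the same config files.
#
#   from transformers import CLIPImageProcessor
#   image_processor = CLIPImageProcessor.from_pretrained("openai/clip-vit-base-patch32")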
| 340 |
'''simple docstring'''
import argparse
from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection
from diffusers import UnCLIPImageVariationPipeline, UnCLIPPipeline
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
parser.add_argument('''--dump_path''', default=None, type=str, required=True, help='''Path to the output model.''')
parser.add_argument(
'''--txt2img_unclip''',
default='''kakaobrain/karlo-v1-alpha''',
type=str,
required=False,
help='''The pretrained txt2img unclip.''',
)
    args = parser.parse_args()

    txt2img = UnCLIPPipeline.from_pretrained(args.txt2img_unclip)

    feature_extractor = CLIPImageProcessor()
    image_encoder = CLIPVisionModelWithProjection.from_pretrained('''openai/clip-vit-large-patch14''')

    img2img = UnCLIPImageVariationPipeline(
        decoder=txt2img.decoder,
        text_encoder=txt2img.text_encoder,
        tokenizer=txt2img.tokenizer,
        text_proj=txt2img.text_proj,
        feature_extractor=feature_extractor,
        image_encoder=image_encoder,
        super_res_first=txt2img.super_res_first,
        super_res_last=txt2img.super_res_last,
        decoder_scheduler=txt2img.decoder_scheduler,
        super_res_scheduler=txt2img.super_res_scheduler,
    )

    img2img.save_pretrained(args.dump_path)
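# Illustrative invocation (not part of the original script; the script name and
# output path are hypothetical, while the flags match the argparse setup above):
#
#   python convert_unclip_txt2img_to_image_variation.py \
#       --txt2img_unclip kakaobrain/karlo-v1-alpha \
#       --dump_path ./karlo-image-variation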
| 104 | 0 |
'''simple docstring'''
from string import ascii_lowercase, ascii_uppercase
def capitalize(sentence: str) -> str:
    '''simple docstring'''
    if not sentence:
        return ""
    lower_to_upper = dict(zip(ascii_lowercase, ascii_uppercase))
    return lower_to_upper.get(sentence[0], sentence[0]) + sentence[1:]
if __name__ == "__main__":
from doctest import testmod
testmod()
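# Illustrative behavior of capitalize() (examples added for clarity; they follow
# directly from the implementation above):
#
#   capitalize("hello world")  ->  "Hello world"
#   capitalize("123 hello")    ->  "123 hello"   (a non-lowercase first character passes through)
#   capitalize("")             ->  ""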
| 243 |
'''simple docstring'''
from transformers import DistilBertTokenizer, DistilBertTokenizerFast
from transformers.testing_utils import require_tokenizers, slow
from ..bert.test_tokenization_bert import BertTokenizationTest
@require_tokenizers
class DistilBertTokenizationTest(BertTokenizationTest):
    tokenizer_class = DistilBertTokenizer
    rust_tokenizer_class = DistilBertTokenizerFast
    test_rust_tokenizer = True

    @slow
    def test_sequence_builders(self):
        tokenizer = DistilBertTokenizer.from_pretrained("""distilbert-base-uncased""")

        text = tokenizer.encode("""sequence builders""", add_special_tokens=False)
        text_2 = tokenizer.encode("""multi-sequence build""", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
        assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_2 + [
            tokenizer.sep_token_id
        ]
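# To run just this test class from a transformers source checkout (the file path is
# an assumption based on the usual repository layout):
#
#   python -m pytest tests/models/distilbert/test_tokenization_distilbert.py -k DistilBert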
| 243 | 1 |