Dataset schema (one record per code-style pair; each record below lists its fields in this order):

  code                     string   86 to 54.5k characters
  code_codestyle           int64    0 to 371
  style_context            string   87 to 49.2k characters
  style_context_codestyle  int64    0 to 349
  label                    int64    0 to 1
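In every record visible in this dump, label is 1 exactly when code_codestyle equals style_context_codestyle, so the label plausibly marks whether the two snippets share the same code style. Below is a minimal sketch of loading such an export and checking that reading of the label; the field names come from the schema above, while the file name code_style_pairs.jsonl is a placeholder, since the real dataset identifier is not given here.

# Hypothetical loader: assumes the records were exported as JSON lines
# with exactly the five fields listed in the schema above.
from datasets import load_dataset

ds = load_dataset("json", data_files="code_style_pairs.jsonl", split="train")

# Sanity-check the apparent label semantics: label == 1 iff both snippets
# carry the same codestyle id (true for every record shown in this dump).
for row in ds:
    same_style = int(row["code_codestyle"] == row["style_context_codestyle"])
    assert row["label"] == same_style, row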
import json
import os
from typing import Optional, Tuple

from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "mgp-str": "https://huggingface.co/alibaba-damo/mgp-str-base/blob/main/vocab.json",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"mgp-str": 27}


class MgpstrTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(self, vocab_file, unk_token="[GO]", bos_token="[GO]", eos_token="[s]", pad_token="[GO]", **kwargs):
        super().__init__(
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            pad_token=pad_token,
            **kwargs,
        )
        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.vocab = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.vocab.items()}

    @property
    def vocab_size(self):
        return len(self.vocab)

    def get_vocab(self):
        return dict(self.vocab, **self.added_tokens_encoder)

    def _tokenize(self, text):
        # Character-level tokenization: every character is its own token.
        char_tokens = []
        for s in text:
            char_tokens.extend(s)
        return char_tokens

    def _convert_token_to_id(self, token):
        return self.vocab.get(token, self.vocab.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error("Vocabulary path ({}) should be a directory".format(save_directory))
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.vocab, indent=2, sort_keys=True, ensure_ascii=False) + "\n")
        return (vocab_file,)
code_codestyle: 302
from functools import lru_cache


def unique_prime_factors(n: int) -> set:
    # Collect the distinct prime factors of n by trial division.
    i = 2
    factors = set()
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.add(i)
    if n > 1:
        factors.add(n)
    return factors


@lru_cache
def upf_len(num: int) -> int:
    # Memoized count of unique prime factors.
    return len(unique_prime_factors(num))


def equality(iterable: list) -> bool:
    # True when every element of the list is equal (or the list is empty).
    return len(set(iterable)) in (0, 1)


def run(n: int) -> list:
    base = 2
    while True:
        # Increment each value of a generated range
        group = [base + i for i in range(n)]
        # Run elements through our unique_prime_factors function.
        # Append our target number to the end.
        checker = [upf_len(x) for x in group]
        checker.append(n)
        # If all numbers in the list are equal, return the group variable.
        if equality(checker):
            return group
        # Increment our base variable by 1
        base += 1


def solution(n: int = 4):
    # Project Euler 47: first of n consecutive integers that each have
    # exactly n distinct prime factors.
    results = run(n)
    return results[0] if len(results) else None


if __name__ == "__main__":
    print(solution())
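For reference, solution(3) should return 644 (the run 644, 645, 646) and the default solution() should return 134043, the published answer to Project Euler problem 47.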
style_context_codestyle: 302
label: 1
import argparse

import torch

from transformers import RemBertConfig, RemBertModel, load_tf_weights_in_rembert
from transformers.utils import logging


logging.set_verbosity_info()


def convert_rembert_tf_checkpoint_to_pytorch(tf_checkpoint_path, rembert_config_file, pytorch_dump_path):
    # Initialise the PyTorch model from the JSON config
    config = RemBertConfig.from_json_file(rembert_config_file)
    print("Building PyTorch model from configuration: {}".format(str(config)))
    model = RemBertModel(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_rembert(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print("Save PyTorch model to {}".format(pytorch_dump_path))
    torch.save(model.state_dict(), pytorch_dump_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--rembert_config_file",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained RemBERT model. \n"
            "This specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_rembert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.rembert_config_file, args.pytorch_dump_path)
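Assuming the script is saved as, say, convert_rembert_tf_checkpoint_to_pytorch.py (a hypothetical file name), a typical invocation would be: python convert_rembert_tf_checkpoint_to_pytorch.py --tf_checkpoint_path /path/to/tf_ckpt --rembert_config_file /path/to/config.json --pytorch_dump_path /path/to/pytorch_model.bin, where all three paths are placeholders.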
code_codestyle: 354
def euclidean_distance_sqr(point1, point2):
    return (point1[0] - point2[0]) ** 2 + (point1[1] - point2[1]) ** 2


def column_based_sort(array, column=0):
    return sorted(array, key=lambda x: x[column])


def dis_between_closest_pair(points, points_counts, min_dis=float("inf")):
    # Brute force over all pairs; used as the base case of the recursion.
    for i in range(points_counts - 1):
        for j in range(i + 1, points_counts):
            current_dis = euclidean_distance_sqr(points[i], points[j])
            if current_dis < min_dis:
                min_dis = current_dis
    return min_dis


def dis_between_closest_in_strip(points, points_counts, min_dis=float("inf")):
    # Inside the strip, each point only needs to be compared with at most
    # six of its neighbours.
    for i in range(min(6, points_counts - 1), points_counts):
        for j in range(max(0, i - 6), i):
            current_dis = euclidean_distance_sqr(points[i], points[j])
            if current_dis < min_dis:
                min_dis = current_dis
    return min_dis


def closest_pair_of_points_sqr(points_sorted_on_x, points_sorted_on_y, points_counts):
    # base case
    if points_counts <= 3:
        return dis_between_closest_pair(points_sorted_on_x, points_counts)

    # recursion
    mid = points_counts // 2
    closest_in_left = closest_pair_of_points_sqr(
        points_sorted_on_x, points_sorted_on_y[:mid], mid
    )
    closest_in_right = closest_pair_of_points_sqr(
        points_sorted_on_y, points_sorted_on_y[mid:], points_counts - mid
    )
    closest_pair_dis = min(closest_in_left, closest_in_right)

    cross_strip = []
    for point in points_sorted_on_x:
        if abs(point[0] - points_sorted_on_x[mid][0]) < closest_pair_dis:
            cross_strip.append(point)

    closest_in_strip = dis_between_closest_in_strip(
        cross_strip, len(cross_strip), closest_pair_dis
    )
    return min(closest_pair_dis, closest_in_strip)


def closest_pair_of_points(points, points_counts):
    points_sorted_on_x = column_based_sort(points, column=0)
    points_sorted_on_y = column_based_sort(points, column=1)
    return (
        closest_pair_of_points_sqr(points_sorted_on_x, points_sorted_on_y, points_counts)
    ) ** 0.5


if __name__ == "__main__":
    points = [(2, 3), (12, 30), (40, 50), (5, 1), (12, 10), (3, 4)]
    print("Distance:", closest_pair_of_points(points, len(points)))
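With the sample points in the __main__ block above, the expected output is Distance: 1.4142135623730951, the distance between the closest pair (2, 3) and (3, 4).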
style_context_codestyle: 41
label: 0
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging _lowercase = logging.get_logger(__name__) _lowercase = { '''facebook/dpr-ctx_encoder-single-nq-base''': ( '''https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/config.json''' ), '''facebook/dpr-question_encoder-single-nq-base''': ( '''https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/config.json''' ), '''facebook/dpr-reader-single-nq-base''': ( '''https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/config.json''' ), '''facebook/dpr-ctx_encoder-multiset-base''': ( '''https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/config.json''' ), '''facebook/dpr-question_encoder-multiset-base''': ( '''https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/config.json''' ), '''facebook/dpr-reader-multiset-base''': ( '''https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/config.json''' ), } class lowerCAmelCase_ ( _lowercase ): '''simple docstring''' _lowerCamelCase: str = '''dpr''' def __init__( self : Optional[int] ,A_ : Optional[int]=3_0522 ,A_ : Dict=768 ,A_ : str=12 ,A_ : List[Any]=12 ,A_ : str=3072 ,A_ : Tuple="gelu" ,A_ : Dict=0.1 ,A_ : Optional[Any]=0.1 ,A_ : Dict=512 ,A_ : Dict=2 ,A_ : List[Any]=0.02 ,A_ : List[str]=1e-12 ,A_ : str=0 ,A_ : Dict="absolute" ,A_ : int = 0 ,**A_ : Union[str, Any] ,) -> Dict: super().__init__(pad_token_id=A_ ,**A_ ) A = vocab_size A = hidden_size A = num_hidden_layers A = num_attention_heads A = hidden_act A = intermediate_size A = hidden_dropout_prob A = attention_probs_dropout_prob A = max_position_embeddings A = type_vocab_size A = initializer_range A = layer_norm_eps A = projection_dim A = position_embedding_type
code_codestyle: 74
import unittest import numpy as np from transformers import DistilBertConfig, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask if is_flax_available(): import jax.numpy as jnp from transformers.models.distilbert.modeling_flax_distilbert import ( FlaxDistilBertForMaskedLM, FlaxDistilBertForMultipleChoice, FlaxDistilBertForQuestionAnswering, FlaxDistilBertForSequenceClassification, FlaxDistilBertForTokenClassification, FlaxDistilBertModel, ) class A_ ( unittest.TestCase ): def __init__( self : Tuple ,SCREAMING_SNAKE_CASE__ : List[str] ,SCREAMING_SNAKE_CASE__ : Any=1_3 ,SCREAMING_SNAKE_CASE__ : int=7 ,SCREAMING_SNAKE_CASE__ : str=True ,SCREAMING_SNAKE_CASE__ : Dict=True ,SCREAMING_SNAKE_CASE__ : str=True ,SCREAMING_SNAKE_CASE__ : Dict=True ,SCREAMING_SNAKE_CASE__ : List[Any]=9_9 ,SCREAMING_SNAKE_CASE__ : List[Any]=3_2 ,SCREAMING_SNAKE_CASE__ : int=5 ,SCREAMING_SNAKE_CASE__ : List[Any]=4 ,SCREAMING_SNAKE_CASE__ : Optional[Any]=3_7 ,SCREAMING_SNAKE_CASE__ : Union[str, Any]="gelu" ,SCREAMING_SNAKE_CASE__ : int=0.1 ,SCREAMING_SNAKE_CASE__ : Optional[int]=0.1 ,SCREAMING_SNAKE_CASE__ : Optional[int]=5_1_2 ,SCREAMING_SNAKE_CASE__ : Dict=1_6 ,SCREAMING_SNAKE_CASE__ : Dict=2 ,SCREAMING_SNAKE_CASE__ : Optional[int]=0.02 ,SCREAMING_SNAKE_CASE__ : Dict=4 ,): __lowerCamelCase : int = parent __lowerCamelCase : Dict = batch_size __lowerCamelCase : Union[str, Any] = seq_length __lowerCamelCase : List[Any] = is_training __lowerCamelCase : Tuple = use_attention_mask __lowerCamelCase : List[str] = use_token_type_ids __lowerCamelCase : Any = use_labels __lowerCamelCase : List[str] = vocab_size __lowerCamelCase : Any = hidden_size __lowerCamelCase : Tuple = num_hidden_layers __lowerCamelCase : Union[str, Any] = num_attention_heads __lowerCamelCase : Union[str, Any] = intermediate_size __lowerCamelCase : List[Any] = hidden_act __lowerCamelCase : int = hidden_dropout_prob __lowerCamelCase : int = attention_probs_dropout_prob __lowerCamelCase : Union[str, Any] = max_position_embeddings __lowerCamelCase : Union[str, Any] = type_vocab_size __lowerCamelCase : List[str] = type_sequence_label_size __lowerCamelCase : Tuple = initializer_range __lowerCamelCase : Optional[int] = num_choices def lowerCAmelCase ( self : Union[str, Any]): __lowerCamelCase : Dict = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size) __lowerCamelCase : Union[str, Any] = None if self.use_attention_mask: __lowerCamelCase : Any = random_attention_mask([self.batch_size, self.seq_length]) __lowerCamelCase : str = DistilBertConfig( vocab_size=self.vocab_size ,dim=self.hidden_size ,n_layers=self.num_hidden_layers ,n_heads=self.num_attention_heads ,hidden_dim=self.intermediate_size ,hidden_act=self.hidden_act ,dropout=self.hidden_dropout_prob ,attention_dropout=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,initializer_range=self.initializer_range ,tie_weights_=SCREAMING_SNAKE_CASE__ ,) return config, input_ids, attention_mask def lowerCAmelCase ( self : List[Any]): __lowerCamelCase : List[str] = self.prepare_config_and_inputs() __lowerCamelCase , __lowerCamelCase , __lowerCamelCase : Dict = config_and_inputs __lowerCamelCase : Any = {'input_ids': input_ids, 'attention_mask': attention_mask} return config, inputs_dict @require_flax class A_ ( SCREAMING_SNAKE_CASE , unittest.TestCase ): _UpperCAmelCase : Dict = ( ( FlaxDistilBertModel, FlaxDistilBertForMaskedLM, 
FlaxDistilBertForMultipleChoice, FlaxDistilBertForQuestionAnswering, FlaxDistilBertForSequenceClassification, FlaxDistilBertForTokenClassification, FlaxDistilBertForQuestionAnswering, ) if is_flax_available() else () ) def lowerCAmelCase ( self : Optional[Any]): __lowerCamelCase : Tuple = FlaxDistilBertModelTester(self) @slow def lowerCAmelCase ( self : int): for model_class_name in self.all_model_classes: __lowerCamelCase : List[Any] = model_class_name.from_pretrained('distilbert-base-uncased') __lowerCamelCase : List[str] = model(np.ones((1, 1))) self.assertIsNotNone(SCREAMING_SNAKE_CASE__) @require_flax class A_ ( unittest.TestCase ): @slow def lowerCAmelCase ( self : str): __lowerCamelCase : Union[str, Any] = FlaxDistilBertModel.from_pretrained('distilbert-base-uncased') __lowerCamelCase : str = np.array([[0, 3_4_5, 2_3_2, 3_2_8, 7_4_0, 1_4_0, 1_6_9_5, 6_9, 6_0_7_8, 1_5_8_8, 2]]) __lowerCamelCase : List[Any] = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]) __lowerCamelCase : Union[str, Any] = model(SCREAMING_SNAKE_CASE__ ,attention_mask=SCREAMING_SNAKE_CASE__)[0] __lowerCamelCase : Optional[int] = (1, 1_1, 7_6_8) self.assertEqual(output.shape ,SCREAMING_SNAKE_CASE__) __lowerCamelCase : Union[str, Any] = np.array([[[-0.1639, 0.3299, 0.1648], [-0.1746, 0.3289, 0.1710], [-0.1884, 0.3357, 0.1810]]]) self.assertTrue(jnp.allclose(output[:, 1:4, 1:4] ,SCREAMING_SNAKE_CASE__ ,atol=1E-4))
style_context_codestyle: 73
label: 0
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available a_ : Dict = { 'configuration_xlm_roberta_xl': [ 'XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP', 'XLMRobertaXLConfig', 'XLMRobertaXLOnnxConfig', ], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a_ : List[Any] = [ 'XLM_ROBERTA_XL_PRETRAINED_MODEL_ARCHIVE_LIST', 'XLMRobertaXLForCausalLM', 'XLMRobertaXLForMaskedLM', 'XLMRobertaXLForMultipleChoice', 'XLMRobertaXLForQuestionAnswering', 'XLMRobertaXLForSequenceClassification', 'XLMRobertaXLForTokenClassification', 'XLMRobertaXLModel', 'XLMRobertaXLPreTrainedModel', ] if TYPE_CHECKING: from .configuration_xlm_roberta_xl import ( XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMRobertaXLConfig, XLMRobertaXLOnnxConfig, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_xlm_roberta_xl import ( XLM_ROBERTA_XL_PRETRAINED_MODEL_ARCHIVE_LIST, XLMRobertaXLForCausalLM, XLMRobertaXLForMaskedLM, XLMRobertaXLForMultipleChoice, XLMRobertaXLForQuestionAnswering, XLMRobertaXLForSequenceClassification, XLMRobertaXLForTokenClassification, XLMRobertaXLModel, XLMRobertaXLPreTrainedModel, ) else: import sys a_ : Tuple = _LazyModule(__name__, globals()['__file__'], _import_structure)
code_codestyle: 366
import gc import importlib.metadata import tempfile import unittest from packaging import version from transformers import ( AutoModel, AutoModelForCausalLM, AutoModelForSeqaSeqLM, AutoModelForSequenceClassification, AutoTokenizer, BitsAndBytesConfig, pipeline, ) from transformers.testing_utils import ( is_torch_available, require_accelerate, require_bitsandbytes, require_torch, require_torch_gpu, require_torch_multi_gpu, slow, ) def lowerCamelCase__ (_UpperCAmelCase): if model.config.model_type == "gpt2": return model.transformer.h[0].mlp.c_fc return model.transformer.h[0].mlp.dense_ah_to_h if is_torch_available(): import torch import torch.nn as nn class _snake_case ( nn.Module ): def __init__( self , a , a) -> Union[str, Any]: super().__init__() SCREAMING_SNAKE_CASE = module SCREAMING_SNAKE_CASE = nn.Sequential( nn.Linear(module.in_features , a , bias=a) , nn.Linear(a , module.out_features , bias=a) , ) SCREAMING_SNAKE_CASE = (2.0 / (5 * min(module.in_features , module.out_features))) ** 0.5 nn.init.normal_(self.adapter[0].weight , std=a) nn.init.zeros_(self.adapter[1].weight) self.adapter.to(module.weight.device) def SCREAMING_SNAKE_CASE__ ( self , a , *a , **a) -> Any: return self.module(a , *a , **a) + self.adapter(a) @require_bitsandbytes @require_accelerate @require_torch @require_torch_gpu @slow class _snake_case ( unittest.TestCase ): # We keep the constants inside the init function and model loading inside setUp function # We need to test on relatively large models (aka >1b parameters otherwise the quantiztion may not work as expected) # Therefore here we use only bloom-1b3 to test our module _lowercase : Union[str, Any] = '''bigscience/bloom-1b7''' # Constant values _lowercase : str = 2.109_6595_5269_2574 _lowercase : Any = '''Hello my name is''' _lowercase : Any = set() EXPECTED_OUTPUTS.add('''Hello my name is John and I am a professional photographer. 
I''' ) EXPECTED_OUTPUTS.add('''Hello my name is John.\nI am a friend of your father.\n''' ) EXPECTED_OUTPUTS.add('''Hello my name is John Doe, I am a student at the University''' ) _lowercase : Union[str, Any] = 10 def SCREAMING_SNAKE_CASE__ ( self) -> Any: # Models and tokenizer SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained(self.model_name) class _snake_case ( A__ ): def SCREAMING_SNAKE_CASE__ ( self) -> Tuple: super().setUp() # Models and tokenizer SCREAMING_SNAKE_CASE = AutoModelForCausalLM.from_pretrained( self.model_name , torch_dtype=torch.floataa , device_map='auto') SCREAMING_SNAKE_CASE = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=a , device_map='auto') def SCREAMING_SNAKE_CASE__ ( self) -> Tuple: del self.model_fpaa del self.model_abit gc.collect() torch.cuda.empty_cache() def SCREAMING_SNAKE_CASE__ ( self) -> Any: SCREAMING_SNAKE_CASE = self.model_abit.config self.assertTrue(hasattr(a , 'quantization_config')) SCREAMING_SNAKE_CASE = config.to_dict() SCREAMING_SNAKE_CASE = config.to_diff_dict() SCREAMING_SNAKE_CASE = config.to_json_string() def SCREAMING_SNAKE_CASE__ ( self) -> Any: from bitsandbytes.nn import Paramsabit SCREAMING_SNAKE_CASE = self.model_fpaa.get_memory_footprint() SCREAMING_SNAKE_CASE = self.model_abit.get_memory_footprint() self.assertAlmostEqual(mem_fpaa / mem_abit , self.EXPECTED_RELATIVE_DIFFERENCE) SCREAMING_SNAKE_CASE = get_some_linear_layer(self.model_abit) self.assertTrue(linear.weight.__class__ == Paramsabit) def SCREAMING_SNAKE_CASE__ ( self) -> Optional[int]: from transformers import TaPreTrainedModel self.model_fpaa.get_memory_footprint() self.model_abit.get_memory_footprint() for name, module in self.model_abit.named_modules(): if isinstance(a , torch.nn.Linear): if name not in ["lm_head"] + TaPreTrainedModel._keep_in_fpaa_modules: # 4-bit parameters are packed in uint8 variables self.assertTrue(module.weight.dtype == torch.uinta) def SCREAMING_SNAKE_CASE__ ( self) -> Dict: SCREAMING_SNAKE_CASE = self.tokenizer(self.input_text , return_tensors='pt') SCREAMING_SNAKE_CASE = self.model_abit.generate(input_ids=encoded_input['input_ids'].to(0) , max_new_tokens=10) self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=a) , self.EXPECTED_OUTPUTS) def SCREAMING_SNAKE_CASE__ ( self) -> Any: SCREAMING_SNAKE_CASE = BitsAndBytesConfig() SCREAMING_SNAKE_CASE = True SCREAMING_SNAKE_CASE = AutoModelForCausalLM.from_pretrained( self.model_name , quantization_config=a , device_map='auto') SCREAMING_SNAKE_CASE = self.tokenizer(self.input_text , return_tensors='pt') SCREAMING_SNAKE_CASE = model_abit_from_config.generate( input_ids=encoded_input['input_ids'].to(0) , max_new_tokens=10) self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=a) , self.EXPECTED_OUTPUTS) def SCREAMING_SNAKE_CASE__ ( self) -> str: with self.assertRaises(a), tempfile.TemporaryDirectory() as tmpdirname: self.model_abit.save_pretrained(a) def SCREAMING_SNAKE_CASE__ ( self) -> List[str]: SCREAMING_SNAKE_CASE = BitsAndBytesConfig() with self.assertRaises(a): SCREAMING_SNAKE_CASE = AutoModelForCausalLM.from_pretrained( self.model_name , quantization_config=a , load_in_abit=a , device_map='auto' , bnb_abit_quant_type='nf4' , ) def SCREAMING_SNAKE_CASE__ ( self) -> int: with self.assertRaises(a): # Tries with `str` self.model_abit.to('cpu') with self.assertRaises(a): # Tries with a `dtype`` self.model_abit.to(torch.floataa) with self.assertRaises(a): # Tries with a `device` self.model_abit.to(torch.device('cuda:0')) with 
self.assertRaises(a): # Tries with a `device` self.model_abit.float() with self.assertRaises(a): # Tries with a `device` self.model_abit.half() # Test if we did not break anything SCREAMING_SNAKE_CASE = self.tokenizer(self.input_text , return_tensors='pt') SCREAMING_SNAKE_CASE = self.model_fpaa.to(torch.floataa) SCREAMING_SNAKE_CASE = self.model_fpaa.generate(input_ids=encoded_input['input_ids'].to(0) , max_new_tokens=10) # Check this does not throw an error SCREAMING_SNAKE_CASE = self.model_fpaa.to('cpu') # Check this does not throw an error SCREAMING_SNAKE_CASE = self.model_fpaa.half() # Check this does not throw an error SCREAMING_SNAKE_CASE = self.model_fpaa.float() def SCREAMING_SNAKE_CASE__ ( self) -> int: SCREAMING_SNAKE_CASE = AutoModelForSeqaSeqLM.from_pretrained('t5-small' , load_in_abit=a , device_map='auto') self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wo.weight.dtype == torch.floataa) @require_bitsandbytes @require_accelerate @require_torch @require_torch_gpu @slow class _snake_case ( unittest.TestCase ): @classmethod def SCREAMING_SNAKE_CASE__ ( cls) -> Tuple: SCREAMING_SNAKE_CASE = 't5-small' SCREAMING_SNAKE_CASE = 'google/flan-t5-small' # flan-t5 uses dense-act instead of dense-relu-dense SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained(cls.model_name) SCREAMING_SNAKE_CASE = 'Translate in German: Hello, my dog is cute' def SCREAMING_SNAKE_CASE__ ( self) -> Dict: gc.collect() torch.cuda.empty_cache() def SCREAMING_SNAKE_CASE__ ( self) -> Optional[Any]: from transformers import TaForConditionalGeneration SCREAMING_SNAKE_CASE = TaForConditionalGeneration._keep_in_fpaa_modules SCREAMING_SNAKE_CASE = None # test with `t5-small` SCREAMING_SNAKE_CASE = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=a , device_map='auto') SCREAMING_SNAKE_CASE = self.tokenizer(self.input_text , return_tensors='pt').to(0) SCREAMING_SNAKE_CASE = model.generate(**a) # test with `flan-t5-small` SCREAMING_SNAKE_CASE = TaForConditionalGeneration.from_pretrained( self.dense_act_model_name , load_in_abit=a , device_map='auto') SCREAMING_SNAKE_CASE = self.tokenizer(self.input_text , return_tensors='pt').to(0) SCREAMING_SNAKE_CASE = model.generate(**a) SCREAMING_SNAKE_CASE = modules def SCREAMING_SNAKE_CASE__ ( self) -> Optional[Any]: import bitsandbytes as bnb from transformers import TaForConditionalGeneration # test with `t5-small` SCREAMING_SNAKE_CASE = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=a , device_map='auto') # there was a bug with decoders - this test checks that it is fixed self.assertTrue(isinstance(model.decoder.block[0].layer[0].SelfAttention.q , bnb.nn.Linearabit)) SCREAMING_SNAKE_CASE = self.tokenizer(self.input_text , return_tensors='pt').to(0) SCREAMING_SNAKE_CASE = model.generate(**a) # test with `flan-t5-small` SCREAMING_SNAKE_CASE = TaForConditionalGeneration.from_pretrained( self.dense_act_model_name , load_in_abit=a , device_map='auto') SCREAMING_SNAKE_CASE = self.tokenizer(self.input_text , return_tensors='pt').to(0) SCREAMING_SNAKE_CASE = model.generate(**a) class _snake_case ( A__ ): def SCREAMING_SNAKE_CASE__ ( self) -> str: super().setUp() # model_name SCREAMING_SNAKE_CASE = 'bigscience/bloom-560m' SCREAMING_SNAKE_CASE = 't5-small' # Different types of model SCREAMING_SNAKE_CASE = AutoModel.from_pretrained(self.model_name , load_in_abit=a , device_map='auto') # Sequence classification model SCREAMING_SNAKE_CASE = AutoModelForSequenceClassification.from_pretrained( self.model_name , load_in_abit=a , 
device_map='auto') # CausalLM model SCREAMING_SNAKE_CASE = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=a , device_map='auto') # Seq2seq model SCREAMING_SNAKE_CASE = AutoModelForSeqaSeqLM.from_pretrained( self.seq_to_seq_name , load_in_abit=a , device_map='auto') def SCREAMING_SNAKE_CASE__ ( self) -> List[Any]: del self.base_model del self.sequence_model del self.model_abit del self.seq_to_seq_model gc.collect() torch.cuda.empty_cache() def SCREAMING_SNAKE_CASE__ ( self) -> Optional[int]: from bitsandbytes.nn import Paramsabit self.assertTrue(self.base_model.h[-1].mlp.dense_ah_to_h.weight.__class__ == Paramsabit) # Other heads should be nn.Parameter self.assertTrue(self.model_abit.lm_head.weight.__class__ == torch.nn.Parameter) self.assertTrue(self.sequence_model.score.weight.__class__ == torch.nn.Parameter) self.assertTrue(self.seq_to_seq_model.lm_head.weight.__class__ == torch.nn.Parameter) class _snake_case ( A__ ): def SCREAMING_SNAKE_CASE__ ( self) -> Union[str, Any]: super().setUp() def SCREAMING_SNAKE_CASE__ ( self) -> Dict: del self.pipe gc.collect() torch.cuda.empty_cache() def SCREAMING_SNAKE_CASE__ ( self) -> Union[str, Any]: SCREAMING_SNAKE_CASE = pipeline( 'text-generation' , model=self.model_name , model_kwargs={'device_map': 'auto', 'load_in_4bit': True, 'torch_dtype': torch.floataa} , max_new_tokens=self.MAX_NEW_TOKENS , ) # Real second forward pass SCREAMING_SNAKE_CASE = self.pipe(self.input_text) self.assertIn(pipeline_output[0]['generated_text'] , self.EXPECTED_OUTPUTS) @require_torch_multi_gpu class _snake_case ( A__ ): def SCREAMING_SNAKE_CASE__ ( self) -> int: super().setUp() def SCREAMING_SNAKE_CASE__ ( self) -> Union[str, Any]: SCREAMING_SNAKE_CASE = AutoModelForCausalLM.from_pretrained( self.model_name , load_in_abit=a , device_map='balanced') # Check correct device map self.assertEqual(set(model_parallel.hf_device_map.values()) , {0, 1}) # Check that inference pass works on the model SCREAMING_SNAKE_CASE = self.tokenizer(self.input_text , return_tensors='pt') # Second real batch SCREAMING_SNAKE_CASE = model_parallel.generate(input_ids=encoded_input['input_ids'].to(0) , max_new_tokens=10) self.assertIn(self.tokenizer.decode(output_parallel[0] , skip_special_tokens=a) , self.EXPECTED_OUTPUTS) class _snake_case ( A__ ): def SCREAMING_SNAKE_CASE__ ( self) -> Tuple: SCREAMING_SNAKE_CASE = 'facebook/opt-350m' super().setUp() def SCREAMING_SNAKE_CASE__ ( self) -> Any: if version.parse(importlib.metadata.version('bitsandbytes')) < version.parse('0.37.0'): return # Step 1: freeze all parameters SCREAMING_SNAKE_CASE = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=a) self.assertEqual(set(model.hf_device_map.values()) , {torch.cuda.current_device()}) for param in model.parameters(): SCREAMING_SNAKE_CASE = False # freeze the model - train adapters later if param.ndim == 1: # cast the small parameters (e.g. 
layernorm) to fp32 for stability SCREAMING_SNAKE_CASE = param.data.to(torch.floataa) # Step 2: add adapters for _, module in model.named_modules(): if "OPTAttention" in repr(type(a)): SCREAMING_SNAKE_CASE = LoRALayer(module.q_proj , rank=16) SCREAMING_SNAKE_CASE = LoRALayer(module.k_proj , rank=16) SCREAMING_SNAKE_CASE = LoRALayer(module.v_proj , rank=16) # Step 3: dummy batch SCREAMING_SNAKE_CASE = self.tokenizer('Test batch ' , return_tensors='pt').to(0) # Step 4: Check if the gradient is not None with torch.cuda.amp.autocast(): SCREAMING_SNAKE_CASE = model.forward(**a) out.logits.norm().backward() for module in model.modules(): if isinstance(a , a): self.assertTrue(module.adapter[1].weight.grad is not None) self.assertTrue(module.adapter[1].weight.grad.norm().item() > 0) elif isinstance(a , nn.Embedding): self.assertTrue(module.weight.grad is None) class _snake_case ( A__ ): _lowercase : str = '''gpt2-xl''' _lowercase : Union[str, Any] = 3.3191_8548_5415_2187
style_context_codestyle: 327
label: 0
from __future__ import annotations

from math import pi

# Define the Reduced Planck Constant ℏ (H bar) and the speed of light c
REDUCED_PLANCK_CONSTANT = 1.054571817e-34  # unit of ℏ : J * s
SPEED_OF_LIGHT = 3e8  # unit of c : m * s^-1


def casimir_force(force: float, area: float, distance: float) -> dict[str, float]:
    # Exactly one of the three arguments must be 0; it is the unknown that
    # gets solved for from the Casimir relation F = pi^2 * hbar * c * A / (240 * d^4).
    if (force, area, distance).count(0) != 1:
        raise ValueError("One and only one argument must be 0")
    if force < 0:
        raise ValueError("Magnitude of force can not be negative")
    if distance < 0:
        raise ValueError("Distance can not be negative")
    if area < 0:
        raise ValueError("Area can not be negative")
    if force == 0:
        force = (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (
            240 * (distance) ** 4
        )
        return {"force": force}
    elif area == 0:
        area = (240 * force * (distance) ** 4) / (
            REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2
        )
        return {"area": area}
    elif distance == 0:
        distance = (
            (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (240 * force)
        ) ** (1 / 4)
        return {"distance": distance}
    raise ValueError("One and only one argument must be 0")


# Run doctest
if __name__ == "__main__":
    import doctest

    doctest.testmod()
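Using the casimir_force name from the cleaned-up block above, a quick usage sketch with illustrative SI values:

# Solve for the Casimir force between two 4 cm^2 plates 1 micrometre apart;
# exactly one argument must be zero and is the one being solved for.
print(casimir_force(force=0, area=4e-4, distance=1e-6))
# Solve instead for the plate separation that produces a given force.
print(casimir_force(force=2e-7, area=4e-4, distance=0))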
code_codestyle: 128
from __future__ import annotations

import numpy as np
from numpy import float64
from numpy.typing import NDArray


def jacobi_iteration_method(
    coefficient_matrix: NDArray[float64],
    constant_matrix: NDArray[float64],
    init_val: list[float],
    iterations: int,
) -> list[float]:
    rows1, cols1 = coefficient_matrix.shape
    rows2, cols2 = constant_matrix.shape

    if rows1 != cols1:
        raise ValueError(f"Coefficient matrix dimensions must be nxn but received {rows1}x{cols1}")
    if cols2 != 1:
        raise ValueError(f"Constant matrix must be nx1 but received {rows2}x{cols2}")
    if rows1 != rows2:
        raise ValueError(
            "Coefficient and constant matrices dimensions must be nxn and nx1 but "
            f"received {rows1}x{cols1} and {rows2}x{cols2}"
        )
    if len(init_val) != rows1:
        raise ValueError(
            "Number of initial values must be equal to number of rows in coefficient "
            f"matrix but received {len(init_val)} and {rows1}"
        )
    if iterations <= 0:
        raise ValueError("Iterations must be at least 1")

    # Augmented table [A | b]
    table: NDArray[float64] = np.concatenate((coefficient_matrix, constant_matrix), axis=1)
    rows, cols = table.shape

    strictly_diagonally_dominant(table)

    # Iterates the whole matrix for given number of times
    for _ in range(iterations):
        new_val = []
        for row in range(rows):
            temp = 0
            for col in range(cols):
                if col == row:
                    denom = table[row][col]
                elif col == cols - 1:
                    val = table[row][col]
                else:
                    temp += (-1) * table[row][col] * init_val[col]
            temp = (temp + val) / denom
            new_val.append(temp)
        init_val = new_val

    return [float(i) for i in new_val]


def strictly_diagonally_dominant(table: NDArray[float64]) -> bool:
    rows, cols = table.shape
    is_diagonally_dominant = True

    for i in range(0, rows):
        total = 0
        for j in range(0, cols - 1):
            if i == j:
                continue
            else:
                total += table[i][j]
        if table[i][i] <= total:
            raise ValueError("Coefficient matrix is not strictly diagonally dominant")

    return is_diagonally_dominant


# Test Cases
if __name__ == "__main__":
    import doctest

    doctest.testmod()
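A short, illustrative call of jacobi_iteration_method as reconstructed above (the example system is strictly diagonally dominant, which the function requires):

import numpy as np

# Illustrative 3x3 system; the coefficient matrix is strictly diagonally dominant.
coefficient = np.array([[4.0, 1.0, 1.0], [1.0, 5.0, 2.0], [1.0, 2.0, 4.0]])
constant = np.array([[2.0], [-6.0], [-4.0]])
init_val = [0.5, -0.5, -0.5]
print(jacobi_iteration_method(coefficient, constant, init_val, iterations=50))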
style_context_codestyle: 128
label: 1
'''simple docstring''' import os import re import unicodedata from shutil import copyfile from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union import sentencepiece as spm from ...tokenization_utils import PreTrainedTokenizer from ...utils import is_torch_available, logging if is_torch_available(): import torch if TYPE_CHECKING: from transformers.pipelines.conversational import Conversation lowerCAmelCase__ : Dict = logging.get_logger(__name__) lowerCAmelCase__ : Optional[int] = {"vocab_file": "spiece.model"} lowerCAmelCase__ : Optional[int] = { "vocab_file": { "AI-Sweden/gpt-sw3-126m": "https://huggingface.co/AI-Sweden/gpt-sw3-126m/resolve/main/spiece.model", "AI-Sweden/gpt-sw3-350m": "https://huggingface.co/AI-Sweden/gpt-sw3-350m/resolve/main/spiece.model", "AI-Sweden/gpt-sw3-1.6b": "https://huggingface.co/AI-Sweden/gpt-sw3-1.6b/resolve/main/spiece.model", "AI-Sweden/gpt-sw3-6.7b": "https://huggingface.co/AI-Sweden/gpt-sw3-6.7b/resolve/main/spiece.model", "AI-Sweden/gpt-sw3-20b": "https://huggingface.co/AI-Sweden/gpt-sw3-20b/resolve/main/spiece.model", } } lowerCAmelCase__ : Optional[Any] = { "AI-Sweden/gpt-sw3-126m": 20_48, "AI-Sweden/gpt-sw3-350m": 20_48, "AI-Sweden/gpt-sw3-1.6b": 20_48, "AI-Sweden/gpt-sw3-6.7b": 20_48, "AI-Sweden/gpt-sw3-20b": 20_48, } class SCREAMING_SNAKE_CASE__ ( snake_case__ ): """simple docstring""" SCREAMING_SNAKE_CASE = VOCAB_FILES_NAMES SCREAMING_SNAKE_CASE = PRETRAINED_VOCAB_FILES_MAP SCREAMING_SNAKE_CASE = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES SCREAMING_SNAKE_CASE = ['''input_ids''', '''attention_mask'''] def __init__( self : Optional[Any] , UpperCAmelCase_ : int , UpperCAmelCase_ : List[Any]=False , UpperCAmelCase_ : Dict=False , UpperCAmelCase_ : List[str]=False , UpperCAmelCase_ : Union[str, Any]=None , UpperCAmelCase_ : List[str]=None , UpperCAmelCase_ : int=None , UpperCAmelCase_ : Dict=None , UpperCAmelCase_ : Optional[Dict[str, Any]] = None , **UpperCAmelCase_ : List[Any] , ): """simple docstring""" __UpperCAmelCase : List[Any] = {} if sp_model_kwargs is None else sp_model_kwargs __UpperCAmelCase : Union[str, Any] = kwargs.get("name_or_path" ) if name_or_path is None: logger.warning( "name_or_path not provided, will work for all GPTSw3 models except gpt-sw3-7b," " you are testing the model, this can safely be ignored" ) __UpperCAmelCase : Optional[int] = "None" # Default definitions for our 2 tokenizer versions, with None-checks to enable proper testing __UpperCAmelCase : Union[str, Any] = "<|endoftext|>" if eos_token is None else eos_token __UpperCAmelCase : List[Any] = "<unk>" if unk_token is None else unk_token if "gpt-sw3-7b" in name_or_path: __UpperCAmelCase : Dict = unk_token if pad_token is None else pad_token __UpperCAmelCase : List[Any] = eos_token if bos_token is None else bos_token else: __UpperCAmelCase : str = "<pad>" if pad_token is None else pad_token __UpperCAmelCase : List[str] = "<s>" if bos_token is None else bos_token super().__init__( do_lower_case=UpperCAmelCase_ , remove_space=UpperCAmelCase_ , keep_accents=UpperCAmelCase_ , bos_token=UpperCAmelCase_ , eos_token=UpperCAmelCase_ , unk_token=UpperCAmelCase_ , pad_token=UpperCAmelCase_ , sp_model_kwargs=self.sp_model_kwargs , **UpperCAmelCase_ , ) __UpperCAmelCase : Tuple = do_lower_case __UpperCAmelCase : int = remove_space __UpperCAmelCase : Tuple = keep_accents __UpperCAmelCase : Any = vocab_file __UpperCAmelCase : Dict = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(UpperCAmelCase_ ) # Used for whitespace normalization in input 
texts # fmt : off __UpperCAmelCase : Dict = {" ", " ", " ", " ", " ", " ", " ", " ", " ", " ", "", "„"} # fmt : on # Regular expression to remove non-printing characters (e.g. some unicode control chars) in preprocessing __UpperCAmelCase : Any = re.compile( f"[{''.join(map(UpperCAmelCase_ , list(range(0 , 9 ) ) + list(range(11 , 32 ) ) + list(range(127 , 160 ) ) + [160, 173, 8_203] ) )}]" ) def __getstate__( self : Tuple ): """simple docstring""" __UpperCAmelCase : Dict = self.__dict__.copy() __UpperCAmelCase : Any = None return state def __setstate__( self : Tuple , UpperCAmelCase_ : Any ): """simple docstring""" __UpperCAmelCase : Any = d # for backward compatibility if not hasattr(self , "sp_model_kwargs" ): __UpperCAmelCase : Optional[int] = {} __UpperCAmelCase : Any = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) @property # Copied from transformers.models.albert.tokenization_albert.AlbertTokenizer.vocab_size def lowerCamelCase_ ( self : Optional[int] ): """simple docstring""" return len(self.sp_model ) def lowerCamelCase_ ( self : Any , UpperCAmelCase_ : str ): """simple docstring""" __UpperCAmelCase : List[str] = self.non_printing_characters_re.sub("" , UpperCAmelCase_ ) # Normalize whitespaces __UpperCAmelCase : Tuple = "".join([char if char not in self.whitespaces else " " for char in text] ) # NFC Unicode normalization __UpperCAmelCase : int = unicodedata.normalize("NFC" , UpperCAmelCase_ ) return text def lowerCamelCase_ ( self : Union[str, Any] , UpperCAmelCase_ : str , **UpperCAmelCase_ : List[Any] ): """simple docstring""" __UpperCAmelCase : List[Any] = self.preprocess_text(UpperCAmelCase_ ) return self.sp_model.encode(UpperCAmelCase_ , out_type=UpperCAmelCase_ ) def lowerCamelCase_ ( self : Any , UpperCAmelCase_ : str ): """simple docstring""" return self.sp_model.PieceToId(UpperCAmelCase_ ) def lowerCamelCase_ ( self : List[str] , UpperCAmelCase_ : int ): """simple docstring""" return self.sp_model.IdToPiece(UpperCAmelCase_ ) @staticmethod def lowerCamelCase_ ( UpperCAmelCase_ : str ): """simple docstring""" return out_string def lowerCamelCase_ ( self : List[str] , UpperCAmelCase_ : List[str] ): """simple docstring""" __UpperCAmelCase : Optional[Any] = [] __UpperCAmelCase : Optional[int] = "" __UpperCAmelCase : List[str] = False for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: # TODO: Check if this is needed, as it ensures that decode(encode(doc)) != doc by adding extra whitespace in the decoded document if not prev_is_special: out_string += " " out_string += self.sp_model.decode(UpperCAmelCase_ ) + token __UpperCAmelCase : Optional[int] = True __UpperCAmelCase : List[str] = [] else: current_sub_tokens.append(UpperCAmelCase_ ) __UpperCAmelCase : int = False out_string += self.sp_model.decode(UpperCAmelCase_ ) return out_string def lowerCamelCase_ ( self : List[str] ): """simple docstring""" __UpperCAmelCase : Optional[Any] = {self.convert_ids_to_tokens(UpperCAmelCase_ ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def lowerCamelCase_ ( self : Optional[Any] , UpperCAmelCase_ : str , UpperCAmelCase_ : Optional[str] = None ): """simple docstring""" if not os.path.isdir(UpperCAmelCase_ ): logger.error(f"Vocabulary path ({save_directory}) should be a directory" ) return __UpperCAmelCase : str = os.path.join( UpperCAmelCase_ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] 
) if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCAmelCase_ ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , UpperCAmelCase_ ) elif not os.path.isfile(self.vocab_file ): with open(UpperCAmelCase_ , "wb" ) as fi: __UpperCAmelCase : Optional[Any] = self.sp_model.serialized_model_proto() fi.write(UpperCAmelCase_ ) return (out_vocab_file,) def lowerCamelCase_ ( self : Tuple , UpperCAmelCase_ : Union[str, List[str]] , UpperCAmelCase_ : Union[str, bool] = False ): """simple docstring""" if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ): __UpperCAmelCase : Optional[int] = self.preprocess_text(UpperCAmelCase_ ) __UpperCAmelCase : Optional[int] = self.sp_model.encode(UpperCAmelCase_ ) else: __UpperCAmelCase : Optional[Any] = [self.preprocess_text(UpperCAmelCase_ ) for t in text] __UpperCAmelCase : List[str] = self.sp_model.encode(UpperCAmelCase_ ) if return_tensors is True or return_tensors == "pt": __UpperCAmelCase : List[Any] = torch.tensor(UpperCAmelCase_ ) return token_ids def lowerCamelCase_ ( self : int , UpperCAmelCase_ : Union[int, List[int]] ): """simple docstring""" return self.sp_model.decode(UpperCAmelCase_ ) def lowerCamelCase_ ( self : Tuple , UpperCAmelCase_ : "Conversation" ): """simple docstring""" __UpperCAmelCase : Optional[Any] = [f"User: {text}" if is_user else f"Bot: {text}" for is_user, text in conversation.iter_texts()] __UpperCAmelCase : str = ( f"{self.eos_token}{self.bos_token}" + f"{self.bos_token}".join(UpperCAmelCase_ ) + f"{self.bos_token}Bot:" ) return self.encode(text=UpperCAmelCase_ )
code_codestyle: 37
'''simple docstring''' import argparse import requests import torch from PIL import Image from torchvision.transforms import Compose, Normalize, Resize, ToTensor from transformers import SwinaSRConfig, SwinaSRForImageSuperResolution, SwinaSRImageProcessor def __UpperCamelCase ( _UpperCAmelCase ): __UpperCAmelCase : Optional[int] = SwinaSRConfig() if "Swin2SR_ClassicalSR_X4_64" in checkpoint_url: __UpperCAmelCase : List[str] = 4 elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url: __UpperCAmelCase : Optional[Any] = 4 __UpperCAmelCase : Union[str, Any] = 48 __UpperCAmelCase : int = "pixelshuffle_aux" elif "Swin2SR_Lightweight_X2_64" in checkpoint_url: __UpperCAmelCase : Tuple = [6, 6, 6, 6] __UpperCAmelCase : Tuple = 60 __UpperCAmelCase : Dict = [6, 6, 6, 6] __UpperCAmelCase : Dict = "pixelshuffledirect" elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url: __UpperCAmelCase : List[Any] = 4 __UpperCAmelCase : Optional[Any] = "nearest+conv" elif "Swin2SR_Jpeg_dynamic" in checkpoint_url: __UpperCAmelCase : Any = 1 __UpperCAmelCase : Tuple = 1 __UpperCAmelCase : Tuple = 126 __UpperCAmelCase : Optional[Any] = 7 __UpperCAmelCase : Any = 255.0 __UpperCAmelCase : List[Any] = "" return config def __UpperCamelCase ( _UpperCAmelCase, _UpperCAmelCase ): if "patch_embed.proj" in name and "layers" not in name: __UpperCAmelCase : str = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection" ) if "patch_embed.norm" in name: __UpperCAmelCase : List[Any] = name.replace("patch_embed.norm", "embeddings.patch_embeddings.layernorm" ) if "layers" in name: __UpperCAmelCase : Any = name.replace("layers", "encoder.stages" ) if "residual_group.blocks" in name: __UpperCAmelCase : List[str] = name.replace("residual_group.blocks", "layers" ) if "attn.proj" in name: __UpperCAmelCase : Union[str, Any] = name.replace("attn.proj", "attention.output.dense" ) if "attn" in name: __UpperCAmelCase : Union[str, Any] = name.replace("attn", "attention.self" ) if "norm1" in name: __UpperCAmelCase : str = name.replace("norm1", "layernorm_before" ) if "norm2" in name: __UpperCAmelCase : Dict = name.replace("norm2", "layernorm_after" ) if "mlp.fc1" in name: __UpperCAmelCase : Tuple = name.replace("mlp.fc1", "intermediate.dense" ) if "mlp.fc2" in name: __UpperCAmelCase : Union[str, Any] = name.replace("mlp.fc2", "output.dense" ) if "q_bias" in name: __UpperCAmelCase : int = name.replace("q_bias", "query.bias" ) if "k_bias" in name: __UpperCAmelCase : List[Any] = name.replace("k_bias", "key.bias" ) if "v_bias" in name: __UpperCAmelCase : Dict = name.replace("v_bias", "value.bias" ) if "cpb_mlp" in name: __UpperCAmelCase : List[str] = name.replace("cpb_mlp", "continuous_position_bias_mlp" ) if "patch_embed.proj" in name: __UpperCAmelCase : Dict = name.replace("patch_embed.proj", "patch_embed.projection" ) if name == "norm.weight": __UpperCAmelCase : Tuple = "layernorm.weight" if name == "norm.bias": __UpperCAmelCase : Optional[int] = "layernorm.bias" if "conv_first" in name: __UpperCAmelCase : Tuple = name.replace("conv_first", "first_convolution" ) if ( "upsample" in name or "conv_before_upsample" in name or "conv_bicubic" in name or "conv_up" in name or "conv_hr" in name or "conv_last" in name or "aux" in name ): # heads if "conv_last" in name: __UpperCAmelCase : Union[str, Any] = name.replace("conv_last", "final_convolution" ) if config.upsampler in ["pixelshuffle", "pixelshuffle_aux", "nearest+conv"]: if "conv_before_upsample.0" in name: __UpperCAmelCase : Optional[Any] = 
name.replace("conv_before_upsample.0", "conv_before_upsample" ) if "upsample.0" in name: __UpperCAmelCase : List[str] = name.replace("upsample.0", "upsample.convolution_0" ) if "upsample.2" in name: __UpperCAmelCase : List[str] = name.replace("upsample.2", "upsample.convolution_1" ) __UpperCAmelCase : List[Any] = "upsample." + name elif config.upsampler == "pixelshuffledirect": __UpperCAmelCase : Tuple = name.replace("upsample.0.weight", "upsample.conv.weight" ) __UpperCAmelCase : List[str] = name.replace("upsample.0.bias", "upsample.conv.bias" ) else: pass else: __UpperCAmelCase : Optional[int] = "swin2sr." + name return name def __UpperCamelCase ( _UpperCAmelCase, _UpperCAmelCase ): for key in orig_state_dict.copy().keys(): __UpperCAmelCase : List[Any] = orig_state_dict.pop(_UpperCAmelCase ) if "qkv" in key: __UpperCAmelCase : Tuple = key.split("." ) __UpperCAmelCase : Dict = int(key_split[1] ) __UpperCAmelCase : Optional[Any] = int(key_split[4] ) __UpperCAmelCase : int = config.embed_dim if "weight" in key: __UpperCAmelCase : Optional[Any] = val[:dim, :] __UpperCAmelCase : Optional[int] = val[dim : dim * 2, :] __UpperCAmelCase : Dict = val[-dim:, :] else: __UpperCAmelCase : Optional[Any] = val[:dim] __UpperCAmelCase : List[Any] = val[dim : dim * 2] __UpperCAmelCase : Optional[int] = val[-dim:] pass else: __UpperCAmelCase : Tuple = val return orig_state_dict def __UpperCamelCase ( _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase ): __UpperCAmelCase : Any = get_config(_UpperCAmelCase ) __UpperCAmelCase : Tuple = SwinaSRForImageSuperResolution(_UpperCAmelCase ) model.eval() __UpperCAmelCase : List[Any] = torch.hub.load_state_dict_from_url(_UpperCAmelCase, map_location="cpu" ) __UpperCAmelCase : int = convert_state_dict(_UpperCAmelCase, _UpperCAmelCase ) __UpperCAmelCase , __UpperCAmelCase : Any = model.load_state_dict(_UpperCAmelCase, strict=_UpperCAmelCase ) if len(_UpperCAmelCase ) > 0: raise ValueError("Missing keys when converting: {}".format(_UpperCAmelCase ) ) for key in unexpected_keys: if not ("relative_position_index" in key or "relative_coords_table" in key or "self_mask" in key): raise ValueError(F"Unexpected key {key} in state_dict" ) # verify values __UpperCAmelCase : Tuple = "https://github.com/mv-lab/swin2sr/blob/main/testsets/real-inputs/shanghai.jpg?raw=true" __UpperCAmelCase : int = Image.open(requests.get(_UpperCAmelCase, stream=_UpperCAmelCase ).raw ).convert("RGB" ) __UpperCAmelCase : str = SwinaSRImageProcessor() # pixel_values = processor(image, return_tensors="pt").pixel_values __UpperCAmelCase : Tuple = 126 if "Jpeg" in checkpoint_url else 256 __UpperCAmelCase : str = Compose( [ Resize((image_size, image_size) ), ToTensor(), Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225] ), ] ) __UpperCAmelCase : List[str] = transforms(_UpperCAmelCase ).unsqueeze(0 ) if config.num_channels == 1: __UpperCAmelCase : Tuple = pixel_values[:, 0, :, :].unsqueeze(1 ) __UpperCAmelCase : int = model(_UpperCAmelCase ) # assert values if "Swin2SR_ClassicalSR_X2_64" in checkpoint_url: __UpperCAmelCase : Any = torch.Size([1, 3, 512, 512] ) __UpperCAmelCase : str = torch.tensor( [[-0.7_087, -0.7_138, -0.6_721], [-0.8_340, -0.8_095, -0.7_298], [-0.9_149, -0.8_414, -0.7_940]] ) elif "Swin2SR_ClassicalSR_X4_64" in checkpoint_url: __UpperCAmelCase : int = torch.Size([1, 3, 1024, 1024] ) __UpperCAmelCase : Dict = torch.tensor( [[-0.7_775, -0.8_105, -0.8_933], [-0.7_764, -0.8_356, -0.9_225], [-0.7_976, -0.8_686, -0.9_579]] ) elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url: # TODO 
values didn't match exactly here __UpperCAmelCase : Optional[int] = torch.Size([1, 3, 1024, 1024] ) __UpperCAmelCase : Union[str, Any] = torch.tensor( [[-0.8_035, -0.7_504, -0.7_491], [-0.8_538, -0.8_124, -0.7_782], [-0.8_804, -0.8_651, -0.8_493]] ) elif "Swin2SR_Lightweight_X2_64" in checkpoint_url: __UpperCAmelCase : Tuple = torch.Size([1, 3, 512, 512] ) __UpperCAmelCase : List[str] = torch.tensor( [[-0.7_669, -0.8_662, -0.8_767], [-0.8_810, -0.9_962, -0.9_820], [-0.9_340, -1.0_322, -1.1_149]] ) elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url: __UpperCAmelCase : Dict = torch.Size([1, 3, 1024, 1024] ) __UpperCAmelCase : Optional[Any] = torch.tensor( [[-0.5_238, -0.5_557, -0.6_321], [-0.6_016, -0.5_903, -0.6_391], [-0.6_244, -0.6_334, -0.6_889]] ) assert ( outputs.reconstruction.shape == expected_shape ), F"Shape of reconstruction should be {expected_shape}, but is {outputs.reconstruction.shape}" assert torch.allclose(outputs.reconstruction[0, 0, :3, :3], _UpperCAmelCase, atol=1E-3 ) print("Looks ok!" ) __UpperCAmelCase : Optional[int] = { "https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth": ( "swin2SR-classical-sr-x2-64" ), "https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X4_64.pth": ( "swin2SR-classical-sr-x4-64" ), "https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_CompressedSR_X4_48.pth": ( "swin2SR-compressed-sr-x4-48" ), "https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_Lightweight_X2_64.pth": ( "swin2SR-lightweight-x2-64" ), "https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR.pth": ( "swin2SR-realworld-sr-x4-64-bsrgan-psnr" ), } __UpperCAmelCase : int = url_to_name[checkpoint_url] if pytorch_dump_folder_path is not None: print(F"Saving model {model_name} to {pytorch_dump_folder_path}" ) model.save_pretrained(_UpperCAmelCase ) print(F"Saving image processor to {pytorch_dump_folder_path}" ) processor.save_pretrained(_UpperCAmelCase ) if push_to_hub: model.push_to_hub(F"caidas/{model_name}" ) processor.push_to_hub(F"caidas/{model_name}" ) if __name__ == "__main__": lowerCAmelCase__ : Dict = argparse.ArgumentParser() # Required parameters parser.add_argument( "--checkpoint_url", default="https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth", type=str, help="URL of the original Swin2SR checkpoint you'd like to convert.", ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory." ) parser.add_argument("--push_to_hub", action="store_true", help="Whether to push the converted model to the hub.") lowerCAmelCase__ : str = parser.parse_args() convert_swinasr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
style_context_codestyle: 37
label: 1
import numpy as np


def power_iteration(
    input_matrix: np.ndarray,
    vector: np.ndarray,
    error_tol: float = 1e-12,
    max_iterations: int = 100,
):
    # Ensure matrix is square.
    assert np.shape(input_matrix)[0] == np.shape(input_matrix)[1]
    # Ensure proper dimensionality.
    assert np.shape(input_matrix)[0] == np.shape(vector)[0]
    # Ensure inputs are either both complex or both real
    assert np.iscomplexobj(input_matrix) == np.iscomplexobj(vector)
    is_complex = np.iscomplexobj(input_matrix)
    if is_complex:
        # Ensure complex input_matrix is Hermitian
        assert np.array_equal(input_matrix, input_matrix.conj().T)

    # Set convergence to False. Will define convergence when we exceed max_iterations
    # or when we have small changes from one iteration to next.
    convergence = False
    lambda_previous = 0
    iterations = 0
    error = 1e12

    while not convergence:
        # Multiple matrix by the vector.
        w = np.dot(input_matrix, vector)
        # Normalize the resulting output vector.
        vector = w / np.linalg.norm(w)
        # Find rayleigh quotient
        # (faster than usual b/c we know vector is normalized already)
        vector_h = vector.conj().T if is_complex else vector.T
        lambda_ = np.dot(vector_h, np.dot(input_matrix, vector))

        # Check convergence.
        error = np.abs(lambda_ - lambda_previous) / lambda_
        iterations += 1

        if error <= error_tol or iterations >= max_iterations:
            convergence = True

        lambda_previous = lambda_

    if is_complex:
        lambda_ = np.real(lambda_)

    return lambda_, vector


def test_power_iteration():
    real_input_matrix = np.array([[41, 4, 20], [4, 26, 30], [20, 30, 50]])
    real_vector = np.array([41, 4, 20])
    complex_input_matrix = real_input_matrix.astype(np.complex128)
    imag_matrix = np.triu(1j * complex_input_matrix, 1)
    complex_input_matrix += imag_matrix
    complex_input_matrix += -1 * imag_matrix.T
    complex_vector = np.array([41, 4, 20]).astype(np.complex128)

    for problem_type in ["real", "complex"]:
        if problem_type == "real":
            input_matrix = real_input_matrix
            vector = real_vector
        elif problem_type == "complex":
            input_matrix = complex_input_matrix
            vector = complex_vector

        # Our implementation.
        eigen_value, eigen_vector = power_iteration(input_matrix, vector)

        # Numpy implementation.
        # Get eigenvalues and eigenvectors using built-in numpy
        # eigh (eigh used for symmetric or hermetian matrices).
        eigen_values, eigen_vectors = np.linalg.eigh(input_matrix)
        # Last eigenvalue is the maximum one.
        eigen_value_max = eigen_values[-1]
        # Last column in this matrix is eigenvector corresponding to largest eigenvalue.
        eigen_vector_max = eigen_vectors[:, -1]

        # Check our implementation and numpy gives close answers.
        assert np.abs(eigen_value - eigen_value_max) <= 1e-6
        # Take absolute values element wise of each eigenvector.
        # as they are only unique to a minus sign.
        assert np.linalg.norm(np.abs(eigen_vector) - np.abs(eigen_vector_max)) <= 1e-6


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    test_power_iteration()
code_codestyle: 92
import json import logging import os import socket import git import numpy as np import torch logging.basicConfig( format='''%(asctime)s - %(levelname)s - %(name)s - PID: %(process)d - %(message)s''', datefmt='''%m/%d/%Y %H:%M:%S''', level=logging.INFO, ) _UpperCamelCase = logging.getLogger(__name__) def lowerCAmelCase__( lowercase : str ) -> List[str]: __snake_case : int = git.Repo(search_parent_directories=lowercase ) __snake_case : Union[str, Any] = { "repo_id": str(lowercase ), "repo_sha": str(repo.head.object.hexsha ), "repo_branch": str(repo.active_branch ), } with open(os.path.join(lowercase , "git_log.json" ) , "w" ) as f: json.dump(lowercase , lowercase , indent=4 ) def lowerCAmelCase__( lowercase : Optional[Any] ) -> Optional[Any]: if params.n_gpu <= 0: __snake_case : Union[str, Any] = 0 __snake_case : Optional[int] = -1 __snake_case : Union[str, Any] = True __snake_case : Tuple = False return assert torch.cuda.is_available() logger.info("Initializing GPUs" ) if params.n_gpu > 1: assert params.local_rank != -1 __snake_case : Optional[int] = int(os.environ["WORLD_SIZE"] ) __snake_case : int = int(os.environ["N_GPU_NODE"] ) __snake_case : Union[str, Any] = int(os.environ["RANK"] ) # number of nodes / node ID __snake_case : Optional[Any] = params.world_size // params.n_gpu_per_node __snake_case : Optional[Any] = params.global_rank // params.n_gpu_per_node __snake_case : Union[str, Any] = True assert params.n_nodes == int(os.environ["N_NODES"] ) assert params.node_id == int(os.environ["NODE_RANK"] ) # local job (single GPU) else: assert params.local_rank == -1 __snake_case : Any = 1 __snake_case : str = 0 __snake_case : Optional[Any] = 0 __snake_case : Dict = 0 __snake_case : int = 1 __snake_case : Optional[Any] = 1 __snake_case : Tuple = False # sanity checks assert params.n_nodes >= 1 assert 0 <= params.node_id < params.n_nodes assert 0 <= params.local_rank <= params.global_rank < params.world_size assert params.world_size == params.n_nodes * params.n_gpu_per_node # define whether this is the master process / if we are in multi-node distributed mode __snake_case : List[Any] = params.node_id == 0 and params.local_rank == 0 __snake_case : List[Any] = params.n_nodes > 1 # summary __snake_case : List[Any] = f"""--- Global rank: {params.global_rank} - """ logger.info(PREFIX + "Number of nodes: %i" % params.n_nodes ) logger.info(PREFIX + "Node ID : %i" % params.node_id ) logger.info(PREFIX + "Local rank : %i" % params.local_rank ) logger.info(PREFIX + "World size : %i" % params.world_size ) logger.info(PREFIX + "GPUs per node : %i" % params.n_gpu_per_node ) logger.info(PREFIX + "Master : %s" % str(params.is_master ) ) logger.info(PREFIX + "Multi-node : %s" % str(params.multi_node ) ) logger.info(PREFIX + "Multi-GPU : %s" % str(params.multi_gpu ) ) logger.info(PREFIX + "Hostname : %s" % socket.gethostname() ) # set GPU device torch.cuda.set_device(params.local_rank ) # initialize multi-GPU if params.multi_gpu: logger.info("Initializing PyTorch distributed" ) torch.distributed.init_process_group( init_method="env://" , backend="nccl" , ) def lowerCAmelCase__( lowercase : Dict ) -> Union[str, Any]: np.random.seed(args.seed ) torch.manual_seed(args.seed ) if args.n_gpu > 0: torch.cuda.manual_seed_all(args.seed )
326
0
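# A minimal usage sketch for the power_iteration routine in the snippet above,
# assuming numpy is installed. The 2x2 matrix and starting vector are illustrative
# choices, not values from the row; the exact dominant eigenpair is 3 and [1, 1]/sqrt(2).
import numpy as np

matrix = np.array([[2.0, 1.0], [1.0, 2.0]])
eigen_value, eigen_vector = power_iteration(matrix, np.array([1.0, 0.0]))
print(round(float(eigen_value), 6))        # ~3.0
print(np.round(np.abs(eigen_vector), 6))   # ~[0.707107, 0.707107]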
import random import unittest import numpy as np from diffusers import ( DPMSolverMultistepScheduler, EulerAncestralDiscreteScheduler, EulerDiscreteScheduler, LMSDiscreteScheduler, OnnxStableDiffusionImgaImgPipeline, PNDMScheduler, ) from diffusers.utils import floats_tensor from diffusers.utils.testing_utils import ( is_onnx_available, load_image, nightly, require_onnxruntime, require_torch_gpu, ) from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin if is_onnx_available(): import onnxruntime as ort class a__ ( lowerCamelCase_ , unittest.TestCase ): a : Dict = '''hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline''' def lowerCAmelCase_ ( self , A=0 ) -> List[Any]: '''simple docstring''' a = floats_tensor((1, 3, 128, 128) , rng=random.Random(__snake_case ) ) a = np.random.RandomState(__snake_case ) a = { "prompt": "A painting of a squirrel eating a burger", "image": image, "generator": generator, "num_inference_steps": 3, "strength": 0.7_5, "guidance_scale": 7.5, "output_type": "numpy", } return inputs def lowerCAmelCase_ ( self ) -> str: '''simple docstring''' a = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" ) pipe.set_progress_bar_config(disable=__snake_case ) a = self.get_dummy_inputs() a = pipe(**__snake_case ).images a = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 128, 128, 3) a = np.array([0.6_9_6_4_3, 0.5_8_4_8_4, 0.5_0_3_1_4, 0.5_8_7_6_0, 0.5_5_3_6_8, 0.5_9_6_4_3, 0.5_1_5_2_9, 0.4_1_2_1_7, 0.4_9_0_8_7] ) assert np.abs(image_slice - expected_slice ).max() < 1e-1 def lowerCAmelCase_ ( self ) -> Tuple: '''simple docstring''' a = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" ) a = PNDMScheduler.from_config(pipe.scheduler.config , skip_prk_steps=__snake_case ) pipe.set_progress_bar_config(disable=__snake_case ) a = self.get_dummy_inputs() a = pipe(**__snake_case ).images a = image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) a = np.array([0.6_1_7_3_7, 0.5_4_6_4_2, 0.5_3_1_8_3, 0.5_4_4_6_5, 0.5_2_7_4_2, 0.6_0_5_2_5, 0.4_9_9_6_9, 0.4_0_6_5_5, 0.4_8_1_5_4] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1 def lowerCAmelCase_ ( self ) -> List[str]: '''simple docstring''' a = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" ) a = LMSDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=__snake_case ) # warmup pass to apply optimizations a = pipe(**self.get_dummy_inputs() ) a = self.get_dummy_inputs() a = pipe(**__snake_case ).images a = image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) a = np.array([0.5_2_7_6_1, 0.5_9_9_7_7, 0.4_9_0_3_3, 0.4_9_6_1_9, 0.5_4_2_8_2, 0.5_0_3_1_1, 0.4_7_6_0_0, 0.4_0_9_1_8, 0.4_5_2_0_3] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1 def lowerCAmelCase_ ( self ) -> Dict: '''simple docstring''' a = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" ) a = EulerDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=__snake_case ) a = self.get_dummy_inputs() a = pipe(**__snake_case ).images a = image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) a = np.array([0.5_2_9_1_1, 0.6_0_0_0_4, 0.4_9_2_2_9, 0.4_9_8_0_5, 0.5_4_5_0_2, 0.5_0_6_8_0, 0.4_7_7_7_7, 0.4_1_0_2_8, 0.4_5_3_0_4] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1 def lowerCAmelCase_ ( 
self ) -> Optional[Any]: '''simple docstring''' a = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" ) a = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=__snake_case ) a = self.get_dummy_inputs() a = pipe(**__snake_case ).images a = image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) a = np.array([0.5_2_9_1_1, 0.6_0_0_0_4, 0.4_9_2_2_9, 0.4_9_8_0_5, 0.5_4_5_0_2, 0.5_0_6_8_0, 0.4_7_7_7_7, 0.4_1_0_2_8, 0.4_5_3_0_4] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1 def lowerCAmelCase_ ( self ) -> List[Any]: '''simple docstring''' a = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" ) a = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=__snake_case ) a = self.get_dummy_inputs() a = pipe(**__snake_case ).images a = image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) a = np.array([0.6_5_3_3_1, 0.5_8_2_7_7, 0.4_8_2_0_4, 0.5_6_0_5_9, 0.5_3_6_6_5, 0.5_6_2_3_5, 0.5_0_9_6_9, 0.4_0_0_0_9, 0.4_6_5_5_2] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1 @nightly @require_onnxruntime @require_torch_gpu class a__ ( unittest.TestCase ): @property def lowerCAmelCase_ ( self ) -> Union[str, Any]: '''simple docstring''' return ( "CUDAExecutionProvider", { "gpu_mem_limit": "15000000000", # 15GB "arena_extend_strategy": "kSameAsRequested", }, ) @property def lowerCAmelCase_ ( self ) -> List[str]: '''simple docstring''' a = ort.SessionOptions() a = False return options def lowerCAmelCase_ ( self ) -> Optional[Any]: '''simple docstring''' a = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/img2img/sketch-mountains-input.jpg" ) a = init_image.resize((768, 512) ) # using the PNDM scheduler by default a = OnnxStableDiffusionImgaImgPipeline.from_pretrained( "CompVis/stable-diffusion-v1-4" , revision="onnx" , safety_checker=__snake_case , feature_extractor=__snake_case , provider=self.gpu_provider , sess_options=self.gpu_options , ) pipe.set_progress_bar_config(disable=__snake_case ) a = "A fantasy landscape, trending on artstation" a = np.random.RandomState(0 ) a = pipe( prompt=__snake_case , image=__snake_case , strength=0.7_5 , guidance_scale=7.5 , num_inference_steps=10 , generator=__snake_case , output_type="np" , ) a = output.images a = images[0, 255:258, 383:386, -1] assert images.shape == (1, 512, 768, 3) a = np.array([0.4_9_0_9, 0.5_0_5_9, 0.5_3_7_2, 0.4_6_2_3, 0.4_8_7_6, 0.5_0_4_9, 0.4_8_2_0, 0.4_9_5_6, 0.5_0_1_9] ) # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2 def lowerCAmelCase_ ( self ) -> Optional[Any]: '''simple docstring''' a = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/img2img/sketch-mountains-input.jpg" ) a = init_image.resize((768, 512) ) a = LMSDiscreteScheduler.from_pretrained( "runwayml/stable-diffusion-v1-5" , subfolder="scheduler" , revision="onnx" ) a = OnnxStableDiffusionImgaImgPipeline.from_pretrained( "runwayml/stable-diffusion-v1-5" , revision="onnx" , scheduler=__snake_case , safety_checker=__snake_case , feature_extractor=__snake_case , provider=self.gpu_provider , sess_options=self.gpu_options , ) pipe.set_progress_bar_config(disable=__snake_case ) a = "A fantasy landscape, trending on artstation" 
a = np.random.RandomState(0 ) a = pipe( prompt=__snake_case , image=__snake_case , strength=0.7_5 , guidance_scale=7.5 , num_inference_steps=20 , generator=__snake_case , output_type="np" , ) a = output.images a = images[0, 255:258, 383:386, -1] assert images.shape == (1, 512, 768, 3) a = np.array([0.8_0_4_3, 0.9_2_6, 0.9_5_8_1, 0.8_1_1_9, 0.8_9_5_4, 0.9_1_3, 0.7_2_0_9, 0.7_4_6_3, 0.7_4_3_1] ) # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2
371
import math


def is_prime(number: int) -> bool:
    if 1 < number < 4:
        # 2 and 3 are primes.
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers and all multiples of 3 are not primes.
        return False

    # All primes greater than 3 are of the form 6k +/- 1.
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def solution(ratio: float = 0.1) -> int:
    # Walk the square spiral outward layer by layer; the three non-square corners
    # of the next layer are range(j * j + j + 1, (j + 2) ** 2, j + 1).
    j = 3
    primes = 3
    while primes / (2 * j - 1) >= ratio:
        for i in range(j * j + j + 1, (j + 2) * (j + 2), j + 1):
            primes += is_prime(i)
        j += 2
    return j


if __name__ == "__main__":
    import doctest

    doctest.testmod()
180
0
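# Context for the spiral bookkeeping in solution() above: the corners of the
# square-spiral layer with odd side length s are s*s - k*(s - 1) for k in 0..3,
# so range(j*j + j + 1, (j + 2)**2, j + 1) enumerates the three non-square
# corners of the next layer. The helper name below is illustrative, not from the row.
def layer_corners(s: int) -> list:
    return [s * s - k * (s - 1) for k in range(4)]

print(layer_corners(3))  # [9, 7, 5, 3]
print(layer_corners(5))  # [25, 21, 17, 13] -> 13, 17, 21 match range(13, 25, 4)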
from transformers import FSMTTokenizer, FSMTConfig, FSMTForConditionalGeneration

mname = "facebook/wmt19-en-de"

tokenizer = FSMTTokenizer.from_pretrained(mname)
# get the correct vocab sizes, etc. from the master model
config = FSMTConfig.from_pretrained(mname)

config.update(
    dict(
        d_model=4,
        encoder_layers=1,
        decoder_layers=1,
        encoder_ffn_dim=4,
        decoder_ffn_dim=4,
        encoder_attention_heads=1,
        decoder_attention_heads=1,
    )
)

tiny_model = FSMTForConditionalGeneration(config)
print(f"num of params {tiny_model.num_parameters()}")

# Test
batch = tokenizer(["Making tiny model"], return_tensors="pt")
outputs = tiny_model(**batch)

print("test output:", len(outputs.logits[0]))

# Save
mname_tiny = "tiny-wmt19-en-de"
tiny_model.half()  # makes it smaller
tiny_model.save_pretrained(mname_tiny)
tokenizer.save_pretrained(mname_tiny)

print(f"Generated {mname_tiny}")

# Upload
# transformers-cli upload tiny-wmt19-en-de
116
'''simple docstring''' import unittest import numpy as np from transformers.testing_utils import is_flaky, require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import DonutImageProcessor class _lowercase ( unittest.TestCase ): def __init__( self: str , UpperCamelCase__: Optional[Any] , UpperCamelCase__: Any=7 , UpperCamelCase__: Optional[int]=3 , UpperCamelCase__: List[str]=18 , UpperCamelCase__: Union[str, Any]=30 , UpperCamelCase__: List[str]=400 , UpperCamelCase__: Any=True , UpperCamelCase__: Union[str, Any]=None , UpperCamelCase__: List[Any]=True , UpperCamelCase__: List[Any]=False , UpperCamelCase__: Tuple=True , UpperCamelCase__: Tuple=True , UpperCamelCase__: Any=[0.5, 0.5, 0.5] , UpperCamelCase__: Optional[Any]=[0.5, 0.5, 0.5] , ): lowerCamelCase__ : int = parent lowerCamelCase__ : Any = batch_size lowerCamelCase__ : Optional[int] = num_channels lowerCamelCase__ : Union[str, Any] = image_size lowerCamelCase__ : Optional[int] = min_resolution lowerCamelCase__ : Optional[Any] = max_resolution lowerCamelCase__ : Union[str, Any] = do_resize lowerCamelCase__ : Optional[Any] = size if size is not None else {"""height""": 18, """width""": 20} lowerCamelCase__ : Dict = do_thumbnail lowerCamelCase__ : Optional[int] = do_align_axis lowerCamelCase__ : Any = do_pad lowerCamelCase__ : Optional[Any] = do_normalize lowerCamelCase__ : Union[str, Any] = image_mean lowerCamelCase__ : Union[str, Any] = image_std def lowerCamelCase_ ( self: str ): return { "do_resize": self.do_resize, "size": self.size, "do_thumbnail": self.do_thumbnail, "do_align_long_axis": self.do_align_axis, "do_pad": self.do_pad, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, } @require_torch @require_vision class _lowercase ( _lowercase , unittest.TestCase ): a = DonutImageProcessor if is_vision_available() else None def lowerCamelCase_ ( self: Optional[int] ): lowerCamelCase__ : Any = DonutImageProcessingTester(self ) @property def lowerCamelCase_ ( self: Optional[int] ): return self.image_processor_tester.prepare_image_processor_dict() def lowerCamelCase_ ( self: Tuple ): lowerCamelCase__ : Dict = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(UpperCamelCase__ , """do_resize""" ) ) self.assertTrue(hasattr(UpperCamelCase__ , """size""" ) ) self.assertTrue(hasattr(UpperCamelCase__ , """do_thumbnail""" ) ) self.assertTrue(hasattr(UpperCamelCase__ , """do_align_long_axis""" ) ) self.assertTrue(hasattr(UpperCamelCase__ , """do_pad""" ) ) self.assertTrue(hasattr(UpperCamelCase__ , """do_normalize""" ) ) self.assertTrue(hasattr(UpperCamelCase__ , """image_mean""" ) ) self.assertTrue(hasattr(UpperCamelCase__ , """image_std""" ) ) def lowerCamelCase_ ( self: Optional[Any] ): lowerCamelCase__ : Optional[int] = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {"""height""": 18, """width""": 20} ) lowerCamelCase__ : Optional[int] = self.image_processing_class.from_dict(self.image_processor_dict , size=42 ) self.assertEqual(image_processor.size , {"""height""": 42, """width""": 42} ) # Previous config had dimensions in (width, height) order lowerCamelCase__ : List[str] = self.image_processing_class.from_dict(self.image_processor_dict , size=(42, 84) ) 
self.assertEqual(image_processor.size , {"""height""": 84, """width""": 42} ) def lowerCamelCase_ ( self: List[str] ): pass @is_flaky() def lowerCamelCase_ ( self: Union[str, Any] ): # Initialize image_processing lowerCamelCase__ : str = self.image_processing_class(**self.image_processor_dict ) # create random PIL images lowerCamelCase__ : Dict = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase__ ) for image in image_inputs: self.assertIsInstance(UpperCamelCase__ , Image.Image ) # Test not batched input lowerCamelCase__ : Union[str, Any] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size["""height"""], self.image_processor_tester.size["""width"""], ) , ) # Test batched lowerCamelCase__ : List[str] = image_processing(UpperCamelCase__ , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size["""height"""], self.image_processor_tester.size["""width"""], ) , ) @is_flaky() def lowerCamelCase_ ( self: Optional[int] ): # Initialize image_processing lowerCamelCase__ : Dict = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors lowerCamelCase__ : Dict = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase__ , numpify=UpperCamelCase__ ) for image in image_inputs: self.assertIsInstance(UpperCamelCase__ , np.ndarray ) # Test not batched input lowerCamelCase__ : List[Any] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size["""height"""], self.image_processor_tester.size["""width"""], ) , ) # Test batched lowerCamelCase__ : Optional[Any] = image_processing(UpperCamelCase__ , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size["""height"""], self.image_processor_tester.size["""width"""], ) , ) @is_flaky() def lowerCamelCase_ ( self: Dict ): # Initialize image_processing lowerCamelCase__ : int = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors lowerCamelCase__ : Optional[int] = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase__ , torchify=UpperCamelCase__ ) for image in image_inputs: self.assertIsInstance(UpperCamelCase__ , torch.Tensor ) # Test not batched input lowerCamelCase__ : Union[str, Any] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size["""height"""], self.image_processor_tester.size["""width"""], ) , ) # Test batched lowerCamelCase__ : Tuple = image_processing(UpperCamelCase__ , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size["""height"""], self.image_processor_tester.size["""width"""], ) , )
41
0
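# A quick smoke test of the tiny checkpoint written above -- a sketch assuming the
# save step ran and "tiny-wmt19-en-de" exists locally. With random weights the
# generated text is meaningless; this only checks that the files load and run.
from transformers import FSMTForConditionalGeneration, FSMTTokenizer

tiny_tokenizer = FSMTTokenizer.from_pretrained("tiny-wmt19-en-de")
tiny = FSMTForConditionalGeneration.from_pretrained("tiny-wmt19-en-de")
batch = tiny_tokenizer(["Smoke test"], return_tensors="pt")
generated = tiny.generate(**batch, num_beams=1, max_length=8)
print(tiny_tokenizer.batch_decode(generated, skip_special_tokens=True))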
"""simple docstring""" from dataclasses import dataclass from typing import Tuple import numpy as np import torch @dataclass class __SCREAMING_SNAKE_CASE : snake_case_ = 42 # [batch_size x 3] snake_case_ = 42 # [batch_size x 3] snake_case_ = 42 # [batch_size x 3] snake_case_ = 42 # [batch_size x 3] snake_case_ = 42 snake_case_ = 42 snake_case_ = 42 snake_case_ = 42 snake_case_ = 42 def _UpperCamelCase ( self : str ): '''simple docstring''' assert self.x.shape[0] == self.y.shape[0] == self.z.shape[0] == self.origin.shape[0] assert self.x.shape[1] == self.y.shape[1] == self.z.shape[1] == self.origin.shape[1] == 3 assert len(self.x.shape ) == len(self.y.shape ) == len(self.z.shape ) == len(self.origin.shape ) == 2 def _UpperCamelCase ( self : Any ): '''simple docstring''' return torch.from_numpy(np.array([self.width, self.height] , dtype=np.floataa ) ) def _UpperCamelCase ( self : Any ): '''simple docstring''' return torch.from_numpy(np.array([self.x_fov, self.y_fov] , dtype=np.floataa ) ) def _UpperCamelCase ( self : Optional[Any] ): '''simple docstring''' A__ : List[str] = torch.arange(self.height * self.width ) A__ : Dict = torch.stack( [ pixel_indices % self.width, torch.div(_a , self.width , rounding_mode="""trunc""" ), ] , axis=1 , ) return coords @property def _UpperCamelCase ( self : Dict ): '''simple docstring''' A__ : Optional[Any] = self.shape A__ : str = int(np.prod(_a ) ) A__ : Dict = self.get_image_coords() A__ : int = torch.broadcast_to(coords.unsqueeze(0 ) , [batch_size * inner_batch_size, *coords.shape] ) A__ : int = self.get_camera_rays(_a ) A__ : Tuple = rays.view(_a , inner_batch_size * self.height * self.width , 2 , 3 ) return rays def _UpperCamelCase ( self : Optional[int] , snake_case : Optional[int] ): '''simple docstring''' A__ : int = coords.shape assert n_coords == 2 assert batch_size == self.origin.shape[0] A__ : int = coords.view(_a , -1 , 2 ) A__ : Any = self.resolution() A__ : List[Any] = self.fov() A__ : str = (flat.float() / (res - 1)) * 2 - 1 A__ : List[Any] = fracs * torch.tan(fov / 2 ) A__ : Any = fracs.view(_a , -1 , 2 ) A__ : Dict = ( self.z.view(_a , 1 , 3 ) + self.x.view(_a , 1 , 3 ) * fracs[:, :, :1] + self.y.view(_a , 1 , 3 ) * fracs[:, :, 1:] ) A__ : Union[str, Any] = directions / directions.norm(dim=-1 , keepdim=_a ) A__ : List[Any] = torch.stack( [ torch.broadcast_to(self.origin.view(_a , 1 , 3 ) , [batch_size, directions.shape[1], 3] ), directions, ] , dim=2 , ) return rays.view(_a , *_a , 2 , 3 ) def _UpperCamelCase ( self : int , snake_case : Tuple , snake_case : int ): '''simple docstring''' assert width * self.height == height * self.width, "The aspect ratio should not change." 
return DifferentiableProjectiveCamera( origin=self.origin , x=self.x , y=self.y , z=self.z , width=_a , height=_a , x_fov=self.x_fov , y_fov=self.y_fov , ) def _lowerCAmelCase ( UpperCAmelCase__ : int ) ->DifferentiableProjectiveCamera: A__ : List[Any] = [] A__ : str = [] A__ : str = [] A__ : Tuple = [] for theta in np.linspace(0, 2 * np.pi, num=2_0 ): A__ : Dict = np.array([np.sin(__a ), np.cos(__a ), -0.5] ) z /= np.sqrt(np.sum(z**2 ) ) A__ : List[str] = -z * 4 A__ : Dict = np.array([np.cos(__a ), -np.sin(__a ), 0.0] ) A__ : Union[str, Any] = np.cross(__a, __a ) origins.append(__a ) xs.append(__a ) ys.append(__a ) zs.append(__a ) return DifferentiableProjectiveCamera( origin=torch.from_numpy(np.stack(__a, axis=0 ) ).float(), x=torch.from_numpy(np.stack(__a, axis=0 ) ).float(), y=torch.from_numpy(np.stack(__a, axis=0 ) ).float(), z=torch.from_numpy(np.stack(__a, axis=0 ) ).float(), width=__a, height=__a, x_fov=0.7, y_fov=0.7, shape=(1, len(__a )), )
365
"""simple docstring""" import unittest from transformers import MraConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask if is_torch_available(): import torch from transformers import ( MraForMaskedLM, MraForMultipleChoice, MraForQuestionAnswering, MraForSequenceClassification, MraForTokenClassification, MraModel, ) from transformers.models.mra.modeling_mra import MRA_PRETRAINED_MODEL_ARCHIVE_LIST class __SCREAMING_SNAKE_CASE : def __init__( self : List[str] , snake_case : Tuple , snake_case : List[str]=2 , snake_case : List[str]=8 , snake_case : List[Any]=True , snake_case : Optional[Any]=True , snake_case : List[Any]=True , snake_case : Dict=True , snake_case : Tuple=99 , snake_case : Dict=16 , snake_case : Dict=5 , snake_case : int=2 , snake_case : Any=36 , snake_case : str="gelu" , snake_case : Dict=0.0 , snake_case : List[Any]=0.0 , snake_case : int=512 , snake_case : List[Any]=16 , snake_case : Tuple=2 , snake_case : Any=0.02 , snake_case : Optional[Any]=3 , snake_case : List[Any]=4 , snake_case : str=None , ): '''simple docstring''' A__ : Union[str, Any] = parent A__ : Optional[Any] = batch_size A__ : Dict = seq_length A__ : str = is_training A__ : Tuple = use_input_mask A__ : Dict = use_token_type_ids A__ : Dict = use_labels A__ : int = vocab_size A__ : List[str] = hidden_size A__ : Union[str, Any] = num_hidden_layers A__ : int = num_attention_heads A__ : List[str] = intermediate_size A__ : int = hidden_act A__ : str = hidden_dropout_prob A__ : Tuple = attention_probs_dropout_prob A__ : Any = max_position_embeddings A__ : Optional[int] = type_vocab_size A__ : int = type_sequence_label_size A__ : Optional[Any] = initializer_range A__ : int = num_labels A__ : Optional[int] = num_choices A__ : Optional[int] = scope def _UpperCamelCase ( self : Optional[int] ): '''simple docstring''' A__ : Dict = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) A__ : Any = None if self.use_input_mask: A__ : Any = random_attention_mask([self.batch_size, self.seq_length] ) A__ : Optional[int] = None if self.use_token_type_ids: A__ : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) A__ : Dict = None A__ : List[str] = None A__ : Union[str, Any] = None if self.use_labels: A__ : str = ids_tensor([self.batch_size] , self.type_sequence_label_size ) A__ : str = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) A__ : Any = ids_tensor([self.batch_size] , self.num_choices ) A__ : Optional[int] = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def _UpperCamelCase ( self : List[str] ): '''simple docstring''' return MraConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=snake_case , initializer_range=self.initializer_range , ) def _UpperCamelCase ( self : Tuple ): '''simple docstring''' A__ : Any = self.get_config() A__ : List[str] = 300 return config def _UpperCamelCase ( self : Union[str, Any] 
): '''simple docstring''' ( ( A__ ) , ( A__ ) , ( A__ ) , ( A__ ) , ( A__ ) , ( A__ ) , ( A__ ) , ) : Tuple = self.prepare_config_and_inputs() A__ : List[str] = True A__ : List[str] = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] ) A__ : int = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 ) return ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ) def _UpperCamelCase ( self : Any , snake_case : Any , snake_case : Tuple , snake_case : Any , snake_case : Optional[int] , snake_case : Optional[int] , snake_case : Optional[int] , snake_case : Dict ): '''simple docstring''' A__ : List[str] = MraModel(config=snake_case ) model.to(snake_case ) model.eval() A__ : Dict = model(snake_case , attention_mask=snake_case , token_type_ids=snake_case ) A__ : List[str] = model(snake_case , token_type_ids=snake_case ) A__ : Union[str, Any] = model(snake_case ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def _UpperCamelCase ( self : Optional[Any] , snake_case : List[Any] , snake_case : Any , snake_case : Optional[Any] , snake_case : Union[str, Any] , snake_case : Tuple , snake_case : Dict , snake_case : str , snake_case : Dict , snake_case : str , ): '''simple docstring''' A__ : Dict = True A__ : Optional[Any] = MraModel(snake_case ) model.to(snake_case ) model.eval() A__ : Union[str, Any] = model( snake_case , attention_mask=snake_case , token_type_ids=snake_case , encoder_hidden_states=snake_case , encoder_attention_mask=snake_case , ) A__ : str = model( snake_case , attention_mask=snake_case , token_type_ids=snake_case , encoder_hidden_states=snake_case , ) A__ : Optional[int] = model(snake_case , attention_mask=snake_case , token_type_ids=snake_case ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def _UpperCamelCase ( self : int , snake_case : Optional[Any] , snake_case : Optional[int] , snake_case : int , snake_case : str , snake_case : Union[str, Any] , snake_case : Dict , snake_case : List[str] ): '''simple docstring''' A__ : Union[str, Any] = MraForMaskedLM(config=snake_case ) model.to(snake_case ) model.eval() A__ : List[Any] = model(snake_case , attention_mask=snake_case , token_type_ids=snake_case , labels=snake_case ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def _UpperCamelCase ( self : Optional[Any] , snake_case : Dict , snake_case : Dict , snake_case : Dict , snake_case : List[str] , snake_case : List[str] , snake_case : Tuple , snake_case : Union[str, Any] ): '''simple docstring''' A__ : Dict = MraForQuestionAnswering(config=snake_case ) model.to(snake_case ) model.eval() A__ : str = model( snake_case , attention_mask=snake_case , token_type_ids=snake_case , start_positions=snake_case , end_positions=snake_case , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def _UpperCamelCase ( self : Tuple , snake_case : List[Any] , snake_case : Optional[Any] , snake_case : Optional[Any] , snake_case : List[str] , snake_case : Optional[int] , snake_case : List[str] , snake_case : Union[str, Any] ): '''simple docstring''' A__ : str = self.num_labels A__ : Optional[Any] = MraForSequenceClassification(snake_case ) model.to(snake_case ) model.eval() A__ : str = 
model(snake_case , attention_mask=snake_case , token_type_ids=snake_case , labels=snake_case ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def _UpperCamelCase ( self : Union[str, Any] , snake_case : Dict , snake_case : str , snake_case : List[Any] , snake_case : Any , snake_case : Dict , snake_case : Tuple , snake_case : Optional[Any] ): '''simple docstring''' A__ : str = self.num_labels A__ : Union[str, Any] = MraForTokenClassification(config=snake_case ) model.to(snake_case ) model.eval() A__ : str = model(snake_case , attention_mask=snake_case , token_type_ids=snake_case , labels=snake_case ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def _UpperCamelCase ( self : Tuple , snake_case : Optional[Any] , snake_case : Optional[int] , snake_case : int , snake_case : Optional[Any] , snake_case : List[str] , snake_case : Dict , snake_case : Optional[Any] ): '''simple docstring''' A__ : List[str] = self.num_choices A__ : str = MraForMultipleChoice(config=snake_case ) model.to(snake_case ) model.eval() A__ : int = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() A__ : Dict = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() A__ : Tuple = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() A__ : str = model( snake_case , attention_mask=snake_case , token_type_ids=snake_case , labels=snake_case , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def _UpperCamelCase ( self : List[Any] ): '''simple docstring''' A__ : List[str] = self.prepare_config_and_inputs() ( ( A__ ) , ( A__ ) , ( A__ ) , ( A__ ) , ( A__ ) , ( A__ ) , ( A__ ) , ) : Dict = config_and_inputs A__ : Optional[int] = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask} return config, inputs_dict @require_torch class __SCREAMING_SNAKE_CASE ( UpperCamelCase , unittest.TestCase ): snake_case_ = ( ( MraModel, MraForMaskedLM, MraForMultipleChoice, MraForQuestionAnswering, MraForSequenceClassification, MraForTokenClassification, ) if is_torch_available() else () ) snake_case_ = False snake_case_ = False snake_case_ = False snake_case_ = False snake_case_ = () def _UpperCamelCase ( self : int ): '''simple docstring''' A__ : Optional[Any] = MraModelTester(self ) A__ : List[str] = ConfigTester(self , config_class=snake_case , hidden_size=37 ) def _UpperCamelCase ( self : Tuple ): '''simple docstring''' self.config_tester.run_common_tests() def _UpperCamelCase ( self : Tuple ): '''simple docstring''' A__ : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*snake_case ) def _UpperCamelCase ( self : Tuple ): '''simple docstring''' A__ : Optional[int] = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: A__ : List[str] = type self.model_tester.create_and_check_model(*snake_case ) def _UpperCamelCase ( self : Union[str, Any] ): '''simple docstring''' A__ : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*snake_case ) def _UpperCamelCase ( self : Union[str, Any] ): '''simple docstring''' A__ : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*snake_case ) def _UpperCamelCase ( self : Optional[int] ): '''simple docstring''' A__ : Optional[int] = self.model_tester.prepare_config_and_inputs() 
self.model_tester.create_and_check_for_question_answering(*snake_case ) def _UpperCamelCase ( self : int ): '''simple docstring''' A__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*snake_case ) def _UpperCamelCase ( self : Union[str, Any] ): '''simple docstring''' A__ : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*snake_case ) @slow def _UpperCamelCase ( self : Any ): '''simple docstring''' for model_name in MRA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: A__ : str = MraModel.from_pretrained(snake_case ) self.assertIsNotNone(snake_case ) @unittest.skip(reason="""MRA does not output attentions""" ) def _UpperCamelCase ( self : Tuple ): '''simple docstring''' return @require_torch class __SCREAMING_SNAKE_CASE ( unittest.TestCase ): @slow def _UpperCamelCase ( self : Union[str, Any] ): '''simple docstring''' A__ : str = MraModel.from_pretrained("""uw-madison/mra-base-512-4""" ) A__ : Any = torch.arange(256 ).unsqueeze(0 ) with torch.no_grad(): A__ : List[Any] = model(snake_case )[0] A__ : List[Any] = torch.Size((1, 256, 768) ) self.assertEqual(output.shape , snake_case ) A__ : int = torch.tensor( [[[-0.0140, 0.0830, -0.0381], [0.1546, 0.1402, 0.0220], [0.1162, 0.0851, 0.0165]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , snake_case , atol=1e-4 ) ) @slow def _UpperCamelCase ( self : List[Any] ): '''simple docstring''' A__ : Union[str, Any] = MraForMaskedLM.from_pretrained("""uw-madison/mra-base-512-4""" ) A__ : Tuple = torch.arange(256 ).unsqueeze(0 ) with torch.no_grad(): A__ : List[Any] = model(snake_case )[0] A__ : Dict = 5_0265 A__ : List[str] = torch.Size((1, 256, vocab_size) ) self.assertEqual(output.shape , snake_case ) A__ : List[Any] = torch.tensor( [[[9.2595, -3.6038, 11.8819], [9.3869, -3.2693, 11.0956], [11.8524, -3.4938, 13.1210]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , snake_case , atol=1e-4 ) ) @slow def _UpperCamelCase ( self : Dict ): '''simple docstring''' A__ : Any = MraForMaskedLM.from_pretrained("""uw-madison/mra-base-4096-8-d3""" ) A__ : List[Any] = torch.arange(4096 ).unsqueeze(0 ) with torch.no_grad(): A__ : List[Any] = model(snake_case )[0] A__ : Union[str, Any] = 5_0265 A__ : Optional[Any] = torch.Size((1, 4096, vocab_size) ) self.assertEqual(output.shape , snake_case ) A__ : Optional[int] = torch.tensor( [[[5.4789, -2.3564, 7.5064], [7.9067, -1.3369, 9.9668], [9.0712, -1.8106, 7.0380]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , snake_case , atol=1e-4 ) )
296
0
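# The ray math in get_camera_rays above, reduced to a single pixel -- a sketch;
# pixel_to_direction is my name, not part of the row. Fractional coordinates in
# [-1, 1] are scaled by tan(fov / 2) and mixed into the camera basis (x, y, z).
import numpy as np

def pixel_to_direction(px, py, width, height, x_fov, y_fov, x_axis, y_axis, z_axis):
    fx = ((px / (width - 1)) * 2 - 1) * np.tan(x_fov / 2)
    fy = ((py / (height - 1)) * 2 - 1) * np.tan(y_fov / 2)
    direction = z_axis + x_axis * fx + y_axis * fy
    return direction / np.linalg.norm(direction)

# The center pixel looks straight along the optical axis z (fov 0.7 as in the row).
x_ax, y_ax, z_ax = np.eye(3)
print(pixel_to_direction(31.5, 31.5, 64, 64, 0.7, 0.7, x_ax, y_ax, z_ax))  # [0. 0. 1.]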
import logging
from dataclasses import dataclass, field
from pathlib import Path
from typing import Optional, Union

from .generation.configuration_utils import GenerationConfig
from .training_args import TrainingArguments
from .utils import add_start_docstrings


logger = logging.getLogger(__name__)


@dataclass
@add_start_docstrings(TrainingArguments.__doc__)
class Seq2SeqTrainingArguments(TrainingArguments):
    sortish_sampler: bool = field(default=False, metadata={"help": "Whether to use SortishSampler or not."})
    predict_with_generate: bool = field(
        default=False, metadata={"help": "Whether to use generate to calculate generative metrics (ROUGE, BLEU)."}
    )
    generation_max_length: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The `max_length` to use on each evaluation loop when `predict_with_generate=True`. Will default "
                "to the `max_length` value of the model configuration."
            )
        },
    )
    generation_num_beams: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The `num_beams` to use on each evaluation loop when `predict_with_generate=True`. Will default "
                "to the `num_beams` value of the model configuration."
            )
        },
    )
    generation_config: Optional[Union[str, Path, GenerationConfig]] = field(
        default=None,
        metadata={
            "help": "Model id, file path or url pointing to a GenerationConfig json file, to use during prediction."
        },
    )

    def to_dict(self) -> dict:
        d = super().to_dict()
        for k, v in d.items():
            if isinstance(v, GenerationConfig):
                d[k] = v.to_dict()
        return d
30
import sys

N = (
    "73167176531330624919225119674426574742355349194934"
    "96983520312774506326239578318016984801869478851843"
    "85861560789112949495459501737958331952853208805511"
    "12540698747158523863050715693290963295227443043557"
    "66896648950445244523161731856403098711121722383113"
    "62229893423380308135336276614282806444486645238749"
    "30358907296290491560440772390713810515859307960866"
    "70172427121883998797908792274921901699720888093776"
    "65727333001053367881220235421809751254540594752243"
    "52584907711670556013604839586446706324415722155397"
    "53697817977846174064955149290862569321978468622482"
    "83972241375657056057490261407972968652414535100474"
    "82166370484403199890008895243450658541227588666881"
    "16427171479924442928230863465674813919123162824586"
    "17866458359124566529476545682848912883142607690042"
    "24219022671055626321111109370544217506941658960408"
    "07198403850962455444362981230987879927244284909188"
    "84580156166097919133875499200524063689912560717606"
    "05886116467109405077541002256983155200055935729725"
    "71636269561882670428252483600823257530420752963450"
)


def solution(n: str = N) -> int:
    # Scan every window of thirteen adjacent digits and keep the largest product.
    largest_product = -sys.maxsize - 1
    for i in range(len(n) - 12):
        product = 1
        for j in range(13):
            product *= int(n[i + j])
        if product > largest_product:
            largest_product = product
    return largest_product


if __name__ == "__main__":
    print(f"{solution() = }")
327
0
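# A sanity check for solution() in the digit-product snippet above, on a short
# digit string: the window of thirteen nines dominates every other window,
# so the answer is 9**13.
assert solution("1" * 5 + "9" * 13 + "2" * 5) == 9**13  # 2541865828329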
# Algorithm for pigeonhole sorting.
def pigeonhole_sort(a: list) -> None:
    # min() finds the minimum value.
    min_val = min(a)
    # max() finds the maximum value.
    max_val = max(a)
    # Size is the difference of the max and min values, plus one.
    size = max_val - min_val + 1

    # List of pigeonholes of size equal to the variable size.
    holes = [0] * size

    # Populate the pigeonholes.
    for x in a:
        assert isinstance(x, int), "integers only please"
        holes[x - min_val] += 1

    # Put the elements back into the array in order.
    i = 0
    for count in range(size):
        while holes[count] > 0:
            holes[count] -= 1
            a[i] = count + min_val
            i += 1


def main() -> None:
    a = [8, 3, 2, 7, 4, 6, 8]
    pigeonhole_sort(a)
    print("Sorted order is:", " ".join(str(x) for x in a))


if __name__ == "__main__":
    main()
123
'''simple docstring''' from __future__ import annotations import unittest from transformers import DistilBertConfig, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers.models.distilbert.modeling_tf_distilbert import ( TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST, TFDistilBertForMaskedLM, TFDistilBertForMultipleChoice, TFDistilBertForQuestionAnswering, TFDistilBertForSequenceClassification, TFDistilBertForTokenClassification, TFDistilBertModel, ) class UpperCAmelCase : def __init__( self :str , lowercase_ :str , )-> str: A__ = parent A__ = 13 A__ = 7 A__ = True A__ = True A__ = False A__ = True A__ = 99 A__ = 32 A__ = 2 A__ = 4 A__ = 37 A__ = "gelu" A__ = 0.1 A__ = 0.1 A__ = 5_12 A__ = 16 A__ = 2 A__ = 0.0_2 A__ = 3 A__ = 4 A__ = None def UpperCAmelCase_ ( self :Union[str, Any] )-> int: A__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) A__ = None if self.use_input_mask: A__ = random_attention_mask([self.batch_size, self.seq_length] ) A__ = None A__ = None A__ = None if self.use_labels: A__ = ids_tensor([self.batch_size] , self.type_sequence_label_size ) A__ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) A__ = ids_tensor([self.batch_size] , self.num_choices ) A__ = DistilBertConfig( vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , ) return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels def UpperCAmelCase_ ( self :str , lowercase_ :Optional[int] , lowercase_ :List[str] , lowercase_ :Any , lowercase_ :Union[str, Any] , lowercase_ :Optional[int] , lowercase_ :str )-> List[str]: A__ = TFDistilBertModel(config=lowercase_ ) A__ = {"input_ids": input_ids, "attention_mask": input_mask} A__ = model(lowercase_ ) A__ = [input_ids, input_mask] A__ = model(lowercase_ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def UpperCAmelCase_ ( self :List[str] , lowercase_ :str , lowercase_ :Optional[Any] , lowercase_ :Optional[int] , lowercase_ :Optional[int] , lowercase_ :Optional[int] , lowercase_ :Union[str, Any] )-> Optional[int]: A__ = TFDistilBertForMaskedLM(config=lowercase_ ) A__ = {"input_ids": input_ids, "attention_mask": input_mask} A__ = model(lowercase_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def UpperCAmelCase_ ( self :Any , lowercase_ :str , lowercase_ :str , lowercase_ :Optional[int] , lowercase_ :str , lowercase_ :List[Any] , lowercase_ :Union[str, Any] )-> Optional[int]: A__ = TFDistilBertForQuestionAnswering(config=lowercase_ ) A__ = { "input_ids": input_ids, "attention_mask": input_mask, } A__ = model(lowercase_ ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def UpperCAmelCase_ ( self :Union[str, Any] , lowercase_ :Optional[int] , lowercase_ :Any , lowercase_ :Dict , 
lowercase_ :Tuple , lowercase_ :Optional[Any] , lowercase_ :Optional[int] )-> Any: A__ = self.num_labels A__ = TFDistilBertForSequenceClassification(lowercase_ ) A__ = {"input_ids": input_ids, "attention_mask": input_mask} A__ = model(lowercase_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def UpperCAmelCase_ ( self :str , lowercase_ :Optional[Any] , lowercase_ :List[Any] , lowercase_ :Dict , lowercase_ :Tuple , lowercase_ :int , lowercase_ :Union[str, Any] )-> str: A__ = self.num_choices A__ = TFDistilBertForMultipleChoice(lowercase_ ) A__ = tf.tile(tf.expand_dims(lowercase_ , 1 ) , (1, self.num_choices, 1) ) A__ = tf.tile(tf.expand_dims(lowercase_ , 1 ) , (1, self.num_choices, 1) ) A__ = { "input_ids": multiple_choice_inputs_ids, "attention_mask": multiple_choice_input_mask, } A__ = model(lowercase_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def UpperCAmelCase_ ( self :str , lowercase_ :Any , lowercase_ :List[str] , lowercase_ :Any , lowercase_ :int , lowercase_ :List[Any] , lowercase_ :Tuple )-> Tuple: A__ = self.num_labels A__ = TFDistilBertForTokenClassification(lowercase_ ) A__ = {"input_ids": input_ids, "attention_mask": input_mask} A__ = model(lowercase_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def UpperCAmelCase_ ( self :Any )-> Union[str, Any]: A__ = self.prepare_config_and_inputs() ((A__), (A__), (A__), (A__), (A__), (A__)) = config_and_inputs A__ = {"input_ids": input_ids, "attention_mask": input_mask} return config, inputs_dict @require_tf class UpperCAmelCase ( UpperCamelCase__ , UpperCamelCase__ , unittest.TestCase ): __lowercase = ( ( TFDistilBertModel, TFDistilBertForMaskedLM, TFDistilBertForQuestionAnswering, TFDistilBertForSequenceClassification, TFDistilBertForTokenClassification, TFDistilBertForMultipleChoice, ) if is_tf_available() else None ) __lowercase = ( { """feature-extraction""": TFDistilBertModel, """fill-mask""": TFDistilBertForMaskedLM, """question-answering""": TFDistilBertForQuestionAnswering, """text-classification""": TFDistilBertForSequenceClassification, """token-classification""": TFDistilBertForTokenClassification, """zero-shot""": TFDistilBertForSequenceClassification, } if is_tf_available() else {} ) __lowercase = False __lowercase = False def UpperCAmelCase_ ( self :Optional[Any] )-> List[Any]: A__ = TFDistilBertModelTester(self ) A__ = ConfigTester(self , config_class=lowercase_ , dim=37 ) def UpperCAmelCase_ ( self :Tuple )-> Tuple: self.config_tester.run_common_tests() def UpperCAmelCase_ ( self :int )-> Tuple: A__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_model(*lowercase_ ) def UpperCAmelCase_ ( self :Optional[int] )-> Optional[Any]: A__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_masked_lm(*lowercase_ ) def UpperCAmelCase_ ( self :str )-> str: A__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_question_answering(*lowercase_ ) def UpperCAmelCase_ ( self :List[str] )-> Dict: A__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_sequence_classification(*lowercase_ ) def UpperCAmelCase_ ( self :List[str] )-> Optional[int]: A__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_multiple_choice(*lowercase_ ) def UpperCAmelCase_ ( self :str )-> int: A__ = 
self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_token_classification(*lowercase_ ) @slow def UpperCAmelCase_ ( self :List[str] )-> Dict: for model_name in list(TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1] ): A__ = TFDistilBertModel.from_pretrained(lowercase_ ) self.assertIsNotNone(lowercase_ ) @require_tf class UpperCAmelCase ( unittest.TestCase ): @slow def UpperCAmelCase_ ( self :List[Any] )-> Any: A__ = TFDistilBertModel.from_pretrained("distilbert-base-uncased" ) A__ = tf.constant([[0, 1, 2, 3, 4, 5]] ) A__ = model(lowercase_ )[0] A__ = [1, 6, 7_68] self.assertEqual(output.shape , lowercase_ ) A__ = tf.constant( [ [ [0.1_9_2_6_1_8_8_5, -0.1_3_7_3_2_9_5_5, 0.4_1_1_9_7_9_9], [0.2_2_1_5_0_1_5_6, -0.0_7_4_2_2_6_6_1, 0.3_9_0_3_7_2_0_4], [0.2_2_7_5_6_0_1_8, -0.0_8_9_6_4_1_4, 0.3_7_0_1_4_6_7], ] ] ) tf.debugging.assert_near(output[:, :3, :3] , lowercase_ , atol=1E-4 )
123
1
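# Pigeonhole sort in use: it sorts in place in O(n + k) time, where k is
# max(a) - min(a) + 1, so it pays off when the value range is small.
data = [8, 3, 2, 7, 4, 6, 8]
pigeonhole_sort(data)
print(data)  # [2, 3, 4, 6, 7, 8, 8]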
'''simple docstring''' import json from typing import List, Optional, Tuple from tokenizers import normalizers from ....tokenization_utils_fast import PreTrainedTokenizerFast from ....utils import logging from .tokenization_retribert import RetriBertTokenizer _lowerCAmelCase = logging.get_logger(__name__) _lowerCAmelCase = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''} _lowerCAmelCase = { '''vocab_file''': { '''yjernite/retribert-base-uncased''': ( '''https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/vocab.txt''' ), }, '''tokenizer_file''': { '''yjernite/retribert-base-uncased''': ( '''https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/tokenizer.json''' ), }, } _lowerCAmelCase = { '''yjernite/retribert-base-uncased''': 512, } _lowerCAmelCase = { '''yjernite/retribert-base-uncased''': {'''do_lower_case''': True}, } class lowerCAmelCase_( SCREAMING_SNAKE_CASE_ ): '''simple docstring''' __lowercase : Union[str, Any] = VOCAB_FILES_NAMES __lowercase : int = PRETRAINED_VOCAB_FILES_MAP __lowercase : Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __lowercase : str = PRETRAINED_INIT_CONFIGURATION __lowercase : str = RetriBertTokenizer __lowercase : str = ['''input_ids''', '''attention_mask'''] def __init__( self ,__UpperCAmelCase=None ,__UpperCAmelCase=None ,__UpperCAmelCase=True ,__UpperCAmelCase="[UNK]" ,__UpperCAmelCase="[SEP]" ,__UpperCAmelCase="[PAD]" ,__UpperCAmelCase="[CLS]" ,__UpperCAmelCase="[MASK]" ,__UpperCAmelCase=True ,__UpperCAmelCase=None ,**__UpperCAmelCase ,) -> Tuple: super().__init__( __UpperCAmelCase ,tokenizer_file=__UpperCAmelCase ,do_lower_case=__UpperCAmelCase ,unk_token=__UpperCAmelCase ,sep_token=__UpperCAmelCase ,pad_token=__UpperCAmelCase ,cls_token=__UpperCAmelCase ,mask_token=__UpperCAmelCase ,tokenize_chinese_chars=__UpperCAmelCase ,strip_accents=__UpperCAmelCase ,**__UpperCAmelCase ,) lowerCAmelCase__ : Optional[int] = json.loads(self.backend_tokenizer.normalizer.__getstate__() ) if ( normalizer_state.get("""lowercase""" ,__UpperCAmelCase ) != do_lower_case or normalizer_state.get("""strip_accents""" ,__UpperCAmelCase ) != strip_accents or normalizer_state.get("""handle_chinese_chars""" ,__UpperCAmelCase ) != tokenize_chinese_chars ): lowerCAmelCase__ : Optional[Any] = getattr(__UpperCAmelCase ,normalizer_state.pop("""type""" ) ) lowerCAmelCase__ : Union[str, Any] = do_lower_case lowerCAmelCase__ : List[Any] = strip_accents lowerCAmelCase__ : Optional[int] = tokenize_chinese_chars lowerCAmelCase__ : Optional[int] = normalizer_class(**__UpperCAmelCase ) lowerCAmelCase__ : List[Any] = do_lower_case def UpperCAmelCase_ ( self ,__UpperCAmelCase ,__UpperCAmelCase=None ) -> List[Any]: lowerCAmelCase__ : int = [self.cls_token_id] + token_ids_a + [self.sep_token_id] if token_ids_a: output += token_ids_a + [self.sep_token_id] return output def UpperCAmelCase_ ( self ,__UpperCAmelCase ,__UpperCAmelCase = None ) -> List[int]: lowerCAmelCase__ : int = [self.sep_token_id] lowerCAmelCase__ : Optional[int] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def UpperCAmelCase_ ( self ,__UpperCAmelCase ,__UpperCAmelCase = None ) -> Tuple[str]: lowerCAmelCase__ : Union[str, Any] = self._tokenizer.model.save(__UpperCAmelCase ,name=__UpperCAmelCase ) return tuple(__UpperCAmelCase )
37
import unittest

from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow


if is_torch_available():
    import torch

    from transformers import XLMRobertaModel


@require_sentencepiece
@require_tokenizers
@require_torch
class XLMRobertaModelIntegrationTest(unittest.TestCase):
    @slow
    def test_xlm_roberta_base(self):
        model = XLMRobertaModel.from_pretrained("xlm-roberta-base")
        input_ids = torch.tensor([[0, 581, 10269, 83, 99942, 136, 60742, 23, 70, 80583, 18276, 2]])
        # The dog is cute and lives in the garden house

        expected_output_shape = torch.Size((1, 12, 768))  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = torch.tensor(
            [[-0.0101, 0.1218, -0.0803, 0.0801, 0.1327, 0.0776, -0.1215, 0.2383, 0.3338, 0.3106, 0.0300, 0.0252]]
        )
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base')
        # xlmr.eval()
        # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
        with torch.no_grad():
            output = model(input_ids)["last_hidden_state"].detach()
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of the last dim
        self.assertTrue(torch.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3))

    @slow
    def test_xlm_roberta_large(self):
        model = XLMRobertaModel.from_pretrained("xlm-roberta-large")
        input_ids = torch.tensor([[0, 581, 10269, 83, 99942, 136, 60742, 23, 70, 80583, 18276, 2]])
        # The dog is cute and lives in the garden house

        expected_output_shape = torch.Size((1, 12, 1024))  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = torch.tensor(
            [[-0.0699, -0.0318, 0.0705, -0.1241, 0.0999, -0.0520, 0.1004, -0.1838, -0.4704, 0.1437, 0.0821, 0.0126]]
        )
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.large')
        # xlmr.eval()
        # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
        with torch.no_grad():
            output = model(input_ids)["last_hidden_state"].detach()
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of the last dim
        self.assertTrue(torch.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3))
37
1
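# The special-token layout the tokenizer in the code field above produces, written
# out as plain lists -- an illustrative sketch, not the tokenizer's own method:
# a single sequence becomes [CLS] A [SEP] with all-zero token type ids, and a pair
# becomes [CLS] A [SEP] B [SEP] with ones over the second segment.
from typing import Optional

def token_type_ids(len_a: int, len_b: Optional[int] = None) -> list:
    first = [0] * (1 + len_a + 1)      # [CLS] + A + [SEP]
    if len_b is None:
        return first
    return first + [1] * (len_b + 1)   # B + [SEP]

print(token_type_ids(3))     # [0, 0, 0, 0, 0]
print(token_type_ids(3, 2))  # [0, 0, 0, 0, 0, 1, 1, 1]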
from __future__ import annotations

from fractions import Fraction


def is_digit_cancelling(num: int, den: int) -> bool:
    return (
        num != den and num % 10 == den // 10 and (num // 10) / (den % 10) == num / den
    )


def fraction_list(digit_len: int) -> list[str]:
    solutions = []
    den = 11
    last_digit = int("1" + "0" * digit_len)
    for num in range(den, last_digit):
        while den <= 99:
            if (num != den) and (num % 10 == den // 10) and (den % 10 != 0):
                if is_digit_cancelling(num, den):
                    solutions.append(f"{num}/{den}")
            den += 1
        den = 10
    return solutions


def solution(n: int = 2) -> int:
    result = 1.0
    for fraction in fraction_list(n):
        frac = Fraction(fraction)
        result *= frac.denominator / frac.numerator
    return int(result)


if __name__ == "__main__":
    print(solution())
160
import glob
import os
import random
from string import ascii_lowercase, digits

import cv2

LABEL_DIR = ""
IMAGE_DIR = ""
OUTPUT_DIR = ""
FLIP_TYPE = 1  # (0 is vertical, 1 is horizontal)


def main() -> None:
    img_paths, annos = get_dataset(LABEL_DIR, IMAGE_DIR)
    print("Processing...")
    new_images, new_annos, paths = update_image_and_anno(img_paths, annos, FLIP_TYPE)

    for index, image in enumerate(new_images):
        # Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
        letter_code = random_chars(32)
        file_name = paths[index].split(os.sep)[-1].rsplit(".", 1)[0]
        file_root = f"{OUTPUT_DIR}/{file_name}_FLIP_{letter_code}"
        cv2.imwrite(f"/{file_root}.jpg", image, [cv2.IMWRITE_JPEG_QUALITY, 85])
        print(f"Success {index+1}/{len(new_images)} with {file_name}")
        annos_list = []
        for anno in new_annos[index]:
            obj = f"{anno[0]} {anno[1]} {anno[2]} {anno[3]} {anno[4]}"
            annos_list.append(obj)
        with open(f"/{file_root}.txt", "w") as outfile:
            outfile.write("\n".join(line for line in annos_list))


def get_dataset(label_dir: str, img_dir: str) -> tuple[list, list]:
    img_paths = []
    labels = []
    for label_file in glob.glob(os.path.join(label_dir, "*.txt")):
        label_name = label_file.split(os.sep)[-1].rsplit(".", 1)[0]
        with open(label_file) as in_file:
            obj_lists = in_file.readlines()
        img_path = os.path.join(img_dir, f"{label_name}.jpg")

        boxes = []
        for obj_list in obj_lists:
            obj = obj_list.rstrip("\n").split(" ")
            boxes.append(
                [
                    int(obj[0]),
                    float(obj[1]),
                    float(obj[2]),
                    float(obj[3]),
                    float(obj[4]),
                ]
            )
        if not boxes:
            continue
        img_paths.append(img_path)
        labels.append(boxes)
    return img_paths, labels


def update_image_and_anno(
    img_list: list, anno_list: list, flip_type: int = 1
) -> tuple[list, list, list]:
    new_imgs_list = []
    new_annos_lists = []
    path_list = []
    for idx in range(len(img_list)):
        new_annos = []
        path = img_list[idx]
        path_list.append(path)
        img_annos = anno_list[idx]
        img = cv2.imread(path)
        if flip_type == 1:
            new_img = cv2.flip(img, flip_type)
            for bbox in img_annos:
                x_center_new = 1 - bbox[1]
                new_annos.append([bbox[0], x_center_new, bbox[2], bbox[3], bbox[4]])
        elif flip_type == 0:
            new_img = cv2.flip(img, flip_type)
            for bbox in img_annos:
                y_center_new = 1 - bbox[2]
                new_annos.append([bbox[0], bbox[1], y_center_new, bbox[3], bbox[4]])
        new_annos_lists.append(new_annos)
        new_imgs_list.append(new_img)
    return new_imgs_list, new_annos_lists, path_list


def random_chars(number_char: int = 32) -> str:
    assert number_char > 1, "The number of characters should be greater than 1"
    letter_code = ascii_lowercase + digits
    return "".join(random.choice(letter_code) for _ in range(number_char))


if __name__ == "__main__":
    main()
    print("DONE ✅")
160
1
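# Worked check for is_digit_cancelling above: 49/98 "cancels" the shared 9 to
# give 4/8 without changing the value, while 12/24 -> 1/4 does change it.
print(is_digit_cancelling(49, 98))  # True:  49/98 == 4/8 == 0.5
print(is_digit_cancelling(12, 24))  # False: 12/24 == 0.5 but 1/4 == 0.25
print(fraction_list(2))             # ['16/64', '19/95', '26/65', '49/98']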
"""simple docstring""" import unittest from dataclasses import dataclass import pytest from accelerate.commands.config.config_args import SageMakerConfig from accelerate.utils import ComputeEnvironment from accelerate.utils.launch import _convert_nargs_to_dict @dataclass class __UpperCamelCase ( a__ ): lowerCamelCase : List[str] =ComputeEnvironment.AMAZON_SAGEMAKER lowerCamelCase : str =True lowerCamelCase : Union[str, Any] ="""ml.p3.2xlarge""" lowerCamelCase : str ="""accelerate_sagemaker_execution_role""" lowerCamelCase : int ="""hf-sm""" lowerCamelCase : int ="""us-east-1""" lowerCamelCase : Tuple =1 lowerCamelCase : Any ="""accelerate-sagemaker-1""" lowerCamelCase : str ="""1.6""" lowerCamelCase : Tuple ="""4.4""" lowerCamelCase : Optional[int] ="""train.py""" lowerCamelCase : Optional[Any] =[ """--model_name_or_path""", """bert""", """--do_train""", """False""", """--epochs""", """3""", """--learning_rate""", """5e-5""", """--max_steps""", """50.5""", ] lowerCamelCase : Union[str, Any] =[ """--model_name_or_path""", """bert""", """--do_train""", """--do_test""", """False""", """--do_predict""", """--epochs""", """3""", """--learning_rate""", """5e-5""", """--max_steps""", """50.5""", ] class __UpperCamelCase ( unittest.TestCase ): def __a ( self ) -> List[str]: # If no defaults are changed, `to_kwargs` returns an empty dict. a : str = _convert_nargs_to_dict(MockLaunchConfig.success_training_script_args ) assert isinstance(converted_args["model_name_or_path"] , lowerCAmelCase__ ) assert isinstance(converted_args["do_train"] , lowerCAmelCase__ ) assert isinstance(converted_args["epochs"] , lowerCAmelCase__ ) assert isinstance(converted_args["learning_rate"] , lowerCAmelCase__ ) assert isinstance(converted_args["max_steps"] , lowerCAmelCase__ ) with pytest.raises(lowerCAmelCase__ ): _convert_nargs_to_dict(MockLaunchConfig.fail_training_script_args )
105
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/biogpt": "https://huggingface.co/microsoft/biogpt/resolve/main/config.json",
    # See all BioGPT models at https://huggingface.co/models?filter=biogpt
}


class BioGptConfig(PretrainedConfig):
    model_type = "biogpt"

    def __init__(
        self,
        vocab_size=42384,
        hidden_size=1024,
        num_hidden_layers=24,
        num_attention_heads=16,
        intermediate_size=4096,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1024,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        scale_embedding=True,
        use_cache=True,
        layerdrop=0.0,
        activation_dropout=0.0,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.scale_embedding = scale_embedding
        self.use_cache = use_cache
        self.layerdrop = layerdrop
        self.activation_dropout = activation_dropout
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
180
0
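A small usage sketch for a `PretrainedConfig` subclass like the one above: overriding a default, round-tripping through JSON serialization. Assumes the `transformers` package is installed; the output directory name is illustrative.

from transformers import BioGptConfig

config = BioGptConfig(num_hidden_layers=12)   # override one default
print(config.num_hidden_layers)               # 12
print(config.hidden_size)                     # 1024 (unchanged default)
config.save_pretrained("./biogpt-config")     # writes config.json for reuse
restored = BioGptConfig.from_pretrained("./biogpt-config")
assert restored.num_hidden_layers == 12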
import math from typing import Optional import numpy as np from ...configuration_utils import PretrainedConfig from ...utils import logging _UpperCAmelCase = logging.get_logger(__name__) _UpperCAmelCase = { 'facebook/encodec_24khz': 'https://huggingface.co/facebook/encodec_24khz/resolve/main/config.json', 'facebook/encodec_48khz': 'https://huggingface.co/facebook/encodec_48khz/resolve/main/config.json', } class _UpperCamelCase ( lowerCAmelCase_ ): _UpperCamelCase : Union[str, Any] = '''encodec''' def __init__( self: Union[str, Any] , _SCREAMING_SNAKE_CASE: Tuple=[1.5, 3.0, 6.0, 12.0, 24.0] , _SCREAMING_SNAKE_CASE: Optional[Any]=24000 , _SCREAMING_SNAKE_CASE: Tuple=1 , _SCREAMING_SNAKE_CASE: Tuple=False , _SCREAMING_SNAKE_CASE: Optional[int]=None , _SCREAMING_SNAKE_CASE: Union[str, Any]=None , _SCREAMING_SNAKE_CASE: List[Any]=128 , _SCREAMING_SNAKE_CASE: Optional[Any]=32 , _SCREAMING_SNAKE_CASE: Optional[int]=1 , _SCREAMING_SNAKE_CASE: str=[8, 5, 4, 2] , _SCREAMING_SNAKE_CASE: int="weight_norm" , _SCREAMING_SNAKE_CASE: Optional[int]=7 , _SCREAMING_SNAKE_CASE: Any=7 , _SCREAMING_SNAKE_CASE: Optional[Any]=3 , _SCREAMING_SNAKE_CASE: List[Any]=2 , _SCREAMING_SNAKE_CASE: Optional[Any]=True , _SCREAMING_SNAKE_CASE: int="reflect" , _SCREAMING_SNAKE_CASE: List[Any]=2 , _SCREAMING_SNAKE_CASE: Tuple=2 , _SCREAMING_SNAKE_CASE: Optional[Any]=1.0 , _SCREAMING_SNAKE_CASE: Tuple=1024 , _SCREAMING_SNAKE_CASE: Optional[Any]=None , _SCREAMING_SNAKE_CASE: Union[str, Any]=True , **_SCREAMING_SNAKE_CASE: Any , ) -> Tuple: """simple docstring""" UpperCamelCase_ = target_bandwidths UpperCamelCase_ = sampling_rate UpperCamelCase_ = audio_channels UpperCamelCase_ = normalize UpperCamelCase_ = chunk_length_s UpperCamelCase_ = overlap UpperCamelCase_ = hidden_size UpperCamelCase_ = num_filters UpperCamelCase_ = num_residual_layers UpperCamelCase_ = upsampling_ratios UpperCamelCase_ = norm_type UpperCamelCase_ = kernel_size UpperCamelCase_ = last_kernel_size UpperCamelCase_ = residual_kernel_size UpperCamelCase_ = dilation_growth_rate UpperCamelCase_ = use_causal_conv UpperCamelCase_ = pad_mode UpperCamelCase_ = compress UpperCamelCase_ = num_lstm_layers UpperCamelCase_ = trim_right_ratio UpperCamelCase_ = codebook_size UpperCamelCase_ = codebook_dim if codebook_dim is not None else hidden_size UpperCamelCase_ = use_conv_shortcut if self.norm_type not in ["weight_norm", "time_group_norm"]: raise ValueError( f'''self.norm_type must be one of `"weight_norm"`, `"time_group_norm"`), got {self.norm_type}''' ) super().__init__(**_SCREAMING_SNAKE_CASE ) @property def lowercase ( self: Union[str, Any] ) -> Optional[int]: """simple docstring""" if self.chunk_length_s is None: return None else: return int(self.chunk_length_s * self.sampling_rate ) @property def lowercase ( self: str ) -> Optional[int]: """simple docstring""" if self.chunk_length_s is None or self.overlap is None: return None else: return max(1 , int((1.0 - self.overlap) * self.chunk_length ) ) @property def lowercase ( self: Any ) -> int: """simple docstring""" UpperCamelCase_ = np.prod(self.upsampling_ratios ) return math.ceil(self.sampling_rate / hop_length ) @property def lowercase ( self: List[Any] ) -> int: """simple docstring""" return int(1000 * self.target_bandwidths[-1] // (self.frame_rate * 10) )
328
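Worked arithmetic for the derived properties in the config above, using the 24 kHz sampling rate and upsampling ratios shown; `chunk_length_s` and `overlap` default to None in the class, so the values here are purely illustrative.

import math
import numpy as np

sampling_rate = 24000
chunk_length_s = 1.0                                        # illustrative; default is None
overlap = 0.5                                               # illustrative; default is None
upsampling_ratios = [8, 5, 4, 2]

chunk_length = int(chunk_length_s * sampling_rate)          # 24000 samples
chunk_stride = max(1, int((1.0 - overlap) * chunk_length))  # 12000 samples
hop_length = np.prod(upsampling_ratios)                     # 8*5*4*2 = 320
frame_rate = math.ceil(sampling_rate / hop_length)          # 75 frames/s
print(chunk_length, chunk_stride, hop_length, frame_rate)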
import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging _UpperCAmelCase = '▁' _UpperCAmelCase = {'vocab_file': 'spiece.model'} _UpperCAmelCase = { 'vocab_file': {'google/pegasus-xsum': 'https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model'} } _UpperCAmelCase = { 'google/pegasus-xsum': 5_1_2, } _UpperCAmelCase = logging.get_logger(__name__) class _UpperCamelCase ( lowerCAmelCase_ ): _UpperCamelCase : Optional[Any] = VOCAB_FILES_NAMES _UpperCamelCase : List[Any] = VOCAB_FILES_NAMES _UpperCamelCase : Union[str, Any] = PRETRAINED_VOCAB_FILES_MAP _UpperCamelCase : List[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _UpperCamelCase : Optional[int] = ['''input_ids''', '''attention_mask'''] def __init__( self: str , _SCREAMING_SNAKE_CASE: Any , _SCREAMING_SNAKE_CASE: str="<pad>" , _SCREAMING_SNAKE_CASE: Optional[Any]="</s>" , _SCREAMING_SNAKE_CASE: Any="<unk>" , _SCREAMING_SNAKE_CASE: int="<mask_2>" , _SCREAMING_SNAKE_CASE: List[Any]="<mask_1>" , _SCREAMING_SNAKE_CASE: Union[str, Any]=None , _SCREAMING_SNAKE_CASE: Optional[int]=103 , _SCREAMING_SNAKE_CASE: Optional[Dict[str, Any]] = None , **_SCREAMING_SNAKE_CASE: Dict , ) -> None: """simple docstring""" UpperCamelCase_ = offset if additional_special_tokens is not None: if not isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): raise TypeError( f'''additional_special_tokens should be of type {type(_SCREAMING_SNAKE_CASE )}, but is''' f''' {type(_SCREAMING_SNAKE_CASE )}''' ) UpperCamelCase_ = ( ([mask_token_sent] + additional_special_tokens) if mask_token_sent not in additional_special_tokens and mask_token_sent is not None else additional_special_tokens ) # fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken additional_special_tokens_extended += [ f'''<unk_{i}>''' for i in range(len(_SCREAMING_SNAKE_CASE ) , self.offset - 1 ) ] if len(set(_SCREAMING_SNAKE_CASE ) ) != len(_SCREAMING_SNAKE_CASE ): raise ValueError( "Please make sure that the provided additional_special_tokens do not contain an incorrectly" f''' shifted list of <unk_x> tokens. 
Found {additional_special_tokens_extended}.''' ) UpperCamelCase_ = additional_special_tokens_extended else: UpperCamelCase_ = [mask_token_sent] if mask_token_sent is not None else [] additional_special_tokens += [f'''<unk_{i}>''' for i in range(2 , self.offset )] UpperCamelCase_ = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( eos_token=_SCREAMING_SNAKE_CASE , unk_token=_SCREAMING_SNAKE_CASE , mask_token=_SCREAMING_SNAKE_CASE , pad_token=_SCREAMING_SNAKE_CASE , mask_token_sent=_SCREAMING_SNAKE_CASE , offset=_SCREAMING_SNAKE_CASE , additional_special_tokens=_SCREAMING_SNAKE_CASE , sp_model_kwargs=self.sp_model_kwargs , **_SCREAMING_SNAKE_CASE , ) UpperCamelCase_ = mask_token_sent UpperCamelCase_ = vocab_file UpperCamelCase_ = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(_SCREAMING_SNAKE_CASE ) # add special tokens to encoder dict UpperCamelCase_ = { 0: self.pad_token, 1: self.eos_token, } if self.mask_token_sent is not None: self.encoder.update( { 2: self.mask_token_sent, 3: self.mask_token, } ) if self.offset > 0: # entries 2-104 are only used for pretraining and called <mask_1>, <mask_2>, unk_2, ...unk_102 # mask_token_sent is already added to list -> so start at 1 self.encoder.update({i + 3: additional_special_tokens[i] for i in range(1 , self.offset - 1 )} ) UpperCamelCase_ = {v: k for k, v in self.encoder.items()} @property def lowercase ( self: Dict ) -> int: """simple docstring""" return len(self.sp_model ) + self.offset def lowercase ( self: int ) -> Dict[str, int]: """simple docstring""" UpperCamelCase_ = {self.convert_ids_to_tokens(_SCREAMING_SNAKE_CASE ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def __getstate__( self: Optional[int] ) -> Optional[int]: """simple docstring""" UpperCamelCase_ = self.__dict__.copy() UpperCamelCase_ = None return state def __setstate__( self: List[Any] , _SCREAMING_SNAKE_CASE: List[Any] ) -> Any: """simple docstring""" UpperCamelCase_ = d # for backward compatibility if not hasattr(self , "sp_model_kwargs" ): UpperCamelCase_ = {} UpperCamelCase_ = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) def lowercase ( self: Optional[int] , _SCREAMING_SNAKE_CASE: str ) -> List[str]: """simple docstring""" return self.sp_model.encode(_SCREAMING_SNAKE_CASE , out_type=_SCREAMING_SNAKE_CASE ) def lowercase ( self: Tuple , _SCREAMING_SNAKE_CASE: str ) -> int: """simple docstring""" if token in self.decoder: return self.decoder[token] elif token in self.added_tokens_decoder: return self.added_tokens_decoder[token] UpperCamelCase_ = self.sp_model.piece_to_id(_SCREAMING_SNAKE_CASE ) return sp_id + self.offset def lowercase ( self: str , _SCREAMING_SNAKE_CASE: int ) -> str: """simple docstring""" if index in self.encoder: return self.encoder[index] elif index in self.added_tokens_encoder: return self.added_tokens_encoder[index] else: UpperCamelCase_ = self.sp_model.IdToPiece(index - self.offset ) return token def lowercase ( self: Optional[Any] , _SCREAMING_SNAKE_CASE: Tuple ) -> Optional[int]: """simple docstring""" UpperCamelCase_ = [] UpperCamelCase_ = "" for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: out_string += self.sp_model.decode(_SCREAMING_SNAKE_CASE ) + token UpperCamelCase_ = [] else: current_sub_tokens.append(_SCREAMING_SNAKE_CASE ) out_string += self.sp_model.decode(_SCREAMING_SNAKE_CASE ) return out_string.strip() def lowercase ( 
self: List[str] , _SCREAMING_SNAKE_CASE: Optional[int]=False ) -> Union[str, Any]: """simple docstring""" return 1 def lowercase ( self: int , _SCREAMING_SNAKE_CASE: str ) -> str: """simple docstring""" UpperCamelCase_ = set(self.all_special_ids ) # call it once instead of inside list comp all_special_ids.remove(self.unk_token_id ) # <unk> is only sometimes special return [1 if x in all_special_ids else 0 for x in seq] def lowercase ( self: str , _SCREAMING_SNAKE_CASE: List , _SCREAMING_SNAKE_CASE: Optional[List] = None , _SCREAMING_SNAKE_CASE: bool = False ) -> List[int]: """simple docstring""" if already_has_special_tokens: return self._special_token_mask(_SCREAMING_SNAKE_CASE ) elif token_ids_a is None: return self._special_token_mask(_SCREAMING_SNAKE_CASE ) + [1] else: return self._special_token_mask(token_ids_a + token_ids_a ) + [1] def lowercase ( self: Union[str, Any] , _SCREAMING_SNAKE_CASE: Optional[int] , _SCREAMING_SNAKE_CASE: List[Any]=None ) -> List[int]: """simple docstring""" if token_ids_a is None: return token_ids_a + [self.eos_token_id] # We don't expect to process pairs, but leave the pair logic for API consistency return token_ids_a + token_ids_a + [self.eos_token_id] def lowercase ( self: str , _SCREAMING_SNAKE_CASE: str , _SCREAMING_SNAKE_CASE: Optional[str] = None ) -> Tuple[str]: """simple docstring""" if not os.path.isdir(_SCREAMING_SNAKE_CASE ): logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' ) return UpperCamelCase_ = os.path.join( _SCREAMING_SNAKE_CASE , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(_SCREAMING_SNAKE_CASE ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , _SCREAMING_SNAKE_CASE ) elif not os.path.isfile(self.vocab_file ): with open(_SCREAMING_SNAKE_CASE , "wb" ) as fi: UpperCamelCase_ = self.sp_model.serialized_model_proto() fi.write(_SCREAMING_SNAKE_CASE ) return (out_vocab_file,)
328
1
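A minimal sketch of the id-offset scheme the tokenizer above uses: the first `offset` ids are reserved for special tokens kept outside the SentencePiece model, so piece ids are shifted up by `offset` when encoding and shifted back down when decoding. Toy numbers only; no SentencePiece model required.

offset = 103
encoder = {0: "<pad>", 1: "</s>", 2: "<mask_2>", 3: "<mask_1>"}  # reserved low ids

def piece_id_to_token_id(sp_id: int) -> int:
    return sp_id + offset          # shift SentencePiece ids past the reserved block

def token_id_to_piece_id(token_id: int) -> int:
    return token_id - offset       # inverse mapping used when decoding

assert piece_id_to_token_id(0) == 103     # first real piece lands after the block
assert token_id_to_piece_id(150) == 47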
import sys N = ( '''73167176531330624919225119674426574742355349194934''' '''96983520312774506326239578318016984801869478851843''' '''85861560789112949495459501737958331952853208805511''' '''12540698747158523863050715693290963295227443043557''' '''66896648950445244523161731856403098711121722383113''' '''62229893423380308135336276614282806444486645238749''' '''30358907296290491560440772390713810515859307960866''' '''70172427121883998797908792274921901699720888093776''' '''65727333001053367881220235421809751254540594752243''' '''52584907711670556013604839586446706324415722155397''' '''53697817977846174064955149290862569321978468622482''' '''83972241375657056057490261407972968652414535100474''' '''82166370484403199890008895243450658541227588666881''' '''16427171479924442928230863465674813919123162824586''' '''17866458359124566529476545682848912883142607690042''' '''24219022671055626321111109370544217506941658960408''' '''07198403850962455444362981230987879927244284909188''' '''84580156166097919133875499200524063689912560717606''' '''05886116467109405077541002256983155200055935729725''' '''71636269561882670428252483600823257530420752963450''' ) def solution(n: str = N) -> int: largest_product = -sys.maxsize - 1 for i in range(len(n ) - 12 ): product = 1 for j in range(13 ): product *= int(n[i + j] ) if product > largest_product: largest_product = product return largest_product if __name__ == "__main__": print(f"""{solution() = }""")
8
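An equivalent one-liner for the windowed product in `solution` above, using `math.prod` over every 13-digit slice; useful as a cross-check. The toy input repeats the digits 1-9 so every window has a nonzero product.

import math

def solution_windowed(n: str) -> int:
    # max product over all 13-digit sliding windows
    return max(math.prod(int(d) for d in n[i : i + 13]) for i in range(len(n) - 12))

assert solution_windowed("123456789" * 12) > 0  # toy input; real input is the 1000-digit N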
from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging from ..auto import CONFIG_MAPPING SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__) SCREAMING_SNAKE_CASE_ = { """microsoft/table-transformer-detection""": ( """https://huggingface.co/microsoft/table-transformer-detection/resolve/main/config.json""" ), } class UpperCamelCase__ ( lowerCAmelCase_ ): '''simple docstring''' __snake_case : Union[str, Any] = "table-transformer" __snake_case : Union[str, Any] = ["past_key_values"] __snake_case : List[Any] = { "hidden_size": "d_model", "num_attention_heads": "encoder_attention_heads", } def __init__( self : Optional[int] ,lowerCamelCase__ : Optional[Any]=True ,lowerCamelCase__ : Optional[Any]=None ,lowerCamelCase__ : List[Any]=3 ,lowerCamelCase__ : Optional[int]=100 ,lowerCamelCase__ : List[Any]=6 ,lowerCamelCase__ : Dict=2048 ,lowerCamelCase__ : List[Any]=8 ,lowerCamelCase__ : Dict=6 ,lowerCamelCase__ : Dict=2048 ,lowerCamelCase__ : Any=8 ,lowerCamelCase__ : Optional[int]=0.0 ,lowerCamelCase__ : int=0.0 ,lowerCamelCase__ : List[Any]=True ,lowerCamelCase__ : Optional[int]="relu" ,lowerCamelCase__ : Tuple=256 ,lowerCamelCase__ : Any=0.1 ,lowerCamelCase__ : Optional[Any]=0.0 ,lowerCamelCase__ : Tuple=0.0 ,lowerCamelCase__ : List[Any]=0.02 ,lowerCamelCase__ : int=1.0 ,lowerCamelCase__ : List[str]=False ,lowerCamelCase__ : Optional[Any]="sine" ,lowerCamelCase__ : List[str]="resnet50" ,lowerCamelCase__ : Optional[Any]=True ,lowerCamelCase__ : List[str]=False ,lowerCamelCase__ : int=1 ,lowerCamelCase__ : Dict=5 ,lowerCamelCase__ : Tuple=2 ,lowerCamelCase__ : Union[str, Any]=1 ,lowerCamelCase__ : str=1 ,lowerCamelCase__ : Any=5 ,lowerCamelCase__ : Tuple=2 ,lowerCamelCase__ : str=0.1 ,**lowerCamelCase__ : List[str] ,) -> Optional[int]: '''simple docstring''' if backbone_config is not None and use_timm_backbone: raise ValueError("""You can't specify both `backbone_config` and `use_timm_backbone`.""" ) if not use_timm_backbone: if backbone_config is None: logger.info("""`backbone_config` is `None`. 
Initializing the config with the default `ResNet` backbone.""" ) SCREAMING_SNAKE_CASE = CONFIG_MAPPING["""resnet"""](out_features=["""stage4"""] ) elif isinstance(lowerCamelCase__ ,lowerCamelCase__ ): SCREAMING_SNAKE_CASE = backbone_config.get("""model_type""" ) SCREAMING_SNAKE_CASE = CONFIG_MAPPING[backbone_model_type] SCREAMING_SNAKE_CASE = config_class.from_dict(lowerCamelCase__ ) # set timm attributes to None SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE = None, None, None SCREAMING_SNAKE_CASE = use_timm_backbone SCREAMING_SNAKE_CASE = backbone_config SCREAMING_SNAKE_CASE = num_channels SCREAMING_SNAKE_CASE = num_queries SCREAMING_SNAKE_CASE = d_model SCREAMING_SNAKE_CASE = encoder_ffn_dim SCREAMING_SNAKE_CASE = encoder_layers SCREAMING_SNAKE_CASE = encoder_attention_heads SCREAMING_SNAKE_CASE = decoder_ffn_dim SCREAMING_SNAKE_CASE = decoder_layers SCREAMING_SNAKE_CASE = decoder_attention_heads SCREAMING_SNAKE_CASE = dropout SCREAMING_SNAKE_CASE = attention_dropout SCREAMING_SNAKE_CASE = activation_dropout SCREAMING_SNAKE_CASE = activation_function SCREAMING_SNAKE_CASE = init_std SCREAMING_SNAKE_CASE = init_xavier_std SCREAMING_SNAKE_CASE = encoder_layerdrop SCREAMING_SNAKE_CASE = decoder_layerdrop SCREAMING_SNAKE_CASE = encoder_layers SCREAMING_SNAKE_CASE = auxiliary_loss SCREAMING_SNAKE_CASE = position_embedding_type SCREAMING_SNAKE_CASE = backbone SCREAMING_SNAKE_CASE = use_pretrained_backbone SCREAMING_SNAKE_CASE = dilation # Hungarian matcher SCREAMING_SNAKE_CASE = class_cost SCREAMING_SNAKE_CASE = bbox_cost SCREAMING_SNAKE_CASE = giou_cost # Loss coefficients SCREAMING_SNAKE_CASE = mask_loss_coefficient SCREAMING_SNAKE_CASE = dice_loss_coefficient SCREAMING_SNAKE_CASE = bbox_loss_coefficient SCREAMING_SNAKE_CASE = giou_loss_coefficient SCREAMING_SNAKE_CASE = eos_coefficient super().__init__(is_encoder_decoder=lowerCamelCase__ ,**lowerCamelCase__ ) @property def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> int: '''simple docstring''' return self.encoder_attention_heads @property def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> int: '''simple docstring''' return self.d_model class UpperCamelCase__ ( lowerCAmelCase_ ): '''simple docstring''' __snake_case : int = version.parse("1.11" ) @property def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> Mapping[str, Mapping[int, str]]: '''simple docstring''' return OrderedDict( [ ("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}), ("""pixel_mask""", {0: """batch"""}), ] ) @property def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> float: '''simple docstring''' return 1e-5 @property def SCREAMING_SNAKE_CASE__ ( self : Any ) -> int: '''simple docstring''' return 12
296
0
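A small sketch of how the `inputs` mapping in the OnnxConfig above is consumed: each entry names an input tensor and marks which axes are dynamic for ONNX export, keyed by axis index. Toy consumption only; no ONNX runtime needed.

from collections import OrderedDict

inputs = OrderedDict(
    [
        ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
        ("pixel_mask", {0: "batch"}),
    ]
)

for tensor_name, dynamic_axes in inputs.items():
    # axis index -> symbolic name; fixed-size axes are simply omitted
    print(tensor_name, "->", dynamic_axes)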
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available _import_structure = { '''configuration_m2m_100''': ['''M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''M2M100Config''', '''M2M100OnnxConfig'''], '''tokenization_m2m_100''': ['''M2M100Tokenizer'''], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure['''modeling_m2m_100'''] = [ '''M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST''', '''M2M100ForConditionalGeneration''', '''M2M100Model''', '''M2M100PreTrainedModel''', ] if TYPE_CHECKING: from .configuration_m2m_100 import M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP, M2M100Config, M2M100OnnxConfig from .tokenization_m2m_100 import M2M100Tokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_m2m_100 import ( M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST, M2M100ForConditionalGeneration, M2M100Model, M2M100PreTrainedModel, ) else: import sys sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
363
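A simplified sketch of the lazy-import pattern above, using PEP 562 module-level `__getattr__` instead of the `_LazyModule` helper; the submodule and class names are illustrative and assume this code lives inside a package.

import importlib

_import_structure = {"configuration_m2m_100": ["M2M100Config"]}
# invert the mapping: exported name -> submodule that defines it
_name_to_module = {name: mod for mod, names in _import_structure.items() for name in names}

def __getattr__(name):
    # only import the heavy submodule on first attribute access
    if name in _name_to_module:
        module = importlib.import_module(f".{_name_to_module[name]}", __name__)
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")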
'''simple docstring''' import multiprocessing from typing import TYPE_CHECKING, Optional, Union from .. import Dataset, Features, config from ..formatting import query_table from ..packaged_modules.sql.sql import Sql from ..utils import logging from .abc import AbstractDatasetInputStream if TYPE_CHECKING: import sqlitea import sqlalchemy class A__ ( _snake_case ): def __init__( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = None , UpperCamelCase__ = None , UpperCamelCase__ = False , **UpperCamelCase__ , ) -> Optional[Any]: '''simple docstring''' super().__init__(features=UpperCamelCase__ , cache_dir=UpperCamelCase__ , keep_in_memory=UpperCamelCase__ , **UpperCamelCase__ ) A_ = Sql( cache_dir=UpperCamelCase__ , features=UpperCamelCase__ , sql=UpperCamelCase__ , con=UpperCamelCase__ , **UpperCamelCase__ , ) def snake_case_ ( self ) -> Optional[Any]: '''simple docstring''' A_ = None A_ = None A_ = None A_ = None self.builder.download_and_prepare( download_config=UpperCamelCase__ , download_mode=UpperCamelCase__ , verification_mode=UpperCamelCase__ , base_path=UpperCamelCase__ , ) # Build dataset for splits A_ = self.builder.as_dataset( split="""train""" , verification_mode=UpperCamelCase__ , in_memory=self.keep_in_memory ) return dataset class A__ : def __init__( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = None , UpperCamelCase__ = None , **UpperCamelCase__ , ) -> List[Any]: '''simple docstring''' if num_proc is not None and num_proc <= 0: raise ValueError(f'''num_proc {num_proc} must be an integer > 0.''' ) A_ = dataset A_ = name A_ = con A_ = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE A_ = num_proc A_ = to_sql_kwargs def snake_case_ ( self ) -> int: '''simple docstring''' A_ = self.to_sql_kwargs.pop("""sql""" , UpperCamelCase__ ) A_ = self.to_sql_kwargs.pop("""con""" , UpperCamelCase__ ) A_ = self.to_sql_kwargs.pop("""index""" , UpperCamelCase__ ) A_ = self._write(index=UpperCamelCase__ , **self.to_sql_kwargs ) return written def snake_case_ ( self , UpperCamelCase__ ) -> Union[str, Any]: '''simple docstring''' A_ , A_ , A_ = args A_ = {**to_sql_kwargs, """if_exists""": """append"""} if offset > 0 else to_sql_kwargs A_ = query_table( table=self.dataset.data , key=slice(UpperCamelCase__ , offset + self.batch_size ) , indices=self.dataset._indices , ) A_ = batch.to_pandas() A_ = df.to_sql(self.name , self.con , index=UpperCamelCase__ , **UpperCamelCase__ ) return num_rows or len(UpperCamelCase__ ) def snake_case_ ( self , UpperCamelCase__ , **UpperCamelCase__ ) -> int: '''simple docstring''' A_ = 0 if self.num_proc is None or self.num_proc == 1: for offset in logging.tqdm( range(0 , len(self.dataset ) , self.batch_size ) , unit="""ba""" , disable=not logging.is_progress_bar_enabled() , desc="""Creating SQL from Arrow format""" , ): written += self._batch_sql((offset, index, to_sql_kwargs) ) else: A_ , A_ = len(self.dataset ), self.batch_size with multiprocessing.Pool(self.num_proc ) as pool: for num_rows in logging.tqdm( pool.imap( self._batch_sql , [(offset, index, to_sql_kwargs) for offset in range(0 , UpperCamelCase__ , UpperCamelCase__ )] , ) , total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size , unit="""ba""" , disable=not logging.is_progress_bar_enabled() , desc="""Creating SQL from Arrow format""" , ): written += num_rows return written
101
0
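A minimal, standard-library sketch of the batched-write loop the SQL writer above implements: slice the source rows into `batch_size` chunks and append each chunk to a table, counting written rows. Uses sqlite3 directly; the table name and data are illustrative.

import sqlite3

rows = [(i, f"example {i}") for i in range(10)]
batch_size = 4

con = sqlite3.connect(":memory:")
con.execute("CREATE TABLE demo (id INTEGER, text TEXT)")
written = 0
for offset in range(0, len(rows), batch_size):
    batch = rows[offset : offset + batch_size]          # one chunk per iteration
    con.executemany("INSERT INTO demo VALUES (?, ?)", batch)
    written += len(batch)
con.commit()
assert written == 10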
import logging import os import sys from dataclasses import dataclass, field from typing import Optional from seqaseq_trainer import SeqaSeqTrainer from seqaseq_training_args import SeqaSeqTrainingArguments import transformers from transformers import ( AutoConfig, AutoModelForSeqaSeqLM, AutoTokenizer, HfArgumentParser, MBartTokenizer, MBartTokenizerFast, set_seed, ) from transformers.trainer_utils import EvaluationStrategy, is_main_process from transformers.training_args import ParallelMode from utils import ( SeqaSeqDataCollator, SeqaSeqDataset, assert_all_frozen, build_compute_metrics_fn, check_output_dir, freeze_embeds, freeze_params, lmap, save_json, use_task_specific_params, write_txt_file, ) _snake_case : Any = logging.getLogger(__name__) @dataclass class a : """simple docstring""" __UpperCAmelCase : str = field( metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} ) __UpperCAmelCase : Optional[str] = field( default=_lowerCAmelCase , metadata={"help": "Pretrained config name or path if not the same as model_name"} ) __UpperCAmelCase : Optional[str] = field( default=_lowerCAmelCase , metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} ) __UpperCAmelCase : Optional[str] = field( default=_lowerCAmelCase , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , ) __UpperCAmelCase : bool = field(default=_lowerCAmelCase , metadata={"help": "Whether tp freeze the encoder."} ) __UpperCAmelCase : bool = field(default=_lowerCAmelCase , metadata={"help": "Whether to freeze the embeddings."} ) @dataclass class a : """simple docstring""" __UpperCAmelCase : str = field( metadata={"help": "The input data dir. Should contain the .tsv files (or other data files) for the task."} ) __UpperCAmelCase : Optional[str] = field( default="summarization" , metadata={"help": "Task name, summarization (or summarization_{dataset} for pegasus) or translation"} , ) __UpperCAmelCase : Optional[int] = field( default=1024 , metadata={ "help": ( "The maximum total input sequence length after tokenization. Sequences longer " "than this will be truncated, sequences shorter will be padded." ) } , ) __UpperCAmelCase : Optional[int] = field( default=128 , metadata={ "help": ( "The maximum total sequence length for target text after tokenization. Sequences longer " "than this will be truncated, sequences shorter will be padded." ) } , ) __UpperCAmelCase : Optional[int] = field( default=142 , metadata={ "help": ( "The maximum total sequence length for validation target text after tokenization. Sequences longer " "than this will be truncated, sequences shorter will be padded. " "This argument is also used to override the ``max_length`` param of ``model.generate``, which is used " "during ``evaluate`` and ``predict``." ) } , ) __UpperCAmelCase : Optional[int] = field( default=142 , metadata={ "help": ( "The maximum total sequence length for test target text after tokenization. Sequences longer " "than this will be truncated, sequences shorter will be padded." ) } , ) __UpperCAmelCase : Optional[int] = field(default=-1 , metadata={"help": "# training examples. -1 means use all."} ) __UpperCAmelCase : Optional[int] = field(default=-1 , metadata={"help": "# validation examples. -1 means use all."} ) __UpperCAmelCase : Optional[int] = field(default=-1 , metadata={"help": "# test examples. 
-1 means use all."} ) __UpperCAmelCase : Optional[str] = field(default=_lowerCAmelCase , metadata={"help": "Source language id for translation."} ) __UpperCAmelCase : Optional[str] = field(default=_lowerCAmelCase , metadata={"help": "Target language id for translation."} ) __UpperCAmelCase : Optional[int] = field(default=_lowerCAmelCase , metadata={"help": "# num_beams to use for evaluation."} ) __UpperCAmelCase : bool = field( default=_lowerCAmelCase , metadata={"help": "If only pad tokens should be ignored. This assumes that `config.pad_token_id` is defined."} , ) def lowerCAmelCase_ ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ): logger.info(F'***** {split} metrics *****' ) for key in sorted(metrics.keys() ): logger.info(F' {key} = {metrics[key]}' ) save_json(__lowerCamelCase , os.path.join(__lowerCamelCase , F'{split}_results.json' ) ) def lowerCAmelCase_ ( ): # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. __snake_case : Optional[int] = HfArgumentParser((ModelArguments, DataTrainingArguments, SeqaSeqTrainingArguments) ) if len(sys.argv ) == 2 and sys.argv[1].endswith(".json" ): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. __snake_case , __snake_case , __snake_case : Optional[int] = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) ) else: __snake_case , __snake_case , __snake_case : List[str] = parser.parse_args_into_dataclasses() check_output_dir(__lowerCamelCase ) # Setup logging logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , ) logger.warning( "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s" , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.parallel_mode == ParallelMode.DISTRIBUTED ) , training_args.fpaa , ) transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() # Set the verbosity to info of the Transformers logger (on main process only): if is_main_process(training_args.local_rank ): transformers.utils.logging.set_verbosity_info() logger.info("Training/evaluation parameters %s" , __lowerCamelCase ) # Set seed set_seed(training_args.seed ) # Load pretrained model and tokenizer # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. 
__snake_case : int = AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , ) __snake_case : str = ("encoder_layerdrop", "decoder_layerdrop", "dropout", "attention_dropout") for p in extra_model_params: if getattr(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ): assert hasattr(__lowerCamelCase , __lowerCamelCase ), F'({config.__class__.__name__}) doesn\'t have a `{p}` attribute' setattr(__lowerCamelCase , __lowerCamelCase , getattr(__lowerCamelCase , __lowerCamelCase ) ) __snake_case : List[str] = AutoTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , ) __snake_case : Tuple = AutoModelForSeqaSeqLM.from_pretrained( model_args.model_name_or_path , from_tf=".ckpt" in model_args.model_name_or_path , config=__lowerCamelCase , cache_dir=model_args.cache_dir , ) # use task specific params use_task_specific_params(__lowerCamelCase , data_args.task ) # set num_beams for evaluation if data_args.eval_beams is None: __snake_case : Optional[int] = model.config.num_beams # set decoder_start_token_id for MBart if model.config.decoder_start_token_id is None and isinstance(__lowerCamelCase , (MBartTokenizer, MBartTokenizerFast) ): assert ( data_args.tgt_lang is not None and data_args.src_lang is not None ), "mBart requires --tgt_lang and --src_lang" if isinstance(__lowerCamelCase , __lowerCamelCase ): __snake_case : List[Any] = tokenizer.lang_code_to_id[data_args.tgt_lang] else: __snake_case : Optional[int] = tokenizer.convert_tokens_to_ids(data_args.tgt_lang ) if model_args.freeze_embeds: freeze_embeds(__lowerCamelCase ) if model_args.freeze_encoder: freeze_params(model.get_encoder() ) assert_all_frozen(model.get_encoder() ) __snake_case : Union[str, Any] = SeqaSeqDataset # Get datasets __snake_case : Optional[Any] = ( dataset_class( __lowerCamelCase , type_path="train" , data_dir=data_args.data_dir , n_obs=data_args.n_train , max_target_length=data_args.max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or "" , ) if training_args.do_train else None ) __snake_case : Dict = ( dataset_class( __lowerCamelCase , type_path="val" , data_dir=data_args.data_dir , n_obs=data_args.n_val , max_target_length=data_args.val_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or "" , ) if training_args.do_eval or training_args.evaluation_strategy != EvaluationStrategy.NO else None ) __snake_case : List[str] = ( dataset_class( __lowerCamelCase , type_path="test" , data_dir=data_args.data_dir , n_obs=data_args.n_test , max_target_length=data_args.test_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or "" , ) if training_args.do_predict else None ) # Initialize our Trainer __snake_case : List[Any] = ( build_compute_metrics_fn(data_args.task , __lowerCamelCase ) if training_args.predict_with_generate else None ) __snake_case : int = SeqaSeqTrainer( model=__lowerCamelCase , args=__lowerCamelCase , data_args=__lowerCamelCase , train_dataset=__lowerCamelCase , eval_dataset=__lowerCamelCase , data_collator=SeqaSeqDataCollator( __lowerCamelCase , __lowerCamelCase , model.config.decoder_start_token_id , training_args.tpu_num_cores ) , compute_metrics=__lowerCamelCase , tokenizer=__lowerCamelCase , ) __snake_case : str = {} # Training if training_args.do_train: logger.info("*** Train ***" ) 
__snake_case : List[Any] = trainer.train( model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None ) __snake_case : List[str] = train_result.metrics __snake_case : int = data_args.n_train trainer.save_model() # this also saves the tokenizer if trainer.is_world_process_zero(): handle_metrics("train" , __lowerCamelCase , training_args.output_dir ) all_metrics.update(__lowerCamelCase ) # Need to save the state, since Trainer.save_model saves only the tokenizer with the model trainer.state.save_to_json(os.path.join(training_args.output_dir , "trainer_state.json" ) ) # For convenience, we also re-save the tokenizer to the same directory, # so that you can share your model easily on huggingface.co/models =) tokenizer.save_pretrained(training_args.output_dir ) # Evaluation if training_args.do_eval: logger.info("*** Evaluate ***" ) __snake_case : Dict = trainer.evaluate(metric_key_prefix="val" ) __snake_case : List[Any] = data_args.n_val __snake_case : Optional[Any] = round(metrics["val_loss"] , 4 ) if trainer.is_world_process_zero(): handle_metrics("val" , __lowerCamelCase , training_args.output_dir ) all_metrics.update(__lowerCamelCase ) if training_args.do_predict: logger.info("*** Predict ***" ) __snake_case : Optional[int] = trainer.predict(test_dataset=__lowerCamelCase , metric_key_prefix="test" ) __snake_case : int = test_output.metrics __snake_case : Any = data_args.n_test if trainer.is_world_process_zero(): __snake_case : List[Any] = round(metrics["test_loss"] , 4 ) handle_metrics("test" , __lowerCamelCase , training_args.output_dir ) all_metrics.update(__lowerCamelCase ) if training_args.predict_with_generate: __snake_case : Any = tokenizer.batch_decode( test_output.predictions , skip_special_tokens=__lowerCamelCase , clean_up_tokenization_spaces=__lowerCamelCase ) __snake_case : Any = lmap(str.strip , __lowerCamelCase ) write_txt_file(__lowerCamelCase , os.path.join(training_args.output_dir , "test_generations.txt" ) ) if trainer.is_world_process_zero(): save_json(__lowerCamelCase , os.path.join(training_args.output_dir , "all_results.json" ) ) return all_metrics def lowerCAmelCase_ ( __lowerCamelCase ): # For xla_spawn (TPUs) main() if __name__ == "__main__": main()
123
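A hedged sketch of the dataclass-driven CLI pattern the fine-tuning script above is built on: `HfArgumentParser` maps dataclass fields to command-line flags. The field names here are illustrative, not the script's own; assumes `transformers` is installed.

from dataclasses import dataclass, field
from transformers import HfArgumentParser

@dataclass
class DemoArguments:
    model_name_or_path: str = field(metadata={"help": "Model id or local path"})
    max_source_length: int = field(default=1024, metadata={"help": "Max input tokens"})

parser = HfArgumentParser(DemoArguments)
# parse an explicit argv-style list instead of sys.argv
(args,) = parser.parse_args_into_dataclasses(["--model_name_or_path", "bert", "--max_source_length", "512"])
assert args.max_source_length == 512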
import numpy as np import torch from torch.utils.data import Dataset, IterableDataset from ..utils.generic import ModelOutput class a (_lowerCAmelCase ): """simple docstring""" def __init__( self : Tuple , lowerCamelCase : List[str] , lowerCamelCase : Union[str, Any] , lowerCamelCase : Optional[int] ) -> Any: __snake_case : List[Any] = dataset __snake_case : Optional[int] = process __snake_case : str = params def __len__( self : Optional[Any] ) -> Any: return len(self.dataset ) def __getitem__( self : Dict , lowerCamelCase : List[Any] ) -> List[str]: __snake_case : List[Any] = self.dataset[i] __snake_case : Tuple = self.process(lowerCamelCase , **self.params ) return processed class a (_lowerCAmelCase ): """simple docstring""" def __init__( self : Union[str, Any] , lowerCamelCase : Optional[int] , lowerCamelCase : Optional[int] , lowerCamelCase : Optional[Any] , lowerCamelCase : Dict=None ) -> int: __snake_case : List[Any] = loader __snake_case : Dict = infer __snake_case : Tuple = params if loader_batch_size == 1: # Let's spare some time by deactivating altogether __snake_case : Union[str, Any] = None __snake_case : Optional[Any] = loader_batch_size # Internal bookkeeping __snake_case : int = None __snake_case : Optional[int] = None def __len__( self : Optional[Any] ) -> Tuple: return len(self.loader ) def __iter__( self : str ) -> Tuple: __snake_case : int = iter(self.loader ) return self def __snake_case ( self : int ) -> Any: if isinstance(self._loader_batch_data , torch.Tensor ): # Batch data is simple tensor, just fetch the slice __snake_case : Union[str, Any] = self._loader_batch_data[self._loader_batch_index] else: # Batch data is assumed to be BaseModelOutput (or dict) __snake_case : int = {} for k, element in self._loader_batch_data.items(): if isinstance(lowerCamelCase , lowerCamelCase ): # Convert ModelOutput to tuple first __snake_case : Dict = element.to_tuple() if isinstance(element[0] , torch.Tensor ): __snake_case : Any = tuple(el[self._loader_batch_index].unsqueeze(0 ) for el in element ) elif isinstance(element[0] , np.ndarray ): __snake_case : Optional[Any] = tuple(np.expand_dims(el[self._loader_batch_index] , 0 ) for el in element ) continue if k in {"hidden_states", "past_key_values", "attentions"} and isinstance(lowerCamelCase , lowerCamelCase ): # Those are stored as lists of tensors so need specific unbatching. if isinstance(element[0] , torch.Tensor ): __snake_case : Dict = tuple(el[self._loader_batch_index].unsqueeze(0 ) for el in element ) elif isinstance(element[0] , np.ndarray ): __snake_case : str = tuple(np.expand_dims(el[self._loader_batch_index] , 0 ) for el in element ) continue if element is None: # This can happen for optional data that get passed around __snake_case : Union[str, Any] = None elif isinstance(element[self._loader_batch_index] , torch.Tensor ): # Take correct batch data, but make it looked like batch_size=1 # For compatibility with other methods within transformers __snake_case : List[Any] = element[self._loader_batch_index].unsqueeze(0 ) elif isinstance(element[self._loader_batch_index] , np.ndarray ): # Take correct batch data, but make it looked like batch_size=1 # For compatibility with other methods within transformers __snake_case : Optional[Any] = np.expand_dims(element[self._loader_batch_index] , 0 ) else: # This is typically a list, so no need to `unsqueeze`. 
__snake_case : Tuple = element[self._loader_batch_index] # Recreate the element by reusing the original class to make it look # batch_size=1 __snake_case : str = self._loader_batch_data.__class__(lowerCamelCase ) self._loader_batch_index += 1 return result def __snake_case ( self : Dict ) -> Union[str, Any]: if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size: # We are currently unrolling a batch so we just need to return # the current item within a batch return self.loader_batch_item() # We're out of items within a batch __snake_case : List[str] = next(self.iterator ) __snake_case : int = self.infer(lowerCamelCase , **self.params ) # We now have a batch of "inferred things". if self.loader_batch_size is not None: # Try to infer the size of the batch if isinstance(lowerCamelCase , torch.Tensor ): __snake_case : List[Any] = processed else: __snake_case : Optional[Any] = list(processed.keys() )[0] __snake_case : List[Any] = processed[key] if isinstance(lowerCamelCase , lowerCamelCase ): __snake_case : List[str] = len(lowerCamelCase ) else: __snake_case : Tuple = first_tensor.shape[0] if 0 < observed_batch_size < self.loader_batch_size: # could be last batch so we can't unroll as many # elements. __snake_case : Optional[Any] = observed_batch_size # Setting internal index to unwrap the batch __snake_case : Union[str, Any] = processed __snake_case : str = 0 return self.loader_batch_item() else: # We're not unrolling batches return processed class a (_lowerCAmelCase ): """simple docstring""" def __init__( self : int , lowerCamelCase : Any , lowerCamelCase : Union[str, Any] , lowerCamelCase : List[Any] , lowerCamelCase : Optional[int]=None ) -> Any: super().__init__(lowerCamelCase , lowerCamelCase , lowerCamelCase ) def __iter__( self : Optional[int] ) -> Optional[int]: __snake_case : Union[str, Any] = iter(self.loader ) __snake_case : int = None return self def __snake_case ( self : List[Any] ) -> List[Any]: if self.subiterator is None: __snake_case : Optional[int] = self.infer(next(self.iterator ) , **self.params ) try: # Try to return next item __snake_case : int = next(self.subiterator ) except StopIteration: # When a preprocess iterator ends, we can start lookig at the next item # ChunkIterator will keep feeding until ALL elements of iterator # all have created their subiterator and have been iterating against. # # Another way to look at it, is we're basically flattening lists of lists # into a single list, but with generators __snake_case : Union[str, Any] = self.infer(next(self.iterator ) , **self.params ) __snake_case : int = next(self.subiterator ) return processed class a (_lowerCAmelCase ): """simple docstring""" def __iter__( self : Any ) -> Optional[Any]: __snake_case : str = iter(self.loader ) return self def __snake_case ( self : Tuple ) -> str: # Extremely similar to PipelineIterator in its unpacking mechanism # BUT, we have an extra required item which is the presence of `is_last` # That is because everything is flattened by `PipelineChunkIterator` we # need to keep track of how to regroup here in the original `process` # boundaries so that `process` and `postprocess` see the same data. # This iterator accumulates items (possibly while unbatching) until it # its a `is_last` and then just passes it on to the caller. 
__snake_case : Dict = False __snake_case : Dict = [] if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size: while self._loader_batch_index < self.loader_batch_size: __snake_case : Union[str, Any] = self.loader_batch_item() __snake_case : Any = item.pop("is_last" ) accumulator.append(lowerCamelCase ) if is_last: return accumulator while not is_last: __snake_case : str = self.infer(next(self.iterator ) , **self.params ) if self.loader_batch_size is not None: if isinstance(lowerCamelCase , torch.Tensor ): __snake_case : Optional[int] = processed else: __snake_case : Union[str, Any] = list(processed.keys() )[0] __snake_case : Optional[Any] = processed[key] if isinstance(lowerCamelCase , lowerCamelCase ): __snake_case : int = len(lowerCamelCase ) else: __snake_case : int = first_tensor.shape[0] if 0 < observed_batch_size < self.loader_batch_size: # could be last batch so we can't unroll as many # elements. __snake_case : Dict = observed_batch_size __snake_case : Union[str, Any] = processed __snake_case : List[str] = 0 while self._loader_batch_index < self.loader_batch_size: __snake_case : str = self.loader_batch_item() __snake_case : str = item.pop("is_last" ) accumulator.append(lowerCamelCase ) if is_last: return accumulator else: __snake_case : List[str] = processed __snake_case : Tuple = item.pop("is_last" ) accumulator.append(lowerCamelCase ) return accumulator class a (_lowerCAmelCase ): """simple docstring""" def __init__( self : Union[str, Any] , lowerCamelCase : Dataset , lowerCamelCase : str ) -> Optional[Any]: __snake_case : int = dataset __snake_case : Union[str, Any] = key def __len__( self : Tuple ) -> Union[str, Any]: return len(self.dataset ) def __getitem__( self : Optional[Any] , lowerCamelCase : str ) -> Optional[int]: return self.dataset[i][self.key] class a (_lowerCAmelCase ): """simple docstring""" def __init__( self : List[Any] , lowerCamelCase : Dataset , lowerCamelCase : str , lowerCamelCase : str ) -> List[str]: __snake_case : Any = dataset __snake_case : Any = keya __snake_case : Union[str, Any] = keya def __len__( self : Optional[int] ) -> Tuple: return len(self.dataset ) def __getitem__( self : Tuple , lowerCamelCase : List[str] ) -> Optional[Any]: return {"text": self.dataset[i][self.keya], "text_pair": self.dataset[i][self.keya]}
123
1
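A stripped-down sketch of the batch-unrolling idea in the pipeline iterators above: pull batched outputs and yield them one item at a time, so downstream code always sees batch_size=1. Pure-Python stand-in, no tensors involved.

def unroll_batches(batched_outputs):
    for batch in batched_outputs:            # each element is a list (a batch)
        for item in batch:                   # re-expose items individually
            yield item

batches = [[1, 2, 3], [4, 5], [6]]           # uneven final batch, as with a DataLoader
assert list(unroll_batches(batches)) == [1, 2, 3, 4, 5, 6]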
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available A : List[Any] = {'''configuration_glpn''': ['''GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''GLPNConfig''']} try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A : int = ['''GLPNFeatureExtractor'''] A : Any = ['''GLPNImageProcessor'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A : List[str] = [ '''GLPN_PRETRAINED_MODEL_ARCHIVE_LIST''', '''GLPNForDepthEstimation''', '''GLPNLayer''', '''GLPNModel''', '''GLPNPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_glpn import GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP, GLPNConfig try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_glpn import GLPNFeatureExtractor from .image_processing_glpn import GLPNImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_glpn import ( GLPN_PRETRAINED_MODEL_ARCHIVE_LIST, GLPNForDepthEstimation, GLPNLayer, GLPNModel, GLPNPreTrainedModel, ) else: import sys A : Optional[Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
351
import os import re import shutil import sys import tempfile import unittest import black A : Dict = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__)))) sys.path.append(os.path.join(git_repo_path, '''utils''')) import check_copies # noqa: E402 # This is the reference code that will be used in the tests. # If BertLMPredictionHead is changed in modeling_bert.py, this code needs to be manually updated. A : Optional[int] = ''' def __init__(self, config): super().__init__() self.transform = BertPredictionHeadTransform(config) # The output weights are the same as the input embeddings, but there is # an output-only bias for each token. self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False) self.bias = nn.Parameter(torch.zeros(config.vocab_size)) # Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings` self.decoder.bias = self.bias def forward(self, hidden_states): hidden_states = self.transform(hidden_states) hidden_states = self.decoder(hidden_states) return hidden_states ''' class A (unittest.TestCase ): '''simple docstring''' def a_ ( self : Union[str, Any] ) -> List[str]: """simple docstring""" A__ = tempfile.mkdtemp() os.makedirs(os.path.join(self.transformer_dir , """models/bert/""" ) ) A__ = self.transformer_dir shutil.copy( os.path.join(__lowerCAmelCase , """src/transformers/models/bert/modeling_bert.py""" ) , os.path.join(self.transformer_dir , """models/bert/modeling_bert.py""" ) , ) def a_ ( self : str ) -> Optional[int]: """simple docstring""" A__ = """src/transformers""" shutil.rmtree(self.transformer_dir ) def a_ ( self : List[str] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : int , __lowerCAmelCase : Tuple=None ) -> Dict: """simple docstring""" A__ = comment + f'\nclass {class_name}(nn.Module):\n' + class_code if overwrite_result is not None: A__ = comment + f'\nclass {class_name}(nn.Module):\n' + overwrite_result A__ = black.Mode(target_versions={black.TargetVersion.PYaa} , line_length=1_19 ) A__ = black.format_str(__lowerCAmelCase , mode=__lowerCAmelCase ) A__ = os.path.join(self.transformer_dir , """new_code.py""" ) with open(__lowerCAmelCase , """w""" , newline="""\n""" ) as f: f.write(__lowerCAmelCase ) if overwrite_result is None: self.assertTrue(len(check_copies.is_copy_consistent(__lowerCAmelCase ) ) == 0 ) else: check_copies.is_copy_consistent(f.name , overwrite=__lowerCAmelCase ) with open(__lowerCAmelCase , """r""" ) as f: self.assertTrue(f.read() , __lowerCAmelCase ) def a_ ( self : Tuple ) -> List[Any]: """simple docstring""" A__ = check_copies.find_code_in_transformers("""models.bert.modeling_bert.BertLMPredictionHead""" ) self.assertEqual(__lowerCAmelCase , __lowerCAmelCase ) def a_ ( self : Tuple ) -> Any: """simple docstring""" self.check_copy_consistency( """# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead""" , """BertLMPredictionHead""" , REFERENCE_CODE + """\n""" , ) # With no empty line at the end self.check_copy_consistency( """# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead""" , """BertLMPredictionHead""" , __lowerCAmelCase , ) # Copy consistency with rename self.check_copy_consistency( """# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel""" , """TestModelLMPredictionHead""" , re.sub("""Bert""" , """TestModel""" , __lowerCAmelCase ) , ) # Copy consistency with a really long name A__ = 
"""TestModelWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason""" self.check_copy_consistency( f'# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->{long_class_name}' , f'{long_class_name}LMPredictionHead' , re.sub("""Bert""" , __lowerCAmelCase , __lowerCAmelCase ) , ) # Copy consistency with overwrite self.check_copy_consistency( """# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel""" , """TestModelLMPredictionHead""" , __lowerCAmelCase , overwrite_result=re.sub("""Bert""" , """TestModel""" , __lowerCAmelCase ) , ) def a_ ( self : Optional[int] ) -> Optional[int]: """simple docstring""" A__ = check_copies.LOCALIZED_READMES["""README_zh-hans.md"""] A__ = ( """1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the""" """ Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for""" """ Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong""" """ Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut.\n1.""" """ **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (from HuggingFace),""" """ released together with the paper [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and""" """ lighter](https://arxiv.org/abs/1910.01108) by Victor Sanh, Lysandre Debut and Thomas Wolf. The same""" """ method has been applied to compress GPT2 into""" """ [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into""" """ [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),""" """ Multilingual BERT into""" """ [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German""" """ version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)**""" """ (from Google Research/Stanford University) released with the paper [ELECTRA: Pre-training text encoders""" """ as discriminators rather than generators](https://arxiv.org/abs/2003.10555) by Kevin Clark, Minh-Thang""" """ Luong, Quoc V. Le, Christopher D. Manning.""" ) A__ = ( """1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the""" """ Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of""" """ Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian""" """ Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n""" ) A__ = ( """1. 
**[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the""" """ Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of""" """ Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian""" """ Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n1.""" """ **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (来自 HuggingFace) 伴随论文""" """ [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and""" """ lighter](https://arxiv.org/abs/1910.01108) 由 Victor Sanh, Lysandre Debut and Thomas Wolf 发布。 The same""" """ method has been applied to compress GPT2 into""" """ [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into""" """ [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),""" """ Multilingual BERT into""" """ [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German""" """ version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)** (来自""" """ Google Research/Stanford University) 伴随论文 [ELECTRA: Pre-training text encoders as discriminators rather""" """ than generators](https://arxiv.org/abs/2003.10555) 由 Kevin Clark, Minh-Thang Luong, Quoc V. Le,""" """ Christopher D. Manning 发布。\n""" ) A__ , A__ = check_copies.convert_to_localized_md( __lowerCAmelCase , __lowerCAmelCase , localized_readme["""format_model_list"""] ) self.assertFalse(__lowerCAmelCase ) self.assertEqual(__lowerCAmelCase , __lowerCAmelCase ) A__ , A__ = check_copies.convert_to_localized_md( __lowerCAmelCase , __lowerCAmelCase , localized_readme["""format_model_list"""] ) # Check whether the number of models is equal to README.md after conversion. self.assertTrue(__lowerCAmelCase ) A__ = ( """1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the""" """ Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for""" """ Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong""" """ Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut.""" ) A__ = ( """1. **[ALBERT](https://huggingface.co/transformers/main/model_doc/albert.html)** (来自 Google Research and""" """ the Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of""" """ Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian""" """ Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n""" ) A__ = ( """1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the""" """ Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of""" """ Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian""" """ Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n""" ) A__ , A__ = check_copies.convert_to_localized_md( __lowerCAmelCase , __lowerCAmelCase , localized_readme["""format_model_list"""] ) # Check if the model link is synchronized. self.assertEqual(__lowerCAmelCase , __lowerCAmelCase )
276
0
"""simple docstring""" from __future__ import annotations def __A ( a_ :list , a_ :int , a_ :int , a_ :int) -> list: __a : Optional[int] = [] __a , __a : Union[str, Any] = input_list[low:mid], input_list[mid : high + 1] while left and right: result.append((left if left[0] <= right[0] else right).pop(0)) __a : str = result + left + right return input_list def __A ( a_ :list) -> list: if len(a_) <= 1: return input_list __a : List[Any] = list(a_) # iteration for two-way merging __a : Any = 2 while p <= len(a_): # getting low, high and middle value for merge-sort of single list for i in range(0 , len(a_) , a_): __a : Optional[int] = i __a : List[str] = i + p - 1 __a : int = (low + high + 1) // 2 __a : Any = merge(a_ , a_ , a_ , a_) # final merge of last two parts if p * 2 >= len(a_): __a : List[str] = i __a : Optional[Any] = merge(a_ , 0 , a_ , len(a_) - 1) break p *= 2 return input_list if __name__ == "__main__": A = input('''Enter numbers separated by a comma:\n''').strip() if user_input == "": A = [] else: A = [int(item.strip()) for item in user_input.split(''',''')] print(iter_merge_sort(unsorted))
160
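A short trace of the bottom-up strategy in `iter_merge_sort` above: runs of width p are merged pairwise, and p doubles each pass until a final merge joins the last two parts. The assert assumes `iter_merge_sort` from the snippet above is in scope.

data = [5, 1, 4, 2, 3]
# p = 2: merge [5,1] and [4,2]  -> [1, 5, 2, 4, 3]
# p = 4: merge [1,5,2,4]        -> [1, 2, 4, 5, 3]
# final merge of the two parts  -> [1, 2, 3, 4, 5]
assert iter_merge_sort(data) == [1, 2, 3, 4, 5]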
"""simple docstring""" import uuid from typing import Any, Dict, List, Optional, Union from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging from .base import PIPELINE_INIT_ARGS, Pipeline if is_tf_available(): import tensorflow as tf if is_torch_available(): import torch A = logging.get_logger(__name__) class __lowercase : '''simple docstring''' def __init__( self , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase=None , _UpperCAmelCase=None ): if not conversation_id: __a : List[Any] = uuid.uuida() if past_user_inputs is None: __a : Tuple = [] if generated_responses is None: __a : Dict = [] __a : uuid.UUID = conversation_id __a : List[str] = past_user_inputs __a : List[str] = generated_responses __a : Optional[str] = text def __eq__( self , _UpperCAmelCase ): if not isinstance(_UpperCAmelCase , _UpperCAmelCase ): return False if self.uuid == other.uuid: return True return ( self.new_user_input == other.new_user_input and self.past_user_inputs == other.past_user_inputs and self.generated_responses == other.generated_responses ) def _lowerCamelCase ( self , _UpperCAmelCase , _UpperCAmelCase = False ): if self.new_user_input: if overwrite: logger.warning( f"""User input added while unprocessed input was existing: \"{self.new_user_input}\" was overwritten """ f"""with: \"{text}\".""" ) __a : Any = text else: logger.warning( f"""User input added while unprocessed input was existing: \"{self.new_user_input}\" new input """ f"""ignored: \"{text}\". Set `overwrite` to True to overwrite unprocessed user input""" ) else: __a : List[str] = text def _lowerCamelCase ( self ): if self.new_user_input: self.past_user_inputs.append(self.new_user_input ) __a : Any = None def _lowerCamelCase ( self , _UpperCAmelCase ): self.generated_responses.append(_UpperCAmelCase ) def _lowerCamelCase ( self ): for user_input, generated_response in zip(self.past_user_inputs , self.generated_responses ): yield True, user_input yield False, generated_response if self.new_user_input: yield True, self.new_user_input def __repr__( self ): __a : Any = f"""Conversation id: {self.uuid} \n""" for is_user, text in self.iter_texts(): __a : str = '''user''' if is_user else '''bot''' output += f"""{name} >> {text} \n""" return output @add_end_docstrings( _UpperCamelCase , R''' min_length_for_response (`int`, *optional*, defaults to 32): The minimum length (in number of tokens) for a response. minimum_tokens (`int`, *optional*, defaults to 10): The minimum length of tokens to leave for a response. 
''' , ) class __lowercase ( _UpperCamelCase ): '''simple docstring''' def __init__( self , *_UpperCAmelCase , **_UpperCAmelCase ): super().__init__(*_UpperCAmelCase , **_UpperCAmelCase ) if self.tokenizer.pad_token_id is None: __a : List[Any] = self.tokenizer.eos_token def _lowerCamelCase ( self , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , **_UpperCAmelCase ): __a : str = {} __a : List[Any] = {} __a : int = {} if min_length_for_response is not None: __a : Dict = min_length_for_response if minimum_tokens is not None: __a : Union[str, Any] = minimum_tokens if "max_length" in generate_kwargs: __a : Tuple = generate_kwargs['''max_length'''] # self.max_length = generate_kwargs.get("max_length", self.model.config.max_length) if clean_up_tokenization_spaces is not None: __a : Tuple = clean_up_tokenization_spaces if generate_kwargs: forward_params.update(_UpperCAmelCase ) return preprocess_params, forward_params, postprocess_params def __call__( self , _UpperCAmelCase , _UpperCAmelCase=0 , **_UpperCAmelCase ): __a : Optional[Any] = super().__call__(_UpperCAmelCase , num_workers=_UpperCAmelCase , **_UpperCAmelCase ) if isinstance(_UpperCAmelCase , _UpperCAmelCase ) and len(_UpperCAmelCase ) == 1: return outputs[0] return outputs def _lowerCamelCase ( self , _UpperCAmelCase , _UpperCAmelCase=32 ): if not isinstance(_UpperCAmelCase , _UpperCAmelCase ): raise ValueError('''ConversationalPipeline, expects Conversation as inputs''' ) if conversation.new_user_input is None: raise ValueError( f"""Conversation with UUID {type(conversation.uuid )} does not contain new user input to process. """ '''Add user inputs with the conversation\'s `add_user_input` method''' ) if hasattr(self.tokenizer , '''_build_conversation_input_ids''' ): __a : Tuple = self.tokenizer._build_conversation_input_ids(_UpperCAmelCase ) else: # If the tokenizer cannot handle conversations, we default to only the old version __a : List[str] = self._legacy_parse_and_tokenize(_UpperCAmelCase ) if self.framework == "pt": __a : List[Any] = torch.LongTensor([input_ids] ) elif self.framework == "tf": __a : List[Any] = tf.constant([input_ids] ) return {"input_ids": input_ids, "conversation": conversation} def _lowerCamelCase ( self , _UpperCAmelCase , _UpperCAmelCase=10 , **_UpperCAmelCase ): __a : List[Any] = generate_kwargs.get('''max_length''' , self.model.config.max_length ) __a : Tuple = model_inputs['''input_ids'''].shape[1] if max_length - minimum_tokens < n: logger.warning(f"""Conversation input is to long ({n}), trimming it to ({max_length} - {minimum_tokens})""" ) __a : str = max_length - minimum_tokens __a : str = model_inputs['''input_ids'''][:, -trim:] if "attention_mask" in model_inputs: __a : Any = model_inputs['''attention_mask'''][:, -trim:] __a : Optional[Any] = model_inputs.pop('''conversation''' ) __a : Union[str, Any] = max_length __a : Dict = self.model.generate(**_UpperCAmelCase , **_UpperCAmelCase ) if self.model.config.is_encoder_decoder: __a : Optional[int] = 1 else: __a : Tuple = n return {"output_ids": output_ids[:, start_position:], "conversation": conversation} def _lowerCamelCase ( self , _UpperCAmelCase , _UpperCAmelCase=True ): __a : Dict = model_outputs['''output_ids'''] __a : Dict = self.tokenizer.decode( output_ids[0] , skip_special_tokens=_UpperCAmelCase , clean_up_tokenization_spaces=_UpperCAmelCase , ) __a : Optional[int] = model_outputs['''conversation'''] conversation.mark_processed() conversation.append_response(_UpperCAmelCase ) return conversation def _lowerCamelCase ( self , 
conversation ):
        eos_token_id = self.tokenizer.eos_token_id
        input_ids = []
        for is_user, text in conversation.iter_texts():
            # legacy fallback: concatenate every turn, separating turns with EOS when one is defined
            if eos_token_id is not None:
                input_ids.extend(self.tokenizer.encode(text, add_special_tokens=False) + [eos_token_id])
            else:
                input_ids.extend(self.tokenizer.encode(text, add_special_tokens=False))
        # keep only the most recent tokens that still fit the model's context window
        if len(input_ids) > self.tokenizer.model_max_length:
            input_ids = input_ids[-self.tokenizer.model_max_length :]
        return input_ids
160
1
import inspect import os import unittest import torch import accelerate from accelerate import Accelerator from accelerate.test_utils import execute_subprocess_async, require_multi_gpu from accelerate.utils import patch_environment class lowercase ( unittest.TestCase ): def a__ ( self ) -> Union[str, Any]: _A : List[Any] = inspect.getfile(accelerate.test_utils ) _A : int = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ["""scripts""", """test_script.py"""] ) _A : Any = os.path.sep.join( mod_file.split(os.path.sep )[:-1] + ["""scripts""", """test_distributed_data_loop.py"""] ) _A : Union[str, Any] = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ["""scripts""", """test_ops.py"""] ) @require_multi_gpu def a__ ( self ) -> Dict: print(F'''Found {torch.cuda.device_count()} devices.''' ) _A : List[Any] = ["""torchrun""", F'''--nproc_per_node={torch.cuda.device_count()}''', self.test_file_path] with patch_environment(omp_num_threads=1 ): execute_subprocess_async(_a , env=os.environ.copy() ) @require_multi_gpu def a__ ( self ) -> str: print(F'''Found {torch.cuda.device_count()} devices.''' ) _A : str = ["""torchrun""", F'''--nproc_per_node={torch.cuda.device_count()}''', self.operation_file_path] print(F'''Command: {cmd}''' ) with patch_environment(omp_num_threads=1 ): execute_subprocess_async(_a , env=os.environ.copy() ) @require_multi_gpu def a__ ( self ) -> Dict: _A : int = ["""torchrun""", F'''--nproc_per_node={torch.cuda.device_count()}''', inspect.getfile(self.__class__ )] with patch_environment(omp_num_threads=1 ): execute_subprocess_async(_a , env=os.environ.copy() ) @require_multi_gpu def a__ ( self ) -> Any: print(F'''Found {torch.cuda.device_count()} devices, using 2 devices only''' ) _A : Dict = ["""torchrun""", F'''--nproc_per_node={torch.cuda.device_count()}''', self.data_loop_file_path] with patch_environment(omp_num_threads=1 , cuda_visible_devices="""0,1""" ): execute_subprocess_async(_a , env=os.environ.copy() ) if __name__ == "__main__": _snake_case = Accelerator() _snake_case = (accelerator.state.process_index + 2, 10) _snake_case = torch.randint(0, 10, shape).to(accelerator.device) _snake_case = "" _snake_case = accelerator.pad_across_processes(tensor) if tensora.shape[0] != accelerator.state.num_processes + 1: error_msg += f"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0." if not torch.equal(tensora[: accelerator.state.process_index + 2], tensor): error_msg += "Tensors have different values." if not torch.all(tensora[accelerator.state.process_index + 2 :] == 0): error_msg += "Padding was not done with the right value (0)." _snake_case = accelerator.pad_across_processes(tensor, pad_first=True) if tensora.shape[0] != accelerator.state.num_processes + 1: error_msg += f"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0." _snake_case = accelerator.state.num_processes - accelerator.state.process_index - 1 if not torch.equal(tensora[index:], tensor): error_msg += "Tensors have different values." if not torch.all(tensora[:index] == 0): error_msg += "Padding was not done with the right value (0)." # Raise error at the end to make sure we don't stop at the first failure. if len(error_msg) > 0: raise ValueError(error_msg)
343
import argparse import logging import sys from unittest.mock import patch import run_glue_deebert from transformers.testing_utils import TestCasePlus, get_gpu_count, require_torch_non_multi_gpu, slow logging.basicConfig(level=logging.DEBUG) _snake_case = logging.getLogger() def lowerCAmelCase_ ( ): _A : Optional[Any] = argparse.ArgumentParser() parser.add_argument("""-f""" ) _A : Optional[Any] = parser.parse_args() return args.f class lowercase ( UpperCamelCase__ ): def a__ ( self ) -> None: _A : List[Any] = logging.StreamHandler(sys.stdout ) logger.addHandler(_a ) def a__ ( self , _a ) -> Dict: _A : Tuple = get_gpu_count() if n_gpu > 1: pass # XXX: doesn't quite work with n_gpu > 1 https://github.com/huggingface/transformers/issues/10560 # script = f"{self.examples_dir_str}/research_projects/deebert/run_glue_deebert.py" # distributed_args = f"-m torch.distributed.launch --nproc_per_node={n_gpu} {script}".split() # cmd = [sys.executable] + distributed_args + args # execute_subprocess_async(cmd, env=self.get_env()) # XXX: test the results - need to save them first into .json file else: args.insert(0 , """run_glue_deebert.py""" ) with patch.object(_a , """argv""" , _a ): _A : Optional[Any] = run_glue_deebert.main() for value in result.values(): self.assertGreaterEqual(_a , 0.666 ) @slow @require_torch_non_multi_gpu def a__ ( self ) -> Optional[int]: _A : Tuple = """ --model_type roberta --model_name_or_path roberta-base --task_name MRPC --do_train --do_eval --do_lower_case --data_dir ./tests/fixtures/tests_samples/MRPC/ --max_seq_length 128 --per_gpu_eval_batch_size=1 --per_gpu_train_batch_size=8 --learning_rate 2e-4 --num_train_epochs 3 --overwrite_output_dir --seed 42 --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage --plot_data_dir ./examples/deebert/results/ --save_steps 0 --overwrite_cache --eval_after_first_stage """.split() self.run_and_check(_a ) _A : Optional[Any] = """ --model_type roberta --model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage --task_name MRPC --do_eval --do_lower_case --data_dir ./tests/fixtures/tests_samples/MRPC/ --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage --plot_data_dir ./examples/deebert/results/ --max_seq_length 128 --eval_each_highway --eval_highway --overwrite_cache --per_gpu_eval_batch_size=1 """.split() self.run_and_check(_a ) _A : List[str] = """ --model_type roberta --model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage --task_name MRPC --do_eval --do_lower_case --data_dir ./tests/fixtures/tests_samples/MRPC/ --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage --plot_data_dir ./examples/deebert/results/ --max_seq_length 128 --early_exit_entropy 0.1 --eval_highway --overwrite_cache --per_gpu_eval_batch_size=1 """.split() self.run_and_check(_a )
343
1
import numpy as np import torch import tqdm from ...models.unet_ad import UNetaDModel from ...pipelines import DiffusionPipeline from ...utils import randn_tensor from ...utils.dummy_pt_objects import DDPMScheduler class SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE_ ): """simple docstring""" def __init__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , )-> Optional[Any]: '''simple docstring''' super().__init__() __UpperCamelCase = value_function __UpperCamelCase = unet __UpperCamelCase = scheduler __UpperCamelCase = env __UpperCamelCase = env.get_dataset() __UpperCamelCase = {} for key in self.data.keys(): try: __UpperCamelCase = self.data[key].mean() except: # noqa: E722 pass __UpperCamelCase = {} for key in self.data.keys(): try: __UpperCamelCase = self.data[key].std() except: # noqa: E722 pass __UpperCamelCase = env.observation_space.shape[0] __UpperCamelCase = env.action_space.shape[0] def A__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )-> Union[str, Any]: '''simple docstring''' return (x_in - self.means[key]) / self.stds[key] def A__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )-> Any: '''simple docstring''' return x_in * self.stds[key] + self.means[key] def A__ ( self , SCREAMING_SNAKE_CASE_ )-> Union[str, Any]: '''simple docstring''' if type(SCREAMING_SNAKE_CASE_ ) is dict: return {k: self.to_torch(SCREAMING_SNAKE_CASE_ ) for k, v in x_in.items()} elif torch.is_tensor(SCREAMING_SNAKE_CASE_ ): return x_in.to(self.unet.device ) return torch.tensor(SCREAMING_SNAKE_CASE_ , device=self.unet.device ) def A__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )-> List[str]: '''simple docstring''' for key, val in cond.items(): __UpperCamelCase = val.clone() return x_in def A__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )-> Any: '''simple docstring''' __UpperCamelCase = x.shape[0] __UpperCamelCase = None for i in tqdm.tqdm(self.scheduler.timesteps ): # create batch of timesteps to pass into model __UpperCamelCase = torch.full((batch_size,) , SCREAMING_SNAKE_CASE_ , device=self.unet.device , dtype=torch.long ) for _ in range(SCREAMING_SNAKE_CASE_ ): with torch.enable_grad(): x.requires_grad_() # permute to match dimension for pre-trained models __UpperCamelCase = self.value_function(x.permute(0 , 2 , 1 ) , SCREAMING_SNAKE_CASE_ ).sample __UpperCamelCase = torch.autograd.grad([y.sum()] , [x] )[0] __UpperCamelCase = self.scheduler._get_variance(SCREAMING_SNAKE_CASE_ ) __UpperCamelCase = torch.exp(0.5 * posterior_variance ) __UpperCamelCase = model_std * grad __UpperCamelCase = 0 __UpperCamelCase = x.detach() __UpperCamelCase = x + scale * grad __UpperCamelCase = self.reset_xa(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , self.action_dim ) __UpperCamelCase = self.unet(x.permute(0 , 2 , 1 ) , SCREAMING_SNAKE_CASE_ ).sample.permute(0 , 2 , 1 ) # TODO: verify deprecation of this kwarg __UpperCamelCase = self.scheduler.step(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , predict_epsilon=SCREAMING_SNAKE_CASE_ )['''prev_sample'''] # apply conditions to the trajectory (set the initial state) __UpperCamelCase = self.reset_xa(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , self.action_dim ) __UpperCamelCase = self.to_torch(SCREAMING_SNAKE_CASE_ ) return x, y def __call__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=64 , SCREAMING_SNAKE_CASE_=32 , SCREAMING_SNAKE_CASE_=2 , SCREAMING_SNAKE_CASE_=0.1 )-> 
Dict: '''simple docstring''' __UpperCamelCase = self.normalize(SCREAMING_SNAKE_CASE_ , '''observations''' ) __UpperCamelCase = obs[None].repeat(SCREAMING_SNAKE_CASE_ , axis=0 ) __UpperCamelCase = {0: self.to_torch(SCREAMING_SNAKE_CASE_ )} __UpperCamelCase = (batch_size, planning_horizon, self.state_dim + self.action_dim) # generate initial noise and apply our conditions (to make the trajectories start at current state) __UpperCamelCase = randn_tensor(SCREAMING_SNAKE_CASE_ , device=self.unet.device ) __UpperCamelCase = self.reset_xa(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , self.action_dim ) __UpperCamelCase = self.to_torch(SCREAMING_SNAKE_CASE_ ) # run the diffusion process __UpperCamelCase , __UpperCamelCase = self.run_diffusion(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) # sort output trajectories by value __UpperCamelCase = y.argsort(0 , descending=SCREAMING_SNAKE_CASE_ ).squeeze() __UpperCamelCase = x[sorted_idx] __UpperCamelCase = sorted_values[:, :, : self.action_dim] __UpperCamelCase = actions.detach().cpu().numpy() __UpperCamelCase = self.de_normalize(SCREAMING_SNAKE_CASE_ , key='''actions''' ) # select the action with the highest value if y is not None: __UpperCamelCase = 0 else: # if we didn't run value guiding, select a random action __UpperCamelCase = np.random.randint(0 , SCREAMING_SNAKE_CASE_ ) __UpperCamelCase = denorm_actions[selected_index, 0] return denorm_actions
328
from __future__ import annotations

from collections.abc import Callable


def trapezoidal_area(
    fnc: Callable[[int | float], int | float],
    x_start: int | float,
    x_end: int | float,
    steps: int = 100,
) -> float:
    """Approximate the area under fnc between x_start and x_end with the trapezoidal rule."""
    xa = x_start
    fxa = fnc(x_start)
    area = 0.0
    for _ in range(steps):
        # Approximates small segments of curve as linear and solve
        # for trapezoidal area
        xb = (x_end - x_start) / steps + xa
        fxb = fnc(xb)
        area += abs(fxb + fxa) * (xb - xa) / 2
        # Increment step
        xa = xb
        fxa = fxb
    return area


if __name__ == "__main__":

    def f(x: int | float) -> int | float:
        return x**3 + x**2

    print("f(x) = x^3 + x^2")
    print("The area between the curve, x = -5, x = 5 and the x axis is:")
    i = 10
    while i <= 100_000:
        print(f"with {i} steps: {trapezoidal_area(f, -5, 5, i)}")
        i *= 10
328
1
'''simple docstring''' from dataclasses import dataclass, field from typing import Optional from transformers import AutoConfig, AutoImageProcessor, AutoTokenizer, FlaxVisionEncoderDecoderModel, HfArgumentParser @dataclass class A : __magic_name__ = field( metadata={'''help''': '''The output directory where the model will be written.'''} , ) __magic_name__ = field( metadata={ '''help''': ( '''The encoder model checkpoint for weights initialization.''' '''Don\'t set if you want to train an encoder model from scratch.''' ) } , ) __magic_name__ = field( metadata={ '''help''': ( '''The decoder model checkpoint for weights initialization.''' '''Don\'t set if you want to train a decoder model from scratch.''' ) } , ) __magic_name__ = field( default=__snake_case , metadata={'''help''': '''Pretrained encoder config name or path if not the same as encoder_model_name'''} ) __magic_name__ = field( default=__snake_case , metadata={'''help''': '''Pretrained decoder config name or path if not the same as decoder_model_name'''} ) def lowerCAmelCase_ ( ): '''simple docstring''' A : Dict = HfArgumentParser((ModelArguments,) ) (A ) : str = parser.parse_args_into_dataclasses() # Load pretrained model and tokenizer # Use explicit specified encoder config if model_args.encoder_config_name: A : str = AutoConfig.from_pretrained(model_args.encoder_config_name ) # Use pretrained encoder model's config else: A : Tuple = AutoConfig.from_pretrained(model_args.encoder_model_name_or_path ) # Use explicit specified decoder config if model_args.decoder_config_name: A : Any = AutoConfig.from_pretrained(model_args.decoder_config_name ) # Use pretrained decoder model's config else: A : int = AutoConfig.from_pretrained(model_args.decoder_model_name_or_path ) # necessary for `from_encoder_decoder_pretrained` when `decoder_config` is passed A : Any = True A : Dict = True A : List[str] = FlaxVisionEncoderDecoderModel.from_encoder_decoder_pretrained( encoder_pretrained_model_name_or_path=model_args.encoder_model_name_or_path , decoder_pretrained_model_name_or_path=model_args.decoder_model_name_or_path , encoder_config=snake_case__ , decoder_config=snake_case__ , ) # GPT2 only has bos/eos tokens but not decoder_start/pad tokens A : int = decoder_config.decoder_start_token_id A : int = decoder_config.pad_token_id if decoder_start_token_id is None: A : Tuple = decoder_config.bos_token_id if pad_token_id is None: A : Union[str, Any] = decoder_config.eos_token_id # This is necessary to make Flax's generate() work A : List[str] = decoder_config.eos_token_id A : List[Any] = decoder_start_token_id A : Any = pad_token_id A : List[str] = AutoImageProcessor.from_pretrained(model_args.encoder_model_name_or_path ) A : Dict = AutoTokenizer.from_pretrained(model_args.decoder_model_name_or_path ) A : List[Any] = tokenizer.convert_ids_to_tokens(model.config.pad_token_id ) model.save_pretrained(model_args.output_dir ) image_processor.save_pretrained(model_args.output_dir ) tokenizer.save_pretrained(model_args.output_dir ) if __name__ == "__main__": main()
367
'''simple docstring''' import tempfile import torch from diffusers import ( DEISMultistepScheduler, DPMSolverMultistepScheduler, DPMSolverSinglestepScheduler, UniPCMultistepScheduler, ) from .test_schedulers import SchedulerCommonTest class A ( __snake_case ): __magic_name__ = (UniPCMultistepScheduler,) __magic_name__ = (('''num_inference_steps''', 25),) def __lowerCAmelCase ( self , **SCREAMING_SNAKE_CASE ) -> List[str]: """simple docstring""" A : str = { '''num_train_timesteps''': 1000, '''beta_start''': 0.0_001, '''beta_end''': 0.02, '''beta_schedule''': '''linear''', '''solver_order''': 2, '''solver_type''': '''bh2''', } config.update(**SCREAMING_SNAKE_CASE ) return config def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE=0 , **SCREAMING_SNAKE_CASE ) -> Any: """simple docstring""" A : List[Any] = dict(self.forward_default_kwargs ) A : Union[str, Any] = kwargs.pop('''num_inference_steps''' , SCREAMING_SNAKE_CASE ) A : Optional[Any] = self.dummy_sample A : int = 0.1 * sample A : Union[str, Any] = [residual + 0.2, residual + 0.15, residual + 0.10] for scheduler_class in self.scheduler_classes: A : Optional[Any] = self.get_scheduler_config(**SCREAMING_SNAKE_CASE ) A : Optional[int] = scheduler_class(**SCREAMING_SNAKE_CASE ) scheduler.set_timesteps(SCREAMING_SNAKE_CASE ) # copy over dummy past residuals A : List[Any] = dummy_past_residuals[: scheduler.config.solver_order] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(SCREAMING_SNAKE_CASE ) A : List[Any] = scheduler_class.from_pretrained(SCREAMING_SNAKE_CASE ) new_scheduler.set_timesteps(SCREAMING_SNAKE_CASE ) # copy over dummy past residuals A : Dict = dummy_past_residuals[: new_scheduler.config.solver_order] A, A : Tuple = sample, sample for t in range(SCREAMING_SNAKE_CASE , time_step + scheduler.config.solver_order + 1 ): A : Any = scheduler.step(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ).prev_sample A : Optional[Any] = new_scheduler.step(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical" def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE=0 , **SCREAMING_SNAKE_CASE ) -> Tuple: """simple docstring""" A : Optional[Any] = dict(self.forward_default_kwargs ) A : Tuple = kwargs.pop('''num_inference_steps''' , SCREAMING_SNAKE_CASE ) A : List[Any] = self.dummy_sample A : int = 0.1 * sample A : Optional[Any] = [residual + 0.2, residual + 0.15, residual + 0.10] for scheduler_class in self.scheduler_classes: A : Optional[int] = self.get_scheduler_config() A : Any = scheduler_class(**SCREAMING_SNAKE_CASE ) scheduler.set_timesteps(SCREAMING_SNAKE_CASE ) # copy over dummy past residuals (must be after setting timesteps) A : int = dummy_past_residuals[: scheduler.config.solver_order] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(SCREAMING_SNAKE_CASE ) A : int = scheduler_class.from_pretrained(SCREAMING_SNAKE_CASE ) # copy over dummy past residuals new_scheduler.set_timesteps(SCREAMING_SNAKE_CASE ) # copy over dummy past residual (must be after setting timesteps) A : Optional[Any] = dummy_past_residuals[: new_scheduler.config.solver_order] A : List[Any] = scheduler.step(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ).prev_sample A : List[Any] = new_scheduler.step(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE 
).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical" def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE=None , **SCREAMING_SNAKE_CASE ) -> Optional[int]: """simple docstring""" if scheduler is None: A : Dict = self.scheduler_classes[0] A : Union[str, Any] = self.get_scheduler_config(**SCREAMING_SNAKE_CASE ) A : List[Any] = scheduler_class(**SCREAMING_SNAKE_CASE ) A : Tuple = self.scheduler_classes[0] A : Union[str, Any] = self.get_scheduler_config(**SCREAMING_SNAKE_CASE ) A : List[str] = scheduler_class(**SCREAMING_SNAKE_CASE ) A : int = 10 A : Tuple = self.dummy_model() A : Any = self.dummy_sample_deter scheduler.set_timesteps(SCREAMING_SNAKE_CASE ) for i, t in enumerate(scheduler.timesteps ): A : int = model(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) A : Optional[Any] = scheduler.step(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ).prev_sample return sample def __lowerCAmelCase ( self ) -> Optional[int]: """simple docstring""" A : Tuple = dict(self.forward_default_kwargs ) A : List[Any] = kwargs.pop('''num_inference_steps''' , SCREAMING_SNAKE_CASE ) for scheduler_class in self.scheduler_classes: A : Dict = self.get_scheduler_config() A : Dict = scheduler_class(**SCREAMING_SNAKE_CASE ) A : Optional[Any] = self.dummy_sample A : Optional[int] = 0.1 * sample if num_inference_steps is not None and hasattr(SCREAMING_SNAKE_CASE , '''set_timesteps''' ): scheduler.set_timesteps(SCREAMING_SNAKE_CASE ) elif num_inference_steps is not None and not hasattr(SCREAMING_SNAKE_CASE , '''set_timesteps''' ): A : Tuple = num_inference_steps # copy over dummy past residuals (must be done after set_timesteps) A : Dict = [residual + 0.2, residual + 0.15, residual + 0.10] A : List[str] = dummy_past_residuals[: scheduler.config.solver_order] A : List[Any] = scheduler.timesteps[5] A : Dict = scheduler.timesteps[6] A : List[Any] = scheduler.step(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ).prev_sample A : List[Any] = scheduler.step(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ).prev_sample self.assertEqual(output_a.shape , sample.shape ) self.assertEqual(output_a.shape , output_a.shape ) def __lowerCAmelCase ( self ) -> Tuple: """simple docstring""" A : Union[str, Any] = UniPCMultistepScheduler(**self.get_scheduler_config() ) A : List[Any] = self.full_loop(scheduler=SCREAMING_SNAKE_CASE ) A : List[str] = torch.mean(torch.abs(SCREAMING_SNAKE_CASE ) ) assert abs(result_mean.item() - 0.2_464 ) < 1e-3 A : Dict = DPMSolverSinglestepScheduler.from_config(scheduler.config ) A : Optional[int] = DEISMultistepScheduler.from_config(scheduler.config ) A : List[Any] = DPMSolverMultistepScheduler.from_config(scheduler.config ) A : List[Any] = UniPCMultistepScheduler.from_config(scheduler.config ) A : Optional[Any] = self.full_loop(scheduler=SCREAMING_SNAKE_CASE ) A : Optional[Any] = torch.mean(torch.abs(SCREAMING_SNAKE_CASE ) ) assert abs(result_mean.item() - 0.2_464 ) < 1e-3 def __lowerCAmelCase ( self ) -> Union[str, Any]: """simple docstring""" for timesteps in [25, 50, 100, 999, 1000]: self.check_over_configs(num_train_timesteps=SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self ) -> List[str]: """simple docstring""" self.check_over_configs(thresholding=SCREAMING_SNAKE_CASE ) for order in [1, 2, 3]: for solver_type in ["bh1", "bh2"]: for threshold in [0.5, 1.0, 2.0]: for prediction_type in ["epsilon", "sample"]: self.check_over_configs( 
thresholding=SCREAMING_SNAKE_CASE , prediction_type=SCREAMING_SNAKE_CASE , sample_max_value=SCREAMING_SNAKE_CASE , solver_order=SCREAMING_SNAKE_CASE , solver_type=SCREAMING_SNAKE_CASE , ) def __lowerCAmelCase ( self ) -> Union[str, Any]: """simple docstring""" for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs(prediction_type=SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self ) -> List[Any]: """simple docstring""" for solver_type in ["bh1", "bh2"]: for order in [1, 2, 3]: for prediction_type in ["epsilon", "sample"]: self.check_over_configs( solver_order=SCREAMING_SNAKE_CASE , solver_type=SCREAMING_SNAKE_CASE , prediction_type=SCREAMING_SNAKE_CASE , ) A : Dict = self.full_loop( solver_order=SCREAMING_SNAKE_CASE , solver_type=SCREAMING_SNAKE_CASE , prediction_type=SCREAMING_SNAKE_CASE , ) assert not torch.isnan(SCREAMING_SNAKE_CASE ).any(), "Samples have nan numbers" def __lowerCAmelCase ( self ) -> Tuple: """simple docstring""" self.check_over_configs(lower_order_final=SCREAMING_SNAKE_CASE ) self.check_over_configs(lower_order_final=SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self ) -> List[str]: """simple docstring""" for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1000]: self.check_over_forward(num_inference_steps=SCREAMING_SNAKE_CASE , time_step=0 ) def __lowerCAmelCase ( self ) -> List[str]: """simple docstring""" A : int = self.full_loop() A : int = torch.mean(torch.abs(SCREAMING_SNAKE_CASE ) ) assert abs(result_mean.item() - 0.2_464 ) < 1e-3 def __lowerCAmelCase ( self ) -> List[str]: """simple docstring""" A : List[Any] = self.full_loop(prediction_type='''v_prediction''' ) A : Any = torch.mean(torch.abs(SCREAMING_SNAKE_CASE ) ) assert abs(result_mean.item() - 0.1_014 ) < 1e-3 def __lowerCAmelCase ( self ) -> Union[str, Any]: """simple docstring""" A : Dict = self.scheduler_classes[0] A : List[Any] = self.get_scheduler_config(thresholding=SCREAMING_SNAKE_CASE , dynamic_thresholding_ratio=0 ) A : List[str] = scheduler_class(**SCREAMING_SNAKE_CASE ) A : Tuple = 10 A : Union[str, Any] = self.dummy_model() A : Dict = self.dummy_sample_deter.half() scheduler.set_timesteps(SCREAMING_SNAKE_CASE ) for i, t in enumerate(scheduler.timesteps ): A : Dict = model(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) A : Optional[Any] = scheduler.step(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ).prev_sample assert sample.dtype == torch.floataa def __lowerCAmelCase ( self , **SCREAMING_SNAKE_CASE ) -> str: """simple docstring""" for scheduler_class in self.scheduler_classes: A : Dict = self.get_scheduler_config(**SCREAMING_SNAKE_CASE ) A : Tuple = scheduler_class(**SCREAMING_SNAKE_CASE ) scheduler.set_timesteps(scheduler.config.num_train_timesteps ) assert len(scheduler.timesteps.unique() ) == scheduler.num_inference_steps
311
0
def mf_knapsack(i, wt, val, j):
    global f  # a global dp table for knapsack
    if f[i][j] < 0:
        if j < wt[i - 1]:
            val_ = mf_knapsack(i - 1, wt, val, j)
        else:
            val_ = max(
                mf_knapsack(i - 1, wt, val, j),
                mf_knapsack(i - 1, wt, val, j - wt[i - 1]) + val[i - 1],
            )
        f[i][j] = val_
    return f[i][j]


def knapsack(w, wt, val, n):
    dp = [[0] * (w + 1) for _ in range(n + 1)]
    for i in range(1, n + 1):
        for w_ in range(1, w + 1):
            if wt[i - 1] <= w_:
                dp[i][w_] = max(val[i - 1] + dp[i - 1][w_ - wt[i - 1]], dp[i - 1][w_])
            else:
                dp[i][w_] = dp[i - 1][w_]
    return dp[n][w_], dp


def knapsack_with_example_solution(w: int, wt: list, val: list):
    if not (isinstance(wt, (list, tuple)) and isinstance(val, (list, tuple))):
        raise ValueError(
            "Both the weights and values vectors must be either lists or tuples"
        )
    num_items = len(wt)
    if num_items != len(val):
        msg = (
            "The number of weights must be the same as the number of values.\n"
            f"But got {num_items} weights and {len(val)} values"
        )
        raise ValueError(msg)
    for i in range(num_items):
        if not isinstance(wt[i], int):
            msg = (
                "All weights must be integers but got weight of "
                f"type {type(wt[i])} at index {i}"
            )
            raise TypeError(msg)
    optimal_val, dp_table = knapsack(w, wt, val, num_items)
    example_optional_set: set = set()
    _construct_solution(dp_table, wt, num_items, w, example_optional_set)
    return optimal_val, example_optional_set


def _construct_solution(dp: list, wt: list, i: int, j: int, optimal_set: set):
    # for the current item i at a maximum weight j to be part of an optimal subset,
    # the optimal value at (i, j) must be greater than the optimal value at (i-1, j).
    # where i - 1 means considering only the previous items at the given maximum weight
    if i > 0 and j > 0:
        if dp[i - 1][j] == dp[i][j]:
            _construct_solution(dp, wt, i - 1, j, optimal_set)
        else:
            optimal_set.add(i)
            _construct_solution(dp, wt, i - 1, j - wt[i - 1], optimal_set)


if __name__ == "__main__":
    val = [3, 2, 4, 4]
    wt = [4, 3, 2, 3]
    n = 4
    w = 6
    f = [[0] * (w + 1)] + [[0] + [-1] * (w + 1) for _ in range(n + 1)]
    optimal_solution, _ = knapsack(w, wt, val, n)
    print(optimal_solution)
    print(mf_knapsack(n, wt, val, w))  # switched the n and w

    # testing the dynamic programming problem with example
    # the optimal subset for the above example are items 3 and 4
    optimal_solution, optimal_subset = knapsack_with_example_solution(w, wt, val)
    assert optimal_solution == 8
    assert optimal_subset == {3, 4}
    print("optimal_value = ", optimal_solution)
    print("An optimal subset corresponding to the optimal value", optimal_subset)
99
import argparse import json from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from timm import create_model from timm.data import resolve_data_config from timm.data.transforms_factory import create_transform from transformers import BitConfig, BitForImageClassification, BitImageProcessor from transformers.image_utils import PILImageResampling from transformers.utils import logging logging.set_verbosity_info() lowercase__ :str = logging.get_logger(__name__) def UpperCamelCase ( lowerCAmelCase__ ): '''simple docstring''' lowercase = '''huggingface/label-files''' lowercase = '''imagenet-1k-id2label.json''' lowercase = json.load(open(hf_hub_download(lowerCAmelCase__ , lowerCAmelCase__ , repo_type='''dataset''' ) , '''r''' ) ) lowercase = {int(lowerCAmelCase__ ): v for k, v in idalabel.items()} lowercase = {v: k for k, v in idalabel.items()} lowercase = '''std_conv''' if '''bit''' in model_name else False # note that when using BiT as backbone for ViT-hybrid checkpoints, # one needs to additionally set config.layer_type = "bottleneck", config.stem_type = "same", # config.conv_layer = "std_conv_same" lowercase = BitConfig( conv_layer=lowerCAmelCase__ , num_labels=1000 , idalabel=lowerCAmelCase__ , labelaid=lowerCAmelCase__ , ) return config def UpperCamelCase ( lowerCAmelCase__ ): '''simple docstring''' if "stem.conv" in name: lowercase = name.replace('''stem.conv''' , '''bit.embedder.convolution''' ) if "blocks" in name: lowercase = name.replace('''blocks''' , '''layers''' ) if "head.fc" in name: lowercase = name.replace('''head.fc''' , '''classifier.1''' ) if name.startswith('''norm''' ): lowercase = '''bit.''' + name if "bit" not in name and "classifier" not in name: lowercase = '''bit.encoder.''' + name return name def UpperCamelCase ( ): '''simple docstring''' lowercase = '''http://images.cocodataset.org/val2017/000000039769.jpg''' lowercase = Image.open(requests.get(lowerCAmelCase__ , stream=lowerCAmelCase__ ).raw ) return im @torch.no_grad() def UpperCamelCase ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__=False ): '''simple docstring''' lowercase = get_config(lowerCAmelCase__ ) # load original model from timm lowercase = create_model(lowerCAmelCase__ , pretrained=lowerCAmelCase__ ) timm_model.eval() # load state_dict of original model lowercase = timm_model.state_dict() for key in state_dict.copy().keys(): lowercase = state_dict.pop(lowerCAmelCase__ ) lowercase = val.squeeze() if '''head''' in key else val # load HuggingFace model lowercase = BitForImageClassification(lowerCAmelCase__ ) model.eval() model.load_state_dict(lowerCAmelCase__ ) # create image processor lowercase = create_transform(**resolve_data_config({} , model=lowerCAmelCase__ ) ) lowercase = transform.transforms lowercase = { '''bilinear''': PILImageResampling.BILINEAR, '''bicubic''': PILImageResampling.BICUBIC, '''nearest''': PILImageResampling.NEAREST, } lowercase = BitImageProcessor( do_resize=lowerCAmelCase__ , size={'''shortest_edge''': timm_transforms[0].size} , resample=pillow_resamplings[timm_transforms[0].interpolation.value] , do_center_crop=lowerCAmelCase__ , crop_size={'''height''': timm_transforms[1].size[0], '''width''': timm_transforms[1].size[1]} , do_normalize=lowerCAmelCase__ , image_mean=timm_transforms[-1].mean.tolist() , image_std=timm_transforms[-1].std.tolist() , ) lowercase = prepare_img() lowercase = transform(lowerCAmelCase__ ).unsqueeze(0 ) lowercase = processor(lowerCAmelCase__ , return_tensors='''pt''' 
).pixel_values # verify pixel values assert torch.allclose(lowerCAmelCase__ , lowerCAmelCase__ ) # verify logits with torch.no_grad(): lowercase = model(lowerCAmelCase__ ) lowercase = outputs.logits print('''Logits:''' , logits[0, :3] ) print('''Predicted class:''' , model.config.idalabel[logits.argmax(-1 ).item()] ) lowercase = timm_model(lowerCAmelCase__ ) assert timm_logits.shape == outputs.logits.shape assert torch.allclose(lowerCAmelCase__ , outputs.logits , atol=1E-3 ) print('''Looks ok!''' ) if pytorch_dump_folder_path is not None: Path(lowerCAmelCase__ ).mkdir(exist_ok=lowerCAmelCase__ ) print(f'Saving model {model_name} and processor to {pytorch_dump_folder_path}' ) model.save_pretrained(lowerCAmelCase__ ) processor.save_pretrained(lowerCAmelCase__ ) if push_to_hub: print(f'Pushing model {model_name} and processor to the hub' ) model.push_to_hub(f'ybelkada/{model_name}' ) processor.push_to_hub(f'ybelkada/{model_name}' ) if __name__ == "__main__": lowercase__ :List[Any] = argparse.ArgumentParser() # Required parameters parser.add_argument( "--model_name", default="resnetv2_50x1_bitm", type=str, help="Name of the BiT timm model you'd like to convert.", ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory." ) parser.add_argument( "--push_to_hub", action="store_true", help="Whether to push the model to the hub.", ) lowercase__ :List[str] = parser.parse_args() convert_bit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
101
0
import inspect import unittest import numpy as np from transformers import ViTConfig, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_configuration_common import ConfigTester from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor if is_flax_available(): import jax from transformers.models.vit.modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel class UpperCamelCase_ ( unittest.TestCase ): def __init__( self , lowerCAmelCase_ , lowerCAmelCase_=13 , lowerCAmelCase_=30 , lowerCAmelCase_=2 , lowerCAmelCase_=3 , lowerCAmelCase_=True , lowerCAmelCase_=True , lowerCAmelCase_=32 , lowerCAmelCase_=5 , lowerCAmelCase_=4 , lowerCAmelCase_=37 , lowerCAmelCase_="gelu" , lowerCAmelCase_=0.1 , lowerCAmelCase_=0.1 , lowerCAmelCase_=10 , lowerCAmelCase_=0.02 , ) -> List[Any]: _snake_case = parent _snake_case = batch_size _snake_case = image_size _snake_case = patch_size _snake_case = num_channels _snake_case = is_training _snake_case = use_labels _snake_case = hidden_size _snake_case = num_hidden_layers _snake_case = num_attention_heads _snake_case = intermediate_size _snake_case = hidden_act _snake_case = hidden_dropout_prob _snake_case = attention_probs_dropout_prob _snake_case = type_sequence_label_size _snake_case = initializer_range # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token) _snake_case = (image_size // patch_size) ** 2 _snake_case = num_patches + 1 def lowerCAmelCase ( self ) -> str: _snake_case = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) _snake_case = ViTConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=lowerCAmelCase_ , initializer_range=self.initializer_range , ) return config, pixel_values def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ ) -> List[Any]: _snake_case = FlaxViTModel(config=lowerCAmelCase_ ) _snake_case = model(lowerCAmelCase_ ) # expected sequence length = num_patches + 1 (we add 1 for the [CLS] token) _snake_case = (self.image_size, self.image_size) _snake_case = (self.patch_size, self.patch_size) _snake_case = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, num_patches + 1, self.hidden_size) ) def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ ) -> Dict: _snake_case = self.type_sequence_label_size _snake_case = FlaxViTForImageClassification(config=lowerCAmelCase_ ) _snake_case = model(lowerCAmelCase_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) # test greyscale images _snake_case = 1 _snake_case = FlaxViTForImageClassification(lowerCAmelCase_ ) _snake_case = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) _snake_case = model(lowerCAmelCase_ ) def lowerCAmelCase ( self ) -> str: _snake_case = self.prepare_config_and_inputs() ( ( _snake_case ) , ( _snake_case ) , ) = config_and_inputs _snake_case = {'pixel_values': pixel_values} return config, inputs_dict @require_flax class UpperCamelCase_ ( _lowerCamelCase , unittest.TestCase ): lowerCAmelCase_ = (FlaxViTModel, 
FlaxViTForImageClassification) if is_flax_available() else () def lowerCAmelCase ( self ) -> None: _snake_case = FlaxViTModelTester(self ) _snake_case = ConfigTester(self , config_class=lowerCAmelCase_ , has_text_modality=lowerCAmelCase_ , hidden_size=37 ) def lowerCAmelCase ( self ) -> Union[str, Any]: self.config_tester.run_common_tests() def lowerCAmelCase ( self ) -> Optional[int]: _snake_case = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*lowerCAmelCase_ ) def lowerCAmelCase ( self ) -> Union[str, Any]: _snake_case = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*lowerCAmelCase_ ) def lowerCAmelCase ( self ) -> int: _snake_case , _snake_case = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _snake_case = model_class(lowerCAmelCase_ ) _snake_case = inspect.signature(model.__call__ ) # signature.parameters is an OrderedDict => so arg_names order is deterministic _snake_case = [*signature.parameters.keys()] _snake_case = ['pixel_values'] self.assertListEqual(arg_names[:1] , lowerCAmelCase_ ) def lowerCAmelCase ( self ) -> Optional[int]: _snake_case , _snake_case = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): _snake_case = self._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_ ) _snake_case = model_class(lowerCAmelCase_ ) @jax.jit def model_jitted(lowerCAmelCase_ , **lowerCAmelCase_ ): return model(pixel_values=lowerCAmelCase_ , **lowerCAmelCase_ ) with self.subTest('JIT Enabled' ): _snake_case = model_jitted(**lowerCAmelCase_ ).to_tuple() with self.subTest('JIT Disabled' ): with jax.disable_jit(): _snake_case = model_jitted(**lowerCAmelCase_ ).to_tuple() self.assertEqual(len(lowerCAmelCase_ ) , len(lowerCAmelCase_ ) ) for jitted_output, output in zip(lowerCAmelCase_ , lowerCAmelCase_ ): self.assertEqual(jitted_output.shape , output.shape ) @slow def lowerCAmelCase ( self ) -> Optional[int]: for model_class_name in self.all_model_classes: _snake_case = model_class_name.from_pretrained('google/vit-base-patch16-224' ) _snake_case = model(np.ones((1, 3, 224, 224) ) ) self.assertIsNotNone(lowerCAmelCase_ )
295
from collections.abc import Sequence


def max_subarray_sum(arr: Sequence[float], allow_empty_subarrays: bool = False) -> float:
    """Kadane's algorithm: largest sum over all contiguous subarrays of arr."""
    if not arr:
        return 0
    max_sum = 0 if allow_empty_subarrays else float("-inf")
    curr_sum = 0.0
    for num in arr:
        # either extend the current subarray or start a new one at num
        curr_sum = max(0 if allow_empty_subarrays else num, curr_sum + num)
        max_sum = max(max_sum, curr_sum)
    return max_sum


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    nums = [-2, 1, -3, 4, -1, 2, 1, -5, 4]
    print(f"{max_subarray_sum(nums) = }")
295
1
"""simple docstring""" import logging import os from typing import Dict, List, Optional, Union import torch import torch.nn as nn from accelerate.utils.imports import ( is_abit_bnb_available, is_abit_bnb_available, is_bnb_available, ) from ..big_modeling import dispatch_model, init_empty_weights from .dataclasses import BnbQuantizationConfig from .modeling import ( find_tied_parameters, get_balanced_memory, infer_auto_device_map, load_checkpoint_in_model, offload_weight, set_module_tensor_to_device, ) if is_bnb_available(): import bitsandbytes as bnb from copy import deepcopy lowerCAmelCase_ = logging.getLogger(__name__) def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase = None , __lowerCamelCase = None , __lowerCamelCase = None , __lowerCamelCase = None , __lowerCamelCase = None , __lowerCamelCase = False , ) -> int: lowercase__ : Dict = bnb_quantization_config.load_in_abit lowercase__ : int = bnb_quantization_config.load_in_abit if load_in_abit and not is_abit_bnb_available(): raise ImportError( '''You have a version of `bitsandbytes` that is not compatible with 8bit quantization,''' ''' make sure you have the latest version of `bitsandbytes` installed.''' ) if load_in_abit and not is_abit_bnb_available(): raise ValueError( '''You have a version of `bitsandbytes` that is not compatible with 4bit quantization,''' '''make sure you have the latest version of `bitsandbytes` installed.''' ) lowercase__ : List[str] = [] # custom device map if isinstance(__lowerCamelCase , __lowerCamelCase ) and len(device_map.keys() ) > 1: lowercase__ : str = [key for key, value in device_map.items() if value in ['''disk''', '''cpu''']] # We keep some modules such as the lm_head in their original dtype for numerical stability reasons if bnb_quantization_config.skip_modules is None: lowercase__ : Any = get_keys_to_not_convert(__lowerCamelCase ) # add cpu modules to skip modules only for 4-bit modules if load_in_abit: bnb_quantization_config.skip_modules.extend(__lowerCamelCase ) lowercase__ : Union[str, Any] = bnb_quantization_config.skip_modules # We add the modules we want to keep in full precision if bnb_quantization_config.keep_in_fpaa_modules is None: lowercase__ : Union[str, Any] = [] lowercase__ : Optional[Any] = bnb_quantization_config.keep_in_fpaa_modules modules_to_not_convert.extend(__lowerCamelCase ) # compatibility with peft lowercase__ : Optional[Any] = load_in_abit lowercase__ : Optional[Any] = load_in_abit lowercase__ : str = get_parameter_device(__lowerCamelCase ) if model_device.type != "meta": # quantization of an already loaded model logger.warning( '''It is not recommended to quantize a loaded model. 
''' '''The model should be instantiated under the `init_empty_weights` context manager.''' ) lowercase__ : Dict = replace_with_bnb_layers(__lowerCamelCase , __lowerCamelCase , modules_to_not_convert=__lowerCamelCase ) # convert param to the right dtype lowercase__ : Dict = bnb_quantization_config.torch_dtype for name, param in model.state_dict().items(): if any(module_to_keep_in_fpaa in name for module_to_keep_in_fpaa in keep_in_fpaa_modules ): param.to(torch.floataa ) if param.dtype != torch.floataa: lowercase__ : Any = name.replace('''.weight''' , '''''' ).replace('''.bias''' , '''''' ) lowercase__ : Dict = getattr(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) if param is not None: param.to(torch.floataa ) elif torch.is_floating_point(__lowerCamelCase ): param.to(__lowerCamelCase ) if model_device.type == "cuda": # move everything to cpu in the first place because we can't do quantization if the weights are already on cuda model.cuda(torch.cuda.current_device() ) torch.cuda.empty_cache() elif torch.cuda.is_available(): model.to(torch.cuda.current_device() ) else: raise RuntimeError('''No GPU found. A GPU is needed for quantization.''' ) logger.info( f"""The model device type is {model_device.type}. However, cuda is needed for quantization.""" '''We move the model to cuda.''' ) return model elif weights_location is None: raise RuntimeError( f"""`weights_location` needs to be the folder path containing the weights of the model, but we found {weights_location} """ ) else: with init_empty_weights(): lowercase__ : int = replace_with_bnb_layers( __lowerCamelCase , __lowerCamelCase , modules_to_not_convert=__lowerCamelCase ) lowercase__ : List[str] = get_quantized_model_device_map( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , max_memory=__lowerCamelCase , no_split_module_classes=__lowerCamelCase , ) if offload_state_dict is None and device_map is not None and "disk" in device_map.values(): lowercase__ : List[str] = True lowercase__ : int = any(x in list(device_map.values() ) for x in ['''cpu''', '''disk'''] ) load_checkpoint_in_model( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , dtype=bnb_quantization_config.torch_dtype , offload_folder=__lowerCamelCase , offload_state_dict=__lowerCamelCase , keep_in_fpaa_modules=bnb_quantization_config.keep_in_fpaa_modules , offload_abit_bnb=load_in_abit and offload , ) return dispatch_model(__lowerCamelCase , device_map=__lowerCamelCase , offload_dir=__lowerCamelCase ) def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase=None , __lowerCamelCase=None , __lowerCamelCase=None ) -> int: if device_map is None: if torch.cuda.is_available(): lowercase__ : Optional[int] = {'''''': torch.cuda.current_device()} else: raise RuntimeError('''No GPU found. 
A GPU is needed for quantization.''' ) logger.info('''The device_map was not initialized.''' '''Setting device_map to `{\'\':torch.cuda.current_device()}`.''' ) if isinstance(__lowerCamelCase , __lowerCamelCase ): if device_map not in ["auto", "balanced", "balanced_low_0", "sequential"]: raise ValueError( '''If passing a string for `device_map`, please choose \'auto\', \'balanced\', \'balanced_low_0\' or ''' '''\'sequential\'.''' ) lowercase__ : List[str] = {} special_dtypes.update( { name: bnb_quantization_config.torch_dtype for name, _ in model.named_parameters() if any(m in name for m in bnb_quantization_config.skip_modules ) } ) special_dtypes.update( { name: torch.floataa for name, _ in model.named_parameters() if any(m in name for m in bnb_quantization_config.keep_in_fpaa_modules ) } ) lowercase__ : List[str] = {} lowercase__ : Tuple = special_dtypes lowercase__ : Optional[int] = no_split_module_classes lowercase__ : int = bnb_quantization_config.target_dtype # get max_memory for each device. if device_map != "sequential": lowercase__ : Optional[Any] = get_balanced_memory( __lowerCamelCase , low_zero=(device_map == '''balanced_low_0''') , max_memory=__lowerCamelCase , **__lowerCamelCase , ) lowercase__ : int = max_memory lowercase__ : List[Any] = infer_auto_device_map(__lowerCamelCase , **__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ): # check if don't have any quantized module on the cpu lowercase__ : Any = bnb_quantization_config.skip_modules + bnb_quantization_config.keep_in_fpaa_modules lowercase__ : Union[str, Any] = { key: device_map[key] for key in device_map.keys() if key not in modules_not_to_convert } for device in ["cpu", "disk"]: if device in device_map_without_some_modules.values(): if bnb_quantization_config.load_in_abit: raise ValueError( ''' Some modules are dispatched on the CPU or the disk. Make sure you have enough GPU RAM to fit the quantized model. If you want to dispatch the model on the CPU or the disk while keeping these modules in `torch_dtype`, you need to pass a custom `device_map` to `load_and_quantize_model`. Check https://huggingface.co/docs/accelerate/main/en/usage_guides/quantization#offload-modules-to-cpu-and-disk for more details. ''' ) else: logger.info( '''Some modules are are offloaded to the CPU or the disk. 
Note that these modules will be converted to 8-bit''' ) del device_map_without_some_modules return device_map def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase=None , __lowerCamelCase=None ) -> str: if modules_to_not_convert is None: lowercase__ : Optional[int] = [] lowercase__ , lowercase__ : Optional[Any] = _replace_with_bnb_layers( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) if not has_been_replaced: logger.warning( '''You are loading your model in 8bit or 4bit but no linear modules were found in your model.''' ''' this can happen for some architectures such as gpt2 that uses Conv1D instead of Linear layers.''' ''' Please double check your model architecture, or submit an issue on github if you think this is''' ''' a bug.''' ) return model def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase=None , __lowerCamelCase=None , ) -> int: lowercase__ : Optional[Any] = False for name, module in model.named_children(): if current_key_name is None: lowercase__ : Optional[int] = [] current_key_name.append(__lowerCamelCase ) if isinstance(__lowerCamelCase , nn.Linear ) and name not in modules_to_not_convert: # Check if the current key is not in the `modules_to_not_convert` lowercase__ : Union[str, Any] = '''.'''.join(__lowerCamelCase ) lowercase__ : Dict = True for key in modules_to_not_convert: if ( (key in current_key_name_str) and (key + "." in current_key_name_str) ) or key == current_key_name_str: lowercase__ : int = False break if proceed: # Load bnb module with empty weight and replace ``nn.Linear` module if bnb_quantization_config.load_in_abit: lowercase__ : Optional[int] = bnb.nn.LinearabitLt( module.in_features , module.out_features , module.bias is not None , has_fpaa_weights=__lowerCamelCase , threshold=bnb_quantization_config.llm_inta_threshold , ) elif bnb_quantization_config.load_in_abit: lowercase__ : Dict = bnb.nn.Linearabit( module.in_features , module.out_features , module.bias is not None , bnb_quantization_config.bnb_abit_compute_dtype , compress_statistics=bnb_quantization_config.bnb_abit_use_double_quant , quant_type=bnb_quantization_config.bnb_abit_quant_type , ) else: raise ValueError('''load_in_8bit and load_in_4bit can\'t be both False''' ) lowercase__ : List[str] = module.weight.data if module.bias is not None: lowercase__ : Dict = module.bias.data bnb_module.requires_grad_(__lowerCamelCase ) setattr(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) lowercase__ : Tuple = True if len(list(module.children() ) ) > 0: lowercase__ , lowercase__ : int = _replace_with_bnb_layers( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) lowercase__ : Union[str, Any] = has_been_replaced | _has_been_replaced # Remove the last key for recursion current_key_name.pop(-1 ) return model, has_been_replaced def __UpperCAmelCase ( __lowerCamelCase ) -> List[str]: # Create a copy of the model with init_empty_weights(): lowercase__ : int = deepcopy(__lowerCamelCase ) # this has 0 cost since it is done inside `init_empty_weights` context manager` lowercase__ : Optional[Any] = find_tied_parameters(__lowerCamelCase ) # For compatibility with Accelerate < 0.18 if isinstance(__lowerCamelCase , __lowerCamelCase ): lowercase__ : List[str] = sum(list(tied_params.values() ) , [] ) + list(tied_params.keys() ) else: lowercase__ : Optional[int] = sum(__lowerCamelCase , [] ) lowercase__ : List[str] = len(__lowerCamelCase ) > 0 # Check if it is a base model lowercase__ : Any = False if 
hasattr(__lowerCamelCase , '''base_model_prefix''' ): lowercase__ : Optional[int] = not hasattr(__lowerCamelCase , model.base_model_prefix ) # Ignore this for base models (BertModel, GPT2Model, etc.) if (not has_tied_params) and is_base_model: return [] # otherwise they have an attached head lowercase__ : int = list(model.named_children() ) lowercase__ : List[Any] = [list_modules[-1][0]] # add last module together with tied weights lowercase__ : Union[str, Any] = set(__lowerCamelCase ) - set(__lowerCamelCase ) lowercase__ : List[str] = list(set(__lowerCamelCase ) ) + list(__lowerCamelCase ) # remove ".weight" from the keys lowercase__ : int = ['''.weight''', '''.bias'''] lowercase__ : Optional[Any] = [] for name in list_untouched: for name_to_remove in names_to_remove: if name_to_remove in name: lowercase__ : str = name.replace(__lowerCamelCase , '''''' ) filtered_module_names.append(__lowerCamelCase ) return filtered_module_names def __UpperCAmelCase ( __lowerCamelCase ) -> Dict: for m in model.modules(): if isinstance(__lowerCamelCase , bnb.nn.Linearabit ): return True return False def __UpperCAmelCase ( __lowerCamelCase ) -> Dict: return next(parameter.parameters() ).device def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> List[str]: # if it is not quantized, we quantize and offload the quantized weights and the SCB stats if fpaa_statistics is None: set_module_tensor_to_device(__lowerCamelCase , __lowerCamelCase , 0 , dtype=__lowerCamelCase , value=__lowerCamelCase ) lowercase__ : Any = param_name lowercase__ : List[Any] = model if "." in tensor_name: lowercase__ : Dict = tensor_name.split('''.''' ) for split in splits[:-1]: lowercase__ : int = getattr(__lowerCamelCase , __lowerCamelCase ) if new_module is None: raise ValueError(f"""{module} has no attribute {split}.""" ) lowercase__ : int = new_module lowercase__ : str = splits[-1] # offload weights lowercase__ : Any = False offload_weight(module._parameters[tensor_name] , __lowerCamelCase , __lowerCamelCase , index=__lowerCamelCase ) if hasattr(module._parameters[tensor_name] , '''SCB''' ): offload_weight( module._parameters[tensor_name].SCB , param_name.replace('''weight''' , '''SCB''' ) , __lowerCamelCase , index=__lowerCamelCase , ) else: offload_weight(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , index=__lowerCamelCase ) offload_weight(__lowerCamelCase , param_name.replace('''weight''' , '''SCB''' ) , __lowerCamelCase , index=__lowerCamelCase ) set_module_tensor_to_device(__lowerCamelCase , __lowerCamelCase , '''meta''' , dtype=__lowerCamelCase , value=torch.empty(*param.size() ) )
16
'''simple docstring''' import argparse import gc import json import os import shutil import warnings import torch from transformers import LlamaConfig, LlamaForCausalLM, LlamaTokenizer try: from transformers import LlamaTokenizerFast except ImportError as e: warnings.warn(e) warnings.warn( '''The converted tokenizer will be the `slow` tokenizer. To use the fast, update your `tokenizers` library and re-run the tokenizer conversion''' ) A__: Dict = None A__: Tuple = { '''7B''': 1_1008, '''13B''': 1_3824, '''30B''': 1_7920, '''65B''': 2_2016, '''70B''': 2_8672, } A__: Any = { '''7B''': 1, '''7Bf''': 1, '''13B''': 2, '''13Bf''': 2, '''30B''': 4, '''65B''': 8, '''70B''': 8, '''70Bf''': 8, } def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : Optional[int] ,_UpperCAmelCase : Optional[int]=1 ,_UpperCAmelCase : List[str]=256 ) -> Dict: return multiple_of * ((int(ffn_dim_multiplier * int(8 * n / 3 ) ) + multiple_of - 1) // multiple_of) def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : List[Any] ) -> List[str]: with open(_UpperCAmelCase ,"""r""" ) as f: return json.load(_UpperCAmelCase ) def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : str ,_UpperCAmelCase : Optional[Any] ) -> Tuple: with open(_UpperCAmelCase ,"""w""" ) as f: json.dump(_UpperCAmelCase ,_UpperCAmelCase ) def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : str ,_UpperCAmelCase : Optional[Any] ,_UpperCAmelCase : int ,_UpperCAmelCase : List[Any]=True ) -> Union[str, Any]: os.makedirs(_UpperCAmelCase ,exist_ok=_UpperCAmelCase ) _a : Union[str, Any] =os.path.join(_UpperCAmelCase ,"""tmp""" ) os.makedirs(_UpperCAmelCase ,exist_ok=_UpperCAmelCase ) _a : int =read_json(os.path.join(_UpperCAmelCase ,"""params.json""" ) ) _a : int =NUM_SHARDS[model_size] _a : Dict =params["""n_layers"""] _a : Union[str, Any] =params["""n_heads"""] _a : List[str] =n_heads // num_shards _a : int =params["""dim"""] _a : Union[str, Any] =dim // n_heads _a : int =1_0_0_0_0.0 _a : str =1.0 / (base ** (torch.arange(0 ,_UpperCAmelCase ,2 ).float() / dims_per_head)) if "n_kv_heads" in params: _a : str =params["""n_kv_heads"""] # for GQA / MQA _a : Optional[Any] =n_heads_per_shard // num_key_value_heads _a : Optional[int] =dim // num_key_value_heads else: # compatibility with other checkpoints _a : str =n_heads _a : Any =n_heads_per_shard _a : str =dim # permute for sliced rotary def permute(_UpperCAmelCase : Tuple ,_UpperCAmelCase : Optional[int]=n_heads ,_UpperCAmelCase : Optional[int]=dim ,_UpperCAmelCase : List[str]=dim ): return w.view(_UpperCAmelCase ,dima // n_heads // 2 ,2 ,_UpperCAmelCase ).transpose(1 ,2 ).reshape(_UpperCAmelCase ,_UpperCAmelCase ) print(F"Fetching all parameters from the checkpoint at {input_base_path}." ) # Load weights if model_size == "7B": # Not sharded # (The sharded implementation would also work, but this is simpler.) 
_a : Any =torch.load(os.path.join(_UpperCAmelCase ,"""consolidated.00.pth""" ) ,map_location="""cpu""" ) else: # Sharded _a : List[Any] =[ torch.load(os.path.join(_UpperCAmelCase ,F"consolidated.{i:02d}.pth" ) ,map_location="""cpu""" ) for i in range(_UpperCAmelCase ) ] _a : Any =0 _a : Optional[int] ={"""weight_map""": {}} for layer_i in range(_UpperCAmelCase ): _a : List[str] =F"pytorch_model-{layer_i + 1}-of-{n_layers + 1}.bin" if model_size == "7B": # Unsharded _a : List[str] ={ F"model.layers.{layer_i}.self_attn.q_proj.weight": permute( loaded[F"layers.{layer_i}.attention.wq.weight"] ), F"model.layers.{layer_i}.self_attn.k_proj.weight": permute( loaded[F"layers.{layer_i}.attention.wk.weight"] ), F"model.layers.{layer_i}.self_attn.v_proj.weight": loaded[F"layers.{layer_i}.attention.wv.weight"], F"model.layers.{layer_i}.self_attn.o_proj.weight": loaded[F"layers.{layer_i}.attention.wo.weight"], F"model.layers.{layer_i}.mlp.gate_proj.weight": loaded[F"layers.{layer_i}.feed_forward.w1.weight"], F"model.layers.{layer_i}.mlp.down_proj.weight": loaded[F"layers.{layer_i}.feed_forward.w2.weight"], F"model.layers.{layer_i}.mlp.up_proj.weight": loaded[F"layers.{layer_i}.feed_forward.w3.weight"], F"model.layers.{layer_i}.input_layernorm.weight": loaded[F"layers.{layer_i}.attention_norm.weight"], F"model.layers.{layer_i}.post_attention_layernorm.weight": loaded[F"layers.{layer_i}.ffn_norm.weight"], } else: # Sharded # Note that attention.w{q,k,v,o}, feed_fordward.w[1,2,3], attention_norm.weight and ffn_norm.weight share # the same storage object, saving attention_norm and ffn_norm will save other weights too, which is # redundant as other weights will be stitched from multiple shards. To avoid that, they are cloned. _a : Tuple ={ F"model.layers.{layer_i}.input_layernorm.weight": loaded[0][ F"layers.{layer_i}.attention_norm.weight" ].clone(), F"model.layers.{layer_i}.post_attention_layernorm.weight": loaded[0][ F"layers.{layer_i}.ffn_norm.weight" ].clone(), } _a : str =permute( torch.cat( [ loaded[i][F"layers.{layer_i}.attention.wq.weight"].view(_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase ) for i in range(_UpperCAmelCase ) ] ,dim=0 ,).reshape(_UpperCAmelCase ,_UpperCAmelCase ) ) _a : Tuple =permute( torch.cat( [ loaded[i][F"layers.{layer_i}.attention.wk.weight"].view( _UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase ) for i in range(_UpperCAmelCase ) ] ,dim=0 ,).reshape(_UpperCAmelCase ,_UpperCAmelCase ) ,_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase ,) _a : Any =torch.cat( [ loaded[i][F"layers.{layer_i}.attention.wv.weight"].view( _UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase ) for i in range(_UpperCAmelCase ) ] ,dim=0 ,).reshape(_UpperCAmelCase ,_UpperCAmelCase ) _a : List[str] =torch.cat( [loaded[i][F"layers.{layer_i}.attention.wo.weight"] for i in range(_UpperCAmelCase )] ,dim=1 ) _a : Union[str, Any] =torch.cat( [loaded[i][F"layers.{layer_i}.feed_forward.w1.weight"] for i in range(_UpperCAmelCase )] ,dim=0 ) _a : Tuple =torch.cat( [loaded[i][F"layers.{layer_i}.feed_forward.w2.weight"] for i in range(_UpperCAmelCase )] ,dim=1 ) _a : Union[str, Any] =torch.cat( [loaded[i][F"layers.{layer_i}.feed_forward.w3.weight"] for i in range(_UpperCAmelCase )] ,dim=0 ) _a : str =inv_freq for k, v in state_dict.items(): _a : Any =filename param_count += v.numel() torch.save(_UpperCAmelCase ,os.path.join(_UpperCAmelCase ,_UpperCAmelCase ) ) _a : Union[str, Any] =F"pytorch_model-{n_layers + 1}-of-{n_layers + 1}.bin" if model_size == "7B": # Unsharded _a : List[str] ={ 
"""model.embed_tokens.weight""": loaded["""tok_embeddings.weight"""], """model.norm.weight""": loaded["""norm.weight"""], """lm_head.weight""": loaded["""output.weight"""], } else: _a : int ={ """model.norm.weight""": loaded[0]["""norm.weight"""], """model.embed_tokens.weight""": torch.cat( [loaded[i]["""tok_embeddings.weight"""] for i in range(_UpperCAmelCase )] ,dim=1 ), """lm_head.weight""": torch.cat([loaded[i]["""output.weight"""] for i in range(_UpperCAmelCase )] ,dim=0 ), } for k, v in state_dict.items(): _a : Dict =filename param_count += v.numel() torch.save(_UpperCAmelCase ,os.path.join(_UpperCAmelCase ,_UpperCAmelCase ) ) # Write configs _a : Tuple ={"""total_size""": param_count * 2} write_json(_UpperCAmelCase ,os.path.join(_UpperCAmelCase ,"""pytorch_model.bin.index.json""" ) ) _a : Optional[Any] =params["""ffn_dim_multiplier"""] if """ffn_dim_multiplier""" in params else 1 _a : int =params["""multiple_of"""] if """multiple_of""" in params else 256 _a : List[Any] =LlamaConfig( hidden_size=_UpperCAmelCase ,intermediate_size=compute_intermediate_size(_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase ) ,num_attention_heads=params["""n_heads"""] ,num_hidden_layers=params["""n_layers"""] ,rms_norm_eps=params["""norm_eps"""] ,num_key_value_heads=_UpperCAmelCase ,) config.save_pretrained(_UpperCAmelCase ) # Make space so we can load the model properly now. del state_dict del loaded gc.collect() print("""Loading the checkpoint in a Llama model.""" ) _a : Any =LlamaForCausalLM.from_pretrained(_UpperCAmelCase ,torch_dtype=torch.floataa ,low_cpu_mem_usage=_UpperCAmelCase ) # Avoid saving this as part of the config. del model.config._name_or_path print("""Saving in the Transformers format.""" ) model.save_pretrained(_UpperCAmelCase ,safe_serialization=_UpperCAmelCase ) shutil.rmtree(_UpperCAmelCase ) def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : int ,_UpperCAmelCase : int ) -> Optional[Any]: # Initialize the tokenizer based on the `spm` model _a : List[str] =LlamaTokenizer if LlamaTokenizerFast is None else LlamaTokenizerFast print(F"Saving a {tokenizer_class.__name__} to {tokenizer_path}." ) _a : List[Any] =tokenizer_class(_UpperCAmelCase ) tokenizer.save_pretrained(_UpperCAmelCase ) def SCREAMING_SNAKE_CASE_ ( ) -> Union[str, Any]: _a : List[str] =argparse.ArgumentParser() parser.add_argument( """--input_dir""" ,help="""Location of LLaMA weights, which contains tokenizer.model and model folders""" ,) parser.add_argument( """--model_size""" ,choices=["""7B""", """7Bf""", """13B""", """13Bf""", """30B""", """65B""", """70B""", """70Bf""", """tokenizer_only"""] ,) parser.add_argument( """--output_dir""" ,help="""Location to write HF model and tokenizer""" ,) parser.add_argument("""--safe_serialization""" ,type=_UpperCAmelCase ,help="""Whether or not to save using `safetensors`.""" ) _a : Optional[Any] =parser.parse_args() if args.model_size != "tokenizer_only": write_model( model_path=args.output_dir ,input_base_path=os.path.join(args.input_dir ,args.model_size ) ,model_size=args.model_size ,safe_serialization=args.safe_serialization ,) _a : List[Any] =os.path.join(args.input_dir ,"""tokenizer.model""" ) write_tokenizer(args.output_dir ,_UpperCAmelCase ) if __name__ == "__main__": main()
276
0
import unittest import numpy as np from transformers import BertConfig, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask if is_flax_available(): from transformers.models.bert.modeling_flax_bert import ( FlaxBertForMaskedLM, FlaxBertForMultipleChoice, FlaxBertForNextSentencePrediction, FlaxBertForPreTraining, FlaxBertForQuestionAnswering, FlaxBertForSequenceClassification, FlaxBertForTokenClassification, FlaxBertModel, ) class SCREAMING_SNAKE_CASE ( unittest.TestCase ): """simple docstring""" def __init__( self : Tuple , lowerCAmelCase : Tuple , lowerCAmelCase : str=13 , lowerCAmelCase : Optional[Any]=7 , lowerCAmelCase : Optional[Any]=True , lowerCAmelCase : Any=True , lowerCAmelCase : str=True , lowerCAmelCase : Optional[Any]=True , lowerCAmelCase : Dict=99 , lowerCAmelCase : Any=32 , lowerCAmelCase : int=5 , lowerCAmelCase : Tuple=4 , lowerCAmelCase : Optional[Any]=37 , lowerCAmelCase : str="gelu" , lowerCAmelCase : str=0.1 , lowerCAmelCase : str=0.1 , lowerCAmelCase : Any=5_12 , lowerCAmelCase : Optional[Any]=16 , lowerCAmelCase : Dict=2 , lowerCAmelCase : Tuple=0.02 , lowerCAmelCase : Optional[int]=4 , ) -> List[Any]: """simple docstring""" __lowerCAmelCase : int = parent __lowerCAmelCase : Dict = batch_size __lowerCAmelCase : Dict = seq_length __lowerCAmelCase : Union[str, Any] = is_training __lowerCAmelCase : List[Any] = use_attention_mask __lowerCAmelCase : List[Any] = use_token_type_ids __lowerCAmelCase : Optional[int] = use_labels __lowerCAmelCase : str = vocab_size __lowerCAmelCase : Any = hidden_size __lowerCAmelCase : Optional[int] = num_hidden_layers __lowerCAmelCase : Optional[int] = num_attention_heads __lowerCAmelCase : Dict = intermediate_size __lowerCAmelCase : Tuple = hidden_act __lowerCAmelCase : Dict = hidden_dropout_prob __lowerCAmelCase : Any = attention_probs_dropout_prob __lowerCAmelCase : Union[str, Any] = max_position_embeddings __lowerCAmelCase : int = type_vocab_size __lowerCAmelCase : Tuple = type_sequence_label_size __lowerCAmelCase : int = initializer_range __lowerCAmelCase : Optional[int] = num_choices def SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Dict: """simple docstring""" __lowerCAmelCase : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __lowerCAmelCase : Dict = None if self.use_attention_mask: __lowerCAmelCase : Optional[int] = random_attention_mask([self.batch_size, self.seq_length] ) __lowerCAmelCase : Union[str, Any] = None if self.use_token_type_ids: __lowerCAmelCase : str = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) __lowerCAmelCase : List[str] = BertConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=lowerCAmelCase , initializer_range=self.initializer_range , ) return config, input_ids, token_type_ids, attention_mask def SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Optional[Any]: """simple docstring""" __lowerCAmelCase : Any = self.prepare_config_and_inputs() __lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase : List[str] = 
config_and_inputs __lowerCAmelCase : Any = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": attention_mask} return config, inputs_dict def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Tuple: """simple docstring""" __lowerCAmelCase : List[str] = self.prepare_config_and_inputs() __lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase : Dict = config_and_inputs __lowerCAmelCase : Any = True __lowerCAmelCase : str = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] ) __lowerCAmelCase : int = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 ) return ( config, input_ids, attention_mask, encoder_hidden_states, encoder_attention_mask, ) @require_flax class SCREAMING_SNAKE_CASE ( a_ , unittest.TestCase ): """simple docstring""" lowerCamelCase : int =True lowerCamelCase : Any =( ( FlaxBertModel, FlaxBertForPreTraining, FlaxBertForMaskedLM, FlaxBertForMultipleChoice, FlaxBertForQuestionAnswering, FlaxBertForNextSentencePrediction, FlaxBertForSequenceClassification, FlaxBertForTokenClassification, FlaxBertForQuestionAnswering, ) if is_flax_available() else () ) def SCREAMING_SNAKE_CASE ( self : str ) -> Union[str, Any]: """simple docstring""" __lowerCAmelCase : Union[str, Any] = FlaxBertModelTester(self ) @slow def SCREAMING_SNAKE_CASE ( self : Dict ) -> Dict: """simple docstring""" __lowerCAmelCase : int = FlaxBertModel.from_pretrained("""bert-base-cased""" ) __lowerCAmelCase : str = model(np.ones((1, 1) ) ) self.assertIsNotNone(lowerCAmelCase )
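# Note (illustrative, based on the shared test utilities): `ids_tensor([batch, seq], vocab_size)`
# draws random integer token ids in [0, vocab_size) and `random_attention_mask` returns a 0/1
# mask that keeps at least one position attended per row, so `prepare_config_and_inputs`
# always yields inputs the model can run on.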
139
from pathlib import Path

import fire


def minify(src_dir: str, dest_dir: str, n: int):
    """Write first n lines of each file f in src_dir to dest_dir/f"""
    src_dir = Path(src_dir)
    dest_dir = Path(dest_dir)
    dest_dir.mkdir(exist_ok=True)
    for path in src_dir.iterdir():
        new = [x.rstrip() for x in list(path.open().readlines())][:n]
        dest_path = dest_dir.joinpath(path.name)
        print(dest_path)
        dest_path.open("w").write("\n".join(new))


if __name__ == "__main__":
    fire.Fire(minify)
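# Example usage via the fire-generated CLI (the script name is a placeholder):
#   python minify.py <src_dir> <dest_dir> 100
# writes the first 100 lines of every file in <src_dir> to <dest_dir>.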
139
1
import inspect
import os
import unittest

import torch

import accelerate
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_multi_gpu
from accelerate.utils import patch_environment


class MultiGPUTester(unittest.TestCase):
    def setUp(self):
        mod_file = inspect.getfile(accelerate.test_utils)
        self.test_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "test_script.py"])
        self.data_loop_file_path = os.path.sep.join(
            mod_file.split(os.path.sep)[:-1] + ["scripts", "test_distributed_data_loop.py"]
        )
        self.operation_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "test_ops.py"])

    @require_multi_gpu
    def test_multi_gpu(self):
        print(f"Found {torch.cuda.device_count()} devices.")
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.test_file_path]
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())

    @require_multi_gpu
    def test_multi_gpu_ops(self):
        print(f"Found {torch.cuda.device_count()} devices.")
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.operation_file_path]
        print(f"Command: {cmd}")
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())

    @require_multi_gpu
    def test_pad_across_processes(self):
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", inspect.getfile(self.__class__)]
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())

    @require_multi_gpu
    def test_distributed_data_loop(self):
        print(f"Found {torch.cuda.device_count()} devices, using 2 devices only")
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.data_loop_file_path]
        with patch_environment(omp_num_threads=1, cuda_visible_devices="0,1"):
            execute_subprocess_async(cmd, env=os.environ.copy())


if __name__ == "__main__":
    accelerator = Accelerator()
    shape = (accelerator.state.process_index + 2, 10)
    tensor = torch.randint(0, 10, shape).to(accelerator.device)

    error_msg = ""

    tensor1 = accelerator.pad_across_processes(tensor)
    if tensor1.shape[0] != accelerator.state.num_processes + 1:
        error_msg += f"Found shape {tensor1.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
    if not torch.equal(tensor1[: accelerator.state.process_index + 2], tensor):
        error_msg += "Tensors have different values."
    if not torch.all(tensor1[accelerator.state.process_index + 2 :] == 0):
        error_msg += "Padding was not done with the right value (0)."

    tensor2 = accelerator.pad_across_processes(tensor, pad_first=True)
    if tensor2.shape[0] != accelerator.state.num_processes + 1:
        error_msg += f"Found shape {tensor2.shape} but should have {accelerator.state.num_processes + 1} at dim 0."

    index = accelerator.state.num_processes - accelerator.state.process_index - 1
    if not torch.equal(tensor2[index:], tensor):
        error_msg += "Tensors have different values."
    if not torch.all(tensor2[:index] == 0):
        error_msg += "Padding was not done with the right value (0)."

    # Raise error at the end to make sure we don't stop at the first failure.
    if len(error_msg) > 0:
        raise ValueError(error_msg)
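# Illustration of the shape checks above, assuming 2 processes: rank 0 builds a tensor of
# shape (0 + 2, 10) and rank 1 of shape (1 + 2, 10). `pad_across_processes` pads every rank
# to the largest first dimension, (num_processes + 1, 10) = (3, 10); with pad_first=True the
# zero padding is prepended instead of appended, hence the two symmetric sets of checks.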
343
import argparse import json from collections import OrderedDict from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ( ConditionalDetrConfig, ConditionalDetrForObjectDetection, ConditionalDetrForSegmentation, ConditionalDetrImageProcessor, ) from transformers.utils import logging logging.set_verbosity_info() _SCREAMING_SNAKE_CASE = logging.get_logger(__name__) # here we list all keys to be renamed (original name on the left, our name on the right) _SCREAMING_SNAKE_CASE = [] for i in range(6): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append( (F'''transformer.encoder.layers.{i}.self_attn.out_proj.weight''', F'''encoder.layers.{i}.self_attn.out_proj.weight''') ) rename_keys.append( (F'''transformer.encoder.layers.{i}.self_attn.out_proj.bias''', F'''encoder.layers.{i}.self_attn.out_proj.bias''') ) rename_keys.append((F'''transformer.encoder.layers.{i}.linear1.weight''', F'''encoder.layers.{i}.fc1.weight''')) rename_keys.append((F'''transformer.encoder.layers.{i}.linear1.bias''', F'''encoder.layers.{i}.fc1.bias''')) rename_keys.append((F'''transformer.encoder.layers.{i}.linear2.weight''', F'''encoder.layers.{i}.fc2.weight''')) rename_keys.append((F'''transformer.encoder.layers.{i}.linear2.bias''', F'''encoder.layers.{i}.fc2.bias''')) rename_keys.append( (F'''transformer.encoder.layers.{i}.norm1.weight''', F'''encoder.layers.{i}.self_attn_layer_norm.weight''') ) rename_keys.append((F'''transformer.encoder.layers.{i}.norm1.bias''', F'''encoder.layers.{i}.self_attn_layer_norm.bias''')) rename_keys.append((F'''transformer.encoder.layers.{i}.norm2.weight''', F'''encoder.layers.{i}.final_layer_norm.weight''')) rename_keys.append((F'''transformer.encoder.layers.{i}.norm2.bias''', F'''encoder.layers.{i}.final_layer_norm.bias''')) # decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms rename_keys.append( (F'''transformer.decoder.layers.{i}.self_attn.out_proj.weight''', F'''decoder.layers.{i}.self_attn.out_proj.weight''') ) rename_keys.append( (F'''transformer.decoder.layers.{i}.self_attn.out_proj.bias''', F'''decoder.layers.{i}.self_attn.out_proj.bias''') ) rename_keys.append( ( F'''transformer.decoder.layers.{i}.cross_attn.out_proj.weight''', F'''decoder.layers.{i}.encoder_attn.out_proj.weight''', ) ) rename_keys.append( ( F'''transformer.decoder.layers.{i}.cross_attn.out_proj.bias''', F'''decoder.layers.{i}.encoder_attn.out_proj.bias''', ) ) rename_keys.append((F'''transformer.decoder.layers.{i}.linear1.weight''', F'''decoder.layers.{i}.fc1.weight''')) rename_keys.append((F'''transformer.decoder.layers.{i}.linear1.bias''', F'''decoder.layers.{i}.fc1.bias''')) rename_keys.append((F'''transformer.decoder.layers.{i}.linear2.weight''', F'''decoder.layers.{i}.fc2.weight''')) rename_keys.append((F'''transformer.decoder.layers.{i}.linear2.bias''', F'''decoder.layers.{i}.fc2.bias''')) rename_keys.append( (F'''transformer.decoder.layers.{i}.norm1.weight''', F'''decoder.layers.{i}.self_attn_layer_norm.weight''') ) rename_keys.append((F'''transformer.decoder.layers.{i}.norm1.bias''', F'''decoder.layers.{i}.self_attn_layer_norm.bias''')) rename_keys.append( (F'''transformer.decoder.layers.{i}.norm2.weight''', F'''decoder.layers.{i}.encoder_attn_layer_norm.weight''') ) rename_keys.append( (F'''transformer.decoder.layers.{i}.norm2.bias''', F'''decoder.layers.{i}.encoder_attn_layer_norm.bias''') ) 
rename_keys.append((F'''transformer.decoder.layers.{i}.norm3.weight''', F'''decoder.layers.{i}.final_layer_norm.weight''')) rename_keys.append((F'''transformer.decoder.layers.{i}.norm3.bias''', F'''decoder.layers.{i}.final_layer_norm.bias''')) # q, k, v projections in self/cross-attention in decoder for conditional DETR rename_keys.append( (F'''transformer.decoder.layers.{i}.sa_qcontent_proj.weight''', F'''decoder.layers.{i}.sa_qcontent_proj.weight''') ) rename_keys.append( (F'''transformer.decoder.layers.{i}.sa_kcontent_proj.weight''', F'''decoder.layers.{i}.sa_kcontent_proj.weight''') ) rename_keys.append( (F'''transformer.decoder.layers.{i}.sa_qpos_proj.weight''', F'''decoder.layers.{i}.sa_qpos_proj.weight''') ) rename_keys.append( (F'''transformer.decoder.layers.{i}.sa_kpos_proj.weight''', F'''decoder.layers.{i}.sa_kpos_proj.weight''') ) rename_keys.append((F'''transformer.decoder.layers.{i}.sa_v_proj.weight''', F'''decoder.layers.{i}.sa_v_proj.weight''')) rename_keys.append( (F'''transformer.decoder.layers.{i}.ca_qcontent_proj.weight''', F'''decoder.layers.{i}.ca_qcontent_proj.weight''') ) # rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.weight", f"decoder.layers.{i}.ca_qpos_proj.weight")) rename_keys.append( (F'''transformer.decoder.layers.{i}.ca_kcontent_proj.weight''', F'''decoder.layers.{i}.ca_kcontent_proj.weight''') ) rename_keys.append( (F'''transformer.decoder.layers.{i}.ca_kpos_proj.weight''', F'''decoder.layers.{i}.ca_kpos_proj.weight''') ) rename_keys.append((F'''transformer.decoder.layers.{i}.ca_v_proj.weight''', F'''decoder.layers.{i}.ca_v_proj.weight''')) rename_keys.append( (F'''transformer.decoder.layers.{i}.ca_qpos_sine_proj.weight''', F'''decoder.layers.{i}.ca_qpos_sine_proj.weight''') ) rename_keys.append( (F'''transformer.decoder.layers.{i}.sa_qcontent_proj.bias''', F'''decoder.layers.{i}.sa_qcontent_proj.bias''') ) rename_keys.append( (F'''transformer.decoder.layers.{i}.sa_kcontent_proj.bias''', F'''decoder.layers.{i}.sa_kcontent_proj.bias''') ) rename_keys.append((F'''transformer.decoder.layers.{i}.sa_qpos_proj.bias''', F'''decoder.layers.{i}.sa_qpos_proj.bias''')) rename_keys.append((F'''transformer.decoder.layers.{i}.sa_kpos_proj.bias''', F'''decoder.layers.{i}.sa_kpos_proj.bias''')) rename_keys.append((F'''transformer.decoder.layers.{i}.sa_v_proj.bias''', F'''decoder.layers.{i}.sa_v_proj.bias''')) rename_keys.append( (F'''transformer.decoder.layers.{i}.ca_qcontent_proj.bias''', F'''decoder.layers.{i}.ca_qcontent_proj.bias''') ) # rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.bias", f"decoder.layers.{i}.ca_qpos_proj.bias")) rename_keys.append( (F'''transformer.decoder.layers.{i}.ca_kcontent_proj.bias''', F'''decoder.layers.{i}.ca_kcontent_proj.bias''') ) rename_keys.append((F'''transformer.decoder.layers.{i}.ca_kpos_proj.bias''', F'''decoder.layers.{i}.ca_kpos_proj.bias''')) rename_keys.append((F'''transformer.decoder.layers.{i}.ca_v_proj.bias''', F'''decoder.layers.{i}.ca_v_proj.bias''')) rename_keys.append( (F'''transformer.decoder.layers.{i}.ca_qpos_sine_proj.bias''', F'''decoder.layers.{i}.ca_qpos_sine_proj.bias''') ) # convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads # for conditional DETR, also convert reference point head and query scale MLP rename_keys.extend( [ ("""input_proj.weight""", """input_projection.weight"""), ("""input_proj.bias""", """input_projection.bias"""), ("""query_embed.weight""", """query_position_embeddings.weight"""), 
("""transformer.decoder.norm.weight""", """decoder.layernorm.weight"""), ("""transformer.decoder.norm.bias""", """decoder.layernorm.bias"""), ("""class_embed.weight""", """class_labels_classifier.weight"""), ("""class_embed.bias""", """class_labels_classifier.bias"""), ("""bbox_embed.layers.0.weight""", """bbox_predictor.layers.0.weight"""), ("""bbox_embed.layers.0.bias""", """bbox_predictor.layers.0.bias"""), ("""bbox_embed.layers.1.weight""", """bbox_predictor.layers.1.weight"""), ("""bbox_embed.layers.1.bias""", """bbox_predictor.layers.1.bias"""), ("""bbox_embed.layers.2.weight""", """bbox_predictor.layers.2.weight"""), ("""bbox_embed.layers.2.bias""", """bbox_predictor.layers.2.bias"""), ("""transformer.decoder.ref_point_head.layers.0.weight""", """decoder.ref_point_head.layers.0.weight"""), ("""transformer.decoder.ref_point_head.layers.0.bias""", """decoder.ref_point_head.layers.0.bias"""), ("""transformer.decoder.ref_point_head.layers.1.weight""", """decoder.ref_point_head.layers.1.weight"""), ("""transformer.decoder.ref_point_head.layers.1.bias""", """decoder.ref_point_head.layers.1.bias"""), ("""transformer.decoder.query_scale.layers.0.weight""", """decoder.query_scale.layers.0.weight"""), ("""transformer.decoder.query_scale.layers.0.bias""", """decoder.query_scale.layers.0.bias"""), ("""transformer.decoder.query_scale.layers.1.weight""", """decoder.query_scale.layers.1.weight"""), ("""transformer.decoder.query_scale.layers.1.bias""", """decoder.query_scale.layers.1.bias"""), ("""transformer.decoder.layers.0.ca_qpos_proj.weight""", """decoder.layers.0.ca_qpos_proj.weight"""), ("""transformer.decoder.layers.0.ca_qpos_proj.bias""", """decoder.layers.0.ca_qpos_proj.bias"""), ] ) def lowercase( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) -> Optional[Any]: '''simple docstring''' UpperCamelCase = state_dict.pop(UpperCamelCase_ ) UpperCamelCase = val def lowercase( UpperCamelCase_ ) -> Any: '''simple docstring''' UpperCamelCase = OrderedDict() for key, value in state_dict.items(): if "backbone.0.body" in key: UpperCamelCase = key.replace("""backbone.0.body""" , """backbone.conv_encoder.model""" ) UpperCamelCase = value else: UpperCamelCase = value return new_state_dict def lowercase( UpperCamelCase_ , UpperCamelCase_=False ) -> Optional[int]: '''simple docstring''' UpperCamelCase = """""" if is_panoptic: UpperCamelCase = """conditional_detr.""" # first: transformer encoder for i in range(6 ): # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias) UpperCamelCase = state_dict.pop(f"""{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight""" ) UpperCamelCase = state_dict.pop(f"""{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias""" ) # next, add query, keys and values (in that order) to the state dict UpperCamelCase = in_proj_weight[:256, :] UpperCamelCase = in_proj_bias[:256] UpperCamelCase = in_proj_weight[256:512, :] UpperCamelCase = in_proj_bias[256:512] UpperCamelCase = in_proj_weight[-256:, :] UpperCamelCase = in_proj_bias[-256:] def lowercase( ) -> Any: '''simple docstring''' UpperCamelCase = """http://images.cocodataset.org/val2017/000000039769.jpg""" UpperCamelCase = Image.open(requests.get(UpperCamelCase_ , stream=UpperCamelCase_ ).raw ) return im @torch.no_grad() def lowercase( UpperCamelCase_ , UpperCamelCase_ ) -> Any: '''simple docstring''' UpperCamelCase = ConditionalDetrConfig() # set backbone and dilation attributes if "resnet101" in model_name: UpperCamelCase = """resnet101""" if 
"dc5" in model_name: UpperCamelCase = True UpperCamelCase = """panoptic""" in model_name if is_panoptic: UpperCamelCase = 250 else: UpperCamelCase = 91 UpperCamelCase = """huggingface/label-files""" UpperCamelCase = """coco-detection-id2label.json""" UpperCamelCase = json.load(open(hf_hub_download(UpperCamelCase_ , UpperCamelCase_ , repo_type="""dataset""" ) , """r""" ) ) UpperCamelCase = {int(UpperCamelCase_ ): v for k, v in idalabel.items()} UpperCamelCase = idalabel UpperCamelCase = {v: k for k, v in idalabel.items()} # load image processor UpperCamelCase = """coco_panoptic""" if is_panoptic else """coco_detection""" UpperCamelCase = ConditionalDetrImageProcessor(format=UpperCamelCase_ ) # prepare image UpperCamelCase = prepare_img() UpperCamelCase = image_processor(images=UpperCamelCase_ , return_tensors="""pt""" ) UpperCamelCase = encoding["""pixel_values"""] logger.info(f"""Converting model {model_name}...""" ) # load original model from torch hub UpperCamelCase = torch.hub.load("""DeppMeng/ConditionalDETR""" , UpperCamelCase_ , pretrained=UpperCamelCase_ ).eval() UpperCamelCase = conditional_detr.state_dict() # rename keys for src, dest in rename_keys: if is_panoptic: UpperCamelCase = """conditional_detr.""" + src rename_key(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) UpperCamelCase = rename_backbone_keys(UpperCamelCase_ ) # query, key and value matrices need special treatment read_in_q_k_v(UpperCamelCase_ , is_panoptic=UpperCamelCase_ ) # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them UpperCamelCase = """conditional_detr.model.""" if is_panoptic else """model.""" for key in state_dict.copy().keys(): if is_panoptic: if ( key.startswith("""conditional_detr""" ) and not key.startswith("""class_labels_classifier""" ) and not key.startswith("""bbox_predictor""" ) ): UpperCamelCase = state_dict.pop(UpperCamelCase_ ) UpperCamelCase = val elif "class_labels_classifier" in key or "bbox_predictor" in key: UpperCamelCase = state_dict.pop(UpperCamelCase_ ) UpperCamelCase = val elif key.startswith("""bbox_attention""" ) or key.startswith("""mask_head""" ): continue else: UpperCamelCase = state_dict.pop(UpperCamelCase_ ) UpperCamelCase = val else: if not key.startswith("""class_labels_classifier""" ) and not key.startswith("""bbox_predictor""" ): UpperCamelCase = state_dict.pop(UpperCamelCase_ ) UpperCamelCase = val # finally, create HuggingFace model and load state dict UpperCamelCase = ConditionalDetrForSegmentation(UpperCamelCase_ ) if is_panoptic else ConditionalDetrForObjectDetection(UpperCamelCase_ ) model.load_state_dict(UpperCamelCase_ ) model.eval() model.push_to_hub(repo_id=UpperCamelCase_ , organization="""DepuMeng""" , commit_message="""Add model""" ) # verify our conversion UpperCamelCase = conditional_detr(UpperCamelCase_ ) UpperCamelCase = model(UpperCamelCase_ ) assert torch.allclose(outputs.logits , original_outputs["""pred_logits"""] , atol=1E-4 ) assert torch.allclose(outputs.pred_boxes , original_outputs["""pred_boxes"""] , atol=1E-4 ) if is_panoptic: assert torch.allclose(outputs.pred_masks , original_outputs["""pred_masks"""] , atol=1E-4 ) # Save model and image processor logger.info(f"""Saving PyTorch model and image processor to {pytorch_dump_folder_path}...""" ) Path(UpperCamelCase_ ).mkdir(exist_ok=UpperCamelCase_ ) model.save_pretrained(UpperCamelCase_ ) image_processor.save_pretrained(UpperCamelCase_ ) if __name__ == "__main__": _SCREAMING_SNAKE_CASE = argparse.ArgumentParser() 
parser.add_argument( """--model_name""", default="""conditional_detr_resnet50""", type=str, help="""Name of the CONDITIONAL_DETR model you'd like to convert.""", ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model.""" ) _SCREAMING_SNAKE_CASE = parser.parse_args() convert_conditional_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path)
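# Note (illustrative sketch, not part of the conversion script): `read_in_q_k_v` relies on
# PyTorch's packed in_proj layout, where the query, key and value projections for the
# 256-dim hidden size are stacked into a single (768, 256) matrix; the slices [:256],
# [256:512] and [-256:] therefore recover q, k and v respectively.
_in_proj = torch.zeros(3 * 256, 256)
assert _in_proj[:256].shape == _in_proj[256:512].shape == _in_proj[-256:].shape == (256, 256)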
343
1
"""simple docstring""" from scipy.stats import pearsonr, spearmanr from sklearn.metrics import fa_score, matthews_corrcoef import datasets SCREAMING_SNAKE_CASE__ = "\\n@inproceedings{wang2019glue,\n title={{GLUE}: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding},\n author={Wang, Alex and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R.},\n note={In the Proceedings of ICLR.},\n year={2019}\n}\n" SCREAMING_SNAKE_CASE__ = "\\nGLUE, the General Language Understanding Evaluation benchmark\n(https://gluebenchmark.com/) is a collection of resources for training,\nevaluating, and analyzing natural language understanding systems.\n" SCREAMING_SNAKE_CASE__ = "\nCompute GLUE evaluation metric associated to each GLUE dataset.\nArgs:\n predictions: list of predictions to score.\n Each translation should be tokenized into a list of tokens.\n references: list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\nReturns: depending on the GLUE subset, one or several of:\n \"accuracy\": Accuracy\n \"f1\": F1 score\n \"pearson\": Pearson Correlation\n \"spearmanr\": Spearman Correlation\n \"matthews_correlation\": Matthew Correlation\nExamples:\n\n >>> glue_metric = datasets.load_metric('glue', 'sst2') # 'sst2' or any of [\"mnli\", \"mnli_mismatched\", \"mnli_matched\", \"qnli\", \"rte\", \"wnli\", \"hans\"]\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'accuracy': 1.0}\n\n >>> glue_metric = datasets.load_metric('glue', 'mrpc') # 'mrpc' or 'qqp'\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'accuracy': 1.0, 'f1': 1.0}\n\n >>> glue_metric = datasets.load_metric('glue', 'stsb')\n >>> references = [0., 1., 2., 3., 4., 5.]\n >>> predictions = [0., 1., 2., 3., 4., 5.]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print({\"pearson\": round(results[\"pearson\"], 2), \"spearmanr\": round(results[\"spearmanr\"], 2)})\n {'pearson': 1.0, 'spearmanr': 1.0}\n\n >>> glue_metric = datasets.load_metric('glue', 'cola')\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'matthews_correlation': 1.0}\n" def lowerCAmelCase__ ( _UpperCamelCase : Tuple , _UpperCamelCase : Tuple ) -> Tuple: """simple docstring""" return float((preds == labels).mean() ) def lowerCAmelCase__ ( _UpperCamelCase : Optional[int] , _UpperCamelCase : Union[str, Any] ) -> Union[str, Any]: """simple docstring""" snake_case = simple_accuracy(_UpperCamelCase , _UpperCamelCase ) snake_case = float(fa_score(y_true=_UpperCamelCase , y_pred=_UpperCamelCase ) ) return { "accuracy": acc, "f1": fa, } def lowerCAmelCase__ ( _UpperCamelCase : int , _UpperCamelCase : int ) -> Tuple: """simple docstring""" snake_case = float(pearsonr(_UpperCamelCase , _UpperCamelCase )[0] ) snake_case = float(spearmanr(_UpperCamelCase , _UpperCamelCase )[0] ) return { "pearson": pearson_corr, "spearmanr": spearman_corr, } @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class lowerCAmelCase_ ( datasets.Metric ): """simple docstring""" def snake_case ( self ): """simple docstring""" if self.config_name not in [ "sst2", "mnli", "mnli_mismatched", 
"mnli_matched", "cola", "stsb", "mrpc", "qqp", "qnli", "rte", "wnli", "hans", ]: raise KeyError( 'You should supply a configuration name selected in ' '["sst2", "mnli", "mnli_mismatched", "mnli_matched", ' '"cola", "stsb", "mrpc", "qqp", "qnli", "rte", "wnli", "hans"]' ) return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { 'predictions': datasets.Value('int64' if self.config_name != 'stsb' else 'float32' ), 'references': datasets.Value('int64' if self.config_name != 'stsb' else 'float32' ), } ) , codebase_urls=[] , reference_urls=[] , format='numpy' , ) def snake_case ( self , lowerCAmelCase , lowerCAmelCase ): """simple docstring""" if self.config_name == "cola": return {"matthews_correlation": matthews_corrcoef(lowerCAmelCase , lowerCAmelCase )} elif self.config_name == "stsb": return pearson_and_spearman(lowerCAmelCase , lowerCAmelCase ) elif self.config_name in ["mrpc", "qqp"]: return acc_and_fa(lowerCAmelCase , lowerCAmelCase ) elif self.config_name in ["sst2", "mnli", "mnli_mismatched", "mnli_matched", "qnli", "rte", "wnli", "hans"]: return {"accuracy": simple_accuracy(lowerCAmelCase , lowerCAmelCase )} else: raise KeyError( 'You should supply a configuration name selected in ' '["sst2", "mnli", "mnli_mismatched", "mnli_matched", ' '"cola", "stsb", "mrpc", "qqp", "qnli", "rte", "wnli", "hans"]' )
149
"""simple docstring""" import os import sys import unittest SCREAMING_SNAKE_CASE__ = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__)))) sys.path.append(os.path.join(git_repo_path, "utils")) import check_dummies # noqa: E402 from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402 # Align TRANSFORMERS_PATH in check_dummies with the current path SCREAMING_SNAKE_CASE__ = os.path.join(git_repo_path, "src", "transformers") SCREAMING_SNAKE_CASE__ = "\n{0} = None\n" SCREAMING_SNAKE_CASE__ = "\nclass {0}(metaclass=DummyObject):\n _backends = {1}\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, {1})\n" SCREAMING_SNAKE_CASE__ = "\ndef {0}(*args, **kwargs):\n requires_backends({0}, {1})\n" class lowerCAmelCase_ ( unittest.TestCase ): """simple docstring""" def snake_case ( self ): """simple docstring""" snake_case = find_backend(' _import_structure["models.albert"].append("AlbertTokenizerFast")' ) self.assertIsNone(lowerCAmelCase ) snake_case = find_backend(' if not is_tokenizers_available():' ) self.assertEqual(lowerCAmelCase , 'tokenizers' ) snake_case = find_backend(' if not is_tensorflow_text_available():' ) self.assertEqual(lowerCAmelCase , 'tensorflow_text' ) snake_case = find_backend(' if not (is_sentencepiece_available() and is_tokenizers_available()):' ) self.assertEqual(lowerCAmelCase , 'sentencepiece_and_tokenizers' ) snake_case = find_backend( ' if not (is_sentencepiece_available() and is_tensorflow_text_available()):' ) self.assertEqual(lowerCAmelCase , 'sentencepiece_and_tensorflow_text' ) snake_case = find_backend( ' if not (is_sentencepiece_available() and is_tokenizers_available() and is_vision_available()):' ) self.assertEqual(lowerCAmelCase , 'sentencepiece_and_tokenizers_and_vision' ) def snake_case ( self ): """simple docstring""" snake_case = read_init() # We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects self.assertIn('torch' , lowerCAmelCase ) self.assertIn('tensorflow_text' , lowerCAmelCase ) self.assertIn('sentencepiece_and_tokenizers' , lowerCAmelCase ) # Likewise, we can't assert on the exact content of a key self.assertIn('BertModel' , objects['torch'] ) self.assertIn('TFBertModel' , objects['tf'] ) self.assertIn('FlaxBertModel' , objects['flax'] ) self.assertIn('BertModel' , objects['torch'] ) self.assertIn('TFBertTokenizer' , objects['tensorflow_text'] ) self.assertIn('convert_slow_tokenizer' , objects['sentencepiece_and_tokenizers'] ) def snake_case ( self ): """simple docstring""" snake_case = create_dummy_object('CONSTANT' , '\'torch\'' ) self.assertEqual(lowerCAmelCase , '\nCONSTANT = None\n' ) snake_case = create_dummy_object('function' , '\'torch\'' ) self.assertEqual( lowerCAmelCase , '\ndef function(*args, **kwargs):\n requires_backends(function, \'torch\')\n' ) snake_case = '\nclass FakeClass(metaclass=DummyObject):\n _backends = \'torch\'\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, \'torch\')\n' snake_case = create_dummy_object('FakeClass' , '\'torch\'' ) self.assertEqual(lowerCAmelCase , lowerCAmelCase ) def snake_case ( self ): """simple docstring""" snake_case = '# This file is autogenerated by the command `make fix-copies`, do not edit.\nfrom ..utils import DummyObject, requires_backends\n\n\nCONSTANT = None\n\n\ndef function(*args, **kwargs):\n requires_backends(function, ["torch"])\n\n\nclass FakeClass(metaclass=DummyObject):\n _backends = ["torch"]\n\n def __init__(self, *args, **kwargs):\n 
requires_backends(self, ["torch"])\n' snake_case = create_dummy_files({'torch': ['CONSTANT', 'function', 'FakeClass']} ) self.assertEqual(dummy_files['torch'] , lowerCAmelCase )
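# Note (illustrative): as the assertions above exercise, `find_backend` extracts the backend
# name from guard lines of the form "if not is_<backend>_available():" and joins compound
# guards with "_and_" (e.g. "sentencepiece_and_tokenizers").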
149
1
import os import torch from ..logging import get_logger from .constants import FSDP_PYTORCH_VERSION, MODEL_NAME, OPTIMIZER_NAME from .versions import is_torch_version if is_torch_version('''>=''', FSDP_PYTORCH_VERSION): import torch.distributed.checkpoint as dist_cp from torch.distributed.checkpoint.default_planner import DefaultLoadPlanner, DefaultSavePlanner from torch.distributed.checkpoint.optimizer import load_sharded_optimizer_state_dict from torch.distributed.fsdp.fully_sharded_data_parallel import FullyShardedDataParallel as FSDP from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType snake_case_ = get_logger(__name__) def snake_case__ ( SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : Tuple , SCREAMING_SNAKE_CASE_ : int=0 ): '''simple docstring''' os.makedirs(SCREAMING_SNAKE_CASE_ , exist_ok=SCREAMING_SNAKE_CASE_ ) with FSDP.state_dict_type( SCREAMING_SNAKE_CASE_ , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ): lowercase__ : Dict = model.state_dict() if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT: lowercase__ : Optional[Any] = f"""{MODEL_NAME}.bin""" if model_index == 0 else f"""{MODEL_NAME}_{model_index}.bin""" lowercase__ : str = os.path.join(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) if accelerator.process_index == 0: logger.info(f"""Saving model to {output_model_file}""" ) torch.save(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) logger.info(f"""Model saved to {output_model_file}""" ) elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT: lowercase__ : str = ( f"""{MODEL_NAME}_rank{accelerator.process_index}.bin""" if model_index == 0 else f"""{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin""" ) lowercase__ : Any = os.path.join(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) logger.info(f"""Saving model to {output_model_file}""" ) torch.save(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) logger.info(f"""Model saved to {output_model_file}""" ) elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT: lowercase__ : Optional[Any] = os.path.join(SCREAMING_SNAKE_CASE_ , f"""{MODEL_NAME}_{model_index}""" ) os.makedirs(SCREAMING_SNAKE_CASE_ , exist_ok=SCREAMING_SNAKE_CASE_ ) logger.info(f"""Saving model to {ckpt_dir}""" ) lowercase__ : Dict = {"model": state_dict} dist_cp.save_state_dict( state_dict=SCREAMING_SNAKE_CASE_ , storage_writer=dist_cp.FileSystemWriter(SCREAMING_SNAKE_CASE_ ) , planner=DefaultSavePlanner() , ) logger.info(f"""Model saved to {ckpt_dir}""" ) def snake_case__ ( SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : Tuple , SCREAMING_SNAKE_CASE_ : Union[str, Any] , SCREAMING_SNAKE_CASE_ : Dict=0 ): '''simple docstring''' accelerator.wait_for_everyone() with FSDP.state_dict_type( SCREAMING_SNAKE_CASE_ , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ): if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT: if type(SCREAMING_SNAKE_CASE_ ) != FSDP and accelerator.process_index != 0: if not fsdp_plugin.sync_module_states: raise ValueError( 'Set the `sync_module_states` flag to `True` so that model states are synced across processes when ' 'initializing FSDP object' ) return lowercase__ : List[Any] = f"""{MODEL_NAME}.bin""" if model_index == 0 else f"""{MODEL_NAME}_{model_index}.bin""" lowercase__ : Dict = os.path.join(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) 
logger.info(f"""Loading model from {input_model_file}""" ) lowercase__ : Dict = torch.load(SCREAMING_SNAKE_CASE_ ) logger.info(f"""Model loaded from {input_model_file}""" ) elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT: lowercase__ : Any = ( f"""{MODEL_NAME}_rank{accelerator.process_index}.bin""" if model_index == 0 else f"""{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin""" ) lowercase__ : Union[str, Any] = os.path.join(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) logger.info(f"""Loading model from {input_model_file}""" ) lowercase__ : Dict = torch.load(SCREAMING_SNAKE_CASE_ ) logger.info(f"""Model loaded from {input_model_file}""" ) elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT: lowercase__ : Any = ( os.path.join(SCREAMING_SNAKE_CASE_ , f"""{MODEL_NAME}_{model_index}""" ) if f"""{MODEL_NAME}""" not in input_dir else input_dir ) logger.info(f"""Loading model from {ckpt_dir}""" ) lowercase__ : List[Any] = {"model": model.state_dict()} dist_cp.load_state_dict( state_dict=SCREAMING_SNAKE_CASE_ , storage_reader=dist_cp.FileSystemReader(SCREAMING_SNAKE_CASE_ ) , planner=DefaultLoadPlanner() , ) lowercase__ : Any = state_dict["model"] logger.info(f"""Model loaded from {ckpt_dir}""" ) model.load_state_dict(SCREAMING_SNAKE_CASE_ ) def snake_case__ ( SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : Union[str, Any] , SCREAMING_SNAKE_CASE_ : List[Any]=0 ): '''simple docstring''' os.makedirs(SCREAMING_SNAKE_CASE_ , exist_ok=SCREAMING_SNAKE_CASE_ ) with FSDP.state_dict_type( SCREAMING_SNAKE_CASE_ , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ): lowercase__ : int = FSDP.optim_state_dict(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT: if accelerator.process_index == 0: lowercase__ : Optional[int] = ( f"""{OPTIMIZER_NAME}.bin""" if optimizer_index == 0 else f"""{OPTIMIZER_NAME}_{optimizer_index}.bin""" ) lowercase__ : List[str] = os.path.join(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) logger.info(f"""Saving Optimizer state to {output_optimizer_file}""" ) torch.save(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) logger.info(f"""Optimizer state saved in {output_optimizer_file}""" ) else: lowercase__ : Any = os.path.join(SCREAMING_SNAKE_CASE_ , f"""{OPTIMIZER_NAME}_{optimizer_index}""" ) os.makedirs(SCREAMING_SNAKE_CASE_ , exist_ok=SCREAMING_SNAKE_CASE_ ) logger.info(f"""Saving Optimizer state to {ckpt_dir}""" ) dist_cp.save_state_dict( state_dict={'optimizer': optim_state} , storage_writer=dist_cp.FileSystemWriter(SCREAMING_SNAKE_CASE_ ) , planner=DefaultSavePlanner() , ) logger.info(f"""Optimizer state saved in {ckpt_dir}""" ) def snake_case__ ( SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : Tuple , SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : List[Any]=0 ): '''simple docstring''' accelerator.wait_for_everyone() with FSDP.state_dict_type( SCREAMING_SNAKE_CASE_ , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ): if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT: lowercase__ : Union[str, Any] = None # below check should work but currently it isn't working (mostly opytorch issue), # in the meantime disabling it at the cost of excess memory usage # if 
accelerator.process_index == 0 or not fsdp_plugin.optim_state_dict_config.rank0_only: lowercase__ : Any = ( f"""{OPTIMIZER_NAME}.bin""" if optimizer_index == 0 else f"""{OPTIMIZER_NAME}_{optimizer_index}.bin""" ) lowercase__ : int = os.path.join(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) logger.info(f"""Loading Optimizer state from {input_optimizer_file}""" ) lowercase__ : Optional[Any] = torch.load(SCREAMING_SNAKE_CASE_ ) logger.info(f"""Optimizer state loaded from {input_optimizer_file}""" ) else: lowercase__ : Tuple = ( os.path.join(SCREAMING_SNAKE_CASE_ , f"""{OPTIMIZER_NAME}_{optimizer_index}""" ) if f"""{OPTIMIZER_NAME}""" not in input_dir else input_dir ) logger.info(f"""Loading Optimizer from {ckpt_dir}""" ) lowercase__ : int = load_sharded_optimizer_state_dict( model_state_dict=model.state_dict() , optimizer_key='optimizer' , storage_reader=dist_cp.FileSystemReader(SCREAMING_SNAKE_CASE_ ) , ) lowercase__ : int = optim_state["optimizer"] logger.info(f"""Optimizer loaded from {ckpt_dir}""" ) lowercase__ : str = FSDP.optim_state_dict_to_load(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) optimizer.load_state_dict(SCREAMING_SNAKE_CASE_ )
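# Summary (illustrative) of the three `StateDictType` branches used throughout this module:
#   FULL_STATE_DICT    -> a single consolidated `{MODEL_NAME}.bin` / `{OPTIMIZER_NAME}.bin`
#   LOCAL_STATE_DICT   -> one `..._rank{process_index}.bin` file per process
#   SHARDED_STATE_DICT -> a `torch.distributed.checkpoint` directory written and read with
#                         the default save/load planners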
214
'''simple docstring''' from jiwer import compute_measures import datasets a : List[Any] = "\\n@inproceedings{inproceedings,\n author = {Morris, Andrew and Maier, Viktoria and Green, Phil},\n year = {2004},\n month = {01},\n pages = {},\n title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}\n}\n" a : str = "\\nWord error rate (WER) is a common metric of the performance of an automatic speech recognition system.\n\nThe general difficulty of measuring performance lies in the fact that the recognized word sequence can have a different length from the reference word sequence (supposedly the correct one). The WER is derived from the Levenshtein distance, working at the word level instead of the phoneme level. The WER is a valuable tool for comparing different systems as well as for evaluating improvements within one system. This kind of measurement, however, provides no details on the nature of translation errors and further work is therefore required to identify the main source(s) of error and to focus any research effort.\n\nThis problem is solved by first aligning the recognized word sequence with the reference (spoken) word sequence using dynamic string alignment. Examination of this issue is seen through a theory called the power law that states the correlation between perplexity and word error rate.\n\nWord error rate can then be computed as:\n\nWER = (S + D + I) / N = (S + D + I) / (S + D + C)\n\nwhere\n\nS is the number of substitutions,\nD is the number of deletions,\nI is the number of insertions,\nC is the number of correct words,\nN is the number of words in the reference (N=S+D+C).\n\nThis value indicates the average number of errors per reference word. The lower the value, the better the\nperformance of the ASR system with a WER of 0 being a perfect score.\n" a : Union[str, Any] = "\nCompute WER score of transcribed segments against references.\n\nArgs:\n references: List of references for each speech input.\n predictions: List of transcriptions to score.\n concatenate_texts (bool, default=False): Whether to concatenate all input texts or compute WER iteratively.\n\nReturns:\n (float): the word error rate\n\nExamples:\n\n >>> predictions = [\"this is the prediction\", \"there is an other sample\"]\n >>> references = [\"this is the reference\", \"there is another one\"]\n >>> wer = datasets.load_metric(\"wer\")\n >>> wer_score = wer.compute(predictions=predictions, references=references)\n >>> print(wer_score)\n 0.5\n" @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class UpperCamelCase__ ( datasets.Metric ): """simple docstring""" def A_ ( self ): '''simple docstring''' return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { "predictions": datasets.Value("string" , id="sequence" ), "references": datasets.Value("string" , id="sequence" ), } ) , codebase_urls=["https://github.com/jitsi/jiwer/"] , reference_urls=[ "https://en.wikipedia.org/wiki/Word_error_rate", ] , ) def A_ ( self , snake_case=None , snake_case=None , snake_case=False ): '''simple docstring''' if concatenate_texts: return compute_measures(snake_case , snake_case )["wer"] else: UpperCAmelCase : Dict = 0 UpperCAmelCase : Optional[Any] = 0 for prediction, reference in zip(snake_case , snake_case ): UpperCAmelCase : Tuple = compute_measures(snake_case , snake_case ) incorrect += measures["substitutions"] + measures["deletions"] + 
measures["insertions"] total += measures["substitutions"] + measures["deletions"] + measures["hits"] return incorrect / total
311
0
"""simple docstring""" import copy import inspect import unittest from transformers import AutoBackbone from transformers.configuration_utils import PretrainedConfig from transformers.testing_utils import require_timm, require_torch, torch_device from transformers.utils.import_utils import is_torch_available from ...test_backbone_common import BackboneTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor if is_torch_available(): import torch from transformers import TimmBackbone, TimmBackboneConfig from ...test_pipeline_mixin import PipelineTesterMixin class __SCREAMING_SNAKE_CASE : '''simple docstring''' def __init__( self : int , __a : Dict , __a : Optional[Any]=None , __a : Tuple=None , __a : str=None , __a : Dict="resnet50" , __a : List[str]=3 , __a : Any=32 , __a : Dict=3 , __a : str=True , __a : Tuple=True , ) -> Any: _UpperCamelCase : List[Any] = parent _UpperCamelCase : Dict = out_indices if out_indices is not None else [4] _UpperCamelCase : Optional[Any] = stage_names _UpperCamelCase : Dict = out_features _UpperCamelCase : Optional[Any] = backbone _UpperCamelCase : Union[str, Any] = batch_size _UpperCamelCase : Union[str, Any] = image_size _UpperCamelCase : Optional[int] = num_channels _UpperCamelCase : str = use_pretrained_backbone _UpperCamelCase : List[Any] = is_training def __SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Optional[Any]: _UpperCamelCase : List[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) _UpperCamelCase : Tuple = self.get_config() return config, pixel_values def __SCREAMING_SNAKE_CASE ( self : Dict ) -> str: return TimmBackboneConfig( image_size=self.image_size , num_channels=self.num_channels , out_features=self.out_features , out_indices=self.out_indices , stage_names=self.stage_names , use_pretrained_backbone=self.use_pretrained_backbone , backbone=self.backbone , ) def __SCREAMING_SNAKE_CASE ( self : int , __a : Any , __a : Optional[Any] ) -> List[str]: _UpperCamelCase : Tuple = TimmBackbone(config=__a ) model.to(__a ) model.eval() with torch.no_grad(): _UpperCamelCase : List[str] = model(__a ) self.parent.assertEqual( result.feature_map[-1].shape , (self.batch_size, model.channels[-1], 14, 14) , ) def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Dict: _UpperCamelCase : Any = self.prepare_config_and_inputs() _UpperCamelCase, _UpperCamelCase : Union[str, Any] = config_and_inputs _UpperCamelCase : Union[str, Any] = {"pixel_values": pixel_values} return config, inputs_dict @require_torch @require_timm class __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , unittest.TestCase ): '''simple docstring''' SCREAMING_SNAKE_CASE__ :Dict = (TimmBackbone,) if is_torch_available() else () SCREAMING_SNAKE_CASE__ :Dict = {"feature-extraction": TimmBackbone} if is_torch_available() else {} SCREAMING_SNAKE_CASE__ :List[str] = False SCREAMING_SNAKE_CASE__ :Optional[Any] = False SCREAMING_SNAKE_CASE__ :List[str] = False SCREAMING_SNAKE_CASE__ :Dict = False def __SCREAMING_SNAKE_CASE ( self : Tuple ) -> List[Any]: _UpperCamelCase : Dict = TimmBackboneModelTester(self ) _UpperCamelCase : List[Any] = ConfigTester(self , config_class=__a , has_text_modality=__a ) def __SCREAMING_SNAKE_CASE ( self : Any ) -> List[Any]: self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() 
self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def __SCREAMING_SNAKE_CASE ( self : str ) -> Tuple: _UpperCamelCase : Union[str, Any] = "resnet18" _UpperCamelCase : Optional[int] = "microsoft/resnet-18" _UpperCamelCase : Optional[int] = AutoBackbone.from_pretrained(__a , use_timm_backbone=__a ) _UpperCamelCase : str = AutoBackbone.from_pretrained(__a ) self.assertEqual(len(timm_model.out_features ) , len(transformers_model.out_features ) ) self.assertEqual(len(timm_model.stage_names ) , len(transformers_model.stage_names ) ) self.assertEqual(timm_model.channels , transformers_model.channels ) # Out indices are set to the last layer by default. For timm models, we don't know # the number of layers in advance, so we set it to (-1,), whereas for transformers # models, we set it to [len(stage_names) - 1] (kept for backward compatibility). self.assertEqual(timm_model.out_indices , (-1,) ) self.assertEqual(transformers_model.out_indices , [len(timm_model.stage_names ) - 1] ) _UpperCamelCase : Optional[Any] = AutoBackbone.from_pretrained(__a , use_timm_backbone=__a , out_indices=[1, 2, 3] ) _UpperCamelCase : List[str] = AutoBackbone.from_pretrained(__a , out_indices=[1, 2, 3] ) self.assertEqual(timm_model.out_indices , transformers_model.out_indices ) self.assertEqual(len(timm_model.out_features ) , len(transformers_model.out_features ) ) self.assertEqual(timm_model.channels , transformers_model.channels ) @unittest.skip("TimmBackbone doesn't support feed forward chunking" ) def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Optional[Any]: pass @unittest.skip("TimmBackbone doesn't have num_hidden_layers attribute" ) def __SCREAMING_SNAKE_CASE ( self : Any ) -> Optional[int]: pass @unittest.skip("TimmBackbone initialization is managed on the timm side" ) def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Optional[Any]: pass @unittest.skip("TimmBackbone models doesn't have inputs_embeds" ) def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> List[str]: pass @unittest.skip("TimmBackbone models doesn't have inputs_embeds" ) def __SCREAMING_SNAKE_CASE ( self : Tuple ) -> Tuple: pass @unittest.skip("TimmBackbone model cannot be created without specifying a backbone checkpoint" ) def __SCREAMING_SNAKE_CASE ( self : int ) -> Optional[Any]: pass @unittest.skip("Only checkpoints on timm can be loaded into TimmBackbone" ) def __SCREAMING_SNAKE_CASE ( self : Any ) -> List[str]: pass @unittest.skip("model weights aren't tied in TimmBackbone." ) def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Union[str, Any]: pass @unittest.skip("model weights aren't tied in TimmBackbone." ) def __SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> str: pass @unittest.skip("Only checkpoints on timm can be loaded into TimmBackbone" ) def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Optional[Any]: pass @unittest.skip("Only checkpoints on timm can be loaded into TimmBackbone" ) def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Optional[int]: pass @unittest.skip("TimmBackbone doesn't have hidden size info in its configuration." ) def __SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Optional[int]: pass @unittest.skip("TimmBackbone doesn't support output_attentions." ) def __SCREAMING_SNAKE_CASE ( self : Tuple ) -> List[Any]: pass @unittest.skip("Safetensors is not supported by timm." 
) def __SCREAMING_SNAKE_CASE ( self : Dict ) -> Optional[int]: pass @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." ) def __SCREAMING_SNAKE_CASE ( self : Dict ) -> str: pass def __SCREAMING_SNAKE_CASE ( self : Any ) -> List[str]: _UpperCamelCase, _UpperCamelCase : int = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _UpperCamelCase : List[str] = model_class(__a ) _UpperCamelCase : str = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic _UpperCamelCase : Dict = [*signature.parameters.keys()] _UpperCamelCase : Tuple = ["pixel_values"] self.assertListEqual(arg_names[:1] , __a ) def __SCREAMING_SNAKE_CASE ( self : Any ) -> Union[str, Any]: _UpperCamelCase, _UpperCamelCase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common() _UpperCamelCase : Optional[int] = True _UpperCamelCase : int = self.has_attentions # no need to test all models as different heads yield the same functionality _UpperCamelCase : str = self.all_model_classes[0] _UpperCamelCase : str = model_class(__a ) model.to(__a ) _UpperCamelCase : int = self._prepare_for_class(__a , __a ) _UpperCamelCase : Optional[Any] = model(**__a ) _UpperCamelCase : Union[str, Any] = outputs[0][-1] # Encoder-/Decoder-only models _UpperCamelCase : Tuple = outputs.hidden_states[0] hidden_states.retain_grad() if self.has_attentions: _UpperCamelCase : Any = outputs.attentions[0] attentions.retain_grad() output.flatten()[0].backward(retain_graph=__a ) self.assertIsNotNone(hidden_states.grad ) if self.has_attentions: self.assertIsNotNone(attentions.grad ) def __SCREAMING_SNAKE_CASE ( self : int ) -> Optional[Any]: _UpperCamelCase, _UpperCamelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _UpperCamelCase : Union[str, Any] = model_class(__a ) model.to(__a ) model.eval() _UpperCamelCase : Dict = model(**__a ) self.assertEqual(len(result.feature_maps ) , len(config.out_indices ) ) self.assertEqual(len(model.channels ) , len(config.out_indices ) ) # Check output of last stage is taken if out_features=None, out_indices=None _UpperCamelCase : List[str] = copy.deepcopy(__a ) _UpperCamelCase : Dict = None _UpperCamelCase : Dict = model_class(__a ) model.to(__a ) model.eval() _UpperCamelCase : List[Any] = model(**__a ) self.assertEqual(len(result.feature_maps ) , 1 ) self.assertEqual(len(model.channels ) , 1 ) # Check backbone can be initialized with fresh weights _UpperCamelCase : Dict = copy.deepcopy(__a ) _UpperCamelCase : int = False _UpperCamelCase : Optional[Any] = model_class(__a ) model.to(__a ) model.eval() _UpperCamelCase : Any = model(**__a )
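Stripped of the test scaffolding, the behaviour these tests pin down is a timm checkpoint loaded through the backbone API. A minimal usage sketch (checkpoint name and arguments taken from the test above; requires `timm` to be installed):

import torch
from transformers import AutoBackbone

# timm-backed backbone, the same code path the test above exercises
backbone = AutoBackbone.from_pretrained("resnet18", use_timm_backbone=True, out_indices=[1, 2, 3])

pixel_values = torch.randn(1, 3, 224, 224)
outputs = backbone(pixel_values)
# one feature map (and one channel count) per requested out_index
for channels, feature_map in zip(backbone.channels, outputs.feature_maps):
    print(channels, tuple(feature_map.shape))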
310
"""simple docstring""" import unittest from transformers import ( MODEL_FOR_OBJECT_DETECTION_MAPPING, AutoFeatureExtractor, AutoModelForObjectDetection, ObjectDetectionPipeline, is_vision_available, pipeline, ) from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_pytesseract, require_tf, require_timm, require_torch, require_vision, slow, ) from .test_pipelines_common import ANY if is_vision_available(): from PIL import Image else: class __SCREAMING_SNAKE_CASE : '''simple docstring''' @staticmethod def __SCREAMING_SNAKE_CASE ( *__a : int , **__a : int ) -> List[Any]: pass @is_pipeline_test @require_vision @require_timm @require_torch class __SCREAMING_SNAKE_CASE ( unittest.TestCase ): '''simple docstring''' SCREAMING_SNAKE_CASE__ :str = MODEL_FOR_OBJECT_DETECTION_MAPPING def __SCREAMING_SNAKE_CASE ( self : Any , __a : Union[str, Any] , __a : Optional[int] , __a : str ) -> Optional[Any]: _UpperCamelCase : List[Any] = ObjectDetectionPipeline(model=__a , image_processor=__a ) return object_detector, ["./tests/fixtures/tests_samples/COCO/000000039769.png"] def __SCREAMING_SNAKE_CASE ( self : List[str] , __a : List[Any] , __a : Union[str, Any] ) -> int: _UpperCamelCase : Any = object_detector("./tests/fixtures/tests_samples/COCO/000000039769.png" , threshold=0.0 ) self.assertGreater(len(__a ) , 0 ) for detected_object in outputs: self.assertEqual( __a , { "score": ANY(__a ), "label": ANY(__a ), "box": {"xmin": ANY(__a ), "ymin": ANY(__a ), "xmax": ANY(__a ), "ymax": ANY(__a )}, } , ) import datasets _UpperCamelCase : str = datasets.load_dataset("hf-internal-testing/fixtures_image_utils" , "image" , split="test" ) _UpperCamelCase : List[Any] = [ Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ), "http://images.cocodataset.org/val2017/000000039769.jpg", # RGBA dataset[0]["file"], # LA dataset[1]["file"], # L dataset[2]["file"], ] _UpperCamelCase : List[Any] = object_detector(__a , threshold=0.0 ) self.assertEqual(len(__a ) , len(__a ) ) for outputs in batch_outputs: self.assertGreater(len(__a ) , 0 ) for detected_object in outputs: self.assertEqual( __a , { "score": ANY(__a ), "label": ANY(__a ), "box": {"xmin": ANY(__a ), "ymin": ANY(__a ), "xmax": ANY(__a ), "ymax": ANY(__a )}, } , ) @require_tf @unittest.skip("Object detection not implemented in TF" ) def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> int: pass @require_torch def __SCREAMING_SNAKE_CASE ( self : int ) -> List[str]: _UpperCamelCase : List[str] = "hf-internal-testing/tiny-detr-mobilenetsv3" _UpperCamelCase : Optional[int] = AutoModelForObjectDetection.from_pretrained(__a ) _UpperCamelCase : str = AutoFeatureExtractor.from_pretrained(__a ) _UpperCamelCase : List[Any] = ObjectDetectionPipeline(model=__a , feature_extractor=__a ) _UpperCamelCase : int = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg" , threshold=0.0 ) self.assertEqual( nested_simplify(__a , decimals=4 ) , [ {"score": 0.33_76, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}}, {"score": 0.33_76, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}}, ] , ) _UpperCamelCase : Any = object_detector( [ "http://images.cocodataset.org/val2017/000000039769.jpg", "http://images.cocodataset.org/val2017/000000039769.jpg", ] , threshold=0.0 , ) self.assertEqual( nested_simplify(__a , decimals=4 ) , [ [ {"score": 0.33_76, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}}, {"score": 0.33_76, "label": "LABEL_0", 
"box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}}, ], [ {"score": 0.33_76, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}}, {"score": 0.33_76, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}}, ], ] , ) @require_torch @slow def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> Union[str, Any]: _UpperCamelCase : str = "facebook/detr-resnet-50" _UpperCamelCase : Union[str, Any] = AutoModelForObjectDetection.from_pretrained(__a ) _UpperCamelCase : str = AutoFeatureExtractor.from_pretrained(__a ) _UpperCamelCase : Union[str, Any] = ObjectDetectionPipeline(model=__a , feature_extractor=__a ) _UpperCamelCase : Tuple = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg" ) self.assertEqual( nested_simplify(__a , decimals=4 ) , [ {"score": 0.99_82, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}}, {"score": 0.99_60, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}}, {"score": 0.99_55, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}}, {"score": 0.99_88, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}}, {"score": 0.99_87, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}}, ] , ) _UpperCamelCase : List[str] = object_detector( [ "http://images.cocodataset.org/val2017/000000039769.jpg", "http://images.cocodataset.org/val2017/000000039769.jpg", ] ) self.assertEqual( nested_simplify(__a , decimals=4 ) , [ [ {"score": 0.99_82, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}}, {"score": 0.99_60, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}}, {"score": 0.99_55, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}}, {"score": 0.99_88, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}}, {"score": 0.99_87, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}}, ], [ {"score": 0.99_82, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}}, {"score": 0.99_60, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}}, {"score": 0.99_55, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}}, {"score": 0.99_88, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}}, {"score": 0.99_87, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}}, ], ] , ) @require_torch @slow def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> Union[str, Any]: _UpperCamelCase : Dict = "facebook/detr-resnet-50" _UpperCamelCase : Optional[Any] = pipeline("object-detection" , model=__a ) _UpperCamelCase : str = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg" ) self.assertEqual( nested_simplify(__a , decimals=4 ) , [ {"score": 0.99_82, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}}, {"score": 0.99_60, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}}, {"score": 0.99_55, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}}, {"score": 0.99_88, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}}, {"score": 0.99_87, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}}, ] , ) _UpperCamelCase : Tuple = object_detector( [ "http://images.cocodataset.org/val2017/000000039769.jpg", "http://images.cocodataset.org/val2017/000000039769.jpg", ] ) 
self.assertEqual( nested_simplify(__a , decimals=4 ) , [ [ {"score": 0.99_82, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}}, {"score": 0.99_60, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}}, {"score": 0.99_55, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}}, {"score": 0.99_88, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}}, {"score": 0.99_87, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}}, ], [ {"score": 0.99_82, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}}, {"score": 0.99_60, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}}, {"score": 0.99_55, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}}, {"score": 0.99_88, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}}, {"score": 0.99_87, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}}, ], ] , ) @require_torch @slow def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> int: _UpperCamelCase : Tuple = 0.99_85 _UpperCamelCase : List[Any] = "facebook/detr-resnet-50" _UpperCamelCase : List[str] = pipeline("object-detection" , model=__a ) _UpperCamelCase : Any = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg" , threshold=__a ) self.assertEqual( nested_simplify(__a , decimals=4 ) , [ {"score": 0.99_88, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}}, {"score": 0.99_87, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}}, ] , ) @require_torch @require_pytesseract @slow def __SCREAMING_SNAKE_CASE ( self : str ) -> Union[str, Any]: _UpperCamelCase : Optional[Any] = "Narsil/layoutlmv3-finetuned-funsd" _UpperCamelCase : int = 0.99_93 _UpperCamelCase : str = pipeline("object-detection" , model=__a , threshold=__a ) _UpperCamelCase : Union[str, Any] = object_detector( "https://huggingface.co/spaces/impira/docquery/resolve/2359223c1837a7587402bda0f2643382a6eefeab/invoice.png" ) self.assertEqual( nested_simplify(__a , decimals=4 ) , [ {"score": 0.99_93, "label": "I-ANSWER", "box": {"xmin": 294, "ymin": 254, "xmax": 343, "ymax": 264}}, {"score": 0.99_93, "label": "I-ANSWER", "box": {"xmin": 294, "ymin": 254, "xmax": 343, "ymax": 264}}, ] , )
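Outside the assertion scaffolding, the API under test reduces to a few lines. A sketch using the model id and threshold from the slow tests above:

from transformers import pipeline

# the call pattern the tests above assert against
object_detector = pipeline("object-detection", model="facebook/detr-resnet-50")
predictions = object_detector(
    "http://images.cocodataset.org/val2017/000000039769.jpg",
    threshold=0.9985,
)
for prediction in predictions:
    print(prediction["score"], prediction["label"], prediction["box"])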
310
1
import gc import random import unittest import numpy as np import torch from transformers import CLIPImageProcessor, CLIPVisionConfig, CLIPVisionModel from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEImgaImgPipeline from diffusers.pipelines.shap_e import ShapERenderer from diffusers.utils import floats_tensor, load_image, load_numpy, slow from diffusers.utils.testing_utils import require_torch_gpu, torch_device from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference class A ( A_ , unittest.TestCase ): UpperCamelCase_ : Optional[int] =ShapEImgaImgPipeline UpperCamelCase_ : int =['''image'''] UpperCamelCase_ : int =['''image'''] UpperCamelCase_ : Union[str, Any] =[ '''num_images_per_prompt''', '''num_inference_steps''', '''generator''', '''latents''', '''guidance_scale''', '''frame_size''', '''output_type''', '''return_dict''', ] UpperCamelCase_ : Optional[Any] =False @property def _A (self ): return 3_2 @property def _A (self ): return 3_2 @property def _A (self ): return self.time_input_dim * 4 @property def _A (self ): return 8 @property def _A (self ): torch.manual_seed(0 ) __lowercase= CLIPVisionConfig( hidden_size=self.text_embedder_hidden_size , image_size=6_4 , projection_dim=self.text_embedder_hidden_size , intermediate_size=3_7 , num_attention_heads=4 , num_channels=3 , num_hidden_layers=5 , patch_size=1 , ) __lowercase= CLIPVisionModel(lowerCAmelCase ) return model @property def _A (self ): __lowercase= CLIPImageProcessor( crop_size=2_2_4 , do_center_crop=lowerCAmelCase , do_normalize=lowerCAmelCase , do_resize=lowerCAmelCase , image_mean=[0.48_14_54_66, 0.4_57_82_75, 0.40_82_10_73] , image_std=[0.26_86_29_54, 0.26_13_02_58, 0.27_57_77_11] , resample=3 , size=2_2_4 , ) return image_processor @property def _A (self ): torch.manual_seed(0 ) __lowercase= { 'num_attention_heads': 2, 'attention_head_dim': 1_6, 'embedding_dim': self.time_input_dim, 'num_embeddings': 3_2, 'embedding_proj_dim': self.text_embedder_hidden_size, 'time_embed_dim': self.time_embed_dim, 'num_layers': 1, 'clip_embed_dim': self.time_input_dim * 2, 'additional_embeddings': 0, 'time_embed_act_fn': 'gelu', 'norm_in_type': 'layer', 'embedding_proj_norm_type': 'layer', 'encoder_hid_proj_type': None, 'added_emb_type': None, } __lowercase= PriorTransformer(**lowerCAmelCase ) return model @property def _A (self ): torch.manual_seed(0 ) __lowercase= { 'param_shapes': ( (self.renderer_dim, 9_3), (self.renderer_dim, 8), (self.renderer_dim, 8), (self.renderer_dim, 8), ), 'd_latent': self.time_input_dim, 'd_hidden': self.renderer_dim, 'n_output': 1_2, 'background': ( 0.1, 0.1, 0.1, ), } __lowercase= ShapERenderer(**lowerCAmelCase ) return model def _A (self ): __lowercase= self.dummy_prior __lowercase= self.dummy_image_encoder __lowercase= self.dummy_image_processor __lowercase= self.dummy_renderer __lowercase= HeunDiscreteScheduler( beta_schedule='exp' , num_train_timesteps=1_0_2_4 , prediction_type='sample' , use_karras_sigmas=lowerCAmelCase , clip_sample=lowerCAmelCase , clip_sample_range=1.0 , ) __lowercase= { 'prior': prior, 'image_encoder': image_encoder, 'image_processor': image_processor, 'renderer': renderer, 'scheduler': scheduler, } return components def _A (self , lowerCAmelCase , lowerCAmelCase=0 ): __lowercase= floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(lowerCAmelCase ) ).to(lowerCAmelCase ) if str(lowerCAmelCase ).startswith('mps' ): __lowercase= torch.manual_seed(lowerCAmelCase ) else: __lowercase= torch.Generator(device=lowerCAmelCase 
).manual_seed(lowerCAmelCase ) __lowercase= { 'image': input_image, 'generator': generator, 'num_inference_steps': 1, 'frame_size': 3_2, 'output_type': 'np', } return inputs def _A (self ): __lowercase= 'cpu' __lowercase= self.get_dummy_components() __lowercase= self.pipeline_class(**lowerCAmelCase ) __lowercase= pipe.to(lowerCAmelCase ) pipe.set_progress_bar_config(disable=lowerCAmelCase ) __lowercase= pipe(**self.get_dummy_inputs(lowerCAmelCase ) ) __lowercase= output.images[0] __lowercase= image[0, -3:, -3:, -1] assert image.shape == (2_0, 3_2, 3_2, 3) __lowercase= np.array( [ 0.00_03_92_16, 0.00_03_92_16, 0.00_03_92_16, 0.00_03_92_16, 0.00_03_92_16, 0.00_03_92_16, 0.00_03_92_16, 0.00_03_92_16, 0.00_03_92_16, ] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 def _A (self ): # NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches self._test_inference_batch_consistent(batch_sizes=[1, 2] ) def _A (self ): __lowercase= torch_device == 'cpu' __lowercase= True self._test_inference_batch_single_identical( batch_size=2 , test_max_difference=lowerCAmelCase , relax_max_difference=lowerCAmelCase , ) def _A (self ): __lowercase= self.get_dummy_components() __lowercase= self.pipeline_class(**lowerCAmelCase ) __lowercase= pipe.to(lowerCAmelCase ) pipe.set_progress_bar_config(disable=lowerCAmelCase ) __lowercase= 1 __lowercase= 2 __lowercase= self.get_dummy_inputs(lowerCAmelCase ) for key in inputs.keys(): if key in self.batch_params: __lowercase= batch_size * [inputs[key]] __lowercase= pipe(**lowerCAmelCase , num_images_per_prompt=lowerCAmelCase )[0] assert images.shape[0] == batch_size * num_images_per_prompt @slow @require_torch_gpu class A ( unittest.TestCase ): def _A (self ): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def _A (self ): __lowercase= load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/shap_e/corgi.png' ) __lowercase= load_numpy( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/shap_e/test_shap_e_img2img_out.npy' ) __lowercase= ShapEImgaImgPipeline.from_pretrained('openai/shap-e-img2img' ) __lowercase= pipe.to(lowerCAmelCase ) pipe.set_progress_bar_config(disable=lowerCAmelCase ) __lowercase= torch.Generator(device=lowerCAmelCase ).manual_seed(0 ) __lowercase= pipe( lowerCAmelCase , generator=lowerCAmelCase , guidance_scale=3.0 , num_inference_steps=6_4 , frame_size=6_4 , output_type='np' , ).images[0] assert images.shape == (2_0, 6_4, 6_4, 3) assert_mean_pixel_difference(lowerCAmelCase , lowerCAmelCase )
295
import unittest from transformers import is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST, OpenAIGPTConfig, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification, OpenAIGPTLMHeadModel, OpenAIGPTModel, ) class A : def __init__(self , lowerCAmelCase , lowerCAmelCase=1_3 , lowerCAmelCase=7 , lowerCAmelCase=True , lowerCAmelCase=True , lowerCAmelCase=True , lowerCAmelCase=9_9 , lowerCAmelCase=3_2 , lowerCAmelCase=5 , lowerCAmelCase=4 , lowerCAmelCase=3_7 , lowerCAmelCase="gelu" , lowerCAmelCase=0.1 , lowerCAmelCase=0.1 , lowerCAmelCase=5_1_2 , lowerCAmelCase=1_6 , lowerCAmelCase=2 , lowerCAmelCase=0.02 , lowerCAmelCase=3 , lowerCAmelCase=4 , lowerCAmelCase=None , ): __lowercase= parent __lowercase= batch_size __lowercase= seq_length __lowercase= is_training __lowercase= use_token_type_ids __lowercase= use_labels __lowercase= vocab_size __lowercase= hidden_size __lowercase= num_hidden_layers __lowercase= num_attention_heads __lowercase= intermediate_size __lowercase= hidden_act __lowercase= hidden_dropout_prob __lowercase= attention_probs_dropout_prob __lowercase= max_position_embeddings __lowercase= type_vocab_size __lowercase= type_sequence_label_size __lowercase= initializer_range __lowercase= num_labels __lowercase= num_choices __lowercase= scope __lowercase= self.vocab_size - 1 def _A (self ): __lowercase= ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __lowercase= None if self.use_token_type_ids: __lowercase= ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) __lowercase= None __lowercase= None __lowercase= None if self.use_labels: __lowercase= ids_tensor([self.batch_size] , self.type_sequence_label_size ) __lowercase= ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) __lowercase= ids_tensor([self.batch_size] , self.num_choices ) __lowercase= OpenAIGPTConfig( vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , pad_token_id=self.pad_token_id , ) __lowercase= ids_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 ) return ( config, input_ids, head_mask, token_type_ids, sequence_labels, token_labels, choice_labels, ) def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , *lowerCAmelCase ): __lowercase= OpenAIGPTModel(config=lowerCAmelCase ) model.to(lowerCAmelCase ) model.eval() __lowercase= model(lowerCAmelCase , token_type_ids=lowerCAmelCase , head_mask=lowerCAmelCase ) __lowercase= model(lowerCAmelCase , token_type_ids=lowerCAmelCase ) __lowercase= model(lowerCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , *lowerCAmelCase ): __lowercase= OpenAIGPTLMHeadModel(lowerCAmelCase ) model.to(lowerCAmelCase ) model.eval() __lowercase= model(lowerCAmelCase , token_type_ids=lowerCAmelCase , labels=lowerCAmelCase ) self.parent.assertEqual(result.loss.shape , () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) 
) def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , *lowerCAmelCase ): __lowercase= OpenAIGPTDoubleHeadsModel(lowerCAmelCase ) model.to(lowerCAmelCase ) model.eval() __lowercase= model(lowerCAmelCase , token_type_ids=lowerCAmelCase , labels=lowerCAmelCase ) self.parent.assertEqual(result.loss.shape , () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , *lowerCAmelCase ): __lowercase= self.num_labels __lowercase= OpenAIGPTForSequenceClassification(lowerCAmelCase ) model.to(lowerCAmelCase ) model.eval() __lowercase= ids_tensor([self.batch_size] , self.type_sequence_label_size ) __lowercase= model(lowerCAmelCase , token_type_ids=lowerCAmelCase , labels=lowerCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def _A (self ): __lowercase= self.prepare_config_and_inputs() ( ( __lowercase ), ( __lowercase ), ( __lowercase ), ( __lowercase ), ( __lowercase ), ( __lowercase ), ( __lowercase ), )= config_and_inputs __lowercase= { 'input_ids': input_ids, 'token_type_ids': token_type_ids, 'head_mask': head_mask, } return config, inputs_dict @require_torch class A ( A_ , A_ , A_ , unittest.TestCase ): UpperCamelCase_ : Optional[Any] =( (OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification) if is_torch_available() else () ) UpperCamelCase_ : Tuple =( (OpenAIGPTLMHeadModel,) if is_torch_available() else () ) # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly UpperCamelCase_ : List[str] =( { '''feature-extraction''': OpenAIGPTModel, '''text-classification''': OpenAIGPTForSequenceClassification, '''text-generation''': OpenAIGPTLMHeadModel, '''zero-shot''': OpenAIGPTForSequenceClassification, } if is_torch_available() else {} ) def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ): if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests": # Get `tokenizer does not have a padding token` error for both fast/slow tokenizers. # `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a # tiny config could not be created. 
return True return False def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase=False ): __lowercase= super()._prepare_for_class(lowerCAmelCase , lowerCAmelCase , return_labels=lowerCAmelCase ) if return_labels: if model_class.__name__ == "OpenAIGPTDoubleHeadsModel": __lowercase= torch.zeros( (self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length) , dtype=torch.long , device=lowerCAmelCase , ) __lowercase= inputs_dict['labels'] __lowercase= inputs_dict['labels'] __lowercase= torch.zeros( (self.model_tester.batch_size, self.model_tester.num_choices) , dtype=torch.long , device=lowerCAmelCase , ) __lowercase= torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=lowerCAmelCase ) return inputs_dict def _A (self ): __lowercase= OpenAIGPTModelTester(self ) __lowercase= ConfigTester(self , config_class=lowerCAmelCase , n_embd=3_7 ) def _A (self ): self.config_tester.run_common_tests() def _A (self ): __lowercase= self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_openai_gpt_model(*lowerCAmelCase ) def _A (self ): __lowercase= self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_lm_head_model(*lowerCAmelCase ) def _A (self ): __lowercase= self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_double_lm_head_model(*lowerCAmelCase ) def _A (self ): __lowercase= self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*lowerCAmelCase ) @slow def _A (self ): for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __lowercase= OpenAIGPTModel.from_pretrained(lowerCAmelCase ) self.assertIsNotNone(lowerCAmelCase ) @require_torch class A ( unittest.TestCase ): @slow def _A (self ): __lowercase= OpenAIGPTLMHeadModel.from_pretrained('openai-gpt' ) model.to(lowerCAmelCase ) __lowercase= torch.tensor([[4_8_1, 4_7_3_5, 5_4_4]] , dtype=torch.long , device=lowerCAmelCase ) # the president is __lowercase= [ 4_8_1, 4_7_3_5, 5_4_4, 2_4_6, 9_6_3, 8_7_0, 7_6_2, 2_3_9, 2_4_4, 4_0_4_7_7, 2_4_4, 2_4_9, 7_1_9, 8_8_1, 4_8_7, 5_4_4, 2_4_0, 2_4_4, 6_0_3, 4_8_1, ] # the president is a very good man. " \n " i\'m sure he is, " said the __lowercase= model.generate(lowerCAmelCase , do_sample=lowerCAmelCase ) self.assertListEqual(output_ids[0].tolist() , lowerCAmelCase )
295
1
"""simple docstring""" from dataclasses import dataclass, field from typing import Tuple from ..utils import cached_property, is_tf_available, logging, requires_backends from .benchmark_args_utils import BenchmarkArguments if is_tf_available(): import tensorflow as tf UpperCAmelCase: Any = logging.get_logger(__name__) @dataclass class UpperCamelCase ( snake_case ): """simple docstring""" SCREAMING_SNAKE_CASE_ : str = [ "no_inference", "no_cuda", "no_tpu", "no_speed", "no_memory", "no_env_print", "no_multi_process", ] def __init__( self ,**UpperCAmelCase_ ): for deprecated_arg in self.deprecated_args: if deprecated_arg in kwargs: _lowercase : str = deprecated_arg[3:] _lowercase : Optional[int] = not kwargs.pop(UpperCAmelCase_ ) logger.warning( f"""{deprecated_arg} is depreciated. Please use --no-{positive_arg} or""" f""" {positive_arg}={kwargs[positive_arg]}""" ) _lowercase : List[Any] = kwargs.pop("""tpu_name""" ,self.tpu_name ) _lowercase : Optional[Any] = kwargs.pop("""device_idx""" ,self.device_idx ) _lowercase : Dict = kwargs.pop("""eager_mode""" ,self.eager_mode ) _lowercase : List[str] = kwargs.pop("""use_xla""" ,self.use_xla ) super().__init__(**UpperCAmelCase_ ) SCREAMING_SNAKE_CASE_ : str = field( default=snake_case , metadata={"help": "Name of TPU"} , ) SCREAMING_SNAKE_CASE_ : int = field( default=0 , metadata={"help": "CPU / GPU device index. Defaults to 0."} , ) SCREAMING_SNAKE_CASE_ : bool = field(default=snake_case , metadata={"help": "Benchmark models in eager model."} ) SCREAMING_SNAKE_CASE_ : bool = field( default=snake_case , metadata={ "help": "Benchmark models using XLA JIT compilation. Note that `eager_model` has to be set to `False`." } , ) @cached_property def lowerCamelCase__ ( self ): requires_backends(self ,["""tf"""] ) _lowercase : Union[str, Any] = None if self.tpu: try: if self.tpu_name: _lowercase : List[str] = tf.distribute.cluster_resolver.TPUClusterResolver(self.tpu_name ) else: _lowercase : int = tf.distribute.cluster_resolver.TPUClusterResolver() except ValueError: _lowercase : Optional[int] = None return tpu @cached_property def lowerCamelCase__ ( self ): requires_backends(self ,["""tf"""] ) if self.is_tpu: tf.config.experimental_connect_to_cluster(self._setup_tpu ) tf.tpu.experimental.initialize_tpu_system(self._setup_tpu ) _lowercase : Union[str, Any] = tf.distribute.TPUStrategy(self._setup_tpu ) else: # currently no multi gpu is allowed if self.is_gpu: # TODO: Currently only single GPU is supported tf.config.set_visible_devices(self.gpu_list[self.device_idx] ,"""GPU""" ) _lowercase : str = tf.distribute.OneDeviceStrategy(device=f"""/gpu:{self.device_idx}""" ) else: tf.config.set_visible_devices([] ,"""GPU""" ) # disable GPU _lowercase : Union[str, Any] = tf.distribute.OneDeviceStrategy(device=f"""/cpu:{self.device_idx}""" ) return strategy @property def lowerCamelCase__ ( self ): requires_backends(self ,["""tf"""] ) return self._setup_tpu is not None @property def lowerCamelCase__ ( self ): requires_backends(self ,["""tf"""] ) return self._setup_strategy @property def lowerCamelCase__ ( self ): requires_backends(self ,["""tf"""] ) return tf.config.list_physical_devices("""GPU""" ) @property def lowerCamelCase__ ( self ): requires_backends(self ,["""tf"""] ) if self.cuda: return len(self.gpu_list ) return 0 @property def lowerCamelCase__ ( self ): return self.n_gpu > 0
336
"""simple docstring""" import json import os import shutil import tempfile import unittest import numpy as np import pytest from transformers import CLIPTokenizer, CLIPTokenizerFast from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES from transformers.testing_utils import require_vision from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available if is_vision_available(): from PIL import Image from transformers import CLIPImageProcessor, CLIPProcessor @require_vision class UpperCamelCase ( unittest.TestCase ): """simple docstring""" def lowerCamelCase__ ( self ): _lowercase : str = tempfile.mkdtemp() # fmt: off _lowercase : List[Any] = ["""l""", """o""", """w""", """e""", """r""", """s""", """t""", """i""", """d""", """n""", """lo""", """l</w>""", """w</w>""", """r</w>""", """t</w>""", """low</w>""", """er</w>""", """lowest</w>""", """newer</w>""", """wider""", """<unk>""", """<|startoftext|>""", """<|endoftext|>"""] # fmt: on _lowercase : Optional[int] = dict(zip(UpperCAmelCase_ ,range(len(UpperCAmelCase_ ) ) ) ) _lowercase : Optional[int] = ["""#version: 0.2""", """l o""", """lo w</w>""", """e r</w>""", """"""] _lowercase : Optional[int] = {"""unk_token""": """<unk>"""} _lowercase : List[str] = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES["""vocab_file"""] ) _lowercase : str = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES["""merges_file"""] ) with open(self.vocab_file ,"""w""" ,encoding="""utf-8""" ) as fp: fp.write(json.dumps(UpperCAmelCase_ ) + """\n""" ) with open(self.merges_file ,"""w""" ,encoding="""utf-8""" ) as fp: fp.write("""\n""".join(UpperCAmelCase_ ) ) _lowercase : Dict = { """do_resize""": True, """size""": 20, """do_center_crop""": True, """crop_size""": 18, """do_normalize""": True, """image_mean""": [0.48145466, 0.4578275, 0.40821073], """image_std""": [0.26862954, 0.26130258, 0.27577711], } _lowercase : List[Any] = os.path.join(self.tmpdirname ,UpperCAmelCase_ ) with open(self.image_processor_file ,"""w""" ,encoding="""utf-8""" ) as fp: json.dump(UpperCAmelCase_ ,UpperCAmelCase_ ) def lowerCamelCase__ ( self ,**UpperCAmelCase_ ): return CLIPTokenizer.from_pretrained(self.tmpdirname ,**UpperCAmelCase_ ) def lowerCamelCase__ ( self ,**UpperCAmelCase_ ): return CLIPTokenizerFast.from_pretrained(self.tmpdirname ,**UpperCAmelCase_ ) def lowerCamelCase__ ( self ,**UpperCAmelCase_ ): return CLIPImageProcessor.from_pretrained(self.tmpdirname ,**UpperCAmelCase_ ) def lowerCamelCase__ ( self ): shutil.rmtree(self.tmpdirname ) def lowerCamelCase__ ( self ): _lowercase : Optional[Any] = [np.random.randint(2_55 ,size=(3, 30, 4_00) ,dtype=np.uinta )] _lowercase : Tuple = [Image.fromarray(np.moveaxis(UpperCAmelCase_ ,0 ,-1 ) ) for x in image_inputs] return image_inputs def lowerCamelCase__ ( self ): _lowercase : Union[str, Any] = self.get_tokenizer() _lowercase : List[Any] = self.get_rust_tokenizer() _lowercase : List[Any] = self.get_image_processor() _lowercase : List[str] = CLIPProcessor(tokenizer=UpperCAmelCase_ ,image_processor=UpperCAmelCase_ ) processor_slow.save_pretrained(self.tmpdirname ) _lowercase : Optional[int] = CLIPProcessor.from_pretrained(self.tmpdirname ,use_fast=UpperCAmelCase_ ) _lowercase : Union[str, Any] = CLIPProcessor(tokenizer=UpperCAmelCase_ ,image_processor=UpperCAmelCase_ ) processor_fast.save_pretrained(self.tmpdirname ) _lowercase : List[str] = CLIPProcessor.from_pretrained(self.tmpdirname ) self.assertEqual(processor_slow.tokenizer.get_vocab() ,tokenizer_slow.get_vocab() ) 
self.assertEqual(processor_fast.tokenizer.get_vocab() ,tokenizer_fast.get_vocab() ) self.assertEqual(tokenizer_slow.get_vocab() ,tokenizer_fast.get_vocab() ) self.assertIsInstance(processor_slow.tokenizer ,UpperCAmelCase_ ) self.assertIsInstance(processor_fast.tokenizer ,UpperCAmelCase_ ) self.assertEqual(processor_slow.image_processor.to_json_string() ,image_processor.to_json_string() ) self.assertEqual(processor_fast.image_processor.to_json_string() ,image_processor.to_json_string() ) self.assertIsInstance(processor_slow.image_processor ,UpperCAmelCase_ ) self.assertIsInstance(processor_fast.image_processor ,UpperCAmelCase_ ) def lowerCamelCase__ ( self ): _lowercase : str = CLIPProcessor(tokenizer=self.get_tokenizer() ,image_processor=self.get_image_processor() ) processor.save_pretrained(self.tmpdirname ) _lowercase : Union[str, Any] = self.get_tokenizer(bos_token="""(BOS)""" ,eos_token="""(EOS)""" ) _lowercase : Optional[int] = self.get_image_processor(do_normalize=UpperCAmelCase_ ,padding_value=1.0 ) _lowercase : int = CLIPProcessor.from_pretrained( self.tmpdirname ,bos_token="""(BOS)""" ,eos_token="""(EOS)""" ,do_normalize=UpperCAmelCase_ ,padding_value=1.0 ) self.assertEqual(processor.tokenizer.get_vocab() ,tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer ,UpperCAmelCase_ ) self.assertEqual(processor.image_processor.to_json_string() ,image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor ,UpperCAmelCase_ ) def lowerCamelCase__ ( self ): _lowercase : Optional[Any] = self.get_image_processor() _lowercase : Optional[int] = self.get_tokenizer() _lowercase : Union[str, Any] = CLIPProcessor(tokenizer=UpperCAmelCase_ ,image_processor=UpperCAmelCase_ ) _lowercase : int = self.prepare_image_inputs() _lowercase : str = image_processor(UpperCAmelCase_ ,return_tensors="""np""" ) _lowercase : int = processor(images=UpperCAmelCase_ ,return_tensors="""np""" ) for key in input_image_proc.keys(): self.assertAlmostEqual(input_image_proc[key].sum() ,input_processor[key].sum() ,delta=1E-2 ) def lowerCamelCase__ ( self ): _lowercase : Optional[Any] = self.get_image_processor() _lowercase : Optional[Any] = self.get_tokenizer() _lowercase : Union[str, Any] = CLIPProcessor(tokenizer=UpperCAmelCase_ ,image_processor=UpperCAmelCase_ ) _lowercase : List[Any] = """lower newer""" _lowercase : Any = processor(text=UpperCAmelCase_ ) _lowercase : Union[str, Any] = tokenizer(UpperCAmelCase_ ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] ,encoded_processor[key] ) def lowerCamelCase__ ( self ): _lowercase : Union[str, Any] = self.get_image_processor() _lowercase : List[Any] = self.get_tokenizer() _lowercase : List[Any] = CLIPProcessor(tokenizer=UpperCAmelCase_ ,image_processor=UpperCAmelCase_ ) _lowercase : str = """lower newer""" _lowercase : List[Any] = self.prepare_image_inputs() _lowercase : Optional[int] = processor(text=UpperCAmelCase_ ,images=UpperCAmelCase_ ) self.assertListEqual(list(inputs.keys() ) ,["""input_ids""", """attention_mask""", """pixel_values"""] ) # test if it raises when no input is passed with pytest.raises(UpperCAmelCase_ ): processor() def lowerCamelCase__ ( self ): _lowercase : Dict = self.get_image_processor() _lowercase : List[Any] = self.get_tokenizer() _lowercase : Union[str, Any] = CLIPProcessor(tokenizer=UpperCAmelCase_ ,image_processor=UpperCAmelCase_ ) _lowercase : Dict = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] _lowercase : int = processor.batch_decode(UpperCAmelCase_ ) _lowercase : 
Tuple = tokenizer.batch_decode(UpperCAmelCase_ ) self.assertListEqual(UpperCAmelCase_ ,UpperCAmelCase_ ) def lowerCamelCase__ ( self ): _lowercase : Optional[Any] = self.get_image_processor() _lowercase : List[Any] = self.get_tokenizer() _lowercase : List[Any] = CLIPProcessor(tokenizer=UpperCAmelCase_ ,image_processor=UpperCAmelCase_ ) _lowercase : Optional[Any] = """lower newer""" _lowercase : Any = self.prepare_image_inputs() _lowercase : Optional[int] = processor(text=UpperCAmelCase_ ,images=UpperCAmelCase_ ) self.assertListEqual(list(inputs.keys() ) ,processor.model_input_names )
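Outside the test harness, the processor combines tokenizer and image processor in one call. A minimal sketch against a public checkpoint (any RGB image works):

import numpy as np
from PIL import Image
from transformers import CLIPProcessor

processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")

# random RGB image, mirroring the fixtures the tests above build
image = Image.fromarray(np.random.randint(255, size=(30, 400, 3), dtype=np.uint8))
inputs = processor(text="lower newer", images=image, return_tensors="np")
print(sorted(inputs.keys()))  # ['attention_mask', 'input_ids', 'pixel_values']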
336
1
ROMAN = [
    (1000, "M"),
    (900, "CM"),
    (500, "D"),
    (400, "CD"),
    (100, "C"),
    (90, "XC"),
    (50, "L"),
    (40, "XL"),
    (10, "X"),
    (9, "IX"),
    (5, "V"),
    (4, "IV"),
    (1, "I"),
]


def roman_to_int(roman):
    vals = {"I": 1, "V": 5, "X": 10, "L": 50, "C": 100, "D": 500, "M": 1000}
    total = 0
    place = 0
    while place < len(roman):
        # a smaller value before a larger one means a subtractive pair (e.g. IV)
        if (place + 1 < len(roman)) and (vals[roman[place]] < vals[roman[place + 1]]):
            total += vals[roman[place + 1]] - vals[roman[place]]
            place += 2
        else:
            total += vals[roman[place]]
            place += 1
    return total


def int_to_roman(number):
    result = []
    for arabic, roman in ROMAN:
        (factor, number) = divmod(number, arabic)
        result.append(roman * factor)
        if number == 0:
            break
    return "".join(result)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
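A quick round-trip sanity check of the two converters (assuming the names roman_to_int / int_to_roman used above; every value should survive encode-then-decode):

# round-trip check: int -> roman -> int must be the identity
if __name__ == "__main__":
    for n in (14, 90, 2021, 3999):
        encoded = int_to_roman(n)
        assert roman_to_int(encoded) == n, (n, encoded)
    print("round-trip OK")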
139
# Logistic Regression from scratch
import numpy as np
from matplotlib import pyplot as plt
from sklearn import datasets


def sigmoid_function(z):
    return 1 / (1 + np.exp(-z))


def cost_function(h, y):
    return (-y * np.log(h) - (1 - y) * np.log(1 - h)).mean()


def log_likelihood(x, y, weights):
    scores = np.dot(x, weights)
    return np.sum(y * scores - np.log(1 + np.exp(scores)))


def logistic_reg(alpha, x, y, max_iterations=70000):
    theta = np.zeros(x.shape[1])

    for iterations in range(max_iterations):
        z = np.dot(x, theta)
        h = sigmoid_function(z)
        gradient = np.dot(x.T, h - y) / y.size
        theta = theta - alpha * gradient  # updating the weights
        z = np.dot(x, theta)
        h = sigmoid_function(z)
        j = cost_function(h, y)
        if iterations % 100 == 0:
            print(f"loss: {j} \t")  # printing the loss after every 100 iterations
    return theta


if __name__ == "__main__":
    iris = datasets.load_iris()
    x = iris.data[:, :2]
    y = (iris.target != 0) * 1
    alpha = 0.1
    theta = logistic_reg(alpha, x, y, max_iterations=70000)
    print("theta: ", theta)  # printing the theta i.e our weights vector

    def predict_prob(x):
        # predicting the value of probability from the logistic regression algorithm
        return sigmoid_function(np.dot(x, theta))

    plt.figure(figsize=(10, 6))
    plt.scatter(x[y == 0][:, 0], x[y == 0][:, 1], color="b", label="0")
    plt.scatter(x[y == 1][:, 0], x[y == 1][:, 1], color="r", label="1")
    (x1_min, x1_max) = (x[:, 0].min(), x[:, 0].max())
    (x2_min, x2_max) = (x[:, 1].min(), x[:, 1].max())
    (xx1, xx2) = np.meshgrid(np.linspace(x1_min, x1_max), np.linspace(x2_min, x2_max))
    grid = np.c_[xx1.ravel(), xx2.ravel()]
    probs = predict_prob(grid).reshape(xx1.shape)
    plt.contour(xx1, xx2, probs, [0.5], linewidths=1, colors="black")
    plt.legend()
    plt.show()
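The update implemented above is plain batch gradient descent on the cross-entropy loss: theta <- theta - alpha * X^T (sigmoid(X theta) - y) / m. A tiny standalone check on synthetic data (all names here are ours):

import numpy as np

# minimal check of the gradient-descent update rule used above
rng = np.random.default_rng(0)
x = rng.normal(size=(200, 2))
y = (x[:, 0] + x[:, 1] > 0).astype(float)  # linearly separable labels

theta = np.zeros(2)
for _ in range(5_000):
    h = 1 / (1 + np.exp(-x @ theta))
    theta -= 0.1 * x.T @ (h - y) / y.size

preds = (1 / (1 + np.exp(-x @ theta)) > 0.5).astype(float)
print("train accuracy:", (preds == y).mean())  # should be close to 1.0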
139
1
from datetime import datetime

import matplotlib.pyplot as plt
import torch


def freeze_params(module):
    # disable gradient tracking for every parameter of the module
    for param in module.parameters():
        param.requires_grad = False


def get_device():
    device = "cuda" if torch.cuda.is_available() else "cpu"
    if torch.backends.mps.is_available() and torch.backends.mps.is_built():
        device = "mps"
    if device == "mps":
        print(
            "WARNING: MPS currently doesn't seem to work, and messes up backpropagation without any visible torch"
            " errors. I recommend using CUDA on a colab notebook or CPU instead if you're facing inexplicable issues"
            " with generations."
        )
    return device


def show_image(image):
    fig = plt.imshow(image)
    fig.axes.get_xaxis().set_visible(False)
    fig.axes.get_yaxis().set_visible(False)
    plt.show()


def get_timestamp():
    current_time = datetime.now()
    timestamp = current_time.strftime("%H:%M:%S")
    return timestamp
366
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available


_import_structure = {
    "configuration_bloom": ["BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP", "BloomConfig", "BloomOnnxConfig"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_bloom_fast"] = ["BloomTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_bloom"] = [
        "BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BloomForCausalLM",
        "BloomModel",
        "BloomPreTrainedModel",
        "BloomForSequenceClassification",
        "BloomForTokenClassification",
        "BloomForQuestionAnswering",
    ]

if TYPE_CHECKING:
    from .configuration_bloom import BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP, BloomConfig, BloomOnnxConfig

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_bloom_fast import BloomTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_bloom import (
            BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST,
            BloomForCausalLM,
            BloomForQuestionAnswering,
            BloomForSequenceClassification,
            BloomForTokenClassification,
            BloomModel,
            BloomPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
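`_LazyModule` keeps the top-level import cheap by resolving the entries of `_import_structure` only when an attribute is first touched. A minimal toy version of that pattern (our own sketch, not the actual transformers implementation):

import importlib
import types


class LazyModule(types.ModuleType):
    """Toy lazy module: resolves submodule attributes on first access."""

    def __init__(self, name, import_structure):
        super().__init__(name)
        self._import_structure = import_structure
        # map each exported symbol to the submodule that defines it
        self._symbol_to_module = {
            symbol: module for module, symbols in import_structure.items() for symbol in symbols
        }

    def __getattr__(self, attr):
        if attr not in self._symbol_to_module:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        module = importlib.import_module(self._symbol_to_module[attr])
        value = getattr(module, attr)
        setattr(self, attr, value)  # cache for subsequent lookups
        return value


# usage: nothing from `json` is resolved until `lazy.dumps` is touched
lazy = LazyModule("demo", {"json": ["dumps", "loads"]})
print(lazy.dumps({"ok": True}))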
235
0
def triangle_number_generator():
    for n in range(1, 1_000_000):
        yield n * (n + 1) // 2


def count_divisors(n):
    # d(p1^a1 * ... * pk^ak) = (a1 + 1) * ... * (ak + 1)
    divisors_count = 1
    i = 2
    while i * i <= n:
        multiplicity = 0
        while n % i == 0:
            n //= i
            multiplicity += 1
        divisors_count *= multiplicity + 1
        i += 1
    if n > 1:
        divisors_count *= 2
    return divisors_count


def solution():
    return next(i for i in triangle_number_generator() if count_divisors(i) > 500)


if __name__ == "__main__":
    print(solution())
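The divisor count above uses the multiplicative formula: if n = p1^a1 * ... * pk^ak, then d(n) = (a1 + 1)...(ak + 1); for example 28 = 2^2 * 7 has (2 + 1)(1 + 1) = 6 divisors. A brute-force cross-check (assuming the count_divisors name used above):

# brute-force cross-check of the factorisation-based divisor count
def count_divisors_naive(n):
    return sum(1 for d in range(1, n + 1) if n % d == 0)


for n in (1, 6, 28, 100, 5_040):
    assert count_divisors(n) == count_divisors_naive(n), n
print("divisor counts agree")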
149
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available


_import_structure = {"configuration_yolos": ["YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP", "YolosConfig", "YolosOnnxConfig"]}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_yolos"] = ["YolosFeatureExtractor"]
    _import_structure["image_processing_yolos"] = ["YolosImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_yolos"] = [
        "YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST",
        "YolosForObjectDetection",
        "YolosModel",
        "YolosPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_yolos import YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP, YolosConfig, YolosOnnxConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_yolos import YolosFeatureExtractor
        from .image_processing_yolos import YolosImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_yolos import (
            YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST,
            YolosForObjectDetection,
            YolosModel,
            YolosPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
149
1
"""simple docstring""" import json import os import re import sys import urllib.request import requests from bsa import BeautifulSoup _lowerCAmelCase : Optional[int] = { "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36" " (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582" } def __snake_case ( SCREAMING_SNAKE_CASE__ : Dict = "dhaka" , SCREAMING_SNAKE_CASE__ : List[str] = 5 ) -> List[Any]: '''simple docstring''' _UpperCAmelCase : List[str] = min(_UpperCAmelCase , 50 ) # Prevent abuse! _UpperCAmelCase : Dict = { "q": query, "tbm": "isch", "hl": "en", "ijn": "0", } _UpperCAmelCase : Optional[Any] = requests.get("https://www.google.com/search" , params=_UpperCAmelCase , headers=_UpperCAmelCase ) _UpperCAmelCase : Union[str, Any] = BeautifulSoup(html.text , "html.parser" ) _UpperCAmelCase : Union[str, Any] = "".join( re.findall(R"AF_initDataCallback\(([^<]+)\);" , str(soup.select("script" ) ) ) ) _UpperCAmelCase : Optional[Any] = json.dumps(_UpperCAmelCase ) _UpperCAmelCase : int = json.loads(_UpperCAmelCase ) _UpperCAmelCase : List[Any] = re.findall( R"\[\"GRID_STATE0\",null,\[\[1,\[0,\".*?\",(.*),\"All\"," , _UpperCAmelCase , ) if not matched_google_image_data: return 0 _UpperCAmelCase : Union[str, Any] = re.sub( R"\[\"(https\:\/\/encrypted-tbn0\.gstatic\.com\/images\?.*?)\",\d+,\d+\]" , "" , str(_UpperCAmelCase ) , ) _UpperCAmelCase : Union[str, Any] = re.findall( R"(?:'|,),\[\"(https:|http.*?)\",\d+,\d+\]" , _UpperCAmelCase , ) for index, fixed_full_res_image in enumerate(_UpperCAmelCase ): if index >= max_images: return index _UpperCAmelCase : Any = bytes(_UpperCAmelCase , "ascii" ).decode( "unicode-escape" ) _UpperCAmelCase : Optional[Any] = bytes(_UpperCAmelCase , "ascii" ).decode( "unicode-escape" ) _UpperCAmelCase : Optional[int] = urllib.request.build_opener() _UpperCAmelCase : List[Any] = [ ( "User-Agent", "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36" " (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582", ) ] urllib.request.install_opener(_UpperCAmelCase ) _UpperCAmelCase : List[str] = f'query_{query.replace(" " , "_" )}' if not os.path.exists(_UpperCAmelCase ): os.makedirs(_UpperCAmelCase ) urllib.request.urlretrieve( # noqa: S310 _UpperCAmelCase , f'{path_name}/original_size_img_{index}.jpg' ) return index if __name__ == "__main__": try: _lowerCAmelCase : Dict = download_images_from_google_query(sys.argv[1]) print(F"{image_count} images were downloaded to disk.") except IndexError: print("Please provide a search term.") raise
365
"""simple docstring""" import logging import os from dataclasses import dataclass from enum import Enum from typing import List, Optional, Union from filelock import FileLock from transformers import PreTrainedTokenizer, is_tf_available, is_torch_available _lowerCAmelCase : Any = logging.getLogger(__name__) @dataclass class UpperCAmelCase_ : __SCREAMING_SNAKE_CASE : str __SCREAMING_SNAKE_CASE : List[str] __SCREAMING_SNAKE_CASE : Optional[List[str]] @dataclass class UpperCAmelCase_ : __SCREAMING_SNAKE_CASE : List[int] __SCREAMING_SNAKE_CASE : List[int] __SCREAMING_SNAKE_CASE : Optional[List[int]] = None __SCREAMING_SNAKE_CASE : Optional[List[int]] = None class UpperCAmelCase_ ( _UpperCamelCase ): __SCREAMING_SNAKE_CASE : Union[str, Any] = 'train' __SCREAMING_SNAKE_CASE : Tuple = 'dev' __SCREAMING_SNAKE_CASE : Optional[int] = 'test' class UpperCAmelCase_ : @staticmethod def snake_case_ ( A : Union[str, Any] , A : Union[Split, str] ): raise NotImplementedError @staticmethod def snake_case_ ( A : str ): raise NotImplementedError @staticmethod def snake_case_ ( A : List[InputExample] , A : List[str] , A : int , A : PreTrainedTokenizer , A : Optional[int]=False , A : List[str]="[CLS]" , A : List[Any]=1 , A : str="[SEP]" , A : int=False , A : int=False , A : Any=0 , A : List[str]=0 , A : Dict=-1_0_0 , A : str=0 , A : Optional[Any]=True , ): _UpperCAmelCase : Dict = {label: i for i, label in enumerate(A )} _UpperCAmelCase : str = [] for ex_index, example in enumerate(A ): if ex_index % 1_0_0_0_0 == 0: logger.info("Writing example %d of %d" , A , len(A ) ) _UpperCAmelCase : int = [] _UpperCAmelCase : List[str] = [] for word, label in zip(example.words , example.labels ): _UpperCAmelCase : str = tokenizer.tokenize(A ) # bert-base-multilingual-cased sometimes output "nothing ([]) when calling tokenize with just a space. if len(A ) > 0: tokens.extend(A ) # Use the real label id for the first token of the word, and padding ids for the remaining tokens label_ids.extend([label_map[label]] + [pad_token_label_id] * (len(A ) - 1) ) # Account for [CLS] and [SEP] with "- 2" and with "- 3" for RoBERTa. _UpperCAmelCase : List[str] = tokenizer.num_special_tokens_to_add() if len(A ) > max_seq_length - special_tokens_count: _UpperCAmelCase : List[Any] = tokens[: (max_seq_length - special_tokens_count)] _UpperCAmelCase : List[Any] = label_ids[: (max_seq_length - special_tokens_count)] # The convention in BERT is: # (a) For sequence pairs: # tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP] # type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1 # (b) For single sequences: # tokens: [CLS] the dog is hairy . [SEP] # type_ids: 0 0 0 0 0 0 0 # # Where "type_ids" are used to indicate whether this is the first # sequence or the second sequence. The embedding vectors for `type=0` and # `type=1` were learned during pre-training and are added to the wordpiece # embedding vector (and position vector). This is not *strictly* necessary # since the [SEP] token unambiguously separates the sequences, but it makes # it easier for the model to learn the concept of sequences. # # For classification tasks, the first vector (corresponding to [CLS]) is # used as the "sentence vector". Note that this only makes sense because # the entire model is fine-tuned. 
tokens += [sep_token] label_ids += [pad_token_label_id] if sep_token_extra: # roberta uses an extra separator b/w pairs of sentences tokens += [sep_token] label_ids += [pad_token_label_id] _UpperCAmelCase : Dict = [sequence_a_segment_id] * len(A ) if cls_token_at_end: tokens += [cls_token] label_ids += [pad_token_label_id] segment_ids += [cls_token_segment_id] else: _UpperCAmelCase : str = [cls_token] + tokens _UpperCAmelCase : Dict = [pad_token_label_id] + label_ids _UpperCAmelCase : Any = [cls_token_segment_id] + segment_ids _UpperCAmelCase : int = tokenizer.convert_tokens_to_ids(A ) # The mask has 1 for real tokens and 0 for padding tokens. Only real # tokens are attended to. _UpperCAmelCase : List[Any] = [1 if mask_padding_with_zero else 0] * len(A ) # Zero-pad up to the sequence length. _UpperCAmelCase : List[str] = max_seq_length - len(A ) if pad_on_left: _UpperCAmelCase : str = ([pad_token] * padding_length) + input_ids _UpperCAmelCase : str = ([0 if mask_padding_with_zero else 1] * padding_length) + input_mask _UpperCAmelCase : Any = ([pad_token_segment_id] * padding_length) + segment_ids _UpperCAmelCase : Dict = ([pad_token_label_id] * padding_length) + label_ids else: input_ids += [pad_token] * padding_length input_mask += [0 if mask_padding_with_zero else 1] * padding_length segment_ids += [pad_token_segment_id] * padding_length label_ids += [pad_token_label_id] * padding_length assert len(A ) == max_seq_length assert len(A ) == max_seq_length assert len(A ) == max_seq_length assert len(A ) == max_seq_length if ex_index < 5: logger.info("*** Example ***" ) logger.info("guid: %s" , example.guid ) logger.info("tokens: %s" , " ".join([str(A ) for x in tokens] ) ) logger.info("input_ids: %s" , " ".join([str(A ) for x in input_ids] ) ) logger.info("input_mask: %s" , " ".join([str(A ) for x in input_mask] ) ) logger.info("segment_ids: %s" , " ".join([str(A ) for x in segment_ids] ) ) logger.info("label_ids: %s" , " ".join([str(A ) for x in label_ids] ) ) if "token_type_ids" not in tokenizer.model_input_names: _UpperCAmelCase : Dict = None features.append( InputFeatures( input_ids=A , attention_mask=A , token_type_ids=A , label_ids=A ) ) return features if is_torch_available(): import torch from torch import nn from torch.utils.data import Dataset class UpperCAmelCase_ ( _UpperCamelCase ): __SCREAMING_SNAKE_CASE : List[InputFeatures] __SCREAMING_SNAKE_CASE : int = nn.CrossEntropyLoss().ignore_index def __init__( self : Dict , A : TokenClassificationTask , A : str , A : PreTrainedTokenizer , A : List[str] , A : str , A : Optional[int] = None , A : List[str]=False , A : Split = Split.train , ): # Load data features from cache or dataset file _UpperCAmelCase : int = os.path.join( A , "cached_{}_{}_{}".format(mode.value , tokenizer.__class__.__name__ , str(A ) ) , ) # Make sure only the first process in distributed training processes the dataset, # and the others will use the cache. 
_UpperCAmelCase : List[str] = cached_features_file + ".lock" with FileLock(A ): if os.path.exists(A ) and not overwrite_cache: logger.info(f'Loading features from cached file {cached_features_file}' ) _UpperCAmelCase : Tuple = torch.load(A ) else: logger.info(f'Creating features from dataset file at {data_dir}' ) _UpperCAmelCase : List[str] = token_classification_task.read_examples_from_file(A , A ) # TODO clean up all this to leverage built-in features of tokenizers _UpperCAmelCase : List[Any] = token_classification_task.convert_examples_to_features( A , A , A , A , cls_token_at_end=bool(model_type in ["xlnet"] ) , cls_token=tokenizer.cls_token , cls_token_segment_id=2 if model_type in ["xlnet"] else 0 , sep_token=tokenizer.sep_token , sep_token_extra=A , pad_on_left=bool(tokenizer.padding_side == "left" ) , pad_token=tokenizer.pad_token_id , pad_token_segment_id=tokenizer.pad_token_type_id , pad_token_label_id=self.pad_token_label_id , ) logger.info(f'Saving features into cached file {cached_features_file}' ) torch.save(self.features , A ) def __len__( self : Dict ): return len(self.features ) def __getitem__( self : List[str] , A : Optional[Any] ): return self.features[i] if is_tf_available(): import tensorflow as tf class UpperCAmelCase_ : __SCREAMING_SNAKE_CASE : List[InputFeatures] __SCREAMING_SNAKE_CASE : int = -1_0_0 def __init__( self : Tuple , A : TokenClassificationTask , A : str , A : PreTrainedTokenizer , A : List[str] , A : str , A : Optional[int] = None , A : Optional[Any]=False , A : Split = Split.train , ): _UpperCAmelCase : Union[str, Any] = token_classification_task.read_examples_from_file(A , A ) # TODO clean up all this to leverage built-in features of tokenizers _UpperCAmelCase : List[str] = token_classification_task.convert_examples_to_features( A , A , A , A , cls_token_at_end=bool(model_type in ["xlnet"] ) , cls_token=tokenizer.cls_token , cls_token_segment_id=2 if model_type in ["xlnet"] else 0 , sep_token=tokenizer.sep_token , sep_token_extra=A , pad_on_left=bool(tokenizer.padding_side == "left" ) , pad_token=tokenizer.pad_token_id , pad_token_segment_id=tokenizer.pad_token_type_id , pad_token_label_id=self.pad_token_label_id , ) def gen(): for ex in self.features: if ex.token_type_ids is None: yield ( {"input_ids": ex.input_ids, "attention_mask": ex.attention_mask}, ex.label_ids, ) else: yield ( { "input_ids": ex.input_ids, "attention_mask": ex.attention_mask, "token_type_ids": ex.token_type_ids, }, ex.label_ids, ) if "token_type_ids" not in tokenizer.model_input_names: _UpperCAmelCase : List[str] = tf.data.Dataset.from_generator( A , ({"input_ids": tf.intaa, "attention_mask": tf.intaa}, tf.intaa) , ( {"input_ids": tf.TensorShape([None] ), "attention_mask": tf.TensorShape([None] )}, tf.TensorShape([None] ), ) , ) else: _UpperCAmelCase : List[Any] = tf.data.Dataset.from_generator( A , ({"input_ids": tf.intaa, "attention_mask": tf.intaa, "token_type_ids": tf.intaa}, tf.intaa) , ( { "input_ids": tf.TensorShape([None] ), "attention_mask": tf.TensorShape([None] ), "token_type_ids": tf.TensorShape([None] ), }, tf.TensorShape([None] ), ) , ) def snake_case_ ( self : str ): _UpperCAmelCase : Dict = self.dataset.apply(tf.data.experimental.assert_cardinality(len(self.features ) ) ) return self.dataset def __len__( self : List[Any] ): return len(self.features ) def __getitem__( self : int , A : int ): return self.features[i]
202
0
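# A minimal standalone sketch (my own illustration, not part of the dataset record
# above) of the feature-conversion idea the token-classification sample implements:
# keep the label id on each word's first sub-token, mark remaining sub-tokens with
# pad_token_label_id (-100, the CrossEntropyLoss ignore index), then pad every
# field to max_seq_length.
def words_to_features(words, label_ids, tokenizer, max_seq_length=32, pad_token_label_id=-100):
    input_ids, labels = [tokenizer.cls_token_id], [pad_token_label_id]
    for word, label in zip(words, label_ids):
        sub_ids = tokenizer.encode(word, add_special_tokens=False)
        if not sub_ids:  # some tokenizers return nothing for bare whitespace
            continue
        input_ids.extend(sub_ids)
        labels.extend([label] + [pad_token_label_id] * (len(sub_ids) - 1))
    input_ids.append(tokenizer.sep_token_id)
    labels.append(pad_token_label_id)
    attention_mask = [1] * len(input_ids)
    padding = max_seq_length - len(input_ids)
    input_ids += [tokenizer.pad_token_id] * padding
    attention_mask += [0] * padding
    labels += [pad_token_label_id] * padding
    return {"input_ids": input_ids, "attention_mask": attention_mask, "labels": labels}

# usage (assumes transformers is installed and the checkpoint can be downloaded):
# from transformers import AutoTokenizer
# tok = AutoTokenizer.from_pretrained("bert-base-cased")
# words_to_features(["John", "lives", "here"], [1, 0, 0], tok)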
from __future__ import annotations


def print_distance(distance: list[float], src: int) -> None:
    print(f"Vertex\tShortest Distance from vertex {src}")
    for i, d in enumerate(distance):
        print(f"{i}\t\t{d}")


def check_negative_cycle(graph: list[dict[str, int]], distance: list[float], edge_count: int) -> bool:
    for j in range(edge_count):
        u, v, w = (graph[j][k] for k in ["src", "dst", "weight"])
        if distance[u] != float("inf") and distance[u] + w < distance[v]:
            return True
    return False


def bellman_ford(graph: list[dict[str, int]], vertex_count: int, edge_count: int, src: int) -> list[float]:
    distance = [float("inf")] * vertex_count
    distance[src] = 0.0

    for _ in range(vertex_count - 1):
        for j in range(edge_count):
            u, v, w = (graph[j][k] for k in ["src", "dst", "weight"])
            if distance[u] != float("inf") and distance[u] + w < distance[v]:
                distance[v] = distance[u] + w

    negative_cycle_exists = check_negative_cycle(graph, distance, edge_count)
    if negative_cycle_exists:
        raise Exception("Negative cycle found")

    return distance


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    V = int(input("Enter number of vertices: ").strip())
    E = int(input("Enter number of edges: ").strip())

    graph: list[dict[str, int]] = [{} for _ in range(E)]

    for i in range(E):
        print("Edge ", i + 1)
        src, dest, weight = (
            int(x) for x in input("Enter source, destination, weight: ").strip().split(" ")
        )
        graph[i] = {"src": src, "dst": dest, "weight": weight}

    source = int(input("\nEnter shortest path source:").strip())
    shortest_distance = bellman_ford(graph, V, E, source)
    print_distance(shortest_distance, source)
310
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_torch_available,
)


_import_structure = {"configuration_vit_mae": ["VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTMAEConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_vit_mae"] = [
        "VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ViTMAEForPreTraining",
        "ViTMAELayer",
        "ViTMAEModel",
        "ViTMAEPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_vit_mae"] = [
        "TFViTMAEForPreTraining",
        "TFViTMAEModel",
        "TFViTMAEPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_vit_mae import VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMAEConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_vit_mae import (
            VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST,
            ViTMAEForPreTraining,
            ViTMAELayer,
            ViTMAEModel,
            ViTMAEPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_vit_mae import TFViTMAEForPreTraining, TFViTMAEModel, TFViTMAEPreTrainedModel

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
310
1
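# A quick usage sketch for the Bellman-Ford sample above: each edge is a dict with
# "src"/"dst"/"weight" keys, and the function returns the distance list from src.
edges = [
    {"src": 0, "dst": 1, "weight": 4},
    {"src": 0, "dst": 2, "weight": 1},
    {"src": 2, "dst": 1, "weight": 2},
    {"src": 1, "dst": 3, "weight": 1},
]
assert bellman_ford(edges, 4, len(edges), 0) == [0.0, 3.0, 1.0, 4.0]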
'''simple docstring''' import copy import os from typing import Union from ...configuration_utils import PretrainedConfig from ...utils import logging __UpperCAmelCase =logging.get_logger(__name__) __UpperCAmelCase ={ "BridgeTower/bridgetower-base": "https://huggingface.co/BridgeTower/bridgetower-base/blob/main/config.json", "BridgeTower/bridgetower-base-itm-mlm": ( "https://huggingface.co/BridgeTower/bridgetower-base-itm-mlm/blob/main/config.json" ), } class a__ ( UpperCAmelCase__ ): lowerCamelCase : Tuple ="bridgetower_vision_model" def __init__( self : Dict , a : Optional[int]=7_68 , a : Optional[int]=12 , a : Tuple=3 , a : Tuple=16 , a : Dict=2_88 , a : str=1 , a : Any=1e-0_5 , a : str=False , a : List[Any]=True , a : List[str]=False , **a : Optional[int] , ): """simple docstring""" super().__init__(**a ) __lowerCamelCase = hidden_size __lowerCamelCase = num_hidden_layers __lowerCamelCase = num_channels __lowerCamelCase = patch_size __lowerCamelCase = image_size __lowerCamelCase = initializer_factor __lowerCamelCase = layer_norm_eps __lowerCamelCase = stop_gradient __lowerCamelCase = share_layernorm __lowerCamelCase = remove_last_layer @classmethod def SCREAMING_SNAKE_CASE__ ( cls : Union[str, Any] , a : Union[str, os.PathLike] , **a : Any ): """simple docstring""" __lowerCamelCase , __lowerCamelCase = cls.get_config_dict(a , **a ) if config_dict.get('''model_type''' ) == "bridgetower": __lowerCamelCase = config_dict['''text_config'''] if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type: logger.warning( f"""You are using a model of type {config_dict['model_type']} to instantiate a model of type """ f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" ) return cls.from_dict(a , **a ) class a__ ( UpperCAmelCase__ ): lowerCamelCase : int ="bridgetower_text_model" def __init__( self : Dict , a : Any=5_02_65 , a : str=7_68 , a : Union[str, Any]=12 , a : List[str]=12 , a : int=1 , a : List[str]=30_72 , a : int="gelu" , a : Union[str, Any]=0.1 , a : Union[str, Any]=0.1 , a : Any=5_14 , a : Union[str, Any]=1 , a : Union[str, Any]=1e-0_5 , a : Tuple=1 , a : Tuple=0 , a : str=2 , a : List[str]="absolute" , a : List[str]=True , **a : Optional[Any] , ): """simple docstring""" super().__init__(**a ) __lowerCamelCase = vocab_size __lowerCamelCase = hidden_size __lowerCamelCase = num_hidden_layers __lowerCamelCase = num_attention_heads __lowerCamelCase = hidden_act __lowerCamelCase = initializer_factor __lowerCamelCase = intermediate_size __lowerCamelCase = hidden_dropout_prob __lowerCamelCase = attention_probs_dropout_prob __lowerCamelCase = max_position_embeddings __lowerCamelCase = type_vocab_size __lowerCamelCase = layer_norm_eps __lowerCamelCase = position_embedding_type __lowerCamelCase = use_cache __lowerCamelCase = pad_token_id __lowerCamelCase = bos_token_id __lowerCamelCase = eos_token_id @classmethod def SCREAMING_SNAKE_CASE__ ( cls : Any , a : Union[str, os.PathLike] , **a : Union[str, Any] ): """simple docstring""" __lowerCamelCase , __lowerCamelCase = cls.get_config_dict(a , **a ) if config_dict.get('''model_type''' ) == "bridgetower": __lowerCamelCase = config_dict['''text_config'''] if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type: logger.warning( f"""You are using a model of type {config_dict['model_type']} to instantiate a model of type """ f"""{cls.model_type}. 
This is not supported for all configurations of models and can yield errors.""" ) return cls.from_dict(a , **a ) class a__ ( UpperCAmelCase__ ): lowerCamelCase : Any ="bridgetower" def __init__( self : str , a : str=True , a : str="gelu" , a : Dict=7_68 , a : Union[str, Any]=1 , a : List[Any]=1e-0_5 , a : Dict=False , a : Tuple="add" , a : Dict=12 , a : List[str]=6 , a : Optional[int]=False , a : Dict=False , a : str=None , a : Optional[int]=None , **a : Dict , ): """simple docstring""" __lowerCamelCase = kwargs.pop('''text_config_dict''' , a ) __lowerCamelCase = kwargs.pop('''vision_config_dict''' , a ) super().__init__(**a ) __lowerCamelCase = share_cross_modal_transformer_layers __lowerCamelCase = hidden_act __lowerCamelCase = hidden_size __lowerCamelCase = initializer_factor __lowerCamelCase = layer_norm_eps __lowerCamelCase = share_link_tower_layers __lowerCamelCase = link_tower_type __lowerCamelCase = num_attention_heads __lowerCamelCase = num_hidden_layers __lowerCamelCase = tie_word_embeddings __lowerCamelCase = init_layernorm_from_vision_encoder if text_config is None: __lowerCamelCase = {} logger.info('''`text_config` is `None`. Initializing the `BridgeTowerTextConfig` with default values.''' ) if vision_config is None: __lowerCamelCase = {} logger.info('''`vision_config` is `None`. Initializing the `BridgeTowerVisionConfig` with default values.''' ) __lowerCamelCase = BridgeTowerTextConfig(**a ) __lowerCamelCase = BridgeTowerVisionConfig(**a ) @classmethod def SCREAMING_SNAKE_CASE__ ( cls : Tuple , a : BridgeTowerTextConfig , a : BridgeTowerVisionConfig , **a : Union[str, Any] ): """simple docstring""" return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **a ) def SCREAMING_SNAKE_CASE__ ( self : str ): """simple docstring""" __lowerCamelCase = copy.deepcopy(self.__dict__ ) __lowerCamelCase = self.text_config.to_dict() __lowerCamelCase = self.vision_config.to_dict() __lowerCamelCase = self.__class__.model_type return output
237
'''simple docstring''' import unittest from transformers import DebertaVaTokenizer, DebertaVaTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin __UpperCAmelCase =get_tests_dir("fixtures/spiece.model") @require_sentencepiece @require_tokenizers class a__ ( UpperCAmelCase__ , unittest.TestCase ): lowerCamelCase : Optional[Any] =DebertaVaTokenizer lowerCamelCase : Optional[int] =DebertaVaTokenizerFast lowerCamelCase : Optional[Any] =True lowerCamelCase : Tuple =True def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ): """simple docstring""" super().setUp() # We have a SentencePiece fixture for testing __lowerCamelCase = DebertaVaTokenizer(a , unk_token='''<unk>''' ) tokenizer.save_pretrained(self.tmpdirname ) def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] , a : Dict ): """simple docstring""" __lowerCamelCase = '''this is a test''' __lowerCamelCase = '''this is a test''' return input_text, output_text def SCREAMING_SNAKE_CASE__ ( self : List[str] ): """simple docstring""" __lowerCamelCase = '''<pad>''' __lowerCamelCase = 0 self.assertEqual(self.get_tokenizer()._convert_token_to_id(a ) , a ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(a ) , a ) def SCREAMING_SNAKE_CASE__ ( self : List[Any] ): """simple docstring""" __lowerCamelCase = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , '''<pad>''' ) self.assertEqual(vocab_keys[1] , '''<unk>''' ) self.assertEqual(vocab_keys[-1] , '''[PAD]''' ) self.assertEqual(len(a ) , 3_00_01 ) def SCREAMING_SNAKE_CASE__ ( self : int ): """simple docstring""" self.assertEqual(self.get_tokenizer().vocab_size , 3_00_00 ) def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ): """simple docstring""" __lowerCamelCase = ''' \tHeLLo!how \n Are yoU? 
''' __lowerCamelCase = ['''▁hello''', '''!''', '''how''', '''▁are''', '''▁you''', '''?'''] # fmt: on __lowerCamelCase = DebertaVaTokenizer(a , do_lower_case=a ) __lowerCamelCase = tokenizer.convert_ids_to_tokens(tokenizer.encode(a , add_special_tokens=a ) ) self.assertListEqual(a , a ) __lowerCamelCase = DebertaVaTokenizerFast(a , do_lower_case=a ) __lowerCamelCase = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(a , add_special_tokens=a ) ) self.assertListEqual(a , a ) @unittest.skip('''There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.''' ) def SCREAMING_SNAKE_CASE__ ( self : Dict ): """simple docstring""" pass @unittest.skip('''There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.''' ) def SCREAMING_SNAKE_CASE__ ( self : List[Any] ): """simple docstring""" pass def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ): """simple docstring""" __lowerCamelCase = '''I was born in 92000, and this is falsé.''' __lowerCamelCase = ['''▁''', '''<unk>''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', '''▁''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''<unk>''', '''▁''', '''.''', ] # fmt: on __lowerCamelCase = DebertaVaTokenizer(a , split_by_punct=a ) __lowerCamelCase = tokenizer.convert_ids_to_tokens(tokenizer.encode(a , add_special_tokens=a ) ) self.assertListEqual(a , a ) __lowerCamelCase = DebertaVaTokenizerFast(a , split_by_punct=a ) __lowerCamelCase = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(a , add_special_tokens=a ) ) self.assertListEqual(a , a ) def SCREAMING_SNAKE_CASE__ ( self : Any ): """simple docstring""" __lowerCamelCase = '''I was born in 92000, and this is falsé.''' __lowerCamelCase = ['''▁i''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', '''▁''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''<unk>''', '''▁''', '''.''', ] # fmt: on __lowerCamelCase = DebertaVaTokenizer(a , do_lower_case=a , split_by_punct=a ) __lowerCamelCase = tokenizer.convert_ids_to_tokens(tokenizer.encode(a , add_special_tokens=a ) ) self.assertListEqual(a , a ) __lowerCamelCase = DebertaVaTokenizerFast(a , do_lower_case=a , split_by_punct=a ) __lowerCamelCase = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(a , add_special_tokens=a ) ) self.assertListEqual(a , a ) def SCREAMING_SNAKE_CASE__ ( self : str ): """simple docstring""" __lowerCamelCase = '''I was born in 92000, and this is falsé.''' __lowerCamelCase = ['''▁i''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''<unk>''', '''.''', ] # fmt: on __lowerCamelCase = DebertaVaTokenizer(a , do_lower_case=a , split_by_punct=a ) __lowerCamelCase = tokenizer.convert_ids_to_tokens(tokenizer.encode(a , add_special_tokens=a ) ) self.assertListEqual(a , a ) __lowerCamelCase = DebertaVaTokenizerFast(a , do_lower_case=a , split_by_punct=a ) __lowerCamelCase = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(a , add_special_tokens=a ) ) self.assertListEqual(a , a ) def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ): """simple docstring""" __lowerCamelCase = '''I was born in 92000, and this is falsé.''' __lowerCamelCase = ['''▁''', '''<unk>''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', '''▁''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''<unk>''', '''▁''', '''.''', ] # fmt: on __lowerCamelCase = DebertaVaTokenizer(a , do_lower_case=a , split_by_punct=a ) 
__lowerCamelCase = tokenizer.convert_ids_to_tokens(tokenizer.encode(a , add_special_tokens=a ) ) self.assertListEqual(a , a ) __lowerCamelCase = DebertaVaTokenizerFast(a , do_lower_case=a , split_by_punct=a ) __lowerCamelCase = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(a , add_special_tokens=a ) ) self.assertListEqual(a , a ) def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ): """simple docstring""" __lowerCamelCase = ''' \tHeLLo!how \n Are yoU? ''' __lowerCamelCase = ['''▁''', '''<unk>''', '''e''', '''<unk>''', '''o''', '''!''', '''how''', '''▁''', '''<unk>''', '''re''', '''▁yo''', '''<unk>''', '''?'''] # fmt: on __lowerCamelCase = DebertaVaTokenizer(a , do_lower_case=a , split_by_punct=a ) __lowerCamelCase = tokenizer.convert_ids_to_tokens(tokenizer.encode(a , add_special_tokens=a ) ) self.assertListEqual(a , a ) __lowerCamelCase = DebertaVaTokenizerFast(a , do_lower_case=a , split_by_punct=a ) __lowerCamelCase = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(a , add_special_tokens=a ) ) self.assertListEqual(a , a ) def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ): """simple docstring""" __lowerCamelCase = self.get_tokenizer() __lowerCamelCase = self.get_rust_tokenizer() __lowerCamelCase = '''I was born in 92000, and this is falsé.''' __lowerCamelCase = tokenizer.convert_ids_to_tokens(tokenizer.encode(a , add_special_tokens=a ) ) __lowerCamelCase = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(a , add_special_tokens=a ) ) self.assertListEqual(a , a ) __lowerCamelCase = tokenizer.encode(a , add_special_tokens=a ) __lowerCamelCase = rust_tokenizer.encode(a , add_special_tokens=a ) self.assertListEqual(a , a ) __lowerCamelCase = self.get_rust_tokenizer() __lowerCamelCase = tokenizer.encode(a ) __lowerCamelCase = rust_tokenizer.encode(a ) self.assertListEqual(a , a ) def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ): """simple docstring""" __lowerCamelCase = '''This is a test''' __lowerCamelCase = [13, 1, 43_98, 25, 21, 12_89] __lowerCamelCase = ['''▁''', '''T''', '''his''', '''▁is''', '''▁a''', '''▁test'''] __lowerCamelCase = ['''▁''', '''<unk>''', '''his''', '''▁is''', '''▁a''', '''▁test'''] __lowerCamelCase = DebertaVaTokenizer(a , keep_accents=a ) __lowerCamelCase = DebertaVaTokenizerFast(a , keep_accents=a ) __lowerCamelCase = tokenizer.encode(a , add_special_tokens=a ) self.assertListEqual(a , a ) __lowerCamelCase = tokenizer.tokenize(a ) self.assertListEqual(a , a ) __lowerCamelCase = tokenizer.convert_ids_to_tokens(a ) self.assertListEqual(a , a ) __lowerCamelCase = rust_tokenizer.encode(a , add_special_tokens=a ) self.assertListEqual(a , a ) __lowerCamelCase = rust_tokenizer.tokenize(a ) self.assertListEqual(a , a ) __lowerCamelCase = rust_tokenizer.convert_ids_to_tokens(a ) self.assertListEqual(a , a ) # fmt: off __lowerCamelCase = '''I was born in 92000, and this is falsé.''' __lowerCamelCase = [13, 1, 23, 3_86, 19, 5_61, 30_50, 15, 17, 48, 25, 82_56, 18, 1, 9] __lowerCamelCase = ['''▁''', '''I''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''é''', '''.''', ] __lowerCamelCase = ['''▁''', '''<unk>''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''<unk>''', '''.''', ] # fmt: on __lowerCamelCase = tokenizer.encode(a , add_special_tokens=a ) self.assertListEqual(a , a ) __lowerCamelCase = tokenizer.tokenize(a ) self.assertListEqual(a , a ) __lowerCamelCase = 
tokenizer.convert_ids_to_tokens(a ) self.assertListEqual(a , a ) __lowerCamelCase = rust_tokenizer.encode(a , add_special_tokens=a ) self.assertListEqual(a , a ) __lowerCamelCase = rust_tokenizer.tokenize(a ) self.assertListEqual(a , a ) __lowerCamelCase = rust_tokenizer.convert_ids_to_tokens(a ) self.assertListEqual(a , a ) def SCREAMING_SNAKE_CASE__ ( self : str ): """simple docstring""" __lowerCamelCase = DebertaVaTokenizer(a ) __lowerCamelCase = tokenizer.encode('''sequence builders''' ) __lowerCamelCase = tokenizer.encode('''multi-sequence build''' ) __lowerCamelCase = tokenizer.build_inputs_with_special_tokens(a ) __lowerCamelCase = tokenizer.build_inputs_with_special_tokens(a , a ) self.assertEqual([tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] , a ) self.assertEqual( [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [tokenizer.sep_token_id] , a , ) @slow def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ): """simple docstring""" __lowerCamelCase = {'''input_ids''': [[1, 3_98_67, 36, 1_93_90, 4_86, 27, 3_50_52, 8_14_36, 18, 6_06_85, 12_25, 7, 3_50_52, 8_14_36, 18, 93_67, 1_68_99, 18, 1_59_37, 53, 5_94, 7_73, 18, 1_62_87, 3_04_65, 36, 1_59_37, 6, 4_11_39, 38, 3_69_79, 6_07_63, 1_91, 6, 3_41_32, 99, 6, 5_05_38, 3_90, 4_32_30, 6, 3_41_32, 27_79, 2_08_50, 14, 6_99, 10_72, 11_94, 36, 3_82, 1_09_01, 53, 7, 6_99, 10_72, 20_84, 36, 2_04_22, 6_30, 53, 19, 1_05, 30_49, 18_96, 10_53, 1_68_99, 15_06, 11, 3_79_78, 42_43, 7, 12_37, 3_18_69, 2_00, 1_65_66, 6_54, 6, 3_50_52, 8_14_36, 7, 5_56_30, 1_35_93, 4, 2], [1, 26, 1_50_11, 13, 6_67, 8, 10_53, 18, 2_36_11, 12_37, 7_23_56, 1_28_20, 34, 10_41_34, 12_09, 35, 1_33_13, 66_27, 21, 2_02, 3_47, 7, 1_64, 23_99, 11, 46, 44_85, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 5, 12_32, 28_64, 1_57_85, 1_49_51, 1_05, 5, 85_81, 12_50, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''token_type_ids''': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=a , model_name='''microsoft/deberta-v2-xlarge''' , revision='''ad6e42c1532ddf3a15c39246b63f5559d558b670''' , )
237
1
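# A short usage sketch for the nested-config pattern above, assuming the real
# BridgeTower config classes from transformers (the class names in the sample
# itself are obfuscated):
from transformers.models.bridgetower.configuration_bridgetower import (
    BridgeTowerConfig,
    BridgeTowerTextConfig,
    BridgeTowerVisionConfig,
)

text_cfg = BridgeTowerTextConfig()
vision_cfg = BridgeTowerVisionConfig()
cfg = BridgeTowerConfig.from_text_vision_configs(text_cfg, vision_cfg)
# to_dict() serializes both sub-configs back under "text_config" / "vision_config"
assert "text_config" in cfg.to_dict() and "vision_config" in cfg.to_dict()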
from __future__ import annotations


def solve_maze(maze: list[list[int]]) -> bool:
    size = len(maze)
    # We need to create solution object to save path.
    solutions = [[0 for _ in range(size)] for _ in range(size)]
    solved = run_maze(maze, 0, 0, solutions)
    if solved:
        print("\n".join(str(row) for row in solutions))
    else:
        print("No solution exists!")
    return solved


def run_maze(maze: list[list[int]], i: int, j: int, solutions: list[list[int]]) -> bool:
    size = len(maze)
    # Final check point.
    if i == j == (size - 1):
        solutions[i][j] = 1
        return True

    lower_flag = (not i < 0) and (not j < 0)  # Check lower bounds
    upper_flag = (i < size) and (j < size)  # Check upper bounds

    if lower_flag and upper_flag:
        # check for already visited and block points.
        block_flag = (not solutions[i][j]) and (not maze[i][j])
        if block_flag:
            # check visited
            solutions[i][j] = 1

            # check for directions
            if (
                run_maze(maze, i + 1, j, solutions)
                or run_maze(maze, i, j + 1, solutions)
                or run_maze(maze, i - 1, j, solutions)
                or run_maze(maze, i, j - 1, solutions)
            ):
                return True

            solutions[i][j] = 0
            return False

    return False


if __name__ == "__main__":
    import doctest

    doctest.testmod()
336
def partition(m: int) -> int:
    memo: list[list[int]] = [[0 for _ in range(m)] for _ in range(m + 1)]
    for i in range(m + 1):
        memo[i][0] = 1

    for n in range(m + 1):
        for k in range(1, m):
            memo[n][k] += memo[n][k - 1]
            if n - k > 0:
                memo[n][k] += memo[n - k - 1][k]

    return memo[m][m - 1]


if __name__ == "__main__":
    import sys

    if len(sys.argv) == 1:
        try:
            n = int(input("Enter a number: ").strip())
            print(partition(n))
        except ValueError:
            print("Please enter a number.")
    else:
        try:
            n = int(sys.argv[1])
            print(partition(n))
        except ValueError:
            print("Please pass a number.")
336
1
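# Tiny usage sketch for the two samples above: solve_maze backtracks a 0/1 path
# matrix to the bottom-right corner, and partition(m) fills the memo table.
maze = [
    [0, 1, 0],
    [0, 0, 0],
    [1, 0, 0],
]
assert solve_maze(maze)  # prints [1, 0, 0] / [1, 1, 0] / [0, 1, 1]
assert partition(4) == 5  # the five partitions of 4: 4, 3+1, 2+2, 2+1+1, 1+1+1+1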
from typing import TYPE_CHECKING

# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_informer": [
        "INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "InformerConfig",
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_informer"] = [
        "INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "InformerForPrediction",
        "InformerModel",
        "InformerPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_informer import INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, InformerConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_informer import (
            INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            InformerForPrediction,
            InformerModel,
            InformerPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
350
import argparse from collections import OrderedDict from pathlib import Path import torch from huggingface_hub import hf_hub_download from PIL import Image from torchvision.transforms import functional as F from transformers import DetrImageProcessor, TableTransformerConfig, TableTransformerForObjectDetection from transformers.utils import logging logging.set_verbosity_info() _lowerCamelCase : Any = logging.get_logger(__name__) # here we list all keys to be renamed (original name on the left, our name on the right) _lowerCamelCase : Tuple = [] for i in range(6): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append( (F'''transformer.encoder.layers.{i}.self_attn.out_proj.weight''', F'''encoder.layers.{i}.self_attn.out_proj.weight''') ) rename_keys.append( (F'''transformer.encoder.layers.{i}.self_attn.out_proj.bias''', F'''encoder.layers.{i}.self_attn.out_proj.bias''') ) rename_keys.append((F'''transformer.encoder.layers.{i}.linear1.weight''', F'''encoder.layers.{i}.fc1.weight''')) rename_keys.append((F'''transformer.encoder.layers.{i}.linear1.bias''', F'''encoder.layers.{i}.fc1.bias''')) rename_keys.append((F'''transformer.encoder.layers.{i}.linear2.weight''', F'''encoder.layers.{i}.fc2.weight''')) rename_keys.append((F'''transformer.encoder.layers.{i}.linear2.bias''', F'''encoder.layers.{i}.fc2.bias''')) rename_keys.append( (F'''transformer.encoder.layers.{i}.norm1.weight''', F'''encoder.layers.{i}.self_attn_layer_norm.weight''') ) rename_keys.append((F'''transformer.encoder.layers.{i}.norm1.bias''', F'''encoder.layers.{i}.self_attn_layer_norm.bias''')) rename_keys.append((F'''transformer.encoder.layers.{i}.norm2.weight''', F'''encoder.layers.{i}.final_layer_norm.weight''')) rename_keys.append((F'''transformer.encoder.layers.{i}.norm2.bias''', F'''encoder.layers.{i}.final_layer_norm.bias''')) # decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms rename_keys.append( (F'''transformer.decoder.layers.{i}.self_attn.out_proj.weight''', F'''decoder.layers.{i}.self_attn.out_proj.weight''') ) rename_keys.append( (F'''transformer.decoder.layers.{i}.self_attn.out_proj.bias''', F'''decoder.layers.{i}.self_attn.out_proj.bias''') ) rename_keys.append( ( F'''transformer.decoder.layers.{i}.multihead_attn.out_proj.weight''', F'''decoder.layers.{i}.encoder_attn.out_proj.weight''', ) ) rename_keys.append( ( F'''transformer.decoder.layers.{i}.multihead_attn.out_proj.bias''', F'''decoder.layers.{i}.encoder_attn.out_proj.bias''', ) ) rename_keys.append((F'''transformer.decoder.layers.{i}.linear1.weight''', F'''decoder.layers.{i}.fc1.weight''')) rename_keys.append((F'''transformer.decoder.layers.{i}.linear1.bias''', F'''decoder.layers.{i}.fc1.bias''')) rename_keys.append((F'''transformer.decoder.layers.{i}.linear2.weight''', F'''decoder.layers.{i}.fc2.weight''')) rename_keys.append((F'''transformer.decoder.layers.{i}.linear2.bias''', F'''decoder.layers.{i}.fc2.bias''')) rename_keys.append( (F'''transformer.decoder.layers.{i}.norm1.weight''', F'''decoder.layers.{i}.self_attn_layer_norm.weight''') ) rename_keys.append((F'''transformer.decoder.layers.{i}.norm1.bias''', F'''decoder.layers.{i}.self_attn_layer_norm.bias''')) rename_keys.append( (F'''transformer.decoder.layers.{i}.norm2.weight''', F'''decoder.layers.{i}.encoder_attn_layer_norm.weight''') ) rename_keys.append( (F'''transformer.decoder.layers.{i}.norm2.bias''', F'''decoder.layers.{i}.encoder_attn_layer_norm.bias''') ) 
rename_keys.append((F'''transformer.decoder.layers.{i}.norm3.weight''', F'''decoder.layers.{i}.final_layer_norm.weight''')) rename_keys.append((F'''transformer.decoder.layers.{i}.norm3.bias''', F'''decoder.layers.{i}.final_layer_norm.bias''')) # convolutional projection + query embeddings + layernorm of encoder + layernorm of decoder + class and bounding box heads rename_keys.extend( [ ("input_proj.weight", "input_projection.weight"), ("input_proj.bias", "input_projection.bias"), ("query_embed.weight", "query_position_embeddings.weight"), ("transformer.encoder.norm.weight", "encoder.layernorm.weight"), ("transformer.encoder.norm.bias", "encoder.layernorm.bias"), ("transformer.decoder.norm.weight", "decoder.layernorm.weight"), ("transformer.decoder.norm.bias", "decoder.layernorm.bias"), ("class_embed.weight", "class_labels_classifier.weight"), ("class_embed.bias", "class_labels_classifier.bias"), ("bbox_embed.layers.0.weight", "bbox_predictor.layers.0.weight"), ("bbox_embed.layers.0.bias", "bbox_predictor.layers.0.bias"), ("bbox_embed.layers.1.weight", "bbox_predictor.layers.1.weight"), ("bbox_embed.layers.1.bias", "bbox_predictor.layers.1.bias"), ("bbox_embed.layers.2.weight", "bbox_predictor.layers.2.weight"), ("bbox_embed.layers.2.bias", "bbox_predictor.layers.2.bias"), ] ) def _UpperCAmelCase (UpperCamelCase_ : Dict , UpperCamelCase_ : Union[str, Any] , UpperCamelCase_ : Optional[Any] ): '''simple docstring''' _lowerCAmelCase : str = state_dict.pop(UpperCamelCase_ ) _lowerCAmelCase : Tuple = val def _UpperCAmelCase (UpperCamelCase_ : Union[str, Any] ): '''simple docstring''' _lowerCAmelCase : Optional[int] = OrderedDict() for key, value in state_dict.items(): if "backbone.0.body" in key: _lowerCAmelCase : List[str] = key.replace("""backbone.0.body""" , """backbone.conv_encoder.model""" ) _lowerCAmelCase : Optional[Any] = value else: _lowerCAmelCase : Union[str, Any] = value return new_state_dict def _UpperCAmelCase (UpperCamelCase_ : Optional[int] ): '''simple docstring''' _lowerCAmelCase : Tuple = """""" # first: transformer encoder for i in range(6 ): # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias) _lowerCAmelCase : Dict = state_dict.pop(F"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight" ) _lowerCAmelCase : str = state_dict.pop(F"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias" ) # next, add query, keys and values (in that order) to the state dict _lowerCAmelCase : str = in_proj_weight[:256, :] _lowerCAmelCase : Tuple = in_proj_bias[:256] _lowerCAmelCase : Tuple = in_proj_weight[256:512, :] _lowerCAmelCase : Dict = in_proj_bias[256:512] _lowerCAmelCase : Tuple = in_proj_weight[-256:, :] _lowerCAmelCase : List[str] = in_proj_bias[-256:] # next: transformer decoder (which is a bit more complex because it also includes cross-attention) for i in range(6 ): # read in weights + bias of input projection layer of self-attention _lowerCAmelCase : Optional[int] = state_dict.pop(F"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight" ) _lowerCAmelCase : Any = state_dict.pop(F"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias" ) # next, add query, keys and values (in that order) to the state dict _lowerCAmelCase : Optional[Any] = in_proj_weight[:256, :] _lowerCAmelCase : Dict = in_proj_bias[:256] _lowerCAmelCase : Optional[Any] = in_proj_weight[256:512, :] _lowerCAmelCase : Optional[Any] = in_proj_bias[256:512] _lowerCAmelCase : str = in_proj_weight[-256:, :] _lowerCAmelCase : 
Any = in_proj_bias[-256:] # read in weights + bias of input projection layer of cross-attention _lowerCAmelCase : Union[str, Any] = state_dict.pop( F"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight" ) _lowerCAmelCase : str = state_dict.pop(F"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias" ) # next, add query, keys and values (in that order) of cross-attention to the state dict _lowerCAmelCase : Tuple = in_proj_weight_cross_attn[:256, :] _lowerCAmelCase : List[str] = in_proj_bias_cross_attn[:256] _lowerCAmelCase : Dict = in_proj_weight_cross_attn[256:512, :] _lowerCAmelCase : Any = in_proj_bias_cross_attn[256:512] _lowerCAmelCase : Tuple = in_proj_weight_cross_attn[-256:, :] _lowerCAmelCase : List[Any] = in_proj_bias_cross_attn[-256:] def _UpperCAmelCase (UpperCamelCase_ : List[Any] , UpperCamelCase_ : Any ): '''simple docstring''' _lowerCAmelCase , _lowerCAmelCase : Optional[int] = image.size _lowerCAmelCase : List[str] = max(UpperCamelCase_ , UpperCamelCase_ ) _lowerCAmelCase : Union[str, Any] = 800 if """detection""" in checkpoint_url else 1000 _lowerCAmelCase : Optional[int] = target_max_size / current_max_size _lowerCAmelCase : Union[str, Any] = image.resize((int(round(scale * width ) ), int(round(scale * height ) )) ) return resized_image def _UpperCAmelCase (UpperCamelCase_ : Any ): '''simple docstring''' _lowerCAmelCase : Optional[int] = F.to_tensor(UpperCamelCase_ ) _lowerCAmelCase : int = F.normalize(UpperCamelCase_ , mean=[0.485, 0.456, 0.406] , std=[0.229, 0.224, 0.225] ) return image @torch.no_grad() def _UpperCAmelCase (UpperCamelCase_ : List[Any] , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : Optional[int] ): '''simple docstring''' logger.info("""Converting model...""" ) # load original state dict _lowerCAmelCase : Dict = torch.hub.load_state_dict_from_url(UpperCamelCase_ , map_location="""cpu""" ) # rename keys for src, dest in rename_keys: rename_key(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) _lowerCAmelCase : List[str] = rename_backbone_keys(UpperCamelCase_ ) # query, key and value matrices need special treatment read_in_q_k_v(UpperCamelCase_ ) # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them _lowerCAmelCase : int = """model.""" for key in state_dict.copy().keys(): if not key.startswith("""class_labels_classifier""" ) and not key.startswith("""bbox_predictor""" ): _lowerCAmelCase : Tuple = state_dict.pop(UpperCamelCase_ ) _lowerCAmelCase : str = val # create HuggingFace model and load state dict _lowerCAmelCase : List[Any] = TableTransformerConfig( backbone="""resnet18""" , mask_loss_coefficient=1 , dice_loss_coefficient=1 , ce_loss_coefficient=1 , bbox_loss_coefficient=5 , giou_loss_coefficient=2 , eos_coefficient=0.4 , class_cost=1 , bbox_cost=5 , giou_cost=2 , ) if "detection" in checkpoint_url: _lowerCAmelCase : Any = 15 _lowerCAmelCase : Any = 2 _lowerCAmelCase : List[Any] = {0: """table""", 1: """table rotated"""} _lowerCAmelCase : Tuple = idalabel _lowerCAmelCase : Optional[Any] = {v: k for k, v in idalabel.items()} else: _lowerCAmelCase : Optional[Any] = 125 _lowerCAmelCase : int = 6 _lowerCAmelCase : List[str] = { 0: """table""", 1: """table column""", 2: """table row""", 3: """table column header""", 4: """table projected row header""", 5: """table spanning cell""", } _lowerCAmelCase : int = idalabel _lowerCAmelCase : Dict = {v: k for k, v in idalabel.items()} _lowerCAmelCase : Any = DetrImageProcessor( format="""coco_detection""" 
, max_size=800 if """detection""" in checkpoint_url else 1000 ) _lowerCAmelCase : int = TableTransformerForObjectDetection(UpperCamelCase_ ) model.load_state_dict(UpperCamelCase_ ) model.eval() # verify our conversion _lowerCAmelCase : Optional[Any] = """example_pdf.png""" if """detection""" in checkpoint_url else """example_table.png""" _lowerCAmelCase : Union[str, Any] = hf_hub_download(repo_id="""nielsr/example-pdf""" , repo_type="""dataset""" , filename=UpperCamelCase_ ) _lowerCAmelCase : Union[str, Any] = Image.open(UpperCamelCase_ ).convert("""RGB""" ) _lowerCAmelCase : Optional[Any] = normalize(resize(UpperCamelCase_ , UpperCamelCase_ ) ).unsqueeze(0 ) _lowerCAmelCase : Optional[Any] = model(UpperCamelCase_ ) if "detection" in checkpoint_url: _lowerCAmelCase : Dict = (1, 15, 3) _lowerCAmelCase : Optional[Any] = torch.tensor( [[-6.7_897, -16.9_985, 6.7_937], [-8.0_186, -22.2_192, 6.9_677], [-7.3_117, -21.0_708, 7.4_055]] ) _lowerCAmelCase : Union[str, Any] = torch.tensor([[0.4_867, 0.1_767, 0.6_732], [0.6_718, 0.4_479, 0.3_830], [0.4_716, 0.1_760, 0.6_364]] ) else: _lowerCAmelCase : Optional[Any] = (1, 125, 7) _lowerCAmelCase : Tuple = torch.tensor( [[-18.1_430, -8.3_214, 4.8_274], [-18.4_685, -7.1_361, -4.2_667], [-26.3_693, -9.3_429, -4.9_962]] ) _lowerCAmelCase : Dict = torch.tensor([[0.4_983, 0.5_595, 0.9_440], [0.4_916, 0.6_315, 0.5_954], [0.6_108, 0.8_637, 0.1_135]] ) assert outputs.logits.shape == expected_shape assert torch.allclose(outputs.logits[0, :3, :3] , UpperCamelCase_ , atol=1E-4 ) assert torch.allclose(outputs.pred_boxes[0, :3, :3] , UpperCamelCase_ , atol=1E-4 ) print("""Looks ok!""" ) if pytorch_dump_folder_path is not None: # Save model and image processor logger.info(F"Saving PyTorch model and image processor to {pytorch_dump_folder_path}..." ) Path(UpperCamelCase_ ).mkdir(exist_ok=UpperCamelCase_ ) model.save_pretrained(UpperCamelCase_ ) image_processor.save_pretrained(UpperCamelCase_ ) if push_to_hub: # Push model to HF hub logger.info("""Pushing model to the hub...""" ) _lowerCAmelCase : List[str] = ( """microsoft/table-transformer-detection""" if """detection""" in checkpoint_url else """microsoft/table-transformer-structure-recognition""" ) model.push_to_hub(UpperCamelCase_ ) image_processor.push_to_hub(UpperCamelCase_ ) if __name__ == "__main__": _lowerCamelCase : Dict = argparse.ArgumentParser() parser.add_argument( "--checkpoint_url", default="https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth", type=str, choices=[ "https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth", "https://pubtables1m.blob.core.windows.net/model/pubtables1m_structure_detr_r18.pth", ], help="URL of the Table Transformer checkpoint you'd like to convert.", ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model." ) parser.add_argument( "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub." ) _lowerCamelCase : List[str] = parser.parse_args() convert_table_transformer_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
159
0
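# The conversion script above renames checkpoint keys by popping each old key and
# re-inserting its tensor under the new name; a minimal standalone sketch of that
# pattern (the key names here are illustrative):
import torch

def rename_keys_in_state_dict(state_dict, rename_pairs):
    for old_key, new_key in rename_pairs:
        state_dict[new_key] = state_dict.pop(old_key)
    return state_dict

sd = {"transformer.encoder.norm.weight": torch.zeros(4)}
rename_keys_in_state_dict(sd, [("transformer.encoder.norm.weight", "encoder.layernorm.weight")])
assert list(sd) == ["encoder.layernorm.weight"]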
"""simple docstring""" class UpperCamelCase_ : """simple docstring""" def __init__( self : Optional[Any] ) -> None: __SCREAMING_SNAKE_CASE = {} # Mapping from char to TrieNode __SCREAMING_SNAKE_CASE = False def UpperCAmelCase_ ( self : Tuple , UpperCAmelCase__ : list[str] ) -> None: for word in words: self.insert(UpperCAmelCase__ ) def UpperCAmelCase_ ( self : List[str] , UpperCAmelCase__ : str ) -> None: __SCREAMING_SNAKE_CASE = self for char in word: if char not in curr.nodes: __SCREAMING_SNAKE_CASE = TrieNode() __SCREAMING_SNAKE_CASE = curr.nodes[char] __SCREAMING_SNAKE_CASE = True def UpperCAmelCase_ ( self : Tuple , UpperCAmelCase__ : str ) -> bool: __SCREAMING_SNAKE_CASE = self for char in word: if char not in curr.nodes: return False __SCREAMING_SNAKE_CASE = curr.nodes[char] return curr.is_leaf def UpperCAmelCase_ ( self : List[Any] , UpperCAmelCase__ : str ) -> None: def _delete(UpperCAmelCase__ : TrieNode , UpperCAmelCase__ : str , UpperCAmelCase__ : int ) -> bool: if index == len(UpperCAmelCase__ ): # If word does not exist if not curr.is_leaf: return False __SCREAMING_SNAKE_CASE = False return len(curr.nodes ) == 0 __SCREAMING_SNAKE_CASE = word[index] __SCREAMING_SNAKE_CASE = curr.nodes.get(UpperCAmelCase__ ) # If char not in current trie node if not char_node: return False # Flag to check if node can be deleted __SCREAMING_SNAKE_CASE = _delete(UpperCAmelCase__ , UpperCAmelCase__ , index + 1 ) if delete_curr: del curr.nodes[char] return len(curr.nodes ) == 0 return delete_curr _delete(self , UpperCAmelCase__ , 0 ) def UpperCAmelCase__ (lowerCAmelCase_ , lowerCAmelCase_ ): '''simple docstring''' if node.is_leaf: print(lowerCAmelCase_ , end=" " ) for key, value in node.nodes.items(): print_words(lowerCAmelCase_ , word + key ) def UpperCAmelCase__ (): '''simple docstring''' __SCREAMING_SNAKE_CASE = "banana bananas bandana band apple all beast".split() __SCREAMING_SNAKE_CASE = TrieNode() root.insert_many(lowerCAmelCase_ ) # print_words(root, "") assert all(root.find(lowerCAmelCase_ ) for word in words ) assert root.find("banana" ) assert not root.find("bandanas" ) assert not root.find("apps" ) assert root.find("apple" ) assert root.find("all" ) root.delete("all" ) assert not root.find("all" ) root.delete("banana" ) assert not root.find("banana" ) assert root.find("bananas" ) return True def UpperCAmelCase__ (lowerCAmelCase_ , lowerCAmelCase_ ): '''simple docstring''' print(str(lowerCAmelCase_ ) , "works!" if passes else "doesn't work :(" ) def UpperCAmelCase__ (): '''simple docstring''' assert test_trie() def UpperCAmelCase__ (): '''simple docstring''' print_results("Testing trie functionality" , test_trie() ) if __name__ == "__main__": main()
54
a__ = '''0.18.2''' from .configuration_utils import ConfigMixin from .utils import ( OptionalDependencyNotAvailable, is_flax_available, is_inflect_available, is_invisible_watermark_available, is_k_diffusion_available, is_k_diffusion_version, is_librosa_available, is_note_seq_available, is_onnx_available, is_scipy_available, is_torch_available, is_torchsde_available, is_transformers_available, is_transformers_version, is_unidecode_available, logging, ) try: if not is_onnx_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_onnx_objects import * # noqa F403 else: from .pipelines import OnnxRuntimeModel try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_pt_objects import * # noqa F403 else: from .models import ( AutoencoderKL, ControlNetModel, ModelMixin, PriorTransformer, TaFilmDecoder, TransformeraDModel, UNetaDModel, UNetaDConditionModel, UNetaDModel, UNetaDConditionModel, VQModel, ) from .optimization import ( get_constant_schedule, get_constant_schedule_with_warmup, get_cosine_schedule_with_warmup, get_cosine_with_hard_restarts_schedule_with_warmup, get_linear_schedule_with_warmup, get_polynomial_decay_schedule_with_warmup, get_scheduler, ) from .pipelines import ( AudioPipelineOutput, ConsistencyModelPipeline, DanceDiffusionPipeline, DDIMPipeline, DDPMPipeline, DiffusionPipeline, DiTPipeline, ImagePipelineOutput, KarrasVePipeline, LDMPipeline, LDMSuperResolutionPipeline, PNDMPipeline, RePaintPipeline, ScoreSdeVePipeline, ) from .schedulers import ( CMStochasticIterativeScheduler, DDIMInverseScheduler, DDIMParallelScheduler, DDIMScheduler, DDPMParallelScheduler, DDPMScheduler, DEISMultistepScheduler, DPMSolverMultistepInverseScheduler, DPMSolverMultistepScheduler, DPMSolverSinglestepScheduler, EulerAncestralDiscreteScheduler, EulerDiscreteScheduler, HeunDiscreteScheduler, IPNDMScheduler, KarrasVeScheduler, KDPMaAncestralDiscreteScheduler, KDPMaDiscreteScheduler, PNDMScheduler, RePaintScheduler, SchedulerMixin, ScoreSdeVeScheduler, UnCLIPScheduler, UniPCMultistepScheduler, VQDiffusionScheduler, ) from .training_utils import EMAModel try: if not (is_torch_available() and is_scipy_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_torch_and_scipy_objects import * # noqa F403 else: from .schedulers import LMSDiscreteScheduler try: if not (is_torch_available() and is_torchsde_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_torch_and_torchsde_objects import * # noqa F403 else: from .schedulers import DPMSolverSDEScheduler try: if not (is_torch_available() and is_transformers_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_torch_and_transformers_objects import * # noqa F403 else: from .pipelines import ( AltDiffusionImgaImgPipeline, AltDiffusionPipeline, AudioLDMPipeline, CycleDiffusionPipeline, IFImgaImgPipeline, IFImgaImgSuperResolutionPipeline, IFInpaintingPipeline, IFInpaintingSuperResolutionPipeline, IFPipeline, IFSuperResolutionPipeline, ImageTextPipelineOutput, KandinskyImgaImgPipeline, KandinskyInpaintPipeline, KandinskyPipeline, KandinskyPriorPipeline, KandinskyVaaControlnetImgaImgPipeline, KandinskyVaaControlnetPipeline, KandinskyVaaImgaImgPipeline, KandinskyVaaInpaintPipeline, KandinskyVaaPipeline, KandinskyVaaPriorEmbaEmbPipeline, KandinskyVaaPriorPipeline, 
LDMTextToImagePipeline, PaintByExamplePipeline, SemanticStableDiffusionPipeline, ShapEImgaImgPipeline, ShapEPipeline, StableDiffusionAttendAndExcitePipeline, StableDiffusionControlNetImgaImgPipeline, StableDiffusionControlNetInpaintPipeline, StableDiffusionControlNetPipeline, StableDiffusionDepthaImgPipeline, StableDiffusionDiffEditPipeline, StableDiffusionImageVariationPipeline, StableDiffusionImgaImgPipeline, StableDiffusionInpaintPipeline, StableDiffusionInpaintPipelineLegacy, StableDiffusionInstructPixaPixPipeline, StableDiffusionLatentUpscalePipeline, StableDiffusionLDMaDPipeline, StableDiffusionModelEditingPipeline, StableDiffusionPanoramaPipeline, StableDiffusionParadigmsPipeline, StableDiffusionPipeline, StableDiffusionPipelineSafe, StableDiffusionPixaPixZeroPipeline, StableDiffusionSAGPipeline, StableDiffusionUpscalePipeline, StableUnCLIPImgaImgPipeline, StableUnCLIPPipeline, TextToVideoSDPipeline, TextToVideoZeroPipeline, UnCLIPImageVariationPipeline, UnCLIPPipeline, UniDiffuserModel, UniDiffuserPipeline, UniDiffuserTextDecoder, VersatileDiffusionDualGuidedPipeline, VersatileDiffusionImageVariationPipeline, VersatileDiffusionPipeline, VersatileDiffusionTextToImagePipeline, VideoToVideoSDPipeline, VQDiffusionPipeline, ) try: if not (is_torch_available() and is_transformers_available() and is_invisible_watermark_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_torch_and_transformers_and_invisible_watermark_objects import * # noqa F403 else: from .pipelines import StableDiffusionXLImgaImgPipeline, StableDiffusionXLPipeline try: if not (is_torch_available() and is_transformers_available() and is_k_diffusion_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_torch_and_transformers_and_k_diffusion_objects import * # noqa F403 else: from .pipelines import StableDiffusionKDiffusionPipeline try: if not (is_torch_available() and is_transformers_available() and is_onnx_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_torch_and_transformers_and_onnx_objects import * # noqa F403 else: from .pipelines import ( OnnxStableDiffusionImgaImgPipeline, OnnxStableDiffusionInpaintPipeline, OnnxStableDiffusionInpaintPipelineLegacy, OnnxStableDiffusionPipeline, OnnxStableDiffusionUpscalePipeline, StableDiffusionOnnxPipeline, ) try: if not (is_torch_available() and is_librosa_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_torch_and_librosa_objects import * # noqa F403 else: from .pipelines import AudioDiffusionPipeline, Mel try: if not (is_transformers_available() and is_torch_available() and is_note_seq_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403 else: from .pipelines import SpectrogramDiffusionPipeline try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_flax_objects import * # noqa F403 else: from .models.controlnet_flax import FlaxControlNetModel from .models.modeling_flax_utils import FlaxModelMixin from .models.unet_ad_condition_flax import FlaxUNetaDConditionModel from .models.vae_flax import FlaxAutoencoderKL from .pipelines import FlaxDiffusionPipeline from .schedulers import ( FlaxDDIMScheduler, FlaxDDPMScheduler, FlaxDPMSolverMultistepScheduler, 
FlaxKarrasVeScheduler, FlaxLMSDiscreteScheduler, FlaxPNDMScheduler, FlaxSchedulerMixin, FlaxScoreSdeVeScheduler, ) try: if not (is_flax_available() and is_transformers_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_flax_and_transformers_objects import * # noqa F403 else: from .pipelines import ( FlaxStableDiffusionControlNetPipeline, FlaxStableDiffusionImgaImgPipeline, FlaxStableDiffusionInpaintPipeline, FlaxStableDiffusionPipeline, ) try: if not (is_note_seq_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_note_seq_objects import * # noqa F403 else: from .pipelines import MidiProcessor
235
0
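# Quick sanity check for the TrieNode API defined above:
root = TrieNode()
root.insert_many(["band", "bandana"])
assert root.find("band")
assert not root.find("ban")  # a prefix alone is not a stored word
root.delete("band")
assert root.find("bandana")  # the shared prefix survives the delete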
"""simple docstring""" import argparse lowerCamelCase_ = "docs/source/_static/js/custom.js" def __lowerCamelCase ( a_ : Dict ) -> List[Any]: with open(a_ , encoding='''utf-8''' , newline='''\n''' ) as f: __SCREAMING_SNAKE_CASE :Dict = f.readlines() __SCREAMING_SNAKE_CASE :Dict = 0 # First let's put the right version while not lines[index].startswith('''const stableVersion =''' ): index += 1 __SCREAMING_SNAKE_CASE :int = f'''const stableVersion = "v{version}"\n''' # Then update the dictionary while not lines[index].startswith('''const versionMapping = {''' ): index += 1 # We go until the end while not lines[index].startswith('''}''' ): index += 1 # We add the new version at the end lines[index - 1] += f''' "v{version}": "v{version}",\n''' with open(a_ , '''w''' , encoding='''utf-8''' , newline='''\n''' ) as f: f.writelines(a_ ) if __name__ == "__main__": lowerCamelCase_ = argparse.ArgumentParser() parser.add_argument("--version", help="Release version.") lowerCamelCase_ = parser.parse_args() update_custom_js(args.version)
239
"""simple docstring""" import unittest from transformers import AutoTokenizer, NystromformerConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( NystromformerForMaskedLM, NystromformerForMultipleChoice, NystromformerForQuestionAnswering, NystromformerForSequenceClassification, NystromformerForTokenClassification, NystromformerModel, ) from transformers.models.nystromformer.modeling_nystromformer import NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST class _SCREAMING_SNAKE_CASE: def __init__( self ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__=13 ,SCREAMING_SNAKE_CASE__=7 ,SCREAMING_SNAKE_CASE__=True ,SCREAMING_SNAKE_CASE__=True ,SCREAMING_SNAKE_CASE__=True ,SCREAMING_SNAKE_CASE__=True ,SCREAMING_SNAKE_CASE__=99 ,SCREAMING_SNAKE_CASE__=32 ,SCREAMING_SNAKE_CASE__=5 ,SCREAMING_SNAKE_CASE__=4 ,SCREAMING_SNAKE_CASE__=37 ,SCREAMING_SNAKE_CASE__="gelu" ,SCREAMING_SNAKE_CASE__=0.1 ,SCREAMING_SNAKE_CASE__=0.1 ,SCREAMING_SNAKE_CASE__=5_12 ,SCREAMING_SNAKE_CASE__=16 ,SCREAMING_SNAKE_CASE__=2 ,SCREAMING_SNAKE_CASE__=0.0_2 ,SCREAMING_SNAKE_CASE__=3 ,SCREAMING_SNAKE_CASE__=4 ,SCREAMING_SNAKE_CASE__=None ,) -> Dict: """simple docstring""" __SCREAMING_SNAKE_CASE :Any = parent __SCREAMING_SNAKE_CASE :Tuple = batch_size __SCREAMING_SNAKE_CASE :Tuple = seq_length __SCREAMING_SNAKE_CASE :Any = is_training __SCREAMING_SNAKE_CASE :Tuple = use_input_mask __SCREAMING_SNAKE_CASE :List[Any] = use_token_type_ids __SCREAMING_SNAKE_CASE :int = use_labels __SCREAMING_SNAKE_CASE :Dict = vocab_size __SCREAMING_SNAKE_CASE :int = hidden_size __SCREAMING_SNAKE_CASE :int = num_hidden_layers __SCREAMING_SNAKE_CASE :Union[str, Any] = num_attention_heads __SCREAMING_SNAKE_CASE :Any = intermediate_size __SCREAMING_SNAKE_CASE :Any = hidden_act __SCREAMING_SNAKE_CASE :str = hidden_dropout_prob __SCREAMING_SNAKE_CASE :List[str] = attention_probs_dropout_prob __SCREAMING_SNAKE_CASE :int = max_position_embeddings __SCREAMING_SNAKE_CASE :Any = type_vocab_size __SCREAMING_SNAKE_CASE :Optional[Any] = type_sequence_label_size __SCREAMING_SNAKE_CASE :Optional[int] = initializer_range __SCREAMING_SNAKE_CASE :Union[str, Any] = num_labels __SCREAMING_SNAKE_CASE :Union[str, Any] = num_choices __SCREAMING_SNAKE_CASE :str = scope def _UpperCamelCase ( self ) -> Optional[int]: """simple docstring""" __SCREAMING_SNAKE_CASE :Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size ) __SCREAMING_SNAKE_CASE :Union[str, Any] = None if self.use_input_mask: __SCREAMING_SNAKE_CASE :List[str] = random_attention_mask([self.batch_size, self.seq_length] ) __SCREAMING_SNAKE_CASE :Union[str, Any] = None if self.use_token_type_ids: __SCREAMING_SNAKE_CASE :Tuple = ids_tensor([self.batch_size, self.seq_length] ,self.type_vocab_size ) __SCREAMING_SNAKE_CASE :Dict = None __SCREAMING_SNAKE_CASE :Dict = None __SCREAMING_SNAKE_CASE :Dict = None if self.use_labels: __SCREAMING_SNAKE_CASE :List[Any] = ids_tensor([self.batch_size] ,self.type_sequence_label_size ) __SCREAMING_SNAKE_CASE :Optional[int] = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels ) __SCREAMING_SNAKE_CASE :Dict = ids_tensor([self.batch_size] ,self.num_choices ) __SCREAMING_SNAKE_CASE :Tuple = self.get_config() return config, input_ids, token_type_ids, input_mask, 
sequence_labels, token_labels, choice_labels def _UpperCamelCase ( self ) -> Union[str, Any]: """simple docstring""" return NystromformerConfig( vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,type_vocab_size=self.type_vocab_size ,is_decoder=SCREAMING_SNAKE_CASE__ ,initializer_range=self.initializer_range ,) def _UpperCamelCase ( self ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ) -> Any: """simple docstring""" __SCREAMING_SNAKE_CASE :List[Any] = NystromformerModel(config=SCREAMING_SNAKE_CASE__ ) model.to(SCREAMING_SNAKE_CASE__ ) model.eval() __SCREAMING_SNAKE_CASE :Any = model(SCREAMING_SNAKE_CASE__ ,attention_mask=SCREAMING_SNAKE_CASE__ ,token_type_ids=SCREAMING_SNAKE_CASE__ ) __SCREAMING_SNAKE_CASE :List[str] = model(SCREAMING_SNAKE_CASE__ ,token_type_ids=SCREAMING_SNAKE_CASE__ ) __SCREAMING_SNAKE_CASE :int = model(SCREAMING_SNAKE_CASE__ ) self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) ) def _UpperCamelCase ( self ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ) -> int: """simple docstring""" __SCREAMING_SNAKE_CASE :Dict = NystromformerForMaskedLM(config=SCREAMING_SNAKE_CASE__ ) model.to(SCREAMING_SNAKE_CASE__ ) model.eval() __SCREAMING_SNAKE_CASE :Tuple = model(SCREAMING_SNAKE_CASE__ ,attention_mask=SCREAMING_SNAKE_CASE__ ,token_type_ids=SCREAMING_SNAKE_CASE__ ,labels=SCREAMING_SNAKE_CASE__ ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) ) def _UpperCamelCase ( self ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ) -> Tuple: """simple docstring""" __SCREAMING_SNAKE_CASE :List[str] = NystromformerForQuestionAnswering(config=SCREAMING_SNAKE_CASE__ ) model.to(SCREAMING_SNAKE_CASE__ ) model.eval() __SCREAMING_SNAKE_CASE :Optional[Any] = model( SCREAMING_SNAKE_CASE__ ,attention_mask=SCREAMING_SNAKE_CASE__ ,token_type_ids=SCREAMING_SNAKE_CASE__ ,start_positions=SCREAMING_SNAKE_CASE__ ,end_positions=SCREAMING_SNAKE_CASE__ ,) self.parent.assertEqual(result.start_logits.shape ,(self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape ,(self.batch_size, self.seq_length) ) def _UpperCamelCase ( self ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ) -> Union[str, Any]: """simple docstring""" __SCREAMING_SNAKE_CASE :int = self.num_labels __SCREAMING_SNAKE_CASE :Any = NystromformerForSequenceClassification(SCREAMING_SNAKE_CASE__ ) model.to(SCREAMING_SNAKE_CASE__ ) model.eval() __SCREAMING_SNAKE_CASE :Dict = model(SCREAMING_SNAKE_CASE__ ,attention_mask=SCREAMING_SNAKE_CASE__ ,token_type_ids=SCREAMING_SNAKE_CASE__ ,labels=SCREAMING_SNAKE_CASE__ ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) ) def _UpperCamelCase ( self 
,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ) -> Any: """simple docstring""" __SCREAMING_SNAKE_CASE :Optional[Any] = self.num_labels __SCREAMING_SNAKE_CASE :Tuple = NystromformerForTokenClassification(config=SCREAMING_SNAKE_CASE__ ) model.to(SCREAMING_SNAKE_CASE__ ) model.eval() __SCREAMING_SNAKE_CASE :Any = model(SCREAMING_SNAKE_CASE__ ,attention_mask=SCREAMING_SNAKE_CASE__ ,token_type_ids=SCREAMING_SNAKE_CASE__ ,labels=SCREAMING_SNAKE_CASE__ ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.num_labels) ) def _UpperCamelCase ( self ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ) -> Dict: """simple docstring""" __SCREAMING_SNAKE_CASE :Optional[Any] = self.num_choices __SCREAMING_SNAKE_CASE :Dict = NystromformerForMultipleChoice(config=SCREAMING_SNAKE_CASE__ ) model.to(SCREAMING_SNAKE_CASE__ ) model.eval() __SCREAMING_SNAKE_CASE :List[str] = input_ids.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous() __SCREAMING_SNAKE_CASE :Optional[int] = token_type_ids.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous() __SCREAMING_SNAKE_CASE :List[Any] = input_mask.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous() __SCREAMING_SNAKE_CASE :Dict = model( SCREAMING_SNAKE_CASE__ ,attention_mask=SCREAMING_SNAKE_CASE__ ,token_type_ids=SCREAMING_SNAKE_CASE__ ,labels=SCREAMING_SNAKE_CASE__ ,) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_choices) ) def _UpperCamelCase ( self ) -> Any: """simple docstring""" __SCREAMING_SNAKE_CASE :List[Any] = self.prepare_config_and_inputs() ( ( __SCREAMING_SNAKE_CASE ) ,( __SCREAMING_SNAKE_CASE ) ,( __SCREAMING_SNAKE_CASE ) ,( __SCREAMING_SNAKE_CASE ) ,( __SCREAMING_SNAKE_CASE ) ,( __SCREAMING_SNAKE_CASE ) ,( __SCREAMING_SNAKE_CASE ) , ) :Dict = config_and_inputs __SCREAMING_SNAKE_CASE :str = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask} return config, inputs_dict @require_torch class _SCREAMING_SNAKE_CASE( A , A , unittest.TestCase ): SCREAMING_SNAKE_CASE_ : int = ( ( NystromformerModel, NystromformerForMaskedLM, NystromformerForMultipleChoice, NystromformerForQuestionAnswering, NystromformerForSequenceClassification, NystromformerForTokenClassification, ) if is_torch_available() else () ) SCREAMING_SNAKE_CASE_ : Union[str, Any] = ( { '''feature-extraction''': NystromformerModel, '''fill-mask''': NystromformerForMaskedLM, '''question-answering''': NystromformerForQuestionAnswering, '''text-classification''': NystromformerForSequenceClassification, '''token-classification''': NystromformerForTokenClassification, '''zero-shot''': NystromformerForSequenceClassification, } if is_torch_available() else {} ) SCREAMING_SNAKE_CASE_ : str = False SCREAMING_SNAKE_CASE_ : Tuple = False def _UpperCamelCase ( self ) -> Tuple: """simple docstring""" __SCREAMING_SNAKE_CASE :Any = NystromformerModelTester(self ) __SCREAMING_SNAKE_CASE :Optional[Any] = ConfigTester(self ,config_class=SCREAMING_SNAKE_CASE__ ,hidden_size=37 ) def _UpperCamelCase ( self ) -> List[str]: """simple docstring""" self.config_tester.run_common_tests() def _UpperCamelCase ( self ) -> List[str]: """simple docstring""" __SCREAMING_SNAKE_CASE :List[str] = self.model_tester.prepare_config_and_inputs() 
self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE__ ) def _UpperCamelCase ( self ) -> Dict: """simple docstring""" __SCREAMING_SNAKE_CASE :Any = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: __SCREAMING_SNAKE_CASE :Any = type self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE__ ) def _UpperCamelCase ( self ) -> Any: """simple docstring""" __SCREAMING_SNAKE_CASE :List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*SCREAMING_SNAKE_CASE__ ) def _UpperCamelCase ( self ) -> int: """simple docstring""" __SCREAMING_SNAKE_CASE :Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*SCREAMING_SNAKE_CASE__ ) def _UpperCamelCase ( self ) -> Union[str, Any]: """simple docstring""" __SCREAMING_SNAKE_CASE :Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*SCREAMING_SNAKE_CASE__ ) def _UpperCamelCase ( self ) -> Union[str, Any]: """simple docstring""" __SCREAMING_SNAKE_CASE :Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*SCREAMING_SNAKE_CASE__ ) def _UpperCamelCase ( self ) -> Optional[int]: """simple docstring""" __SCREAMING_SNAKE_CASE :Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*SCREAMING_SNAKE_CASE__ ) @slow def _UpperCamelCase ( self ) -> Union[str, Any]: """simple docstring""" for model_name in NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __SCREAMING_SNAKE_CASE :Tuple = NystromformerModel.from_pretrained(SCREAMING_SNAKE_CASE__ ) self.assertIsNotNone(SCREAMING_SNAKE_CASE__ ) @require_torch class _SCREAMING_SNAKE_CASE( unittest.TestCase ): @slow def _UpperCamelCase ( self ) -> List[Any]: """simple docstring""" __SCREAMING_SNAKE_CASE :int = NystromformerModel.from_pretrained('''uw-madison/nystromformer-512''' ) __SCREAMING_SNAKE_CASE :Dict = torch.tensor([[0, 1, 2, 3, 4, 5]] ) with torch.no_grad(): __SCREAMING_SNAKE_CASE :str = model(SCREAMING_SNAKE_CASE__ )[0] __SCREAMING_SNAKE_CASE :Optional[int] = torch.Size((1, 6, 7_68) ) self.assertEqual(output.shape ,SCREAMING_SNAKE_CASE__ ) __SCREAMING_SNAKE_CASE :int = torch.tensor( [[[-0.4_5_3_2, -0.0_9_3_6, 0.5_1_3_7], [-0.2_6_7_6, 0.0_6_2_8, 0.6_1_8_6], [-0.3_6_2_9, -0.1_7_2_6, 0.4_7_1_6]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] ,SCREAMING_SNAKE_CASE__ ,atol=1E-4 ) ) @slow def _UpperCamelCase ( self ) -> Optional[int]: """simple docstring""" __SCREAMING_SNAKE_CASE :Optional[Any] = '''the [MASK] of Belgium is Brussels''' __SCREAMING_SNAKE_CASE :Optional[Any] = AutoTokenizer.from_pretrained('''uw-madison/nystromformer-512''' ) __SCREAMING_SNAKE_CASE :str = NystromformerForMaskedLM.from_pretrained('''uw-madison/nystromformer-512''' ) __SCREAMING_SNAKE_CASE :Union[str, Any] = tokenizer(SCREAMING_SNAKE_CASE__ ,return_tensors='''pt''' ) with torch.no_grad(): __SCREAMING_SNAKE_CASE :Union[str, Any] = model(encoding.input_ids ).logits __SCREAMING_SNAKE_CASE :List[str] = token_logits[:, 2, :].argmax(-1 )[0] self.assertEqual(tokenizer.decode(SCREAMING_SNAKE_CASE__ ) ,'''capital''' )
239
1
"""simple docstring""" import os import tempfile import unittest from pathlib import Path from transformers import AutoConfig, is_torch_available from transformers.testing_utils import require_torch, torch_device if is_torch_available(): from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments @require_torch class _UpperCAmelCase ( unittest.TestCase ): def A ( self : List[str] , A : Union[str, Any] ) -> int: for model_result in results.values(): for batch_size, sequence_length in zip(model_result['''bs'''] , model_result['''ss'''] ): lowercase_ : Tuple = model_result['''result'''][batch_size][sequence_length] self.assertIsNotNone(A ) def A ( self : int ) -> Optional[Any]: lowercase_ : Optional[Any] = '''sshleifer/tiny-gpt2''' lowercase_ : int = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=A , inference=A , sequence_lengths=[8] , batch_sizes=[1] , multi_process=A , ) lowercase_ : List[str] = PyTorchBenchmark(A ) lowercase_ : Optional[Any] = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def A ( self : int ) -> Optional[Any]: lowercase_ : List[Any] = '''sgugger/tiny-distilbert-classification''' lowercase_ : Optional[Any] = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=A , inference=A , sequence_lengths=[8] , batch_sizes=[1] , multi_process=A , only_pretrain_model=A , ) lowercase_ : Tuple = PyTorchBenchmark(A ) lowercase_ : Optional[int] = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def A ( self : List[Any] ) -> str: lowercase_ : str = '''sshleifer/tiny-gpt2''' lowercase_ : Any = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=A , inference=A , torchscript=A , sequence_lengths=[8] , batch_sizes=[1] , multi_process=A , ) lowercase_ : List[Any] = PyTorchBenchmark(A ) lowercase_ : Dict = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) @unittest.skipIf(torch_device == '''cpu''' , '''Cant do half precision''' ) def A ( self : List[Any] ) -> str: lowercase_ : int = '''sshleifer/tiny-gpt2''' lowercase_ : Tuple = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=A , inference=A , fpaa=A , sequence_lengths=[8] , batch_sizes=[1] , multi_process=A , ) lowercase_ : Any = PyTorchBenchmark(A ) lowercase_ : Dict = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def A ( self : Union[str, Any] ) -> Optional[int]: lowercase_ : Union[str, Any] = '''sshleifer/tiny-gpt2''' lowercase_ : Optional[int] = AutoConfig.from_pretrained(A ) # set architectures equal to `None` lowercase_ : str = None lowercase_ : Tuple = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=A , inference=A , sequence_lengths=[8] , batch_sizes=[1] , multi_process=A , ) lowercase_ : Any = PyTorchBenchmark(A , configs=[config] ) lowercase_ : List[str] = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def A ( self : Optional[Any] ) -> List[Any]: lowercase_ : Optional[Any] = '''sshleifer/tiny-gpt2''' lowercase_ : str = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=A , inference=A , sequence_lengths=[8] , batch_sizes=[1] , multi_process=A , ) lowercase_ : List[str] = 
PyTorchBenchmark(A ) lowercase_ : Dict = benchmark.run() self.check_results_dict_not_empty(results.time_train_result ) self.check_results_dict_not_empty(results.memory_train_result ) @unittest.skipIf(torch_device == '''cpu''' , '''Can\'t do half precision''' ) def A ( self : Optional[Any] ) -> Dict: lowercase_ : Optional[Any] = '''sshleifer/tiny-gpt2''' lowercase_ : str = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=A , inference=A , sequence_lengths=[8] , batch_sizes=[1] , fpaa=A , multi_process=A , ) lowercase_ : int = PyTorchBenchmark(A ) lowercase_ : Optional[int] = benchmark.run() self.check_results_dict_not_empty(results.time_train_result ) self.check_results_dict_not_empty(results.memory_train_result ) def A ( self : int ) -> Optional[Any]: lowercase_ : List[Any] = '''sshleifer/tiny-gpt2''' lowercase_ : Any = AutoConfig.from_pretrained(A ) lowercase_ : Any = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=A , inference=A , sequence_lengths=[8] , batch_sizes=[1] , multi_process=A , ) lowercase_ : Optional[Any] = PyTorchBenchmark(A , configs=[config] ) lowercase_ : List[Any] = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def A ( self : Any ) -> List[Any]: lowercase_ : Union[str, Any] = '''sshleifer/tinier_bart''' lowercase_ : Optional[Any] = AutoConfig.from_pretrained(A ) lowercase_ : int = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=A , inference=A , sequence_lengths=[8] , batch_sizes=[1] , multi_process=A , ) lowercase_ : Any = PyTorchBenchmark(A , configs=[config] ) lowercase_ : int = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def A ( self : List[str] ) -> Optional[int]: lowercase_ : str = '''sshleifer/tiny-gpt2''' lowercase_ : Union[str, Any] = AutoConfig.from_pretrained(A ) lowercase_ : List[Any] = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=A , inference=A , sequence_lengths=[8] , batch_sizes=[1] , multi_process=A , ) lowercase_ : List[str] = PyTorchBenchmark(A , configs=[config] ) lowercase_ : str = benchmark.run() self.check_results_dict_not_empty(results.time_train_result ) self.check_results_dict_not_empty(results.memory_train_result ) def A ( self : Optional[Any] ) -> int: lowercase_ : Optional[Any] = '''sshleifer/tinier_bart''' lowercase_ : Optional[Any] = AutoConfig.from_pretrained(A ) lowercase_ : Tuple = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=A , inference=A , sequence_lengths=[8] , batch_sizes=[1] , multi_process=A , ) lowercase_ : str = PyTorchBenchmark(A , configs=[config] ) lowercase_ : str = benchmark.run() self.check_results_dict_not_empty(results.time_train_result ) self.check_results_dict_not_empty(results.memory_train_result ) def A ( self : Union[str, Any] ) -> Optional[int]: lowercase_ : List[Any] = '''sshleifer/tiny-gpt2''' with tempfile.TemporaryDirectory() as tmp_dir: lowercase_ : Any = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=A , inference=A , save_to_csv=A , sequence_lengths=[8] , batch_sizes=[1] , inference_time_csv_file=os.path.join(A , '''inf_time.csv''' ) , train_memory_csv_file=os.path.join(A , '''train_mem.csv''' ) , inference_memory_csv_file=os.path.join(A , '''inf_mem.csv''' ) , train_time_csv_file=os.path.join(A , '''train_time.csv''' ) , env_info_csv_file=os.path.join(A , '''env.csv''' ) , multi_process=A , ) lowercase_ : str = PyTorchBenchmark(A ) 
benchmark.run() self.assertTrue(Path(os.path.join(A , '''inf_time.csv''' ) ).exists() ) self.assertTrue(Path(os.path.join(A , '''train_time.csv''' ) ).exists() ) self.assertTrue(Path(os.path.join(A , '''inf_mem.csv''' ) ).exists() ) self.assertTrue(Path(os.path.join(A , '''train_mem.csv''' ) ).exists() ) self.assertTrue(Path(os.path.join(A , '''env.csv''' ) ).exists() ) def A ( self : str ) -> Tuple: lowercase_ : str = '''sshleifer/tiny-gpt2''' def _check_summary_is_not_empty(A : List[Any] ): self.assertTrue(hasattr(A , '''sequential''' ) ) self.assertTrue(hasattr(A , '''cumulative''' ) ) self.assertTrue(hasattr(A , '''current''' ) ) self.assertTrue(hasattr(A , '''total''' ) ) with tempfile.TemporaryDirectory() as tmp_dir: lowercase_ : Any = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=A , inference=A , sequence_lengths=[8] , batch_sizes=[1] , log_filename=os.path.join(A , '''log.txt''' ) , log_print=A , trace_memory_line_by_line=A , multi_process=A , ) lowercase_ : Optional[int] = PyTorchBenchmark(A ) lowercase_ : Dict = benchmark.run() _check_summary_is_not_empty(result.inference_summary ) _check_summary_is_not_empty(result.train_summary ) self.assertTrue(Path(os.path.join(A , '''log.txt''' ) ).exists() )
33
"""simple docstring""" import importlib import torch import yaml from omegaconf import OmegaConf from taming.models.vqgan import VQModel def __magic_name__ ( __snake_case : Dict , __snake_case : Optional[Any]=False ) -> Tuple: lowercase : Union[str, Any] = OmegaConf.load(__snake_case ) if display: print(yaml.dump(OmegaConf.to_container(__snake_case ) ) ) return config def __magic_name__ ( __snake_case : Dict , __snake_case : Optional[Any]=None , __snake_case : Union[str, Any]=None ) -> Tuple: if conf_path is None: lowercase : List[Any] = "./model_checkpoints/vqgan_only.yaml" lowercase : Tuple = load_config(__snake_case , display=__snake_case ) lowercase : List[Any] = VQModel(**config.model.params ) if ckpt_path is None: lowercase : List[str] = "./model_checkpoints/vqgan_only.pt" lowercase : Optional[int] = torch.load(__snake_case , map_location=__snake_case ) if ".ckpt" in ckpt_path: lowercase : str = sd["state_dict"] model.load_state_dict(__snake_case , strict=__snake_case ) model.to(__snake_case ) del sd return model def __magic_name__ ( __snake_case : Tuple , __snake_case : Union[str, Any] ) -> int: lowercase , lowercase , lowercase : List[Any] = model.encode(__snake_case ) print(f"""VQGAN --- {model.__class__.__name__}: latent shape: {z.shape[2:]}""" ) lowercase : str = model.decode(__snake_case ) return xrec def __magic_name__ ( __snake_case : Dict , __snake_case : Optional[int]=False ) -> int: lowercase , lowercase : Union[str, Any] = string.rsplit("." , 1 ) if reload: lowercase : Any = importlib.import_module(__snake_case ) importlib.reload(__snake_case ) return getattr(importlib.import_module(__snake_case , package=__snake_case ) , cls ) def __magic_name__ ( __snake_case : str ) -> List[str]: if "target" not in config: raise KeyError("Expected key `target` to instantiate." ) return get_obj_from_str(config["target"] )(**config.get("params" , {} ) ) def __magic_name__ ( __snake_case : Any , __snake_case : int , __snake_case : List[Any]=True , __snake_case : Dict=True ) -> str: lowercase : Optional[int] = instantiate_from_config(__snake_case ) if sd is not None: model.load_state_dict(__snake_case ) if gpu: model.cuda() if eval_mode: model.eval() return {"model": model} def __magic_name__ ( __snake_case : Optional[int] , __snake_case : Any , __snake_case : Optional[int] , __snake_case : List[str] ) -> Any: # load the specified checkpoint if ckpt: lowercase : Dict = torch.load(__snake_case , map_location="cpu" ) lowercase : List[Any] = pl_sd["global_step"] print(f"""loaded model from global step {global_step}.""" ) else: lowercase : int = {"state_dict": None} lowercase : Optional[Any] = None lowercase : List[Any] = load_model_from_config(config.model , pl_sd["state_dict"] , gpu=__snake_case , eval_mode=__snake_case )["model"] return model, global_step
202
0
import random


def _partition(data: list, pivot) -> tuple:
    """Three-way partition of ``data`` into elements less than, equal to,
    and greater than ``pivot``."""
    less, equal, greater = [], [], []
    for element in data:
        if element < pivot:
            less.append(element)
        elif element > pivot:
            greater.append(element)
        else:
            equal.append(element)
    return less, equal, greater


def quick_select(items: list, index: int):
    """Return the element that would sit at position ``index`` if ``items``
    were sorted, in expected linear time."""
    # index = len(items) // 2 when trying to find the median
    # (value of index when items is sorted)

    # invalid input
    if index >= len(items) or index < 0:
        return None

    pivot = items[random.randint(0, len(items) - 1)]
    smaller, equal, larger = _partition(items, pivot)
    count = len(equal)
    m = len(smaller)

    # index is the pivot
    if m <= index < m + count:
        return pivot
    # must be in smaller
    elif m > index:
        return quick_select(smaller, index)
    # must be in larger
    else:
        return quick_select(larger, index - (m + count))
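# A short usage sketch for `quick_select` (the sample list is illustrative):
# picking the middle index of the sorted order yields the median.
if __name__ == "__main__":
    sample = [7, 1, 5, 3, 9, 2, 8]
    median_index = len(sample) // 2
    assert quick_select(sample, median_index) == sorted(sample)[median_index]
    assert quick_select(sample, 0) == min(sample)
    print(quick_select(sample, median_index))  # -> 5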
285
import unittest from datasets import load_dataset from transformers import BloomTokenizerFast from transformers.testing_utils import require_tokenizers from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class __snake_case ( lowerCAmelCase , unittest.TestCase ): _a : Optional[int]= None _a : Optional[Any]= BloomTokenizerFast _a : Tuple= BloomTokenizerFast _a : str= True _a : Optional[int]= False _a : List[Any]= "tokenizer_file" _a : List[Any]= {"bos_token": "<s>", "eos_token": "</s>", "unk_token": "<unk>", "pad_token": "<pad>"} def _SCREAMING_SNAKE_CASE ( self ): '''simple docstring''' super().setUp() lowercase : Optional[Any] = BloomTokenizerFast.from_pretrained("""bigscience/tokenizer""" ) tokenizer.save_pretrained(self.tmpdirname ) def _SCREAMING_SNAKE_CASE ( self ,**snake_case ): '''simple docstring''' kwargs.update(self.special_tokens_map ) return BloomTokenizerFast.from_pretrained(self.tmpdirname ,**snake_case ) def _SCREAMING_SNAKE_CASE ( self ): '''simple docstring''' lowercase : Tuple = self.get_rust_tokenizer() lowercase : List[str] = ["""The quick brown fox</s>""", """jumps over the lazy dog</s>"""] lowercase : Optional[int] = [[2175, 23714, 73173, 144252, 2], [77, 132619, 3478, 368, 109586, 35433, 2]] lowercase : Any = tokenizer.batch_encode_plus(snake_case )["""input_ids"""] self.assertListEqual(snake_case ,snake_case ) lowercase : Optional[int] = tokenizer.batch_decode(snake_case ) self.assertListEqual(snake_case ,snake_case ) def _SCREAMING_SNAKE_CASE ( self ,snake_case=6 ): '''simple docstring''' for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})" ): lowercase : Dict = self.rust_tokenizer_class.from_pretrained(snake_case ,**snake_case ) # tokenizer_r.pad_token = None # Hotfixing padding = None # Simple input lowercase : Dict = """This is a simple input""" lowercase : Tuple = ["""This is a simple input 1""", """This is a simple input 2"""] lowercase : Dict = ("""This is a simple input""", """This is a pair""") lowercase : Optional[Any] = [ ("""This is a simple input 1""", """This is a simple input 2"""), ("""This is a simple pair 1""", """This is a simple pair 2"""), ] # Simple input tests try: tokenizer_r.encode(snake_case ,max_length=snake_case ) tokenizer_r.encode_plus(snake_case ,max_length=snake_case ) tokenizer_r.batch_encode_plus(snake_case ,max_length=snake_case ) tokenizer_r.encode(snake_case ,max_length=snake_case ) tokenizer_r.batch_encode_plus(snake_case ,max_length=snake_case ) except ValueError: self.fail("""Bloom Tokenizer should be able to deal with padding""" ) lowercase : Optional[int] = None # Hotfixing padding = None self.assertRaises(snake_case ,tokenizer_r.encode ,snake_case ,max_length=snake_case ,padding="""max_length""" ) # Simple input self.assertRaises(snake_case ,tokenizer_r.encode_plus ,snake_case ,max_length=snake_case ,padding="""max_length""" ) # Simple input self.assertRaises( snake_case ,tokenizer_r.batch_encode_plus ,snake_case ,max_length=snake_case ,padding="""max_length""" ,) # Pair input self.assertRaises(snake_case ,tokenizer_r.encode ,snake_case ,max_length=snake_case ,padding="""max_length""" ) # Pair input self.assertRaises(snake_case ,tokenizer_r.encode_plus ,snake_case ,max_length=snake_case ,padding="""max_length""" ) # Pair input self.assertRaises( snake_case ,tokenizer_r.batch_encode_plus ,snake_case ,max_length=snake_case ,padding="""max_length""" ,) def _SCREAMING_SNAKE_CASE ( self ): '''simple docstring''' 
lowercase : List[Any] = self.get_rust_tokenizer() lowercase : List[str] = load_dataset("""xnli""" ,"""all_languages""" ,split="""test""" ,streaming=snake_case ) lowercase : Optional[Any] = next(iter(snake_case ) )["""premise"""] # pick up one data lowercase : str = list(sample_data.values() ) lowercase : Optional[int] = list(map(tokenizer.encode ,snake_case ) ) lowercase : Dict = [tokenizer.decode(snake_case ,clean_up_tokenization_spaces=snake_case ) for x in output_tokens] self.assertListEqual(snake_case ,snake_case ) def _SCREAMING_SNAKE_CASE ( self ): '''simple docstring''' self.assertGreaterEqual(len(self.tokenizer_class.pretrained_vocab_files_map ) ,1 ) self.assertGreaterEqual(len(list(self.tokenizer_class.pretrained_vocab_files_map.values() )[0] ) ,1 )
285
1
'''Re-exports for `accelerate.test_utils`: require-* decorators, subprocess helpers, regression fixtures, and test scripts.'''
from .testing import (
    are_the_same_tensors,
    execute_subprocess_async,
    require_bnb,
    require_cpu,
    require_cuda,
    require_huggingface_suite,
    require_mps,
    require_multi_gpu,
    require_multi_xpu,
    require_safetensors,
    require_single_gpu,
    require_single_xpu,
    require_torch_min_version,
    require_tpu,
    require_xpu,
    skip,
    slow,
)
from .training import RegressionDataset, RegressionModel, RegressionModel4XPU


from .scripts import test_script, test_sync, test_ops  # isort: skip
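# A minimal sketch of how the re-exports above are consumed by a downstream
# test (the regression coefficients and inputs are illustrative, and the
# RegressionModel(a, b) signature is assumed from accelerate's test fixtures):
if __name__ == "__main__":
    import torch

    from accelerate.test_utils import RegressionModel

    model = RegressionModel(a=2, b=1)  # learns/serves y = a * x + b
    out = model(torch.tensor([1.0, 2.0]))
    assert torch.allclose(out, torch.tensor([3.0, 5.0]))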
237
'''simple docstring''' from .integrations import ( is_optuna_available, is_ray_available, is_sigopt_available, is_wandb_available, run_hp_search_optuna, run_hp_search_ray, run_hp_search_sigopt, run_hp_search_wandb, ) from .trainer_utils import ( HPSearchBackend, default_hp_space_optuna, default_hp_space_ray, default_hp_space_sigopt, default_hp_space_wandb, ) from .utils import logging __lowerCAmelCase : Optional[Any] =logging.get_logger(__name__) class UpperCAmelCase : __lowercase = 42 __lowercase = None @staticmethod def UpperCAmelCase_ ( )-> Dict: raise NotImplementedError def UpperCAmelCase_ ( self :List[Any] , lowercase_ :str , lowercase_ :int , lowercase_ :str , **lowercase_ :Dict )-> str: raise NotImplementedError def UpperCAmelCase_ ( self :Optional[int] , lowercase_ :int )-> Any: raise NotImplementedError def UpperCAmelCase_ ( self :List[Any] )-> Optional[Any]: if not self.is_available(): raise RuntimeError( F"You picked the {self.name} backend, but it is not installed. Run {self.pip_install()}." ) @classmethod def UpperCAmelCase_ ( cls :int )-> Any: return F"`pip install {cls.pip_package or cls.name}`" class UpperCAmelCase ( UpperCamelCase__ ): __lowercase = """optuna""" @staticmethod def UpperCAmelCase_ ( )-> int: return is_optuna_available() def UpperCAmelCase_ ( self :List[str] , lowercase_ :str , lowercase_ :int , lowercase_ :str , **lowercase_ :List[Any] )-> Tuple: return run_hp_search_optuna(lowercase_ , lowercase_ , lowercase_ , **lowercase_ ) def UpperCAmelCase_ ( self :str , lowercase_ :Optional[int] )-> Optional[Any]: return default_hp_space_optuna(lowercase_ ) class UpperCAmelCase ( UpperCamelCase__ ): __lowercase = """ray""" __lowercase = """'ray[tune]'""" @staticmethod def UpperCAmelCase_ ( )-> str: return is_ray_available() def UpperCAmelCase_ ( self :int , lowercase_ :Dict , lowercase_ :int , lowercase_ :str , **lowercase_ :List[str] )-> int: return run_hp_search_ray(lowercase_ , lowercase_ , lowercase_ , **lowercase_ ) def UpperCAmelCase_ ( self :Optional[int] , lowercase_ :Dict )-> int: return default_hp_space_ray(lowercase_ ) class UpperCAmelCase ( UpperCamelCase__ ): __lowercase = """sigopt""" @staticmethod def UpperCAmelCase_ ( )-> Union[str, Any]: return is_sigopt_available() def UpperCAmelCase_ ( self :Any , lowercase_ :Union[str, Any] , lowercase_ :int , lowercase_ :str , **lowercase_ :Dict )-> Dict: return run_hp_search_sigopt(lowercase_ , lowercase_ , lowercase_ , **lowercase_ ) def UpperCAmelCase_ ( self :Optional[int] , lowercase_ :Optional[int] )-> List[str]: return default_hp_space_sigopt(lowercase_ ) class UpperCAmelCase ( UpperCamelCase__ ): __lowercase = """wandb""" @staticmethod def UpperCAmelCase_ ( )-> List[str]: return is_wandb_available() def UpperCAmelCase_ ( self :Dict , lowercase_ :Optional[Any] , lowercase_ :int , lowercase_ :str , **lowercase_ :Dict )-> List[str]: return run_hp_search_wandb(lowercase_ , lowercase_ , lowercase_ , **lowercase_ ) def UpperCAmelCase_ ( self :Union[str, Any] , lowercase_ :str )-> Dict: return default_hp_space_wandb(lowercase_ ) __lowerCAmelCase : int ={ HPSearchBackend(backend.name): backend for backend in [OptunaBackend, RayTuneBackend, SigOptBackend, WandbBackend] } def UpperCamelCase ( ): A__ = [backend for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values() if backend.is_available()] if len(_lowerCamelCase ) > 0: A__ = available_backends[0].name if len(_lowerCamelCase ) > 1: logger.info( F"{len(_lowerCamelCase )} hyperparameter search backends available. Using {name} as the default." 
) return name raise RuntimeError( "No hyperparameter search backend available.\n" + "\n".join( F" - To install {backend.name} run {backend.pip_install()}" for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values() ) )
237
1
"""simple docstring""" from copy import deepcopy import torch import torch.nn.functional as F from torch.optim import AdamW from torch.optim.lr_scheduler import LambdaLR from torch.utils.data import DataLoader from accelerate.accelerator import Accelerator from accelerate.state import GradientState from accelerate.test_utils import RegressionDataset, RegressionModel from accelerate.utils import DistributedType, is_torch_version, set_seed def _A ( _a : Any , _a : Union[str, Any] , _a : Tuple , _a : int ): """simple docstring""" for param, grad_param in zip(model_a.parameters() , model_b.parameters() ): if not param.requires_grad: continue if not did_step: # Grads should not be in sync assert ( torch.allclose(param.grad , grad_param.grad ) is False ), f'Gradients in sync when they should not be at iteration {iteration}:\nmodel_a grad ({param.grad}) == model_b grad ({grad_param.grad})' else: # Grads should be in sync assert ( torch.allclose(param.grad , grad_param.grad ) is True ), f'Gradients not in sync when they should be at iteration {iteration}:\nmodel_a grad ({param.grad}) != model_b grad ({grad_param.grad})' def _A ( _a : List[str] , _a : str , _a : Union[str, Any] , _a : List[str] , _a : str=True ): """simple docstring""" model.train() A = model(_a ) A = F.mse_loss(_a , target.to(output.device ) ) if not do_backward: loss /= accelerator.gradient_accumulation_steps loss.backward() else: accelerator.backward(_a ) def _A ( _a : Optional[Any] , _a : Optional[int]=False ): """simple docstring""" set_seed(4_2 ) A = RegressionModel() A = deepcopy(_a ) A = RegressionDataset(length=8_0 ) A = DataLoader(_a , batch_size=1_6 ) model.to(accelerator.device ) if sched: A = AdamW(params=model.parameters() , lr=1E-3 ) A = AdamW(params=ddp_model.parameters() , lr=1E-3 ) A = LambdaLR(_a , lr_lambda=lambda _a : epoch**0.65 ) A = LambdaLR(_a , lr_lambda=lambda _a : epoch**0.65 ) # Make a copy of `model` if sched: A , A , A , A = accelerator.prepare(_a , _a , _a , _a ) else: A , A = accelerator.prepare(_a , _a ) if sched: return (model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched) return model, ddp_model, dataloader def _A ( _a : Union[str, Any] ): """simple docstring""" A , A , A = get_training_setup(_a ) # Use a single batch A , A = next(iter(_a ) ).values() for iteration in range(3 ): # Gather the distributed inputs and targs for the base model A , A = accelerator.gather((ddp_input, ddp_target) ) A , A = input.to(accelerator.device ), target.to(accelerator.device ) # Perform our initial ground truth step in non "DDP" step_model(_a , _a , _a , _a ) # Do "gradient accumulation" (noop) if iteration % 2 == 0: # Accumulate grads locally with accelerator.no_sync(_a ): step_model(_a , _a , _a , _a ) else: # Sync grads step_model(_a , _a , _a , _a ) # Since `no_sync` is a noop, `ddp_model` and `model` grads should always be in sync check_model_parameters(_a , _a , _a , _a ) for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ): if not param.requires_grad: continue assert torch.allclose( param.grad , ddp_param.grad ), f'Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})' # Shuffle ddp_input on each iteration torch.manual_seed(1_3_3_7 + iteration ) A = ddp_input[torch.randperm(len(_a ) )] def _A ( _a : Optional[Any] ): """simple docstring""" A , A , A = get_training_setup(_a ) # Use a single batch A , A = next(iter(_a ) ).values() for iteration in range(3 ): # Gather the distributed inputs and targs for the base model A , A = 
accelerator.gather((ddp_input, ddp_target) ) A , A = input.to(accelerator.device ), target.to(accelerator.device ) # Perform our initial ground truth step in non "DDP" step_model(_a , _a , _a , _a ) # Do "gradient accumulation" (noop) if iteration % 2 == 0: # Accumulate grads locally with accelerator.no_sync(_a ): step_model(_a , _a , _a , _a ) else: # Sync grads step_model(_a , _a , _a , _a ) # DDP model and model should only be in sync when not (iteration % 2 == 0) for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ): if not param.requires_grad: continue if iteration % 2 == 0: # Grads should not be in sync assert ( torch.allclose(param.grad , ddp_param.grad ) is False ), f'Gradients in sync when they should not be:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})' else: # Grads should be in sync assert ( torch.allclose(param.grad , ddp_param.grad ) is True ), f'Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})' # Shuffle ddp_input on each iteration torch.manual_seed(1_3_3_7 + iteration ) A = ddp_input[torch.randperm(len(_a ) )] def _A ( _a : Union[str, Any]=False , _a : Any=False ): """simple docstring""" A = Accelerator( split_batches=_a , dispatch_batches=_a , gradient_accumulation_steps=2 ) # Test that context manager behaves properly A , A , A = get_training_setup(_a ) for iteration, batch in enumerate(_a ): A , A = batch.values() # Gather the distributed inputs and targs for the base model A , A = accelerator.gather((ddp_input, ddp_target) ) A , A = input.to(accelerator.device ), target.to(accelerator.device ) # Perform our initial ground truth step in non "DDP" step_model(_a , _a , _a , _a , _a ) # Do "gradient accumulation" (noop) with accelerator.accumulate(_a ): step_model(_a , _a , _a , _a ) # DDP model and model should only be in sync when not (iteration % 2 == 0) for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ): if not param.requires_grad: continue if ((iteration + 1) % 2 == 0) or (iteration == len(_a ) - 1): # Grads should be in sync assert ( torch.allclose(param.grad , ddp_param.grad ) is True ), f'Gradients not in sync when they should be at iteration {iteration}:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})' else: # Grads should not be in sync assert ( torch.allclose(param.grad , ddp_param.grad ) is False ), f'Gradients in sync when they should not be at iteration {iteration}:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})' # Shuffle ddp_input on each iteration torch.manual_seed(1_3_3_7 + iteration ) A = ddp_input[torch.randperm(len(_a ) )] GradientState._reset_state() def _A ( _a : int=False , _a : str=False ): """simple docstring""" A = Accelerator( split_batches=_a , dispatch_batches=_a , gradient_accumulation_steps=2 ) # Test that context manager behaves properly A , A , A , A , A , A , A = get_training_setup(_a , _a ) for iteration, batch in enumerate(_a ): A , A = batch.values() # Gather the distributed inputs and targs for the base model A , A = accelerator.gather((ddp_input, ddp_target) ) A , A = input.to(accelerator.device ), target.to(accelerator.device ) # Perform our initial ground truth step in non "DDP" model.train() ddp_model.train() step_model(_a , _a , _a , _a , _a ) opt.step() if ((iteration + 1) % 2 == 0) or ((iteration + 1) == len(_a )): if split_batches: sched.step() else: for _ in range(accelerator.num_processes ): sched.step() opt.zero_grad() # Perform gradient accumulation under wrapper with accelerator.accumulate(_a ): 
step_model(_a , _a , _a , _a ) ddp_opt.step() ddp_sched.step() ddp_opt.zero_grad() # Learning rates should be the same assert ( opt.param_groups[0]["lr"] == ddp_opt.param_groups[0]["lr"] ), f'Learning rates found in each optimizer did not align\nopt: {opt.param_groups[0]["lr"]}\nDDP opt: {ddp_opt.param_groups[0]["lr"]}\n' A = (((iteration + 1) % 2) == 0) or ((iteration + 1) == len(_a )) if accelerator.num_processes > 1: check_model_parameters(_a , _a , _a , _a ) # Shuffle ddp_input on each iteration torch.manual_seed(1_3_3_7 + iteration ) GradientState._reset_state() def _A ( ): """simple docstring""" A = Accelerator() A = RegressionDataset(length=8_0 ) A = DataLoader(_a , batch_size=1_6 ) A = RegressionDataset(length=9_6 ) A = DataLoader(_a , batch_size=1_6 ) A , A = accelerator.prepare(_a , _a ) assert accelerator.gradient_state.active_dataloader is None for iteration, _ in enumerate(_a ): assert id(accelerator.gradient_state.active_dataloader ) == id(_a ) if iteration < len(_a ) - 1: assert not accelerator.gradient_state.end_of_dataloader if iteration == 1: for batch_num, _ in enumerate(_a ): assert id(accelerator.gradient_state.active_dataloader ) == id(_a ) if batch_num < len(_a ) - 1: assert not accelerator.gradient_state.end_of_dataloader else: assert accelerator.gradient_state.end_of_dataloader else: assert accelerator.gradient_state.end_of_dataloader assert accelerator.gradient_state.active_dataloader is None def _A ( ): """simple docstring""" A = Accelerator() A = accelerator.state if state.local_process_index == 0: print("""**Test `accumulate` gradient accumulation with dataloader break**""" ) test_dataloader_break() if state.distributed_type == DistributedType.NO: if state.local_process_index == 0: print("""**Test NOOP `no_sync` context manager**""" ) test_noop_sync(_a ) if state.distributed_type in (DistributedType.MULTI_GPU, DistributedType.MULTI_CPU): if state.local_process_index == 0: print("""**Test Distributed `no_sync` context manager**""" ) test_distributed_sync(_a ) if state.distributed_type == DistributedType.MULTI_GPU: for split_batch in [True, False]: for dispatch_batches in [True, False]: if state.local_process_index == 0: print( """**Test `accumulate` gradient accumulation, """ , f'`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**' , ) test_gradient_accumulation(_a , _a ) # Currently will break on torch 2.0 +, need to investigate why if is_torch_version("""<""" , """2.0""" ) or state.distributed_type == DistributedType.NO: if state.local_process_index == 0: print( """**Test `accumulate` gradient accumulation with optimizer and scheduler, """ , """`split_batches=False`, `dispatch_batches=False`**""" , ) test_gradient_accumulation_with_opt_and_scheduler() if state.distributed_type == DistributedType.MULTI_GPU: for split_batch in [True, False]: for dispatch_batches in [True, False]: if not split_batch and not dispatch_batches: continue if state.local_process_index == 0: print( """**Test `accumulate` gradient accumulation with optimizer and scheduler, """ , f'`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**' , ) test_gradient_accumulation_with_opt_and_scheduler(_a , _a ) def _A ( _a : Any ): """simple docstring""" main() if __name__ == "__main__": main()
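# A distilled sketch of the `accelerator.accumulate` pattern that the checks
# above exercise, reusing the imports at the top of this file (the dataset
# size, batch size, and learning rate are illustrative):
def _sketch_gradient_accumulation():
    accelerator = Accelerator(gradient_accumulation_steps=2)
    model = RegressionModel()
    optimizer = AdamW(model.parameters(), lr=1e-3)
    dataloader = DataLoader(RegressionDataset(length=16), batch_size=4)
    model, optimizer, dataloader = accelerator.prepare(model, optimizer, dataloader)
    for batch in dataloader:
        # grads are synced/applied only every `gradient_accumulation_steps` batches
        with accelerator.accumulate(model):
            loss = F.mse_loss(model(batch["x"]), batch["y"])
            accelerator.backward(loss)
            optimizer.step()
            optimizer.zero_grad()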
77
"""simple docstring""" from collections.abc import Callable import numpy as np def _A ( _a : Callable , _a : float , _a : float , _a : float , _a : float ): """simple docstring""" A = int(np.ceil((x_end - xa) / step_size ) ) A = np.zeros((n + 1,) ) A = ya A = xa for k in range(_a ): A = y[k] + step_size * ode_func(_a , y[k] ) x += step_size return y if __name__ == "__main__": import doctest doctest.testmod()
77
1
'''simple docstring''' import torch from diffusers import DDPMParallelScheduler from .test_schedulers import SchedulerCommonTest class _lowercase ( _lowercase ): a = (DDPMParallelScheduler,) def lowerCamelCase_ ( self: Union[str, Any] , **UpperCamelCase__: str ): lowerCamelCase__ : str = { """num_train_timesteps""": 1_000, """beta_start""": 0.0_001, """beta_end""": 0.02, """beta_schedule""": """linear""", """variance_type""": """fixed_small""", """clip_sample""": True, } config.update(**UpperCamelCase__ ) return config def lowerCamelCase_ ( self: Tuple ): for timesteps in [1, 5, 100, 1_000]: self.check_over_configs(num_train_timesteps=UpperCamelCase__ ) def lowerCamelCase_ ( self: List[str] ): for beta_start, beta_end in zip([0.0_001, 0.001, 0.01, 0.1] , [0.002, 0.02, 0.2, 2] ): self.check_over_configs(beta_start=UpperCamelCase__ , beta_end=UpperCamelCase__ ) def lowerCamelCase_ ( self: Optional[int] ): for schedule in ["linear", "squaredcos_cap_v2"]: self.check_over_configs(beta_schedule=UpperCamelCase__ ) def lowerCamelCase_ ( self: List[Any] ): for variance in ["fixed_small", "fixed_large", "other"]: self.check_over_configs(variance_type=UpperCamelCase__ ) def lowerCamelCase_ ( self: Dict ): for clip_sample in [True, False]: self.check_over_configs(clip_sample=UpperCamelCase__ ) def lowerCamelCase_ ( self: Union[str, Any] ): self.check_over_configs(thresholding=UpperCamelCase__ ) for threshold in [0.5, 1.0, 2.0]: for prediction_type in ["epsilon", "sample", "v_prediction"]: self.check_over_configs( thresholding=UpperCamelCase__ , prediction_type=UpperCamelCase__ , sample_max_value=UpperCamelCase__ , ) def lowerCamelCase_ ( self: str ): for prediction_type in ["epsilon", "sample", "v_prediction"]: self.check_over_configs(prediction_type=UpperCamelCase__ ) def lowerCamelCase_ ( self: List[Any] ): for t in [0, 500, 999]: self.check_over_forward(time_step=UpperCamelCase__ ) def lowerCamelCase_ ( self: int ): lowerCamelCase__ : int = self.scheduler_classes[0] lowerCamelCase__ : Union[str, Any] = self.get_scheduler_config() lowerCamelCase__ : Union[str, Any] = scheduler_class(**UpperCamelCase__ ) assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1e-5 assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.00_979 ) ) < 1e-5 assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.02 ) ) < 1e-5 def lowerCamelCase_ ( self: List[str] ): lowerCamelCase__ : int = self.scheduler_classes[0] lowerCamelCase__ : List[Any] = self.get_scheduler_config() lowerCamelCase__ : List[str] = scheduler_class(**UpperCamelCase__ ) lowerCamelCase__ : str = len(UpperCamelCase__ ) lowerCamelCase__ : str = self.dummy_model() lowerCamelCase__ : int = self.dummy_sample_deter lowerCamelCase__ : Optional[int] = self.dummy_sample_deter + 0.1 lowerCamelCase__ : Optional[int] = self.dummy_sample_deter - 0.1 lowerCamelCase__ : Union[str, Any] = samplea.shape[0] lowerCamelCase__ : Union[str, Any] = torch.stack([samplea, samplea, samplea] , dim=0 ) lowerCamelCase__ : str = torch.arange(UpperCamelCase__ )[0:3, None].repeat(1 , UpperCamelCase__ ) lowerCamelCase__ : Tuple = model(samples.flatten(0 , 1 ) , timesteps.flatten(0 , 1 ) ) lowerCamelCase__ : Dict = scheduler.batch_step_no_noise(UpperCamelCase__ , timesteps.flatten(0 , 1 ) , samples.flatten(0 , 1 ) ) lowerCamelCase__ : Union[str, Any] = torch.sum(torch.abs(UpperCamelCase__ ) ) lowerCamelCase__ : List[str] = torch.mean(torch.abs(UpperCamelCase__ ) ) assert abs(result_sum.item() - 1_153.1_833 ) < 1e-2 assert abs(result_mean.item() - 0.5_005 ) < 1e-3 
def lowerCamelCase_ ( self: List[Any] ): lowerCamelCase__ : List[Any] = self.scheduler_classes[0] lowerCamelCase__ : Dict = self.get_scheduler_config() lowerCamelCase__ : List[Any] = scheduler_class(**UpperCamelCase__ ) lowerCamelCase__ : List[Any] = len(UpperCamelCase__ ) lowerCamelCase__ : Optional[int] = self.dummy_model() lowerCamelCase__ : int = self.dummy_sample_deter lowerCamelCase__ : Optional[int] = torch.manual_seed(0 ) for t in reversed(range(UpperCamelCase__ ) ): # 1. predict noise residual lowerCamelCase__ : Dict = model(UpperCamelCase__ , UpperCamelCase__ ) # 2. predict previous mean of sample x_t-1 lowerCamelCase__ : Any = scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , generator=UpperCamelCase__ ).prev_sample lowerCamelCase__ : List[str] = pred_prev_sample lowerCamelCase__ : List[Any] = torch.sum(torch.abs(UpperCamelCase__ ) ) lowerCamelCase__ : Optional[int] = torch.mean(torch.abs(UpperCamelCase__ ) ) assert abs(result_sum.item() - 258.9_606 ) < 1e-2 assert abs(result_mean.item() - 0.3_372 ) < 1e-3 def lowerCamelCase_ ( self: Optional[Any] ): lowerCamelCase__ : Optional[Any] = self.scheduler_classes[0] lowerCamelCase__ : Any = self.get_scheduler_config(prediction_type="""v_prediction""" ) lowerCamelCase__ : Any = scheduler_class(**UpperCamelCase__ ) lowerCamelCase__ : int = len(UpperCamelCase__ ) lowerCamelCase__ : Optional[int] = self.dummy_model() lowerCamelCase__ : List[Any] = self.dummy_sample_deter lowerCamelCase__ : List[str] = torch.manual_seed(0 ) for t in reversed(range(UpperCamelCase__ ) ): # 1. predict noise residual lowerCamelCase__ : Optional[Any] = model(UpperCamelCase__ , UpperCamelCase__ ) # 2. predict previous mean of sample x_t-1 lowerCamelCase__ : List[str] = scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , generator=UpperCamelCase__ ).prev_sample lowerCamelCase__ : List[Any] = pred_prev_sample lowerCamelCase__ : Union[str, Any] = torch.sum(torch.abs(UpperCamelCase__ ) ) lowerCamelCase__ : List[str] = torch.mean(torch.abs(UpperCamelCase__ ) ) assert abs(result_sum.item() - 202.0_296 ) < 1e-2 assert abs(result_mean.item() - 0.2_631 ) < 1e-3 def lowerCamelCase_ ( self: List[str] ): lowerCamelCase__ : Dict = self.scheduler_classes[0] lowerCamelCase__ : List[str] = self.get_scheduler_config() lowerCamelCase__ : Optional[int] = scheduler_class(**UpperCamelCase__ ) lowerCamelCase__ : List[str] = [100, 87, 50, 1, 0] scheduler.set_timesteps(timesteps=UpperCamelCase__ ) lowerCamelCase__ : Any = scheduler.timesteps for i, timestep in enumerate(UpperCamelCase__ ): if i == len(UpperCamelCase__ ) - 1: lowerCamelCase__ : List[str] = -1 else: lowerCamelCase__ : int = timesteps[i + 1] lowerCamelCase__ : List[Any] = scheduler.previous_timestep(UpperCamelCase__ ) lowerCamelCase__ : Union[str, Any] = prev_t.item() self.assertEqual(UpperCamelCase__ , UpperCamelCase__ ) def lowerCamelCase_ ( self: Any ): lowerCamelCase__ : Optional[int] = self.scheduler_classes[0] lowerCamelCase__ : Union[str, Any] = self.get_scheduler_config() lowerCamelCase__ : Any = scheduler_class(**UpperCamelCase__ ) lowerCamelCase__ : List[str] = [100, 87, 50, 51, 0] with self.assertRaises(UpperCamelCase__ , msg="""`custom_timesteps` must be in descending order.""" ): scheduler.set_timesteps(timesteps=UpperCamelCase__ ) def lowerCamelCase_ ( self: Tuple ): lowerCamelCase__ : Tuple = self.scheduler_classes[0] lowerCamelCase__ : Dict = self.get_scheduler_config() lowerCamelCase__ : str = scheduler_class(**UpperCamelCase__ ) lowerCamelCase__ : Dict = [100, 
87, 50, 1, 0] lowerCamelCase__ : List[str] = len(UpperCamelCase__ ) with self.assertRaises(UpperCamelCase__ , msg="""Can only pass one of `num_inference_steps` or `custom_timesteps`.""" ): scheduler.set_timesteps(num_inference_steps=UpperCamelCase__ , timesteps=UpperCamelCase__ ) def lowerCamelCase_ ( self: str ): lowerCamelCase__ : Union[str, Any] = self.scheduler_classes[0] lowerCamelCase__ : Tuple = self.get_scheduler_config() lowerCamelCase__ : List[Any] = scheduler_class(**UpperCamelCase__ ) lowerCamelCase__ : Union[str, Any] = [scheduler.config.num_train_timesteps] with self.assertRaises( UpperCamelCase__ , msg="""`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}""" , ): scheduler.set_timesteps(timesteps=UpperCamelCase__ )
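# A distilled sketch of the custom-timesteps behaviour the tests above cover
# (the timestep list is illustrative):
if __name__ == "__main__":
    from diffusers import DDPMParallelScheduler

    scheduler = DDPMParallelScheduler(num_train_timesteps=1000)
    scheduler.set_timesteps(timesteps=[100, 87, 50, 1, 0])  # strictly descending
    print(scheduler.timesteps)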
41
import inspect
import re
from hashlib import sha256
from typing import Dict, List

from .arrow import arrow
from .audiofolder import audiofolder
from .csv import csv
from .imagefolder import imagefolder
from .json import json
from .pandas import pandas
from .parquet import parquet
from .sql import sql  # noqa F401
from .text import text


def _hash_python_lines(lines: List[str]) -> str:
    """Hash source lines after dropping comments and blank lines."""
    filtered_lines = []
    for line in lines:
        line = re.sub(r"#.*", "", line)  # remove comments
        if line:
            filtered_lines.append(line)
    full_str = "\n".join(filtered_lines)

    # Make a hash from all this code
    full_bytes = full_str.encode("utf-8")
    return sha256(full_bytes).hexdigest()


# get importable module names and hash for caching
_PACKAGED_DATASETS_MODULES = {
    "csv": (csv.__name__, _hash_python_lines(inspect.getsource(csv).splitlines())),
    "json": (json.__name__, _hash_python_lines(inspect.getsource(json).splitlines())),
    "pandas": (pandas.__name__, _hash_python_lines(inspect.getsource(pandas).splitlines())),
    "parquet": (parquet.__name__, _hash_python_lines(inspect.getsource(parquet).splitlines())),
    "arrow": (arrow.__name__, _hash_python_lines(inspect.getsource(arrow).splitlines())),
    "text": (text.__name__, _hash_python_lines(inspect.getsource(text).splitlines())),
    "imagefolder": (imagefolder.__name__, _hash_python_lines(inspect.getsource(imagefolder).splitlines())),
    "audiofolder": (audiofolder.__name__, _hash_python_lines(inspect.getsource(audiofolder).splitlines())),
}

# Used to infer the module to use based on the data files extensions
_EXTENSION_TO_MODULE = {
    ".csv": ("csv", {}),
    ".tsv": ("csv", {"sep": "\t"}),
    ".json": ("json", {}),
    ".jsonl": ("json", {}),
    ".parquet": ("parquet", {}),
    ".arrow": ("arrow", {}),
    ".txt": ("text", {}),
}
_EXTENSION_TO_MODULE.update({ext: ("imagefolder", {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ("imagefolder", {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext: ("audiofolder", {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ("audiofolder", {}) for ext in audiofolder.AudioFolder.EXTENSIONS})

_MODULE_SUPPORTS_METADATA = {"imagefolder", "audiofolder"}

# Used to filter data files based on extensions given a module name
_MODULE_TO_EXTENSIONS: Dict[str, List[str]] = {}
for _ext, (_module, _) in _EXTENSION_TO_MODULE.items():
    _MODULE_TO_EXTENSIONS.setdefault(_module, []).append(_ext)

_MODULE_TO_EXTENSIONS["imagefolder"].append(".zip")
_MODULE_TO_EXTENSIONS["audiofolder"].append(".zip")
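# A small check of `_hash_python_lines` (the snippets are illustrative):
# comment-only lines are dropped before hashing, so two sources that differ
# only in such lines share the same cache key.
if __name__ == "__main__":
    with_comment = ["# setup", "x = 1", "print(x)"]
    without_comment = ["x = 1", "print(x)"]
    assert _hash_python_lines(with_comment) == _hash_python_lines(without_comment)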
159
0
from collections import OrderedDict
from typing import Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "hustvl/yolos-small": "https://huggingface.co/hustvl/yolos-small/resolve/main/config.json",
    # See all YOLOS models at https://huggingface.co/models?filter=yolos
}


class YolosConfig(PretrainedConfig):
    model_type = "yolos"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=[512, 864],
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        num_detection_tokens=100,
        use_mid_position_embeddings=True,
        auxiliary_loss=False,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.num_detection_tokens = num_detection_tokens
        self.use_mid_position_embeddings = use_mid_position_embeddings
        self.auxiliary_loss = auxiliary_loss
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient


class YolosOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    @property
    def default_onnx_opset(self) -> int:
        return 12
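# A minimal sketch instantiating the config above (the values shown are the
# declared defaults; the ONNX export settings are read from the properties):
if __name__ == "__main__":
    config = YolosConfig(num_detection_tokens=100)
    assert config.model_type == "yolos"
    onnx_config = YolosOnnxConfig(config)
    assert "pixel_values" in onnx_config.inputs
    assert onnx_config.atol_for_validation == 1e-4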
82
from __future__ import annotations import inspect import unittest from math import floor import numpy as np from transformers import CvtConfig from transformers.testing_utils import require_tf, require_vision, slow from transformers.utils import cached_property, is_tf_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFCvtForImageClassification, TFCvtModel from transformers.models.cvt.modeling_tf_cvt import TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class _a ( _lowerCAmelCase ): def __snake_case (self ) -> Optional[int]: UpperCAmelCase_: Union[str, Any] = self.config_class(**self.inputs_dict ) self.parent.assertTrue(hasattr(SCREAMING_SNAKE_CASE_, """embed_dim""" ) ) self.parent.assertTrue(hasattr(SCREAMING_SNAKE_CASE_, """num_heads""" ) ) class _a : def __init__(self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_=13, SCREAMING_SNAKE_CASE_=64, SCREAMING_SNAKE_CASE_=3, SCREAMING_SNAKE_CASE_=[16, 48, 96], SCREAMING_SNAKE_CASE_=[1, 3, 6], SCREAMING_SNAKE_CASE_=[1, 2, 10], SCREAMING_SNAKE_CASE_=[7, 3, 3], SCREAMING_SNAKE_CASE_=[4, 2, 2], SCREAMING_SNAKE_CASE_=[2, 1, 1], SCREAMING_SNAKE_CASE_=[2, 2, 2], SCREAMING_SNAKE_CASE_=[False, False, True], SCREAMING_SNAKE_CASE_=[0.0, 0.0, 0.0], SCREAMING_SNAKE_CASE_=0.0_2, SCREAMING_SNAKE_CASE_=1E-12, SCREAMING_SNAKE_CASE_=True, SCREAMING_SNAKE_CASE_=True, SCREAMING_SNAKE_CASE_=2, ) -> List[Any]: UpperCAmelCase_: Union[str, Any] = parent UpperCAmelCase_: Any = batch_size UpperCAmelCase_: Optional[int] = image_size UpperCAmelCase_: Tuple = patch_sizes UpperCAmelCase_: int = patch_stride UpperCAmelCase_: int = patch_padding UpperCAmelCase_: List[str] = is_training UpperCAmelCase_: List[Any] = use_labels UpperCAmelCase_: int = num_labels UpperCAmelCase_: Dict = num_channels UpperCAmelCase_: Any = embed_dim UpperCAmelCase_: Optional[Any] = num_heads UpperCAmelCase_: Dict = stride_kv UpperCAmelCase_: Dict = depth UpperCAmelCase_: Optional[Any] = cls_token UpperCAmelCase_: List[str] = attention_drop_rate UpperCAmelCase_: List[str] = initializer_range UpperCAmelCase_: Tuple = layer_norm_eps def __snake_case (self ) -> Dict: UpperCAmelCase_: str = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) UpperCAmelCase_: Optional[Any] = None if self.use_labels: # create a random int32 tensor of given shape UpperCAmelCase_: str = ids_tensor([self.batch_size], self.num_labels ) UpperCAmelCase_: List[str] = self.get_config() return config, pixel_values, labels def __snake_case (self ) -> Tuple: return CvtConfig( image_size=self.image_size, num_labels=self.num_labels, num_channels=self.num_channels, embed_dim=self.embed_dim, num_heads=self.num_heads, patch_sizes=self.patch_sizes, patch_padding=self.patch_padding, patch_stride=self.patch_stride, stride_kv=self.stride_kv, depth=self.depth, cls_token=self.cls_token, attention_drop_rate=self.attention_drop_rate, initializer_range=self.initializer_range, ) def __snake_case (self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) -> Optional[Any]: UpperCAmelCase_: Optional[int] = TFCvtModel(config=SCREAMING_SNAKE_CASE_ ) UpperCAmelCase_: Tuple = model(SCREAMING_SNAKE_CASE_, training=SCREAMING_SNAKE_CASE_ ) UpperCAmelCase_: Optional[Any] = 
(self.image_size, self.image_size) UpperCAmelCase_ , UpperCAmelCase_: Any = image_size[0], image_size[1] for i in range(len(self.depth ) ): UpperCAmelCase_: Optional[Any] = floor(((height + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 ) UpperCAmelCase_: str = floor(((width + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 ) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.embed_dim[-1], height, width) ) def __snake_case (self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) -> str: UpperCAmelCase_: List[str] = self.num_labels UpperCAmelCase_: Tuple = TFCvtForImageClassification(SCREAMING_SNAKE_CASE_ ) UpperCAmelCase_: Any = model(SCREAMING_SNAKE_CASE_, labels=SCREAMING_SNAKE_CASE_, training=SCREAMING_SNAKE_CASE_ ) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels) ) def __snake_case (self ) -> Dict: UpperCAmelCase_: Any = self.prepare_config_and_inputs() UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_: Union[str, Any] = config_and_inputs UpperCAmelCase_: Optional[Any] = {"""pixel_values""": pixel_values} return config, inputs_dict @require_tf class _a ( _lowerCAmelCase , _lowerCAmelCase , unittest.TestCase ): A = (TFCvtModel, TFCvtForImageClassification) if is_tf_available() else () A = ( {'''feature-extraction''': TFCvtModel, '''image-classification''': TFCvtForImageClassification} if is_tf_available() else {} ) A = False A = False A = False A = False A = False def __snake_case (self ) -> int: UpperCAmelCase_: Tuple = TFCvtModelTester(self ) UpperCAmelCase_: Dict = TFCvtConfigTester(self, config_class=SCREAMING_SNAKE_CASE_, has_text_modality=SCREAMING_SNAKE_CASE_, hidden_size=37 ) def __snake_case (self ) -> List[Any]: self.config_tester.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() @unittest.skip(reason="""Cvt does not output attentions""" ) def __snake_case (self ) -> Optional[int]: pass @unittest.skip(reason="""Cvt does not use inputs_embeds""" ) def __snake_case (self ) -> List[str]: pass @unittest.skip(reason="""Cvt does not support input and output embeddings""" ) def __snake_case (self ) -> Dict: pass @unittest.skipIf( not is_tf_available() or len(tf.config.list_physical_devices("""GPU""" ) ) == 0, reason="""TF does not support backprop for grouped convolutions on CPU.""", ) def __snake_case (self ) -> Optional[int]: super().test_dataset_conversion() @unittest.skipIf( not is_tf_available() or len(tf.config.list_physical_devices("""GPU""" ) ) == 0, reason="""TF does not support backprop for grouped convolutions on CPU.""", ) @slow def __snake_case (self ) -> int: super().test_keras_fit() @unittest.skip(reason="""Get `Failed to determine best cudnn convolution algo.` error after using TF 2.12+cuda 11.8""" ) def __snake_case (self ) -> List[Any]: UpperCAmelCase_: List[str] = tf.keras.mixed_precision.Policy("""mixed_float16""" ) tf.keras.mixed_precision.set_global_policy(SCREAMING_SNAKE_CASE_ ) super().test_keras_fit() tf.keras.mixed_precision.set_global_policy("""float32""" ) def __snake_case (self ) -> Tuple: UpperCAmelCase_ , UpperCAmelCase_: str = 
self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCAmelCase_: List[str] = model_class(SCREAMING_SNAKE_CASE_ ) UpperCAmelCase_: Optional[int] = inspect.signature(model.call ) # signature.parameters is an OrderedDict => so arg_names order is deterministic UpperCAmelCase_: Any = [*signature.parameters.keys()] UpperCAmelCase_: Optional[int] = ["""pixel_values"""] self.assertListEqual(arg_names[:1], SCREAMING_SNAKE_CASE_ ) def __snake_case (self ) -> Any: def check_hidden_states_output(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ): UpperCAmelCase_: Optional[int] = model_class(SCREAMING_SNAKE_CASE_ ) UpperCAmelCase_: Optional[int] = model(**self._prepare_for_class(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) ) UpperCAmelCase_: Optional[Any] = outputs.hidden_states UpperCAmelCase_: Optional[int] = len(self.model_tester.depth ) self.assertEqual(len(SCREAMING_SNAKE_CASE_ ), SCREAMING_SNAKE_CASE_ ) # verify the first hidden states (first block) self.assertListEqual( list(hidden_states[0].shape[-3:] ), [ self.model_tester.embed_dim[0], self.model_tester.image_size // 4, self.model_tester.image_size // 4, ], ) UpperCAmelCase_ , UpperCAmelCase_: Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCAmelCase_: int = True check_hidden_states_output(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] UpperCAmelCase_: Tuple = True check_hidden_states_output(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) def __snake_case (self ) -> int: UpperCAmelCase_: Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE_ ) def __snake_case (self ) -> Optional[int]: UpperCAmelCase_: Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*SCREAMING_SNAKE_CASE_ ) @slow def __snake_case (self ) -> Optional[int]: for model_name in TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: UpperCAmelCase_: Union[str, Any] = TFCvtModel.from_pretrained(SCREAMING_SNAKE_CASE_ ) self.assertIsNotNone(SCREAMING_SNAKE_CASE_ ) def lowerCAmelCase_ (): """simple docstring""" UpperCAmelCase_: str = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) return image @require_tf @require_vision class _a ( unittest.TestCase ): @cached_property def __snake_case (self ) -> Tuple: return AutoImageProcessor.from_pretrained(TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) @slow def __snake_case (self ) -> Dict: UpperCAmelCase_: Tuple = TFCvtForImageClassification.from_pretrained(TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) UpperCAmelCase_: Dict = self.default_image_processor UpperCAmelCase_: Dict = prepare_img() UpperCAmelCase_: Union[str, Any] = image_processor(images=SCREAMING_SNAKE_CASE_, return_tensors="""tf""" ) # forward pass UpperCAmelCase_: int = model(**SCREAMING_SNAKE_CASE_ ) # verify the logits UpperCAmelCase_: Optional[int] = tf.TensorShape((1, 1000) ) self.assertEqual(outputs.logits.shape, SCREAMING_SNAKE_CASE_ ) UpperCAmelCase_: Dict = tf.constant([0.9_2_8_5, 0.9_0_1_5, -0.3_1_5_0] ) self.assertTrue(np.allclose(outputs.logits[0, :3].numpy(), SCREAMING_SNAKE_CASE_, atol=1E-4 ) )
82
1
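The TFCvt model test above checks the encoder's output resolution with the standard convolution size formula. A small self-contained sketch of that arithmetic, using the tester's default patch sizes [7, 3, 3], strides [4, 2, 2], and paddings [2, 1, 1] on a 64x64 input:

from math import floor

def conv_out(size, kernel, stride, pad):
    # Output-size formula used in create_and_check_model above.
    return floor((size + 2 * pad - kernel) / stride + 1)

size = 64
for kernel, stride, pad in zip([7, 3, 3], [4, 2, 2], [2, 1, 1]):
    size = conv_out(size, kernel, stride, pad)
    print(size)  # 16, then 8, then 4: the final feature map is 4x4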
'''simple docstring''' import inspect import os import torch from transformers import AutoModel from transformers.testing_utils import mockenv_context from transformers.trainer_utils import set_seed import accelerate from accelerate.accelerator import Accelerator from accelerate.state import AcceleratorState from accelerate.test_utils.testing import ( AccelerateTestCase, TempDirTestCase, execute_subprocess_async, require_cuda, require_fsdp, require_multi_gpu, slow, ) from accelerate.utils.constants import ( FSDP_AUTO_WRAP_POLICY, FSDP_BACKWARD_PREFETCH, FSDP_SHARDING_STRATEGY, FSDP_STATE_DICT_TYPE, ) from accelerate.utils.dataclasses import FullyShardedDataParallelPlugin from accelerate.utils.other import patch_environment set_seed(42) _lowercase : Tuple = "bert-base-cased" _lowercase : Optional[Any] = "fp16" _lowercase : List[str] = "bf16" _lowercase : int = [FPaa, BFaa] @require_fsdp @require_cuda class __magic_name__ ( _UpperCAmelCase): def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ): super().setUp() lowercase_ : List[str] = dict( ACCELERATE_USE_FSDP="""true""" , MASTER_ADDR="""localhost""" , MASTER_PORT="""10999""" , RANK="""0""" , LOCAL_RANK="""0""" , WORLD_SIZE="""1""" , ) def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ): from torch.distributed.fsdp.fully_sharded_data_parallel import ShardingStrategy for i, strategy in enumerate(lowercase_ ): lowercase_ : Optional[int] = self.dist_env.copy() lowercase_ : Optional[int] = f'''{i + 1}''' lowercase_ : Dict = strategy with mockenv_context(**lowercase_ ): lowercase_ : int = FullyShardedDataParallelPlugin() self.assertEqual(fsdp_plugin.sharding_strategy , ShardingStrategy(i + 1 ) ) def SCREAMING_SNAKE_CASE_ ( self : List[Any] ): from torch.distributed.fsdp.fully_sharded_data_parallel import BackwardPrefetch for i, prefetch_policy in enumerate(lowercase_ ): lowercase_ : Union[str, Any] = self.dist_env.copy() lowercase_ : int = prefetch_policy with mockenv_context(**lowercase_ ): lowercase_ : Any = FullyShardedDataParallelPlugin() if prefetch_policy == "NO_PREFETCH": self.assertIsNone(fsdp_plugin.backward_prefetch ) else: self.assertEqual(fsdp_plugin.backward_prefetch , BackwardPrefetch(i + 1 ) ) def SCREAMING_SNAKE_CASE_ ( self : Dict ): from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType for i, state_dict_type in enumerate(lowercase_ ): lowercase_ : Any = self.dist_env.copy() lowercase_ : str = state_dict_type with mockenv_context(**lowercase_ ): lowercase_ : Tuple = FullyShardedDataParallelPlugin() self.assertEqual(fsdp_plugin.state_dict_type , StateDictType(i + 1 ) ) if state_dict_type == "FULL_STATE_DICT": self.assertTrue(fsdp_plugin.state_dict_config.offload_to_cpu ) self.assertTrue(fsdp_plugin.state_dict_config.ranka_only ) def SCREAMING_SNAKE_CASE_ ( self : List[str] ): lowercase_ : Dict = AutoModel.from_pretrained(lowercase_ ) for policy in FSDP_AUTO_WRAP_POLICY: lowercase_ : Tuple = self.dist_env.copy() lowercase_ : int = policy if policy == "TRANSFORMER_BASED_WRAP": lowercase_ : List[Any] = """BertLayer""" elif policy == "SIZE_BASED_WRAP": lowercase_ : Optional[Any] = """2000""" with mockenv_context(**lowercase_ ): lowercase_ : List[str] = FullyShardedDataParallelPlugin() fsdp_plugin.set_auto_wrap_policy(lowercase_ ) if policy == "NO_WRAP": self.assertIsNone(fsdp_plugin.auto_wrap_policy ) else: self.assertIsNotNone(fsdp_plugin.auto_wrap_policy ) lowercase_ : Any = self.dist_env.copy() lowercase_ : Optional[int] = """TRANSFORMER_BASED_WRAP""" lowercase_ : List[str] = """T5Layer""" with 
mockenv_context(**lowercase_ ): lowercase_ : str = FullyShardedDataParallelPlugin() with self.assertRaises(lowercase_ ) as cm: fsdp_plugin.set_auto_wrap_policy(lowercase_ ) self.assertTrue("""Could not find the transformer layer class to wrap in the model.""" in str(cm.exception ) ) lowercase_ : Dict = self.dist_env.copy() lowercase_ : int = """SIZE_BASED_WRAP""" lowercase_ : Dict = """0""" with mockenv_context(**lowercase_ ): lowercase_ : List[Any] = FullyShardedDataParallelPlugin() fsdp_plugin.set_auto_wrap_policy(lowercase_ ) self.assertIsNone(fsdp_plugin.auto_wrap_policy ) def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ): from torch.distributed.fsdp.fully_sharded_data_parallel import MixedPrecision from torch.distributed.fsdp.sharded_grad_scaler import ShardedGradScaler for mp_dtype in dtypes: lowercase_ : Optional[Any] = self.dist_env.copy() lowercase_ : Dict = mp_dtype with mockenv_context(**lowercase_ ): lowercase_ : Dict = Accelerator() if mp_dtype == "fp16": lowercase_ : str = torch.floataa elif mp_dtype == "bf16": lowercase_ : List[Any] = torch.bfloataa lowercase_ : int = MixedPrecision(param_dtype=lowercase_ , reduce_dtype=lowercase_ , buffer_dtype=lowercase_ ) self.assertEqual(accelerator.state.fsdp_plugin.mixed_precision_policy , lowercase_ ) if mp_dtype == FPaa: self.assertTrue(isinstance(accelerator.scaler , lowercase_ ) ) elif mp_dtype == BFaa: self.assertIsNone(accelerator.scaler ) AcceleratorState._reset_state(lowercase_ ) def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ): from torch.distributed.fsdp.fully_sharded_data_parallel import CPUOffload for flag in [True, False]: lowercase_ : Optional[Any] = self.dist_env.copy() lowercase_ : List[str] = str(lowercase_ ).lower() with mockenv_context(**lowercase_ ): lowercase_ : List[Any] = FullyShardedDataParallelPlugin() self.assertEqual(fsdp_plugin.cpu_offload , CPUOffload(offload_params=lowercase_ ) ) @require_fsdp @require_multi_gpu @slow class __magic_name__ ( _UpperCAmelCase): def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ): super().setUp() lowercase_ : Any = 0.82 lowercase_ : Any = [ """fsdp_shard_grad_op_transformer_based_wrap""", """fsdp_full_shard_transformer_based_wrap""", ] lowercase_ : Any = { """multi_gpu_fp16""": 3200, """fsdp_shard_grad_op_transformer_based_wrap_fp16""": 2000, """fsdp_full_shard_transformer_based_wrap_fp16""": 1900, # Disabling below test as it overwhelms the RAM memory usage # on CI self-hosted runner leading to tests getting killed. 
# "fsdp_full_shard_cpu_offload_transformer_based_wrap_fp32": 1500, # fp16 was leading to indefinite hang } lowercase_ : Dict = 160 lowercase_ : List[str] = 160 lowercase_ : Any = inspect.getfile(accelerate.test_utils ) lowercase_ : Union[str, Any] = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ["""scripts""", """external_deps"""] ) def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ): lowercase_ : List[Any] = os.path.join(self.test_scripts_folder , """test_performance.py""" ) lowercase_ : List[Any] = ["""accelerate""", """launch""", """--num_processes=2""", """--num_machines=1""", """--machine_rank=0""", """--use_fsdp"""] for config in self.performance_configs: lowercase_ : str = cmd.copy() for i, strategy in enumerate(lowercase_ ): if strategy.lower() in config: cmd_config.append(f'''--fsdp_sharding_strategy={i+1}''' ) break if "fp32" in config: cmd_config.append("""--mixed_precision=no""" ) else: cmd_config.append("""--mixed_precision=fp16""" ) if "cpu_offload" in config: cmd_config.append("""--fsdp_offload_params=True""" ) for policy in FSDP_AUTO_WRAP_POLICY: if policy.lower() in config: cmd_config.append(f'''--fsdp_auto_wrap_policy={policy}''' ) break if policy == "TRANSFORMER_BASED_WRAP": cmd_config.append("""--fsdp_transformer_layer_cls_to_wrap=BertLayer""" ) elif policy == "SIZE_BASED_WRAP": cmd_config.append("""--fsdp_min_num_params=2000""" ) cmd_config.extend( [ self.test_file_path, f'''--output_dir={self.tmpdir}''', f'''--performance_lower_bound={self.performance_lower_bound}''', ] ) with patch_environment(omp_num_threads=1 ): execute_subprocess_async(lowercase_ , env=os.environ.copy() ) def SCREAMING_SNAKE_CASE_ ( self : Dict ): lowercase_ : Optional[int] = os.path.join(self.test_scripts_folder , """test_checkpointing.py""" ) lowercase_ : str = [ """accelerate""", """launch""", """--num_processes=2""", """--num_machines=1""", """--machine_rank=0""", """--use_fsdp""", """--mixed_precision=fp16""", """--fsdp_transformer_layer_cls_to_wrap=BertLayer""", ] for i, strategy in enumerate(lowercase_ ): lowercase_ : Union[str, Any] = cmd.copy() cmd_config.append(f'''--fsdp_sharding_strategy={i+1}''' ) if strategy != "FULL_SHARD": continue lowercase_ : int = len(lowercase_ ) for state_dict_type in FSDP_STATE_DICT_TYPE: lowercase_ : List[Any] = cmd_config[:state_dict_config_index] cmd_config.append(f'''--fsdp_state_dict_type={state_dict_type}''' ) cmd_config.extend( [ self.test_file_path, f'''--output_dir={self.tmpdir}''', """--partial_train_epoch=1""", ] ) with patch_environment(omp_num_threads=1 ): execute_subprocess_async(lowercase_ , env=os.environ.copy() ) lowercase_ : Tuple = cmd_config[:-1] lowercase_ : Union[str, Any] = os.path.join(self.tmpdir , """epoch_0""" ) cmd_config.extend( [ f'''--resume_from_checkpoint={resume_from_checkpoint}''', ] ) with patch_environment(omp_num_threads=1 ): execute_subprocess_async(lowercase_ , env=os.environ.copy() ) def SCREAMING_SNAKE_CASE_ ( self : int ): lowercase_ : str = os.path.join(self.test_scripts_folder , """test_peak_memory_usage.py""" ) lowercase_ : Dict = [ """accelerate""", """launch""", """--num_processes=2""", """--num_machines=1""", """--machine_rank=0""", ] for spec, peak_mem_upper_bound in self.peak_memory_usage_upper_bound.items(): lowercase_ : Union[str, Any] = cmd.copy() if "fp16" in spec: cmd_config.extend(["""--mixed_precision=fp16"""] ) else: cmd_config.extend(["""--mixed_precision=no"""] ) if "multi_gpu" in spec: continue else: cmd_config.extend(["""--use_fsdp"""] ) for i, strategy in enumerate(lowercase_ ): if 
strategy.lower() in spec: cmd_config.append(f'''--fsdp_sharding_strategy={i+1}''' ) break if "cpu_offload" in spec: cmd_config.append("""--fsdp_offload_params=True""" ) for policy in FSDP_AUTO_WRAP_POLICY: if policy.lower() in spec: cmd_config.append(f'''--fsdp_auto_wrap_policy={policy}''' ) break if policy == "TRANSFORMER_BASED_WRAP": cmd_config.append("""--fsdp_transformer_layer_cls_to_wrap=BertLayer""" ) elif policy == "SIZE_BASED_WRAP": cmd_config.append("""--fsdp_min_num_params=2000""" ) cmd_config.extend( [ self.test_file_path, f'''--output_dir={self.tmpdir}''', f'''--peak_memory_upper_bound={peak_mem_upper_bound}''', f'''--n_train={self.n_train}''', f'''--n_val={self.n_val}''', ] ) with patch_environment(omp_num_threads=1 ): execute_subprocess_async(lowercase_ , env=os.environ.copy() )
239
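The FSDP tests above drive `FullyShardedDataParallelPlugin` entirely through environment variables. A hedged sketch of that pattern outside the test harness; the specific variable names (`FSDP_SHARDING_STRATEGY`, `FSDP_AUTO_WRAP_POLICY`, `FSDP_MIN_NUM_PARAMS`) are my assumption about the mocked `dist_env` keys, since the obfuscation hides them:

import os
from unittest import mock

from accelerate.utils.dataclasses import FullyShardedDataParallelPlugin

env = {
    "ACCELERATE_USE_FSDP": "true",
    "FSDP_SHARDING_STRATEGY": "1",  # assumed to map to FULL_SHARD, as in the loop above
    "FSDP_AUTO_WRAP_POLICY": "SIZE_BASED_WRAP",
    "FSDP_MIN_NUM_PARAMS": "2000",
}
with mock.patch.dict(os.environ, env):
    plugin = FullyShardedDataParallelPlugin()  # reads the variables at construction time
    print(plugin.sharding_strategy)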
from __future__ import annotations

import math
import random
from typing import Any


class MyQueue:
    def __init__(self) -> None:
        self.data: list[Any] = []
        self.head: int = 0
        self.tail: int = 0

    def is_empty(self) -> bool:
        return self.head == self.tail

    def push(self, data: Any) -> None:
        self.data.append(data)
        self.tail = self.tail + 1

    def pop(self) -> Any:
        ret = self.data[self.head]
        self.head = self.head + 1
        return ret

    def count(self) -> int:
        return self.tail - self.head

    def print_queue(self) -> None:
        print(self.data)
        print("**************")
        print(self.data[self.head : self.tail])


class MyNode:
    def __init__(self, data: Any) -> None:
        self.data = data
        self.left: MyNode | None = None
        self.right: MyNode | None = None
        self.height: int = 1

    def get_data(self) -> Any:
        return self.data

    def get_left(self) -> MyNode | None:
        return self.left

    def get_right(self) -> MyNode | None:
        return self.right

    def get_height(self) -> int:
        return self.height

    def set_data(self, data: Any) -> None:
        self.data = data

    def set_left(self, node: MyNode | None) -> None:
        self.left = node

    def set_right(self, node: MyNode | None) -> None:
        self.right = node

    def set_height(self, height: int) -> None:
        self.height = height


def get_height(node: MyNode | None) -> int:
    if node is None:
        return 0
    return node.get_height()


def my_max(a: int, b: int) -> int:
    if a > b:
        return a
    return b


def right_rotation(node: MyNode) -> MyNode:
    print("right rotation node:", node.get_data())
    ret = node.get_left()
    assert ret is not None
    node.set_left(ret.get_right())
    ret.set_right(node)
    h1 = my_max(get_height(node.get_right()), get_height(node.get_left())) + 1
    node.set_height(h1)
    h2 = my_max(get_height(ret.get_right()), get_height(ret.get_left())) + 1
    ret.set_height(h2)
    return ret


def left_rotation(node: MyNode) -> MyNode:
    print("left rotation node:", node.get_data())
    ret = node.get_right()
    assert ret is not None
    node.set_right(ret.get_left())
    ret.set_left(node)
    h1 = my_max(get_height(node.get_right()), get_height(node.get_left())) + 1
    node.set_height(h1)
    h2 = my_max(get_height(ret.get_right()), get_height(ret.get_left())) + 1
    ret.set_height(h2)
    return ret


def lr_rotation(node: MyNode) -> MyNode:
    left_child = node.get_left()
    assert left_child is not None
    node.set_left(left_rotation(left_child))
    return right_rotation(node)


def rl_rotation(node: MyNode) -> MyNode:
    right_child = node.get_right()
    assert right_child is not None
    node.set_right(right_rotation(right_child))
    return left_rotation(node)


def insert_node(node: MyNode | None, data: Any) -> MyNode | None:
    if node is None:
        return MyNode(data)
    if data < node.get_data():
        node.set_left(insert_node(node.get_left(), data))
        if get_height(node.get_left()) - get_height(node.get_right()) == 2:  # an unbalance detected
            left_child = node.get_left()
            assert left_child is not None
            if data < left_child.get_data():  # new node is the left child of the left child
                node = right_rotation(node)
            else:
                node = lr_rotation(node)
    else:
        node.set_right(insert_node(node.get_right(), data))
        if get_height(node.get_right()) - get_height(node.get_left()) == 2:
            right_child = node.get_right()
            assert right_child is not None
            if data < right_child.get_data():
                node = rl_rotation(node)
            else:
                node = left_rotation(node)
    h = my_max(get_height(node.get_right()), get_height(node.get_left())) + 1
    node.set_height(h)
    return node


def get_right_most(root: MyNode) -> Any:
    while True:
        right_child = root.get_right()
        if right_child is None:
            break
        root = right_child
    return root.get_data()


def get_left_most(root: MyNode) -> Any:
    while True:
        left_child = root.get_left()
        if left_child is None:
            break
        root = left_child
    return root.get_data()


def del_node(root: MyNode, data: Any) -> MyNode | None:
    left_child = root.get_left()
    right_child = root.get_right()
    if root.get_data() == data:
        if left_child is not None and right_child is not None:
            temp_data = get_left_most(right_child)
            root.set_data(temp_data)
            root.set_right(del_node(right_child, temp_data))
        elif left_child is not None:
            root = left_child
        elif right_child is not None:
            root = right_child
        else:
            return None
    elif root.get_data() > data:
        if left_child is None:
            print("No such data")
            return root
        else:
            root.set_left(del_node(left_child, data))
    else:  # root.get_data() < data
        if right_child is None:
            return root
        else:
            root.set_right(del_node(right_child, data))

    # rebalance on the way back up
    if get_height(right_child) - get_height(left_child) == 2:
        assert right_child is not None
        if get_height(right_child.get_right()) > get_height(right_child.get_left()):
            root = left_rotation(root)
        else:
            root = rl_rotation(root)
    elif get_height(right_child) - get_height(left_child) == -2:
        assert left_child is not None
        if get_height(left_child.get_left()) > get_height(left_child.get_right()):
            root = right_rotation(root)
        else:
            root = lr_rotation(root)
    height = my_max(get_height(root.get_right()), get_height(root.get_left())) + 1
    root.set_height(height)
    return root


class AVLtree:
    def __init__(self) -> None:
        self.root: MyNode | None = None

    def get_height(self) -> int:
        return get_height(self.root)

    def insert(self, data: Any) -> None:
        print("insert:" + str(data))
        self.root = insert_node(self.root, data)

    def del_node(self, data: Any) -> None:
        print("delete:" + str(data))
        if self.root is None:
            print("Tree is empty!")
            return
        self.root = del_node(self.root, data)

    def __str__(self) -> str:  # a level traversal, gives a more intuitive look at the tree
        output = ""
        q = MyQueue()
        q.push(self.root)
        layer = self.get_height()
        if layer == 0:
            return output
        cnt = 0
        while not q.is_empty():
            node = q.pop()
            space = " " * int(math.pow(2, layer - 1))
            output += space
            if node is None:
                output += "*"
                q.push(None)
                q.push(None)
            else:
                output += str(node.get_data())
                q.push(node.get_left())
                q.push(node.get_right())
            output += space
            cnt = cnt + 1
            for i in range(100):
                if cnt == math.pow(2, layer) - 1:
                    layer = layer - 1
                    if layer == 0:
                        output += "\n*************************************"
                        return output
                    output += "\n"
                    break
        output += "\n*************************************"
        return output


def _test() -> None:
    import doctest

    doctest.testmod()


if __name__ == "__main__":
    _test()
    t = AVLtree()
    lst = list(range(10))
    random.shuffle(lst)
    for i in lst:
        t.insert(i)
        print(str(t))
    random.shuffle(lst)
    for i in lst:
        t.del_node(i)
        print(str(t))
239
1
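A quick interactive sketch for the AVL tree module above; it only calls methods defined in the sample (insert, del_node, get_height, __str__) and is meant to run at the bottom of that module:

t = AVLtree()
for value in [3, 1, 4, 5, 9, 2, 6]:
    t.insert(value)  # prints "insert:<value>" and rebalances via the rotations
print(t.get_height())  # stays O(log n) because every insert rebalances
t.del_node(9)  # prints "delete:9" and rebalances on the way back up
print(str(t))  # level-order rendering from __str__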
'''simple docstring''' import os import sys import tempfile import unittest import unittest.mock as mock from pathlib import Path from huggingface_hub import HfFolder, delete_repo from huggingface_hub.file_download import http_get from requests.exceptions import HTTPError from transformers import ( AlbertTokenizer, AutoTokenizer, BertTokenizer, BertTokenizerFast, GPTaTokenizerFast, is_tokenizers_available, ) from transformers.testing_utils import TOKEN, USER, is_staging_test, require_tokenizers from transformers.tokenization_utils import Trie sys.path.append(str(Path(__file__).parent.parent / """utils""")) from test_module.custom_tokenization import CustomTokenizer # noqa E402 if is_tokenizers_available(): from test_module.custom_tokenization_fast import CustomTokenizerFast class UpperCamelCase ( unittest.TestCase ): """simple docstring""" def SCREAMING_SNAKE_CASE_ ( self : Optional[Any]): """simple docstring""" a : Union[str, Any] = mock.Mock() a : Tuple = 5_0_0 a : Union[str, Any] = {} a : List[str] = HTTPError a : int = {} # Download this model to make sure it's in the cache. a : List[str] = BertTokenizer.from_pretrained('hf-internal-testing/tiny-random-bert') # Under the mock environment we get a 500 error when trying to reach the tokenizer. with mock.patch('requests.Session.request' , return_value=UpperCAmelCase_) as mock_head: a : Any = BertTokenizer.from_pretrained('hf-internal-testing/tiny-random-bert') # This check we did call the fake head request mock_head.assert_called() @require_tokenizers def SCREAMING_SNAKE_CASE_ ( self : Optional[Any]): """simple docstring""" a : Tuple = mock.Mock() a : Any = 5_0_0 a : Tuple = {} a : str = HTTPError a : Optional[Any] = {} # Download this model to make sure it's in the cache. a : Any = GPTaTokenizerFast.from_pretrained('gpt2') # Under the mock environment we get a 500 error when trying to reach the tokenizer. with mock.patch('requests.Session.request' , return_value=UpperCAmelCase_) as mock_head: a : Tuple = GPTaTokenizerFast.from_pretrained('gpt2') # This check we did call the fake head request mock_head.assert_called() def SCREAMING_SNAKE_CASE_ ( self : Optional[int]): """simple docstring""" try: a : Optional[int] = tempfile.mktemp() with open(UpperCAmelCase_ , 'wb') as f: http_get('https://huggingface.co/albert-base-v1/resolve/main/spiece.model' , UpperCAmelCase_) a : List[str] = AlbertTokenizer.from_pretrained(UpperCAmelCase_) finally: os.remove(UpperCAmelCase_) # Supporting this legacy load introduced a weird bug where the tokenizer would load local files if they are in # the current folder and have the right name. if os.path.isfile('tokenizer.json'): # We skip the test if the user has a `tokenizer.json` in this folder to avoid deleting it. return try: with open('tokenizer.json' , 'wb') as f: http_get('https://huggingface.co/hf-internal-testing/tiny-random-bert/blob/main/tokenizer.json' , UpperCAmelCase_) a : Dict = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-gpt2') # The tiny random BERT has a vocab size of 1024, tiny gpt2 as a vocab size of 1000 self.assertEqual(tokenizer.vocab_size , 1_0_0_0) # Tokenizer should depend on the remote checkpoint, not the local tokenizer.json file. 
finally: os.remove('tokenizer.json') def SCREAMING_SNAKE_CASE_ ( self : Optional[int]): """simple docstring""" a : Optional[Any] = AlbertTokenizer.from_pretrained('https://huggingface.co/albert-base-v1/resolve/main/spiece.model') @is_staging_test class UpperCamelCase ( unittest.TestCase ): """simple docstring""" A : int = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "bla", "blou"] @classmethod def SCREAMING_SNAKE_CASE_ ( cls : Any): """simple docstring""" a : str = TOKEN HfFolder.save_token(UpperCAmelCase_) @classmethod def SCREAMING_SNAKE_CASE_ ( cls : List[str]): """simple docstring""" try: delete_repo(token=cls._token , repo_id='test-tokenizer') except HTTPError: pass try: delete_repo(token=cls._token , repo_id='valid_org/test-tokenizer-org') except HTTPError: pass try: delete_repo(token=cls._token , repo_id='test-dynamic-tokenizer') except HTTPError: pass def SCREAMING_SNAKE_CASE_ ( self : Any): """simple docstring""" with tempfile.TemporaryDirectory() as tmp_dir: a : Optional[Any] = os.path.join(UpperCAmelCase_ , 'vocab.txt') with open(UpperCAmelCase_ , 'w' , encoding='utf-8') as vocab_writer: vocab_writer.write(''.join([x + '\n' for x in self.vocab_tokens])) a : int = BertTokenizer(UpperCAmelCase_) tokenizer.push_to_hub('test-tokenizer' , use_auth_token=self._token) a : Optional[int] = BertTokenizer.from_pretrained(f"""{USER}/test-tokenizer""") self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab) # Reset repo delete_repo(token=self._token , repo_id='test-tokenizer') # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: tokenizer.save_pretrained(UpperCAmelCase_ , repo_id='test-tokenizer' , push_to_hub=UpperCAmelCase_ , use_auth_token=self._token) a : Union[str, Any] = BertTokenizer.from_pretrained(f"""{USER}/test-tokenizer""") self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab) def SCREAMING_SNAKE_CASE_ ( self : Tuple): """simple docstring""" with tempfile.TemporaryDirectory() as tmp_dir: a : Any = os.path.join(UpperCAmelCase_ , 'vocab.txt') with open(UpperCAmelCase_ , 'w' , encoding='utf-8') as vocab_writer: vocab_writer.write(''.join([x + '\n' for x in self.vocab_tokens])) a : str = BertTokenizer(UpperCAmelCase_) tokenizer.push_to_hub('valid_org/test-tokenizer-org' , use_auth_token=self._token) a : int = BertTokenizer.from_pretrained('valid_org/test-tokenizer-org') self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab) # Reset repo delete_repo(token=self._token , repo_id='valid_org/test-tokenizer-org') # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: tokenizer.save_pretrained( UpperCAmelCase_ , repo_id='valid_org/test-tokenizer-org' , push_to_hub=UpperCAmelCase_ , use_auth_token=self._token) a : List[Any] = BertTokenizer.from_pretrained('valid_org/test-tokenizer-org') self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab) @require_tokenizers def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any]): """simple docstring""" CustomTokenizer.register_for_auto_class() with tempfile.TemporaryDirectory() as tmp_dir: a : Any = os.path.join(UpperCAmelCase_ , 'vocab.txt') with open(UpperCAmelCase_ , 'w' , encoding='utf-8') as vocab_writer: vocab_writer.write(''.join([x + '\n' for x in self.vocab_tokens])) a : int = CustomTokenizer(UpperCAmelCase_) # No fast custom tokenizer tokenizer.push_to_hub('test-dynamic-tokenizer' , use_auth_token=self._token) a : Union[str, Any] = AutoTokenizer.from_pretrained(f"""{USER}/test-dynamic-tokenizer""" , trust_remote_code=UpperCAmelCase_) # Can't make an isinstance check because 
the new_model.config is from the CustomTokenizer class of a dynamic module self.assertEqual(tokenizer.__class__.__name__ , 'CustomTokenizer') # Fast and slow custom tokenizer CustomTokenizerFast.register_for_auto_class() with tempfile.TemporaryDirectory() as tmp_dir: a : Union[str, Any] = os.path.join(UpperCAmelCase_ , 'vocab.txt') with open(UpperCAmelCase_ , 'w' , encoding='utf-8') as vocab_writer: vocab_writer.write(''.join([x + '\n' for x in self.vocab_tokens])) a : Tuple = BertTokenizerFast.from_pretrained(UpperCAmelCase_) bert_tokenizer.save_pretrained(UpperCAmelCase_) a : List[str] = CustomTokenizerFast.from_pretrained(UpperCAmelCase_) tokenizer.push_to_hub('test-dynamic-tokenizer' , use_auth_token=self._token) a : Any = AutoTokenizer.from_pretrained(f"""{USER}/test-dynamic-tokenizer""" , trust_remote_code=UpperCAmelCase_) # Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module self.assertEqual(tokenizer.__class__.__name__ , 'CustomTokenizerFast') a : Union[str, Any] = AutoTokenizer.from_pretrained( f"""{USER}/test-dynamic-tokenizer""" , use_fast=UpperCAmelCase_ , trust_remote_code=UpperCAmelCase_) # Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module self.assertEqual(tokenizer.__class__.__name__ , 'CustomTokenizer') class UpperCamelCase ( unittest.TestCase ): """simple docstring""" def SCREAMING_SNAKE_CASE_ ( self : int): """simple docstring""" a : Dict = Trie() trie.add('Hello 友達') self.assertEqual(trie.data , {'H': {'e': {'l': {'l': {'o': {' ': {'友': {'達': {'': 1}}}}}}}}}) trie.add('Hello') trie.data self.assertEqual(trie.data , {'H': {'e': {'l': {'l': {'o': {'': 1, ' ': {'友': {'達': {'': 1}}}}}}}}}) def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any]): """simple docstring""" a : str = Trie() self.assertEqual(trie.split('[CLS] This is a extra_id_100') , ['[CLS] This is a extra_id_100']) trie.add('[CLS]') trie.add('extra_id_1') trie.add('extra_id_100') self.assertEqual(trie.split('[CLS] This is a extra_id_100') , ['[CLS]', ' This is a ', 'extra_id_100']) def SCREAMING_SNAKE_CASE_ ( self : str): """simple docstring""" a : List[Any] = Trie() trie.add('A') self.assertEqual(trie.split('ABC') , ['A', 'BC']) self.assertEqual(trie.split('BCA') , ['BC', 'A']) def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any]): """simple docstring""" a : Optional[Any] = Trie() trie.add('TOKEN]') trie.add('[SPECIAL_TOKEN]') self.assertEqual(trie.split('This is something [SPECIAL_TOKEN]') , ['This is something ', '[SPECIAL_TOKEN]']) def SCREAMING_SNAKE_CASE_ ( self : Dict): """simple docstring""" a : List[str] = Trie() trie.add('A') trie.add('P') trie.add('[SPECIAL_TOKEN]') self.assertEqual(trie.split('This is something [SPECIAL_TOKEN]') , ['This is something ', '[SPECIAL_TOKEN]']) def SCREAMING_SNAKE_CASE_ ( self : str): """simple docstring""" a : int = Trie() trie.add('AB') trie.add('B') trie.add('C') self.assertEqual(trie.split('ABC') , ['AB', 'C']) def SCREAMING_SNAKE_CASE_ ( self : Optional[Any]): """simple docstring""" a : Tuple = Trie() trie.add('ABC') trie.add('B') trie.add('CD') self.assertEqual(trie.split('ABCD') , ['ABC', 'D']) def SCREAMING_SNAKE_CASE_ ( self : Optional[int]): """simple docstring""" a : Dict = Trie() a : Dict = trie.cut_text('ABC' , [0, 0, 2, 1, 2, 3]) self.assertEqual(UpperCAmelCase_ , ['AB', 'C'])
359
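The last test class above exercises `Trie` from `transformers.tokenization_utils`. A short sketch of the behavior those assertions pin down, splitting on the longest added token:

from transformers.tokenization_utils import Trie

trie = Trie()
trie.add("[CLS]")
trie.add("extra_id_1")
trie.add("extra_id_100")
# split() cuts on the longest matching added tokens and keeps the rest intact
print(trie.split("[CLS] This is a extra_id_100"))
# -> ['[CLS]', ' This is a ', 'extra_id_100']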
from __future__ import annotations

from decimal import Decimal
from math import *  # noqa: F403

from sympy import diff


def newton_raphson(func: str, a: float | Decimal, precision: float = 10**-10) -> float:
    """Find a root of ``func`` (an expression in x, given as a string) with the
    Newton-Raphson method, starting the iteration from the point ``a``."""
    x = a
    while True:
        x = Decimal(x) - (
            Decimal(eval(func)) / Decimal(eval(str(diff(func))))  # noqa: S307
        )
        # This number dictates the accuracy of the answer
        if abs(eval(func)) < precision:  # noqa: S307
            return float(x)


# Let's Execute
if __name__ == "__main__":
    # Find root of trigonometric function
    # Find value of pi
    print(f"The root of sin(x) = 0 is {newton_raphson('sin(x)', 2)}")
    # Find root of polynomial
    print(f"The root of x**2 - 5*x + 2 = 0 is {newton_raphson('x**2 - 5*x + 2', 0.4)}")
    # Find value of e (the root of log(x) - 1)
    print(f"The root of log(x) - 1 = 0 is {newton_raphson('log(x) - 1', 2)}")
    # Exponential Roots
    print(f"The root of exp(x) - 1 = 0 is {newton_raphson('exp(x) - 1', 0)}")
345
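The Newton-Raphson sample above builds the update x_{n+1} = x_n - f(x_n)/f'(x_n) by eval-ing strings. A safer sketch of the same iteration with plain callables (no eval, no sympy); the names here are illustrative only:

from math import cos, sin

def newton(f, df, x0, precision=1e-10):
    # Identical update rule, but f and its derivative df are ordinary functions.
    x = x0
    while abs(f(x)) >= precision:
        x = x - f(x) / df(x)
    return x

print(newton(sin, cos, 2))  # converges to pi, matching the first demo print above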
0
def pancake_sort(arr):
    """Sort a list by repeatedly flipping prefixes (pancake sort) and return it."""
    cur = len(arr)
    while cur > 1:
        # Find the index of the maximum number in arr[0:cur]
        mi = arr.index(max(arr[0:cur]))
        # Reverse from 0 to mi, bringing the max to the front
        arr = arr[mi::-1] + arr[mi + 1 : len(arr)]
        # Reverse the prefix of length cur, sinking the max into place
        arr = arr[cur - 1 :: -1] + arr[cur : len(arr)]
        cur -= 1
    return arr


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(pancake_sort(unsorted))
285
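A worked trace of pancake_sort above on [3, 1, 2], one `cur` step per line:

# cur=3: max of [3, 1, 2] is at index 0; flipping the first 1 element is a no-op,
#        flipping the first 3 gives [2, 1, 3]
# cur=2: max of [2, 1] is at index 0; flipping the first 2 gives [1, 2, 3]
# cur=1: loop exits; at most 2*(n-1) flips overall, O(n^2) comparisons
print(pancake_sort([3, 1, 2]))  # [1, 2, 3]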
import uuid
from typing import Any, Dict, List, Optional, Union

from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline


if is_tf_available():
    import tensorflow as tf
if is_torch_available():
    import torch


logger = logging.get_logger(__name__)


class Conversation:
    def __init__(
        self, text: str = None, conversation_id: uuid.UUID = None, past_user_inputs=None, generated_responses=None
    ):
        if not conversation_id:
            conversation_id = uuid.uuid4()
        if past_user_inputs is None:
            past_user_inputs = []
        if generated_responses is None:
            generated_responses = []

        self.uuid = conversation_id
        self.past_user_inputs = past_user_inputs
        self.generated_responses = generated_responses
        self.new_user_input = text

    def __eq__(self, other):
        if not isinstance(other, Conversation):
            return False
        if self.uuid == other.uuid:
            return True
        return (
            self.new_user_input == other.new_user_input
            and self.past_user_inputs == other.past_user_inputs
            and self.generated_responses == other.generated_responses
        )

    def add_user_input(self, text: str, overwrite: bool = False):
        if self.new_user_input:
            if overwrite:
                logger.warning(
                    f'User input added while unprocessed input was existing: "{self.new_user_input}" was overwritten '
                    f'with: "{text}".'
                )
                self.new_user_input = text
            else:
                logger.warning(
                    f'User input added while unprocessed input was existing: "{self.new_user_input}" new input '
                    f'ignored: "{text}". Set `overwrite` to True to overwrite unprocessed user input'
                )
        else:
            self.new_user_input = text

    def mark_processed(self):
        if self.new_user_input:
            self.past_user_inputs.append(self.new_user_input)
        self.new_user_input = None

    def append_response(self, response: str):
        self.generated_responses.append(response)

    def iter_texts(self):
        for user_input, generated_response in zip(self.past_user_inputs, self.generated_responses):
            yield True, user_input
            yield False, generated_response
        if self.new_user_input:
            yield True, self.new_user_input

    def __repr__(self):
        output = f"Conversation id: {self.uuid} \n"
        for is_user, text in self.iter_texts():
            name = "user" if is_user else "bot"
            output += f"{name} >> {text} \n"
        return output


@add_end_docstrings(
    PIPELINE_INIT_ARGS,
    r"""
        min_length_for_response (`int`, *optional*, defaults to 32):
            The minimum length (in number of tokens) for a response.
        minimum_tokens (`int`, *optional*, defaults to 10):
            The minimum length of tokens to leave for a response.
    """,
)
class ConversationalPipeline(Pipeline):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        if self.tokenizer.pad_token_id is None:
            self.tokenizer.pad_token = self.tokenizer.eos_token

    def _sanitize_parameters(
        self, min_length_for_response=None, minimum_tokens=None, clean_up_tokenization_spaces=None, **generate_kwargs
    ):
        preprocess_params = {}
        forward_params = {}
        postprocess_params = {}

        if min_length_for_response is not None:
            preprocess_params["min_length_for_response"] = min_length_for_response
        if minimum_tokens is not None:
            forward_params["minimum_tokens"] = minimum_tokens

        if "max_length" in generate_kwargs:
            forward_params["max_length"] = generate_kwargs["max_length"]
            # self.max_length = generate_kwargs.get("max_length", self.model.config.max_length)
        if clean_up_tokenization_spaces is not None:
            postprocess_params["clean_up_tokenization_spaces"] = clean_up_tokenization_spaces

        if generate_kwargs:
            forward_params.update(generate_kwargs)
        return preprocess_params, forward_params, postprocess_params

    def __call__(self, conversations: Union[Conversation, List[Conversation]], num_workers=0, **kwargs):
        outputs = super().__call__(conversations, num_workers=num_workers, **kwargs)
        if isinstance(outputs, list) and len(outputs) == 1:
            return outputs[0]
        return outputs

    def preprocess(self, conversation: Conversation, min_length_for_response=32) -> Dict[str, Any]:
        if not isinstance(conversation, Conversation):
            raise ValueError("ConversationalPipeline, expects Conversation as inputs")
        if conversation.new_user_input is None:
            raise ValueError(
                f"Conversation with UUID {type(conversation.uuid)} does not contain new user input to process. "
                "Add user inputs with the conversation's `add_user_input` method"
            )
        if hasattr(self.tokenizer, "_build_conversation_input_ids"):
            input_ids = self.tokenizer._build_conversation_input_ids(conversation)
        else:
            # If the tokenizer cannot handle conversations, we default to only the old version
            input_ids = self._legacy_parse_and_tokenize(conversation)

        if self.framework == "pt":
            input_ids = torch.LongTensor([input_ids])
        elif self.framework == "tf":
            input_ids = tf.constant([input_ids])
        return {"input_ids": input_ids, "conversation": conversation}

    def _forward(self, model_inputs, minimum_tokens=10, **generate_kwargs):
        max_length = generate_kwargs.get("max_length", self.model.config.max_length)

        n = model_inputs["input_ids"].shape[1]
        if max_length - minimum_tokens < n:
            logger.warning(f"Conversation input is too long ({n}), trimming it to ({max_length} - {minimum_tokens})")
            trim = max_length - minimum_tokens
            model_inputs["input_ids"] = model_inputs["input_ids"][:, -trim:]
            if "attention_mask" in model_inputs:
                model_inputs["attention_mask"] = model_inputs["attention_mask"][:, -trim:]
        conversation = model_inputs.pop("conversation")
        generate_kwargs["max_length"] = max_length
        output_ids = self.model.generate(**model_inputs, **generate_kwargs)
        if self.model.config.is_encoder_decoder:
            start_position = 1
        else:
            start_position = n
        return {"output_ids": output_ids[:, start_position:], "conversation": conversation}

    def postprocess(self, model_outputs, clean_up_tokenization_spaces=True):
        output_ids = model_outputs["output_ids"]
        answer = self.tokenizer.decode(
            output_ids[0],
            skip_special_tokens=True,
            clean_up_tokenization_spaces=clean_up_tokenization_spaces,
        )
        conversation = model_outputs["conversation"]
        conversation.mark_processed()
        conversation.append_response(answer)
        return conversation

    def _legacy_parse_and_tokenize(self, conversation: Conversation) -> List[int]:
        eos_token_id = self.tokenizer.eos_token_id
        input_ids = []
        for is_user, text in conversation.iter_texts():
            if eos_token_id is not None:
                input_ids.extend(self.tokenizer.encode(text, add_special_tokens=False) + [eos_token_id])
            else:
                input_ids.extend(self.tokenizer.encode(text, add_special_tokens=False))

        if len(input_ids) > self.tokenizer.model_max_length:
            input_ids = input_ids[-self.tokenizer.model_max_length :]
        return input_ids
285
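A hedged usage sketch for the pipeline above, assuming the released `transformers` API of the same era, where it is registered under the "conversational" task; the task string and checkpoint name below are illustrative, not taken from the sample:

from transformers import Conversation, pipeline

chatbot = pipeline("conversational", model="microsoft/DialoGPT-small")
conversation = Conversation("Can you recommend a movie for tonight?")
conversation = chatbot(conversation)  # preprocess -> generate -> postprocess
print(conversation.generated_responses[-1])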
1
from ....configuration_utils import PretrainedConfig
from ....utils import logging


logger = logging.get_logger(__name__)

TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "CarlCochet/trajectory-transformer-halfcheetah-medium-v2": (
        "https://huggingface.co/CarlCochet/trajectory-transformer-halfcheetah-medium-v2/resolve/main/config.json"
    ),
    # See all TrajectoryTransformer models at https://huggingface.co/models?filter=trajectory_transformer
}


class TrajectoryTransformerConfig(PretrainedConfig):
    model_type = "trajectory_transformer"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=100,
        action_weight=5,
        reward_weight=1,
        value_weight=1,
        block_size=249,
        action_dim=6,
        observation_dim=17,
        transition_dim=25,
        n_layer=4,
        n_head=4,
        n_embd=128,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        resid_pdrop=0.1,
        learning_rate=0.0006,
        max_position_embeddings=512,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        kaiming_initializer_range=1,
        use_cache=True,
        pad_token_id=1,
        bos_token_id=50_256,
        eos_token_id=50_256,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.action_weight = action_weight
        self.reward_weight = reward_weight
        self.value_weight = value_weight
        self.max_position_embeddings = max_position_embeddings
        self.block_size = block_size
        self.action_dim = action_dim
        self.observation_dim = observation_dim
        self.transition_dim = transition_dim
        self.learning_rate = learning_rate
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_embd = n_embd
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.resid_pdrop = resid_pdrop
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.kaiming_initializer_range = kaiming_initializer_range
        self.use_cache = use_cache
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
299
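A minimal sketch for the configuration above, assuming it ships in `transformers` as `TrajectoryTransformerConfig` (that name is not present in the obfuscated sample):

from transformers import TrajectoryTransformerConfig

config = TrajectoryTransformerConfig(action_dim=6, observation_dim=17)
print(config.model_type)  # trajectory_transformer
print(config.n_layer, config.n_head, config.n_embd)  # 4 4 128, the sample's defaults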
'''simple docstring''' import json import os import shutil import tempfile import unittest from multiprocessing import get_context from pathlib import Path import datasets import numpy as np from datasets import load_dataset from parameterized import parameterized from transformers import AutoProcessor from transformers.models.wavaveca import WavaVecaCTCTokenizer, WavaVecaFeatureExtractor from transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES from transformers.testing_utils import require_pyctcdecode, require_torch, require_torchaudio, slow from transformers.utils import FEATURE_EXTRACTOR_NAME, is_pyctcdecode_available, is_torch_available from ..wavaveca.test_feature_extraction_wavaveca import floats_list if is_pyctcdecode_available(): from huggingface_hub import snapshot_download from pyctcdecode import BeamSearchDecoderCTC from transformers.models.wavaveca_with_lm import WavaVecaProcessorWithLM from transformers.models.wavaveca_with_lm.processing_wavaveca_with_lm import WavaVecaDecoderWithLMOutput if is_torch_available(): from transformers import WavaVecaForCTC @require_pyctcdecode class lowerCamelCase_ ( unittest.TestCase ): def lowercase_ ( self : Optional[Any] ): '''simple docstring''' UpperCAmelCase__ : Any = '''| <pad> <unk> <s> </s> a b c d e f g h i j k'''.split() UpperCAmelCase__ : Optional[Any] = dict(zip(_A , range(len(_A ) ) ) ) UpperCAmelCase__ : Tuple = { '''unk_token''': '''<unk>''', '''bos_token''': '''<s>''', '''eos_token''': '''</s>''', } UpperCAmelCase__ : Optional[int] = { '''feature_size''': 1, '''padding_value''': 0.0, '''sampling_rate''': 16_000, '''return_attention_mask''': False, '''do_normalize''': True, } UpperCAmelCase__ : Union[str, Any] = tempfile.mkdtemp() UpperCAmelCase__ : Any = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) UpperCAmelCase__ : int = os.path.join(self.tmpdirname , _A ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write(json.dumps(_A ) + '''\n''' ) with open(self.feature_extraction_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write(json.dumps(_A ) + '''\n''' ) # load decoder from hub UpperCAmelCase__ : Any = '''hf-internal-testing/ngram-beam-search-decoder''' def lowercase_ ( self : int , **_A : Optional[int] ): '''simple docstring''' UpperCAmelCase__ : Dict = self.add_kwargs_tokens_map.copy() kwargs.update(_A ) return WavaVecaCTCTokenizer.from_pretrained(self.tmpdirname , **_A ) def lowercase_ ( self : str , **_A : Any ): '''simple docstring''' return WavaVecaFeatureExtractor.from_pretrained(self.tmpdirname , **_A ) def lowercase_ ( self : str , **_A : Any ): '''simple docstring''' return BeamSearchDecoderCTC.load_from_hf_hub(self.decoder_name , **_A ) def lowercase_ ( self : Any ): '''simple docstring''' shutil.rmtree(self.tmpdirname ) def lowercase_ ( self : Dict ): '''simple docstring''' UpperCAmelCase__ : Union[str, Any] = self.get_tokenizer() UpperCAmelCase__ : Any = self.get_feature_extractor() UpperCAmelCase__ : Tuple = self.get_decoder() UpperCAmelCase__ : Tuple = WavaVecaProcessorWithLM(tokenizer=_A , feature_extractor=_A , decoder=_A ) processor.save_pretrained(self.tmpdirname ) UpperCAmelCase__ : Union[str, Any] = WavaVecaProcessorWithLM.from_pretrained(self.tmpdirname ) # tokenizer self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() ) self.assertIsInstance(processor.tokenizer , _A ) # feature extractor self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor.to_json_string() ) 
self.assertIsInstance(processor.feature_extractor , _A ) # decoder self.assertEqual(processor.decoder._alphabet.labels , decoder._alphabet.labels ) self.assertEqual( processor.decoder.model_container[decoder._model_key]._unigram_set , decoder.model_container[decoder._model_key]._unigram_set , ) self.assertIsInstance(processor.decoder , _A ) def lowercase_ ( self : int ): '''simple docstring''' UpperCAmelCase__ : Optional[Any] = WavaVecaProcessorWithLM( tokenizer=self.get_tokenizer() , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() ) processor.save_pretrained(self.tmpdirname ) # make sure that error is thrown when decoder alphabet doesn't match UpperCAmelCase__ : Optional[int] = WavaVecaProcessorWithLM.from_pretrained( self.tmpdirname , alpha=5.0 , beta=3.0 , score_boundary=-7.0 , unk_score_offset=3 ) # decoder self.assertEqual(processor.language_model.alpha , 5.0 ) self.assertEqual(processor.language_model.beta , 3.0 ) self.assertEqual(processor.language_model.score_boundary , -7.0 ) self.assertEqual(processor.language_model.unk_score_offset , 3 ) def lowercase_ ( self : Tuple ): '''simple docstring''' UpperCAmelCase__ : Union[str, Any] = self.get_tokenizer() # add token to trigger raise tokenizer.add_tokens(['''xx'''] ) with self.assertRaisesRegex(_A , '''include''' ): WavaVecaProcessorWithLM( tokenizer=_A , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() ) def lowercase_ ( self : Optional[int] ): '''simple docstring''' UpperCAmelCase__ : Any = self.get_feature_extractor() UpperCAmelCase__ : Optional[int] = self.get_tokenizer() UpperCAmelCase__ : Any = self.get_decoder() UpperCAmelCase__ : Optional[Any] = WavaVecaProcessorWithLM(tokenizer=_A , feature_extractor=_A , decoder=_A ) UpperCAmelCase__ : List[Any] = floats_list((3, 1_000) ) UpperCAmelCase__ : Dict = feature_extractor(_A , return_tensors='''np''' ) UpperCAmelCase__ : str = processor(_A , return_tensors='''np''' ) for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 ) def lowercase_ ( self : Tuple ): '''simple docstring''' UpperCAmelCase__ : Optional[int] = self.get_feature_extractor() UpperCAmelCase__ : str = self.get_tokenizer() UpperCAmelCase__ : str = self.get_decoder() UpperCAmelCase__ : Union[str, Any] = WavaVecaProcessorWithLM(tokenizer=_A , feature_extractor=_A , decoder=_A ) UpperCAmelCase__ : Union[str, Any] = '''This is a test string''' UpperCAmelCase__ : Optional[int] = processor(text=_A ) UpperCAmelCase__ : List[str] = tokenizer(_A ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key] ) def lowercase_ ( self : Dict , _A : Optional[int]=(2, 10, 16) , _A : List[str]=77 ): '''simple docstring''' np.random.seed(_A ) return np.random.rand(*_A ) def lowercase_ ( self : Any ): '''simple docstring''' UpperCAmelCase__ : Optional[int] = self.get_feature_extractor() UpperCAmelCase__ : Union[str, Any] = self.get_tokenizer() UpperCAmelCase__ : Optional[Any] = self.get_decoder() UpperCAmelCase__ : Tuple = WavaVecaProcessorWithLM(tokenizer=_A , feature_extractor=_A , decoder=_A ) UpperCAmelCase__ : int = self._get_dummy_logits(shape=(10, 16) , seed=13 ) UpperCAmelCase__ : List[Any] = processor.decode(_A ) UpperCAmelCase__ : List[Any] = decoder.decode_beams(_A )[0] self.assertEqual(decoded_decoder[0] , decoded_processor.text ) self.assertEqual('''</s> <s> </s>''' , decoded_processor.text ) self.assertEqual(decoded_decoder[-2] , decoded_processor.logit_score 
) self.assertEqual(decoded_decoder[-1] , decoded_processor.lm_score ) @parameterized.expand([[None], ['''fork'''], ['''spawn''']] ) def lowercase_ ( self : Any , _A : str ): '''simple docstring''' UpperCAmelCase__ : Any = self.get_feature_extractor() UpperCAmelCase__ : Tuple = self.get_tokenizer() UpperCAmelCase__ : Tuple = self.get_decoder() UpperCAmelCase__ : Any = WavaVecaProcessorWithLM(tokenizer=_A , feature_extractor=_A , decoder=_A ) UpperCAmelCase__ : Optional[Any] = self._get_dummy_logits() # note: pool should be instantiated *after* Wav2Vec2ProcessorWithLM. # otherwise, the LM won't be available to the pool's sub-processes. # manual logic used to allow parameterized test for both pool=None and pool=Pool(...) if pool_context is None: UpperCAmelCase__ : Union[str, Any] = processor.batch_decode(_A ) else: with get_context(_A ).Pool() as pool: UpperCAmelCase__ : Union[str, Any] = processor.batch_decode(_A , _A ) UpperCAmelCase__ : str = list(_A ) with get_context('''fork''' ).Pool() as p: UpperCAmelCase__ : Dict = decoder.decode_beams_batch(_A , _A ) UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : Dict = [], [], [] for beams in decoded_beams: texts_decoder.append(beams[0][0] ) logit_scores_decoder.append(beams[0][-2] ) lm_scores_decoder.append(beams[0][-1] ) self.assertListEqual(_A , decoded_processor.text ) self.assertListEqual(['''<s> <s> </s>''', '''<s> <s> <s>'''] , decoded_processor.text ) self.assertListEqual(_A , decoded_processor.logit_score ) self.assertListEqual(_A , decoded_processor.lm_score ) def lowercase_ ( self : Union[str, Any] ): '''simple docstring''' UpperCAmelCase__ : str = self.get_feature_extractor() UpperCAmelCase__ : List[Any] = self.get_tokenizer() UpperCAmelCase__ : int = self.get_decoder() UpperCAmelCase__ : Optional[int] = WavaVecaProcessorWithLM(tokenizer=_A , feature_extractor=_A , decoder=_A ) UpperCAmelCase__ : str = self._get_dummy_logits() UpperCAmelCase__ : Optional[int] = 15 UpperCAmelCase__ : Dict = -2_0.0 UpperCAmelCase__ : Optional[Any] = -4.0 UpperCAmelCase__ : Union[str, Any] = processor.batch_decode( _A , beam_width=_A , beam_prune_logp=_A , token_min_logp=_A , ) UpperCAmelCase__ : List[Any] = decoded_processor_out.text UpperCAmelCase__ : List[str] = list(_A ) with get_context('''fork''' ).Pool() as pool: UpperCAmelCase__ : Tuple = decoder.decode_beams_batch( _A , _A , beam_width=_A , beam_prune_logp=_A , token_min_logp=_A , ) UpperCAmelCase__ : Optional[int] = [d[0][0] for d in decoded_decoder_out] UpperCAmelCase__ : Optional[Any] = [d[0][2] for d in decoded_decoder_out] UpperCAmelCase__ : Optional[int] = [d[0][3] for d in decoded_decoder_out] self.assertListEqual(_A , _A ) self.assertListEqual(['''</s> <s> <s>''', '''<s> <s> <s>'''] , _A ) self.assertTrue(np.array_equal(_A , decoded_processor_out.logit_score ) ) self.assertTrue(np.allclose([-2_0.0_5_4, -1_8.4_4_7] , _A , atol=1e-3 ) ) self.assertTrue(np.array_equal(_A , decoded_processor_out.lm_score ) ) self.assertTrue(np.allclose([-1_5.5_5_4, -1_3.9_4_7_4] , _A , atol=1e-3 ) ) def lowercase_ ( self : str ): '''simple docstring''' UpperCAmelCase__ : Optional[Any] = self.get_feature_extractor() UpperCAmelCase__ : Optional[Any] = self.get_tokenizer() UpperCAmelCase__ : Dict = self.get_decoder() UpperCAmelCase__ : int = WavaVecaProcessorWithLM(tokenizer=_A , feature_extractor=_A , decoder=_A ) UpperCAmelCase__ : Optional[int] = self._get_dummy_logits() UpperCAmelCase__ : List[str] = 2.0 UpperCAmelCase__ : Union[str, Any] = 5.0 UpperCAmelCase__ : str = -2_0.0 UpperCAmelCase__ : 
Optional[int] = True UpperCAmelCase__ : Union[str, Any] = processor.batch_decode( _A , alpha=_A , beta=_A , unk_score_offset=_A , lm_score_boundary=_A , ) UpperCAmelCase__ : Union[str, Any] = decoded_processor_out.text UpperCAmelCase__ : Tuple = list(_A ) decoder.reset_params( alpha=_A , beta=_A , unk_score_offset=_A , lm_score_boundary=_A , ) with get_context('''fork''' ).Pool() as pool: UpperCAmelCase__ : Optional[Any] = decoder.decode_beams_batch( _A , _A , ) UpperCAmelCase__ : str = [d[0][0] for d in decoded_decoder_out] self.assertListEqual(_A , _A ) self.assertListEqual(['''<s> </s> <s> </s> </s>''', '''</s> </s> <s> </s> </s>'''] , _A ) UpperCAmelCase__ : Optional[Any] = processor.decoder.model_container[processor.decoder._model_key] self.assertEqual(lm_model.alpha , 2.0 ) self.assertEqual(lm_model.beta , 5.0 ) self.assertEqual(lm_model.unk_score_offset , -2_0.0 ) self.assertEqual(lm_model.score_boundary , _A ) def lowercase_ ( self : int ): '''simple docstring''' UpperCAmelCase__ : Optional[int] = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' ) UpperCAmelCase__ : Dict = processor.decoder.model_container[processor.decoder._model_key] UpperCAmelCase__ : Optional[int] = Path(language_model._kenlm_model.path.decode('''utf-8''' ) ).parent.parent.absolute() UpperCAmelCase__ : Dict = os.listdir(_A ) UpperCAmelCase__ : Optional[Any] = ['''alphabet.json''', '''language_model'''] downloaded_decoder_files.sort() expected_decoder_files.sort() # test that only decoder relevant files from # https://huggingface.co/hf-internal-testing/processor_with_lm/tree/main # are downloaded and none of the rest (e.g. README.md, ...) self.assertListEqual(_A , _A ) def lowercase_ ( self : Optional[int] ): '''simple docstring''' UpperCAmelCase__ : str = snapshot_download('''hf-internal-testing/processor_with_lm''' ) UpperCAmelCase__ : Any = WavaVecaProcessorWithLM.from_pretrained(_A ) UpperCAmelCase__ : Optional[int] = processor.decoder.model_container[processor.decoder._model_key] UpperCAmelCase__ : str = Path(language_model._kenlm_model.path.decode('''utf-8''' ) ).parent.parent.absolute() UpperCAmelCase__ : List[str] = os.listdir(_A ) UpperCAmelCase__ : Any = os.listdir(_A ) local_decoder_files.sort() expected_decoder_files.sort() # test that both decoder form hub and local files in cache are the same self.assertListEqual(_A , _A ) def lowercase_ ( self : Optional[int] ): '''simple docstring''' UpperCAmelCase__ : Dict = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' ) UpperCAmelCase__ : Dict = AutoProcessor.from_pretrained('''hf-internal-testing/processor_with_lm''' ) UpperCAmelCase__ : Tuple = floats_list((3, 1_000) ) UpperCAmelCase__ : int = processor_wavaveca(_A , return_tensors='''np''' ) UpperCAmelCase__ : List[str] = processor_auto(_A , return_tensors='''np''' ) for key in input_wavaveca.keys(): self.assertAlmostEqual(input_wavaveca[key].sum() , input_auto[key].sum() , delta=1e-2 ) UpperCAmelCase__ : Tuple = self._get_dummy_logits() UpperCAmelCase__ : List[str] = processor_wavaveca.batch_decode(_A ) UpperCAmelCase__ : int = processor_auto.batch_decode(_A ) self.assertListEqual(decoded_wavaveca.text , decoded_auto.text ) def lowercase_ ( self : Optional[int] ): '''simple docstring''' UpperCAmelCase__ : int = self.get_feature_extractor() UpperCAmelCase__ : int = self.get_tokenizer() UpperCAmelCase__ : Optional[Any] = self.get_decoder() UpperCAmelCase__ : Optional[int] = WavaVecaProcessorWithLM(tokenizer=_A , feature_extractor=_A , 
decoder=_A ) self.assertListEqual( processor.model_input_names , feature_extractor.model_input_names , msg='''`processor` and `feature_extractor` model input names do not match''' , ) @staticmethod def lowercase_ ( _A : Dict , _A : str ): '''simple docstring''' UpperCAmelCase__ : int = [d[key] for d in offsets] return retrieved_list def lowercase_ ( self : Any ): '''simple docstring''' UpperCAmelCase__ : Optional[Any] = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' ) UpperCAmelCase__ : str = self._get_dummy_logits()[0] UpperCAmelCase__ : List[str] = processor.decode(_A , output_word_offsets=_A ) # check Wav2Vec2CTCTokenizerOutput keys for word self.assertEqual(len(outputs.keys() ) , 4 ) self.assertTrue('''text''' in outputs ) self.assertTrue('''word_offsets''' in outputs ) self.assertTrue(isinstance(_A , _A ) ) self.assertEqual(''' '''.join(self.get_from_offsets(outputs['''word_offsets'''] , '''word''' ) ) , outputs.text ) self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''] , '''word''' ) , ['''<s>''', '''<s>''', '''</s>'''] ) self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''] , '''start_offset''' ) , [0, 2, 4] ) self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''] , '''end_offset''' ) , [1, 3, 5] ) def lowercase_ ( self : Optional[int] ): '''simple docstring''' UpperCAmelCase__ : Any = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' ) UpperCAmelCase__ : Dict = self._get_dummy_logits() UpperCAmelCase__ : Dict = processor.batch_decode(_A , output_word_offsets=_A ) # check Wav2Vec2CTCTokenizerOutput keys for word self.assertEqual(len(outputs.keys() ) , 4 ) self.assertTrue('''text''' in outputs ) self.assertTrue('''word_offsets''' in outputs ) self.assertTrue(isinstance(_A , _A ) ) self.assertListEqual( [''' '''.join(self.get_from_offsets(_A , '''word''' ) ) for o in outputs['''word_offsets''']] , outputs.text ) self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''][0] , '''word''' ) , ['''<s>''', '''<s>''', '''</s>'''] ) self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''][0] , '''start_offset''' ) , [0, 2, 4] ) self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''][0] , '''end_offset''' ) , [1, 3, 5] ) @slow @require_torch @require_torchaudio def lowercase_ ( self : Optional[Any] ): '''simple docstring''' import torch UpperCAmelCase__ : Any = load_dataset('''common_voice''' , '''en''' , split='''train''' , streaming=_A ) UpperCAmelCase__ : Dict = ds.cast_column('''audio''' , datasets.Audio(sampling_rate=16_000 ) ) UpperCAmelCase__ : List[Any] = iter(_A ) UpperCAmelCase__ : Optional[Any] = next(_A ) UpperCAmelCase__ : Any = AutoProcessor.from_pretrained('''patrickvonplaten/wav2vec2-base-100h-with-lm''' ) UpperCAmelCase__ : int = WavaVecaForCTC.from_pretrained('''patrickvonplaten/wav2vec2-base-100h-with-lm''' ) # compare to filename `common_voice_en_100038.mp3` of dataset viewer on https://huggingface.co/datasets/common_voice/viewer/en/train UpperCAmelCase__ : int = processor(sample['''audio''']['''array'''] , return_tensors='''pt''' ).input_values with torch.no_grad(): UpperCAmelCase__ : Dict = model(_A ).logits.cpu().numpy() UpperCAmelCase__ : int = processor.decode(logits[0] , output_word_offsets=_A ) UpperCAmelCase__ : Any = model.config.inputs_to_logits_ratio / processor.feature_extractor.sampling_rate UpperCAmelCase__ : Any = [ { '''start_time''': d['''start_offset'''] * time_offset, '''end_time''': 
d['''end_offset'''] * time_offset, '''word''': d['''word'''], } for d in output['''word_offsets'''] ] UpperCAmelCase__ : int = '''WHY DOES MILISANDRA LOOK LIKE SHE WANTS TO CONSUME JOHN SNOW ON THE RIVER AT THE WALL''' # output words self.assertEqual(''' '''.join(self.get_from_offsets(_A , '''word''' ) ) , _A ) self.assertEqual(''' '''.join(self.get_from_offsets(_A , '''word''' ) ) , output.text ) # output times UpperCAmelCase__ : List[Any] = torch.tensor(self.get_from_offsets(_A , '''start_time''' ) ) UpperCAmelCase__ : List[str] = torch.tensor(self.get_from_offsets(_A , '''end_time''' ) ) # fmt: off UpperCAmelCase__ : int = torch.tensor([1.4_1_9_9, 1.6_5_9_9, 2.2_5_9_9, 3.0, 3.2_4, 3.5_9_9_9, 3.7_9_9_9, 4.0_9_9_9, 4.2_6, 4.9_4, 5.2_8, 5.6_5_9_9, 5.7_8, 5.9_4, 6.3_2, 6.5_3_9_9, 6.6_5_9_9] ) UpperCAmelCase__ : List[str] = torch.tensor([1.5_3_9_9, 1.8_9_9_9, 2.9, 3.1_6, 3.5_3_9_9, 3.7_2, 4.0_1_9_9, 4.1_7_9_9, 4.7_6, 5.1_5_9_9, 5.5_5_9_9, 5.6_9_9_9, 5.8_6, 6.1_9_9_9, 6.3_8, 6.6_1_9_9, 6.9_4] ) # fmt: on self.assertTrue(torch.allclose(_A , _A , atol=0.0_1 ) ) self.assertTrue(torch.allclose(_A , _A , atol=0.0_1 ) )
299
1
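As a companion to the Wav2Vec2ProcessorWithLM tests above, a minimal pooled-decoding sketch (not part of any dataset row; the checkpoint is the test fixture used above, and the random logits are stand-ins for real CTC output):

from multiprocessing import get_context

import numpy as np

from transformers import Wav2Vec2ProcessorWithLM

processor = Wav2Vec2ProcessorWithLM.from_pretrained("hf-internal-testing/processor_with_lm")
# stand-in CTC logits of shape (batch, frames, vocab); real ones come from a Wav2Vec2ForCTC forward pass
logits = np.random.randn(2, 50, len(processor.tokenizer.get_vocab()))

# as noted in the test above, the pool must be created *after* the processor
# so the LM is visible to the pool's sub-processes
with get_context("fork").Pool() as pool:
    decoded = processor.batch_decode(logits, pool)
print(decoded.text)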
"""simple docstring""" import tempfile import torch from diffusers import IPNDMScheduler from .test_schedulers import SchedulerCommonTest class UpperCAmelCase_ ( _a): lowerCamelCase__ : int = (IPNDMScheduler,) lowerCamelCase__ : str = (("num_inference_steps", 5_0),) def _UpperCAmelCase ( self , **a ) -> List[str]: lowercase__ : int = {'num_train_timesteps': 1_0_0_0} config.update(**a ) return config def _UpperCAmelCase ( self , a=0 , **a ) -> Union[str, Any]: lowercase__ : Tuple = dict(self.forward_default_kwargs ) lowercase__ : List[Any] = kwargs.pop('num_inference_steps' , a ) lowercase__ : Dict = self.dummy_sample lowercase__ : Union[str, Any] = 0.1 * sample lowercase__ : Optional[Any] = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05] for scheduler_class in self.scheduler_classes: lowercase__ : List[str] = self.get_scheduler_config(**a ) lowercase__ : Union[str, Any] = scheduler_class(**a ) scheduler.set_timesteps(a ) # copy over dummy past residuals lowercase__ : Optional[int] = dummy_past_residuals[:] if time_step is None: lowercase__ : Optional[int] = scheduler.timesteps[len(scheduler.timesteps ) // 2] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(a ) lowercase__ : int = scheduler_class.from_pretrained(a ) new_scheduler.set_timesteps(a ) # copy over dummy past residuals lowercase__ : Tuple = dummy_past_residuals[:] lowercase__ : Optional[Any] = scheduler.step(a , a , a , **a ).prev_sample lowercase__ : List[Any] = new_scheduler.step(a , a , a , **a ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical" lowercase__ : Tuple = scheduler.step(a , a , a , **a ).prev_sample lowercase__ : List[str] = new_scheduler.step(a , a , a , **a ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical" def _UpperCAmelCase ( self ) -> Tuple: pass def _UpperCAmelCase ( self , a=0 , **a ) -> List[Any]: lowercase__ : List[str] = dict(self.forward_default_kwargs ) lowercase__ : List[str] = kwargs.pop('num_inference_steps' , a ) lowercase__ : Union[str, Any] = self.dummy_sample lowercase__ : Tuple = 0.1 * sample lowercase__ : Optional[Any] = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05] for scheduler_class in self.scheduler_classes: lowercase__ : str = self.get_scheduler_config() lowercase__ : Union[str, Any] = scheduler_class(**a ) scheduler.set_timesteps(a ) # copy over dummy past residuals (must be after setting timesteps) lowercase__ : Optional[Any] = dummy_past_residuals[:] if time_step is None: lowercase__ : List[Any] = scheduler.timesteps[len(scheduler.timesteps ) // 2] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(a ) lowercase__ : Optional[Any] = scheduler_class.from_pretrained(a ) # copy over dummy past residuals new_scheduler.set_timesteps(a ) # copy over dummy past residual (must be after setting timesteps) lowercase__ : Union[str, Any] = dummy_past_residuals[:] lowercase__ : Optional[Any] = scheduler.step(a , a , a , **a ).prev_sample lowercase__ : int = new_scheduler.step(a , a , a , **a ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical" lowercase__ : List[str] = scheduler.step(a , a , a , **a ).prev_sample lowercase__ : int = new_scheduler.step(a , a , a , **a ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical" def _UpperCAmelCase ( self , **a ) -> Tuple: lowercase__ : 
Union[str, Any] = self.scheduler_classes[0] lowercase__ : Tuple = self.get_scheduler_config(**a ) lowercase__ : str = scheduler_class(**a ) lowercase__ : Dict = 1_0 lowercase__ : str = self.dummy_model() lowercase__ : Optional[Any] = self.dummy_sample_deter scheduler.set_timesteps(a ) for i, t in enumerate(scheduler.timesteps ): lowercase__ : Any = model(a , a ) lowercase__ : Union[str, Any] = scheduler.step(a , a , a ).prev_sample for i, t in enumerate(scheduler.timesteps ): lowercase__ : str = model(a , a ) lowercase__ : Tuple = scheduler.step(a , a , a ).prev_sample return sample def _UpperCAmelCase ( self ) -> List[Any]: lowercase__ : List[Any] = dict(self.forward_default_kwargs ) lowercase__ : Tuple = kwargs.pop('num_inference_steps' , a ) for scheduler_class in self.scheduler_classes: lowercase__ : List[Any] = self.get_scheduler_config() lowercase__ : Optional[int] = scheduler_class(**a ) lowercase__ : List[Any] = self.dummy_sample lowercase__ : int = 0.1 * sample if num_inference_steps is not None and hasattr(a , 'set_timesteps' ): scheduler.set_timesteps(a ) elif num_inference_steps is not None and not hasattr(a , 'set_timesteps' ): lowercase__ : Any = num_inference_steps # copy over dummy past residuals (must be done after set_timesteps) lowercase__ : Any = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05] lowercase__ : Union[str, Any] = dummy_past_residuals[:] lowercase__ : Any = scheduler.timesteps[5] lowercase__ : str = scheduler.timesteps[6] lowercase__ : Optional[int] = scheduler.step(a , a , a , **a ).prev_sample lowercase__ : Union[str, Any] = scheduler.step(a , a , a , **a ).prev_sample self.assertEqual(output_a.shape , sample.shape ) self.assertEqual(output_a.shape , output_a.shape ) lowercase__ : Union[str, Any] = scheduler.step(a , a , a , **a ).prev_sample lowercase__ : Optional[Any] = scheduler.step(a , a , a , **a ).prev_sample self.assertEqual(output_a.shape , sample.shape ) self.assertEqual(output_a.shape , output_a.shape ) def _UpperCAmelCase ( self ) -> Optional[int]: for timesteps in [1_0_0, 1_0_0_0]: self.check_over_configs(num_train_timesteps=a , time_step=a ) def _UpperCAmelCase ( self ) -> Any: for t, num_inference_steps in zip([1, 5, 1_0] , [1_0, 5_0, 1_0_0] ): self.check_over_forward(num_inference_steps=a , time_step=a ) def _UpperCAmelCase ( self ) -> Dict: lowercase__ : List[str] = self.full_loop() lowercase__ : Tuple = torch.mean(torch.abs(a ) ) assert abs(result_mean.item() - 2_5_4_0_5_2_9 ) < 1_0
77
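To complement the scheduler test above, a minimal denoising-loop sketch (assumptions: diffusers is installed, and a zero tensor stands in for a real noise-prediction model):

import torch

from diffusers import IPNDMScheduler

scheduler = IPNDMScheduler(num_train_timesteps=1000)
scheduler.set_timesteps(10)  # ten inference steps, as in full_loop above

sample = torch.randn(1, 3, 8, 8)  # arbitrary starting sample
for t in scheduler.timesteps:
    residual = torch.zeros_like(sample)  # stand-in for model(sample, t)
    sample = scheduler.step(residual, t, sample).prev_sample
print(sample.shape)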
"""simple docstring""" from typing import List, Union from ..utils import ( add_end_docstrings, is_tf_available, is_torch_available, is_vision_available, logging, requires_backends, ) from .base import PIPELINE_INIT_ARGS, Pipeline if is_vision_available(): from PIL import Image from ..image_utils import load_image if is_tf_available(): import tensorflow as tf from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING from ..tf_utils import stable_softmax if is_torch_available(): from ..models.auto.modeling_auto import MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING _UpperCamelCase : int = logging.get_logger(__name__) @add_end_docstrings(_a) class UpperCAmelCase_ ( _a): def __init__( self , *a , **a ) -> Union[str, Any]: super().__init__(*a , **a ) requires_backends(self , 'vision' ) self.check_model_type( TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING if self.framework == 'tf' else MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING ) def _UpperCAmelCase ( self , a=None ) -> Dict: lowercase__ : Any = {} if top_k is not None: lowercase__ : List[str] = top_k return {}, {}, postprocess_params def __call__( self , a , **a ) -> Tuple: return super().__call__(a , **a ) def _UpperCAmelCase ( self , a ) -> Dict: lowercase__ : List[Any] = load_image(a ) lowercase__ : Union[str, Any] = self.image_processor(images=a , return_tensors=self.framework ) return model_inputs def _UpperCAmelCase ( self , a ) -> List[str]: lowercase__ : Dict = self.model(**a ) return model_outputs def _UpperCAmelCase ( self , a , a=5 ) -> Dict: if top_k > self.model.config.num_labels: lowercase__ : List[Any] = self.model.config.num_labels if self.framework == "pt": lowercase__ : Tuple = model_outputs.logits.softmax(-1 )[0] lowercase__ , lowercase__ : Optional[Any] = probs.topk(a ) elif self.framework == "tf": lowercase__ : Union[str, Any] = stable_softmax(model_outputs.logits , axis=-1 )[0] lowercase__ : str = tf.math.top_k(a , k=a ) lowercase__ , lowercase__ : Dict = topk.values.numpy(), topk.indices.numpy() else: raise ValueError(f"""Unsupported framework: {self.framework}""" ) lowercase__ : Dict = scores.tolist() lowercase__ : Dict = ids.tolist() return [{"score": score, "label": self.model.config.idalabel[_id]} for score, _id in zip(a , a )]
77
1
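A short usage sketch for the pipeline above (the image path is a hypothetical placeholder; any local image or URL works):

from transformers import pipeline

classifier = pipeline("image-classification")  # downloads a default checkpoint
preds = classifier("cat.jpg", top_k=3)  # "cat.jpg" is an illustrative local file
# each entry is {"score": float, "label": str}, matching postprocess() above
print(preds)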
from __future__ import annotations

import requests


def get_hackernews_story(story_id: str) -> dict:
    url = f"https://hacker-news.firebaseio.com/v0/item/{story_id}.json?print=pretty"
    return requests.get(url).json()


def hackernews_top_stories(max_stories: int = 10) -> list[dict]:
    url = "https://hacker-news.firebaseio.com/v0/topstories.json?print=pretty"
    story_ids = requests.get(url).json()[:max_stories]
    return [get_hackernews_story(story_id) for story_id in story_ids]


def hackernews_top_stories_as_markdown(max_stories: int = 10) -> str:
    stories = hackernews_top_stories(max_stories)
    return "\n".join("* [{title}]({url})".format(**story) for story in stories)


if __name__ == "__main__":
    print(hackernews_top_stories_as_markdown())
357
import argparse
import os
import re

import packaging.version


PATH_TO_EXAMPLES = "examples/"
REPLACE_PATTERNS = {
    "examples": (re.compile(r'^check_min_version\("[^"]+"\)\s*$', re.MULTILINE), 'check_min_version("VERSION")\n'),
    "init": (re.compile(r'^__version__\s+=\s+"([^"]+)"\s*$', re.MULTILINE), '__version__ = "VERSION"\n'),
    "setup": (re.compile(r'^(\s*)version\s*=\s*"[^"]+",', re.MULTILINE), r'\1version="VERSION",'),
    "doc": (re.compile(r'^(\s*)release\s*=\s*"[^"]+"$', re.MULTILINE), 'release = "VERSION"\n'),
}
REPLACE_FILES = {
    "init": "src/transformers/__init__.py",
    "setup": "setup.py",
}
README_FILE = "README.md"


def update_version_in_file(fname, version, pattern):
    with open(fname, "r", encoding="utf-8", newline="\n") as f:
        code = f.read()
    re_pattern, replace = REPLACE_PATTERNS[pattern]
    replace = replace.replace("VERSION", version)
    code = re_pattern.sub(replace, code)
    with open(fname, "w", encoding="utf-8", newline="\n") as f:
        f.write(code)


def update_version_in_examples(version):
    for folder, directories, fnames in os.walk(PATH_TO_EXAMPLES):
        # Removing some of the folders with non-actively maintained examples from the walk
        if "research_projects" in directories:
            directories.remove("research_projects")
        if "legacy" in directories:
            directories.remove("legacy")
        for fname in fnames:
            if fname.endswith(".py"):
                update_version_in_file(os.path.join(folder, fname), version, pattern="examples")


def global_version_update(version, patch=False):
    for pattern, fname in REPLACE_FILES.items():
        update_version_in_file(fname, version, pattern)
    if not patch:
        update_version_in_examples(version)


def clean_main_ref_in_model_list():
    _start_prompt = "🤗 Transformers currently provides the following architectures"
    _end_prompt = "1. Want to contribute a new model?"
    with open(README_FILE, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    # Find the start of the list.
    start_index = 0
    while not lines[start_index].startswith(_start_prompt):
        start_index += 1
    start_index += 1

    index = start_index
    # Update the lines in the model list.
    while not lines[index].startswith(_end_prompt):
        if lines[index].startswith("1."):
            lines[index] = lines[index].replace(
                "https://huggingface.co/docs/transformers/main/model_doc",
                "https://huggingface.co/docs/transformers/model_doc",
            )
        index += 1

    with open(README_FILE, "w", encoding="utf-8", newline="\n") as f:
        f.writelines(lines)


def get_version():
    with open(REPLACE_FILES["init"], "r") as f:
        code = f.read()
    default_version = REPLACE_PATTERNS["init"][0].search(code).groups()[0]
    return packaging.version.parse(default_version)


def pre_release_work(patch=False):
    default_version = get_version()
    if patch and default_version.is_devrelease:
        raise ValueError("Can't create a patch version from the dev branch, checkout a released version!")
    if default_version.is_devrelease:
        default_version = default_version.base_version
    elif patch:
        default_version = f"{default_version.major}.{default_version.minor}.{default_version.micro + 1}"
    else:
        default_version = f"{default_version.major}.{default_version.minor + 1}.0"

    # Now let's ask nicely if that's the right one.
    version = input(f"Which version are you releasing? [{default_version}]")
    if len(version) == 0:
        version = default_version

    print(f"Updating version to {version}.")
    global_version_update(version, patch=patch)
    if not patch:
        print("Cleaning main README, don't forget to run `make fix-copies`.")
        clean_main_ref_in_model_list()


def post_release_work():
    current_version = get_version()
    dev_version = f"{current_version.major}.{current_version.minor + 1}.0.dev0"
    current_version = current_version.base_version

    # Check with the user we got that right.
    version = input(f"Which version are we developing now? [{dev_version}]")
    if len(version) == 0:
        version = dev_version

    print(f"Updating version to {version}.")
    global_version_update(version)
    print("Cleaning main README, don't forget to run `make fix-copies`.")
    clean_main_ref_in_model_list()


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--post_release", action="store_true", help="Whether this is pre or post release.")
    parser.add_argument("--patch", action="store_true", help="Whether or not this is a patch release.")
    args = parser.parse_args()
    if not args.post_release:
        pre_release_work(patch=args.patch)
    elif args.patch:
        print("Nothing to do after a patch :-)")
    else:
        post_release_work()
15
0
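To make the release script's substitution concrete, a self-contained illustration of the "init" pattern from REPLACE_PATTERNS above (the version strings are made up):

import re

re_pattern = re.compile(r'^__version__\s+=\s+"([^"]+)"\s*$', re.MULTILINE)
code = '__version__ = "4.26.0.dev0"\n'
print(re_pattern.sub('__version__ = "4.26.0"\n', code))  # prints: __version__ = "4.26.0"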
from typing import Dict, List, Optional, Tuple, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, flip_channel_order, get_resize_output_image_size, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_torch_available, is_torch_tensor, is_vision_available, logging if is_vision_available(): import PIL if is_torch_available(): import torch A__ = logging.get_logger(__name__) class __lowerCAmelCase ( lowerCamelCase__ ): __lowerCamelCase = ['''pixel_values'''] def __init__( self , _snake_case = True , _snake_case = None , _snake_case = PILImageResampling.BILINEAR , _snake_case = True , _snake_case = 1 / 255 , _snake_case = True , _snake_case = None , _snake_case = True , **_snake_case , ): """simple docstring""" super().__init__(**_snake_case ) _lowerCAmelCase = size if size is not None else {"""shortest_edge""": 224} _lowerCAmelCase = get_size_dict(_snake_case , default_to_square=_snake_case ) _lowerCAmelCase = crop_size if crop_size is not None else {"""height""": 256, """width""": 256} _lowerCAmelCase = get_size_dict(_snake_case , param_name="""crop_size""" ) _lowerCAmelCase = do_resize _lowerCAmelCase = size _lowerCAmelCase = resample _lowerCAmelCase = do_rescale _lowerCAmelCase = rescale_factor _lowerCAmelCase = do_center_crop _lowerCAmelCase = crop_size _lowerCAmelCase = do_flip_channel_order def snake_case ( self , _snake_case , _snake_case , _snake_case = PIL.Image.BILINEAR , _snake_case = None , **_snake_case , ): """simple docstring""" _lowerCAmelCase = get_size_dict(_snake_case , default_to_square=_snake_case ) if "shortest_edge" not in size: raise ValueError(F'The `size` dictionary must contain the key `shortest_edge`. Got {size.keys()}' ) _lowerCAmelCase = get_resize_output_image_size(_snake_case , size=size["""shortest_edge"""] , default_to_square=_snake_case ) return resize(_snake_case , size=_snake_case , resample=_snake_case , data_format=_snake_case , **_snake_case ) def snake_case ( self , _snake_case , _snake_case , _snake_case = None , **_snake_case , ): """simple docstring""" _lowerCAmelCase = get_size_dict(_snake_case ) if "height" not in size or "width" not in size: raise ValueError(F'The `size` dictionary must contain the keys `height` and `width`. 
Got {size.keys()}' ) return center_crop(_snake_case , size=(size["""height"""], size["""width"""]) , data_format=_snake_case , **_snake_case ) def snake_case ( self , _snake_case , _snake_case , _snake_case = None , **_snake_case , ): """simple docstring""" return rescale(_snake_case , scale=_snake_case , data_format=_snake_case , **_snake_case ) def snake_case ( self , _snake_case , _snake_case = None ): """simple docstring""" return flip_channel_order(_snake_case , data_format=_snake_case ) def snake_case ( self , _snake_case , _snake_case = None , _snake_case = None , _snake_case = None , _snake_case = None , _snake_case = None , _snake_case = None , _snake_case = None , _snake_case = None , _snake_case = None , _snake_case = ChannelDimension.FIRST , **_snake_case , ): """simple docstring""" _lowerCAmelCase = do_resize if do_resize is not None else self.do_resize _lowerCAmelCase = resample if resample is not None else self.resample _lowerCAmelCase = do_rescale if do_rescale is not None else self.do_rescale _lowerCAmelCase = rescale_factor if rescale_factor is not None else self.rescale_factor _lowerCAmelCase = do_center_crop if do_center_crop is not None else self.do_center_crop _lowerCAmelCase = ( do_flip_channel_order if do_flip_channel_order is not None else self.do_flip_channel_order ) _lowerCAmelCase = size if size is not None else self.size _lowerCAmelCase = get_size_dict(_snake_case , default_to_square=_snake_case ) _lowerCAmelCase = crop_size if crop_size is not None else self.crop_size _lowerCAmelCase = get_size_dict(_snake_case , param_name="""crop_size""" ) _lowerCAmelCase = make_list_of_images(_snake_case ) if not valid_images(_snake_case ): raise ValueError( """Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """ """torch.Tensor, tf.Tensor or jax.ndarray.""" ) if do_resize and size is None: raise ValueError("""Size must be specified if do_resize is True.""" ) if do_rescale and rescale_factor is None: raise ValueError("""Rescale factor must be specified if do_rescale is True.""" ) if do_center_crop and crop_size is None: raise ValueError("""Crop size must be specified if do_center_crop is True.""" ) # All transformations expect numpy arrays. 
_lowerCAmelCase = [to_numpy_array(_snake_case ) for image in images] if do_resize: _lowerCAmelCase = [self.resize(image=_snake_case , size=_snake_case , resample=_snake_case ) for image in images] if do_center_crop: _lowerCAmelCase = [self.center_crop(image=_snake_case , size=_snake_case ) for image in images] if do_rescale: _lowerCAmelCase = [self.rescale(image=_snake_case , scale=_snake_case ) for image in images] # the pretrained checkpoints assume images are BGR, not RGB if do_flip_channel_order: _lowerCAmelCase = [self.flip_channel_order(image=_snake_case ) for image in images] _lowerCAmelCase = [to_channel_dimension_format(_snake_case , _snake_case ) for image in images] _lowerCAmelCase = {"""pixel_values""": images} return BatchFeature(data=_snake_case , tensor_type=_snake_case ) def snake_case ( self , _snake_case , _snake_case = None ): """simple docstring""" _lowerCAmelCase = outputs.logits # Resize logits and compute semantic segmentation maps if target_sizes is not None: if len(_snake_case ) != len(_snake_case ): raise ValueError( """Make sure that you pass in as many target sizes as the batch dimension of the logits""" ) if is_torch_tensor(_snake_case ): _lowerCAmelCase = target_sizes.numpy() _lowerCAmelCase = [] for idx in range(len(_snake_case ) ): _lowerCAmelCase = torch.nn.functional.interpolate( logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode="""bilinear""" , align_corners=_snake_case ) _lowerCAmelCase = resized_logits[0].argmax(dim=0 ) semantic_segmentation.append(_snake_case ) else: _lowerCAmelCase = logits.argmax(dim=1 ) _lowerCAmelCase = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )] return semantic_segmentation
82
import argparse import gdown import numpy as np import torch from huggingface_hub import hf_hub_download from transformers import ( CLIPTokenizer, CLIPTokenizerFast, VideoMAEImageProcessor, XCLIPConfig, XCLIPModel, XCLIPProcessor, XCLIPTextConfig, XCLIPVisionConfig, ) def _UpperCAmelCase ( snake_case , snake_case ): """simple docstring""" _lowerCAmelCase = XCLIPTextConfig() # derive patch size from model name _lowerCAmelCase = model_name.find("""patch""" ) _lowerCAmelCase = int(model_name[start_idx + len("""patch""" ) : start_idx + len("""patch""" ) + 2] ) _lowerCAmelCase = XCLIPVisionConfig(patch_size=snake_case , num_frames=snake_case ) if "large" in model_name: _lowerCAmelCase = 7_68 _lowerCAmelCase = 30_72 _lowerCAmelCase = 12 _lowerCAmelCase = 10_24 _lowerCAmelCase = 40_96 _lowerCAmelCase = 16 _lowerCAmelCase = 24 _lowerCAmelCase = 7_68 _lowerCAmelCase = 30_72 if model_name == "xclip-large-patch14-16-frames": _lowerCAmelCase = 3_36 _lowerCAmelCase = XCLIPConfig.from_text_vision_configs(snake_case , snake_case ) if "large" in model_name: _lowerCAmelCase = 7_68 return config def _UpperCAmelCase ( snake_case ): """simple docstring""" if name == "token_embedding.weight": _lowerCAmelCase = name.replace("""token_embedding.weight""" , """text_model.embeddings.token_embedding.weight""" ) if name == "positional_embedding": _lowerCAmelCase = name.replace("""positional_embedding""" , """text_model.embeddings.position_embedding.weight""" ) if "ln_1" in name: _lowerCAmelCase = name.replace("""ln_1""" , """layer_norm1""" ) if "ln_2" in name: _lowerCAmelCase = name.replace("""ln_2""" , """layer_norm2""" ) if "c_fc" in name: _lowerCAmelCase = name.replace("""c_fc""" , """fc1""" ) if "c_proj" in name: _lowerCAmelCase = name.replace("""c_proj""" , """fc2""" ) if name.startswith("""transformer.resblocks""" ): _lowerCAmelCase = name.replace("""transformer.resblocks""" , """text_model.encoder.layers""" ) if "attn.out_proj" in name and "message" not in name: _lowerCAmelCase = name.replace("""attn.out_proj""" , """self_attn.out_proj""" ) if "ln_final" in name: _lowerCAmelCase = name.replace("""ln_final""" , """text_model.final_layer_norm""" ) # visual encoder if name == "visual.class_embedding": _lowerCAmelCase = name.replace("""visual.class_embedding""" , """vision_model.embeddings.class_embedding""" ) if name == "visual.positional_embedding": _lowerCAmelCase = name.replace("""visual.positional_embedding""" , """vision_model.embeddings.position_embedding.weight""" ) if name.startswith("""visual.transformer.resblocks""" ): _lowerCAmelCase = name.replace("""visual.transformer.resblocks""" , """vision_model.encoder.layers""" ) if "visual.conv1" in name: _lowerCAmelCase = name.replace("""visual.conv1""" , """vision_model.embeddings.patch_embedding""" ) if "visual.ln_pre" in name: _lowerCAmelCase = name.replace("""visual.ln_pre""" , """vision_model.pre_layernorm""" ) if "visual.ln_post" in name: _lowerCAmelCase = name.replace("""visual.ln_post""" , """vision_model.post_layernorm""" ) if "visual.proj" in name: _lowerCAmelCase = name.replace("""visual.proj""" , """visual_projection.weight""" ) if "text_projection" in name: _lowerCAmelCase = name.replace("""text_projection""" , """text_projection.weight""" ) # things on top if "prompts_visual_proj" in name: _lowerCAmelCase = name.replace("""prompts_visual_proj""" , """prompts_visual_projection""" ) if "prompts_visual_ln" in name: _lowerCAmelCase = name.replace("""prompts_visual_ln""" , """prompts_visual_layernorm""" ) # mit if name == "mit.positional_embedding": 
_lowerCAmelCase = name.replace("""positional""" , """position""" ) if name.startswith("""mit.resblocks""" ): _lowerCAmelCase = name.replace("""mit.resblocks""" , """mit.encoder.layers""" ) # prompts generator if name.startswith("""prompts_generator.norm""" ): _lowerCAmelCase = name.replace("""prompts_generator.norm""" , """prompts_generator.layernorm""" ) return name def _UpperCAmelCase ( snake_case , snake_case ): """simple docstring""" for key in orig_state_dict.copy().keys(): _lowerCAmelCase = orig_state_dict.pop(snake_case ) if "attn.in_proj" in key: _lowerCAmelCase = key.split(""".""" ) if key.startswith("""visual""" ): _lowerCAmelCase = key_split[3] _lowerCAmelCase = config.vision_config.hidden_size if "message_attn" in key: if "weight" in key: _lowerCAmelCase = val[ :dim, : ] _lowerCAmelCase = val[ dim : dim * 2, : ] _lowerCAmelCase = val[ -dim:, : ] else: _lowerCAmelCase = val[ :dim ] _lowerCAmelCase = val[ dim : dim * 2 ] _lowerCAmelCase = val[ -dim: ] else: if "weight" in key: _lowerCAmelCase = val[ :dim, : ] _lowerCAmelCase = val[ dim : dim * 2, : ] _lowerCAmelCase = val[ -dim:, : ] else: _lowerCAmelCase = val[:dim] _lowerCAmelCase = val[ dim : dim * 2 ] _lowerCAmelCase = val[-dim:] elif key.startswith("""mit""" ): _lowerCAmelCase = key_split[2] _lowerCAmelCase = config.vision_config.mit_hidden_size if "weight" in key: _lowerCAmelCase = val[:dim, :] _lowerCAmelCase = val[dim : dim * 2, :] _lowerCAmelCase = val[-dim:, :] else: _lowerCAmelCase = val[:dim] _lowerCAmelCase = val[dim : dim * 2] _lowerCAmelCase = val[-dim:] else: _lowerCAmelCase = key_split[2] _lowerCAmelCase = config.text_config.hidden_size if "weight" in key: _lowerCAmelCase = val[:dim, :] _lowerCAmelCase = val[ dim : dim * 2, : ] _lowerCAmelCase = val[-dim:, :] else: _lowerCAmelCase = val[:dim] _lowerCAmelCase = val[ dim : dim * 2 ] _lowerCAmelCase = val[-dim:] else: _lowerCAmelCase = rename_key(snake_case ) if new_key_name in ["visual_projection.weight", "text_projection.weight"]: _lowerCAmelCase = val.T _lowerCAmelCase = val return orig_state_dict def _UpperCAmelCase ( snake_case ): """simple docstring""" if num_frames == 8: _lowerCAmelCase = """eating_spaghetti_8_frames.npy""" elif num_frames == 16: _lowerCAmelCase = """eating_spaghetti.npy""" elif num_frames == 32: _lowerCAmelCase = """eating_spaghetti_32_frames.npy""" _lowerCAmelCase = hf_hub_download( repo_id="""hf-internal-testing/spaghetti-video""" , filename=snake_case , repo_type="""dataset""" , ) _lowerCAmelCase = np.load(snake_case ) return list(snake_case ) def _UpperCAmelCase ( snake_case , snake_case=None , snake_case=False ): """simple docstring""" _lowerCAmelCase = { # fully supervised kinetics-400 checkpoints """xclip-base-patch32""": """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_8.pth""", """xclip-base-patch32-16-frames""": ( """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_16.pth""" ), """xclip-base-patch16""": """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_8.pth""", """xclip-base-patch16-16-frames""": ( """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_16.pth""" ), """xclip-large-patch14""": """https://drive.google.com/u/0/uc?id=1NUOImq0o5DlQTST17iIP3vG7DgmHQuCx&amp;export=download&amp;confirm=t&amp;uuid=b26caedc-88e2-473e-830a-9d158b653cdb""", """xclip-large-patch14-16-frames""": """https://drive.google.com/u/0/uc?id=1FOYgnJc097OJ4lGwtRCCydQyVPJEOH7d&amp;export=download&amp;confirm=t&amp;uuid=538fa810-e671-4050-b385-9a623f89804f""", 
# fully supervised kinetics-600 checkpoints """xclip-base-patch16-kinetics-600""": ( """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_8.pth""" ), """xclip-base-patch16-kinetics-600-16-frames""": ( """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_16.pth""" ), """xclip-large-patch14-kinetics-600""": """https://drive.google.com/u/0/uc?id=1FV8C1INuM91sLAN4ImjzePLIlpMSihwV&amp;export=download&amp;confirm=t&amp;uuid=141d4977-4a65-44ae-864f-4b0c19f838be""", # few shot """xclip-base-patch16-hmdb-2-shot""": ( """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_2.pth""" ), """xclip-base-patch16-hmdb-4-shot""": ( """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_4.pth""" ), """xclip-base-patch16-hmdb-8-shot""": ( """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_8.pth""" ), """xclip-base-patch16-hmdb-16-shot""": ( """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_16.pth""" ), """xclip-base-patch16-ucf-2-shot""": ( """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_2.pth""" ), """xclip-base-patch16-ucf-4-shot""": ( """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_4.pth""" ), """xclip-base-patch16-ucf-8-shot""": ( """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_8.pth""" ), """xclip-base-patch16-ucf-16-shot""": ( """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_16.pth""" ), # zero shot """xclip-base-patch16-zero-shot""": """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/zero.pth""", } _lowerCAmelCase = model_to_url[model_name] _lowerCAmelCase = 8 if "16-frames" in model_name: _lowerCAmelCase = 16 elif "shot" in model_name: _lowerCAmelCase = 32 _lowerCAmelCase = get_xclip_config(snake_case , snake_case ) _lowerCAmelCase = XCLIPModel(snake_case ) model.eval() if "drive" in checkpoint_url: _lowerCAmelCase = """pytorch_model.bin""" gdown.cached_download(snake_case , snake_case , quiet=snake_case ) _lowerCAmelCase = torch.load(snake_case , map_location="""cpu""" )["""model"""] else: _lowerCAmelCase = torch.hub.load_state_dict_from_url(snake_case )["""model"""] _lowerCAmelCase = convert_state_dict(snake_case , snake_case ) _lowerCAmelCase = XCLIPModel(snake_case ) _lowerCAmelCase , _lowerCAmelCase = model.load_state_dict(snake_case , strict=snake_case ) assert missing_keys == ["text_model.embeddings.position_ids", "vision_model.embeddings.position_ids"] model.eval() _lowerCAmelCase = 3_36 if model_name == """xclip-large-patch14-16-frames""" else 2_24 _lowerCAmelCase = VideoMAEImageProcessor(size=snake_case ) _lowerCAmelCase = CLIPTokenizer.from_pretrained("""openai/clip-vit-base-patch32""" ) _lowerCAmelCase = CLIPTokenizerFast.from_pretrained("""openai/clip-vit-base-patch32""" ) _lowerCAmelCase = XCLIPProcessor(image_processor=snake_case , tokenizer=snake_case ) _lowerCAmelCase = prepare_video(snake_case ) _lowerCAmelCase = processor( text=["""playing sports""", """eating spaghetti""", """go shopping"""] , videos=snake_case , return_tensors="""pt""" , padding=snake_case ) print("""Shape of pixel values:""" , inputs.pixel_values.shape ) with torch.no_grad(): _lowerCAmelCase = model(**snake_case ) # Verify outputs _lowerCAmelCase = outputs.logits_per_video _lowerCAmelCase = logits_per_video.softmax(dim=1 ) print("""Probs:""" , snake_case ) # kinetics-400 if model_name == "xclip-base-patch32": _lowerCAmelCase = 
torch.tensor([[0.0_019, 0.9_951, 0.0_030]] ) elif model_name == "xclip-base-patch32-16-frames": _lowerCAmelCase = torch.tensor([[7.09_99E-04, 9.98_83E-01, 4.55_80E-04]] ) elif model_name == "xclip-base-patch16": _lowerCAmelCase = torch.tensor([[0.0_083, 0.9_681, 0.0_236]] ) elif model_name == "xclip-base-patch16-16-frames": _lowerCAmelCase = torch.tensor([[7.69_37E-04, 9.97_28E-01, 1.94_73E-03]] ) elif model_name == "xclip-large-patch14": _lowerCAmelCase = torch.tensor([[0.0_062, 0.9_864, 0.0_075]] ) elif model_name == "xclip-large-patch14-16-frames": _lowerCAmelCase = torch.tensor([[3.38_77E-04, 9.99_37E-01, 2.88_88E-04]] ) # kinetics-600 elif model_name == "xclip-base-patch16-kinetics-600": _lowerCAmelCase = torch.tensor([[0.0_555, 0.8_914, 0.0_531]] ) elif model_name == "xclip-base-patch16-kinetics-600-16-frames": _lowerCAmelCase = torch.tensor([[3.85_54E-04, 9.99_29E-01, 3.27_54E-04]] ) elif model_name == "xclip-large-patch14-kinetics-600": _lowerCAmelCase = torch.tensor([[0.0_036, 0.9_920, 0.0_045]] ) # few shot elif model_name == "xclip-base-patch16-hmdb-2-shot": _lowerCAmelCase = torch.tensor([[7.18_90E-06, 9.99_94E-01, 5.65_59E-05]] ) elif model_name == "xclip-base-patch16-hmdb-4-shot": _lowerCAmelCase = torch.tensor([[1.03_20E-05, 9.99_93E-01, 6.24_35E-05]] ) elif model_name == "xclip-base-patch16-hmdb-8-shot": _lowerCAmelCase = torch.tensor([[4.13_77E-06, 9.99_90E-01, 9.83_86E-05]] ) elif model_name == "xclip-base-patch16-hmdb-16-shot": _lowerCAmelCase = torch.tensor([[4.13_47E-05, 9.99_62E-01, 3.34_11E-04]] ) elif model_name == "xclip-base-patch16-ucf-2-shot": _lowerCAmelCase = torch.tensor([[8.58_57E-05, 9.99_28E-01, 6.32_91E-04]] ) elif model_name == "xclip-base-patch16-ucf-4-shot": _lowerCAmelCase = torch.tensor([[8.58_57E-05, 9.99_28E-01, 6.32_91E-04]] ) elif model_name == "xclip-base-patch16-ucf-8-shot": _lowerCAmelCase = torch.tensor([[0.0_027, 0.9_904, 0.0_070]] ) elif model_name == "xclip-base-patch16-ucf-16-shot": _lowerCAmelCase = torch.tensor([[9.82_19E-04, 9.95_93E-01, 3.08_63E-03]] ) # zero shot elif model_name == "xclip-base-patch16-zero-shot": _lowerCAmelCase = torch.tensor([[3.50_82E-04, 9.97_85E-01, 1.79_66E-03]] ) else: raise ValueError(F'Model name {model_name} not supported' ) assert torch.allclose(snake_case , snake_case , atol=1E-3 ) print("""Looks ok!""" ) if pytorch_dump_folder_path is not None: print(F'Saving model {model_name} to {pytorch_dump_folder_path}' ) model.save_pretrained(snake_case ) if push_to_hub: print("""Pushing model, processor and slow tokenizer files to the hub...""" ) model.push_to_hub(snake_case , organization="""nielsr""" ) processor.push_to_hub(snake_case , organization="""nielsr""" ) slow_tokenizer.push_to_hub(snake_case , organization="""nielsr""" ) if __name__ == "__main__": A__ = argparse.ArgumentParser() # Required parameters parser.add_argument( """--model_name""", default="""xclip-base-patch32""", type=str, help="""Name of the model.""", ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory.""" ) parser.add_argument( """--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub.""" ) A__ = parser.parse_args() convert_xclip_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
82
1
'''simple docstring''' import argparse import numpy as np import torch from transformers import SpeechTaHifiGan, SpeechTaHifiGanConfig, logging logging.set_verbosity_info() _lowerCamelCase : Union[str, Any] = logging.get_logger('transformers.models.speecht5') def __a ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) ->str: """simple docstring""" hf_model.apply_weight_norm() A = checkpoint["""input_conv.weight_g"""] A = checkpoint["""input_conv.weight_v"""] A = checkpoint["""input_conv.bias"""] for i in range(len(config.upsample_rates ) ): A = checkpoint[f"""upsamples.{i}.1.weight_g"""] A = checkpoint[f"""upsamples.{i}.1.weight_v"""] A = checkpoint[f"""upsamples.{i}.1.bias"""] for i in range(len(config.upsample_rates ) * len(config.resblock_kernel_sizes ) ): for j in range(len(config.resblock_dilation_sizes ) ): A = checkpoint[f"""blocks.{i}.convs1.{j}.1.weight_g"""] A = checkpoint[f"""blocks.{i}.convs1.{j}.1.weight_v"""] A = checkpoint[f"""blocks.{i}.convs1.{j}.1.bias"""] A = checkpoint[f"""blocks.{i}.convs2.{j}.1.weight_g"""] A = checkpoint[f"""blocks.{i}.convs2.{j}.1.weight_v"""] A = checkpoint[f"""blocks.{i}.convs2.{j}.1.bias"""] A = checkpoint["""output_conv.1.weight_g"""] A = checkpoint["""output_conv.1.weight_v"""] A = checkpoint["""output_conv.1.bias"""] hf_model.remove_weight_norm() @torch.no_grad() def __a ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase=None , UpperCAmelCase=None , ) ->str: """simple docstring""" if config_path is not None: A = SpeechTaHifiGanConfig.from_pretrained(UpperCAmelCase ) else: A = SpeechTaHifiGanConfig() A = SpeechTaHifiGan(UpperCAmelCase ) A = torch.load(UpperCAmelCase ) load_weights(orig_checkpoint["""model"""]["""generator"""] , UpperCAmelCase , UpperCAmelCase ) A = np.load(UpperCAmelCase ) A = stats[0].reshape(-1 ) A = stats[1].reshape(-1 ) A = torch.from_numpy(UpperCAmelCase ).float() A = torch.from_numpy(UpperCAmelCase ).float() model.save_pretrained(UpperCAmelCase ) if repo_id: print("""Pushing to the hub...""" ) model.push_to_hub(UpperCAmelCase ) if __name__ == "__main__": _lowerCamelCase : Any = argparse.ArgumentParser() parser.add_argument('--checkpoint_path', required=True, default=None, type=str, help='Path to original checkpoint') parser.add_argument('--stats_path', required=True, default=None, type=str, help='Path to stats.npy file') parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert') parser.add_argument( '--pytorch_dump_folder_path', required=True, default=None, type=str, help='Path to the output PyTorch model.' ) parser.add_argument( '--push_to_hub', default=None, type=str, help='Where to upload the converted model on the 🤗 hub.' ) _lowerCamelCase : Union[str, Any] = parser.parse_args() convert_hifigan_checkpoint( args.checkpoint_path, args.stats_path, args.pytorch_dump_folder_path, args.config_path, args.push_to_hub, )
337
from collections import OrderedDict
from typing import Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "hustvl/yolos-small": "https://huggingface.co/hustvl/yolos-small/resolve/main/config.json",
    # See all YOLOS models at https://huggingface.co/models?filter=yolos
}


class YolosConfig(PretrainedConfig):
    model_type = "yolos"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=[512, 864],
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        num_detection_tokens=100,
        use_mid_position_embeddings=True,
        auxiliary_loss=False,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.num_detection_tokens = num_detection_tokens
        self.use_mid_position_embeddings = use_mid_position_embeddings
        self.auxiliary_loss = auxiliary_loss
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient


class YolosOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    @property
    def default_onnx_opset(self) -> int:
        return 12
337
1
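A quick usage sketch for the configuration above (the values printed are the defaults defined in __init__):

from transformers import YolosConfig

config = YolosConfig()
print(config.model_type, config.hidden_size, config.num_detection_tokens)  # yolos 768 100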
'''simple docstring''' from ..utils import DummyObject, requires_backends class _lowerCAmelCase ( metaclass=__snake_case ): """simple docstring""" lowerCamelCase = ["sentencepiece"] def __init__( self , *_lowerCamelCase , **_lowerCamelCase ) -> Any: requires_backends(self , ["""sentencepiece"""] ) class _lowerCAmelCase ( metaclass=__snake_case ): """simple docstring""" lowerCamelCase = ["sentencepiece"] def __init__( self , *_lowerCamelCase , **_lowerCamelCase ) -> List[Any]: requires_backends(self , ["""sentencepiece"""] ) class _lowerCAmelCase ( metaclass=__snake_case ): """simple docstring""" lowerCamelCase = ["sentencepiece"] def __init__( self , *_lowerCamelCase , **_lowerCamelCase ) -> str: requires_backends(self , ["""sentencepiece"""] ) class _lowerCAmelCase ( metaclass=__snake_case ): """simple docstring""" lowerCamelCase = ["sentencepiece"] def __init__( self , *_lowerCamelCase , **_lowerCamelCase ) -> str: requires_backends(self , ["""sentencepiece"""] ) class _lowerCAmelCase ( metaclass=__snake_case ): """simple docstring""" lowerCamelCase = ["sentencepiece"] def __init__( self , *_lowerCamelCase , **_lowerCamelCase ) -> Any: requires_backends(self , ["""sentencepiece"""] ) class _lowerCAmelCase ( metaclass=__snake_case ): """simple docstring""" lowerCamelCase = ["sentencepiece"] def __init__( self , *_lowerCamelCase , **_lowerCamelCase ) -> Dict: requires_backends(self , ["""sentencepiece"""] ) class _lowerCAmelCase ( metaclass=__snake_case ): """simple docstring""" lowerCamelCase = ["sentencepiece"] def __init__( self , *_lowerCamelCase , **_lowerCamelCase ) -> Dict: requires_backends(self , ["""sentencepiece"""] ) class _lowerCAmelCase ( metaclass=__snake_case ): """simple docstring""" lowerCamelCase = ["sentencepiece"] def __init__( self , *_lowerCamelCase , **_lowerCamelCase ) -> Any: requires_backends(self , ["""sentencepiece"""] ) class _lowerCAmelCase ( metaclass=__snake_case ): """simple docstring""" lowerCamelCase = ["sentencepiece"] def __init__( self , *_lowerCamelCase , **_lowerCamelCase ) -> Union[str, Any]: requires_backends(self , ["""sentencepiece"""] ) class _lowerCAmelCase ( metaclass=__snake_case ): """simple docstring""" lowerCamelCase = ["sentencepiece"] def __init__( self , *_lowerCamelCase , **_lowerCamelCase ) -> Tuple: requires_backends(self , ["""sentencepiece"""] ) class _lowerCAmelCase ( metaclass=__snake_case ): """simple docstring""" lowerCamelCase = ["sentencepiece"] def __init__( self , *_lowerCamelCase , **_lowerCamelCase ) -> Dict: requires_backends(self , ["""sentencepiece"""] ) class _lowerCAmelCase ( metaclass=__snake_case ): """simple docstring""" lowerCamelCase = ["sentencepiece"] def __init__( self , *_lowerCamelCase , **_lowerCamelCase ) -> List[Any]: requires_backends(self , ["""sentencepiece"""] ) class _lowerCAmelCase ( metaclass=__snake_case ): """simple docstring""" lowerCamelCase = ["sentencepiece"] def __init__( self , *_lowerCamelCase , **_lowerCamelCase ) -> Dict: requires_backends(self , ["""sentencepiece"""] ) class _lowerCAmelCase ( metaclass=__snake_case ): """simple docstring""" lowerCamelCase = ["sentencepiece"] def __init__( self , *_lowerCamelCase , **_lowerCamelCase ) -> Optional[int]: requires_backends(self , ["""sentencepiece"""] ) class _lowerCAmelCase ( metaclass=__snake_case ): """simple docstring""" lowerCamelCase = ["sentencepiece"] def __init__( self , *_lowerCamelCase , **_lowerCamelCase ) -> List[str]: requires_backends(self , ["""sentencepiece"""] ) class _lowerCAmelCase ( metaclass=__snake_case ): """simple 
docstring""" lowerCamelCase = ["sentencepiece"] def __init__( self , *_lowerCamelCase , **_lowerCamelCase ) -> Optional[Any]: requires_backends(self , ["""sentencepiece"""] ) class _lowerCAmelCase ( metaclass=__snake_case ): """simple docstring""" lowerCamelCase = ["sentencepiece"] def __init__( self , *_lowerCamelCase , **_lowerCamelCase ) -> Optional[int]: requires_backends(self , ["""sentencepiece"""] ) class _lowerCAmelCase ( metaclass=__snake_case ): """simple docstring""" lowerCamelCase = ["sentencepiece"] def __init__( self , *_lowerCamelCase , **_lowerCamelCase ) -> Tuple: requires_backends(self , ["""sentencepiece"""] ) class _lowerCAmelCase ( metaclass=__snake_case ): """simple docstring""" lowerCamelCase = ["sentencepiece"] def __init__( self , *_lowerCamelCase , **_lowerCamelCase ) -> Optional[int]: requires_backends(self , ["""sentencepiece"""] ) class _lowerCAmelCase ( metaclass=__snake_case ): """simple docstring""" lowerCamelCase = ["sentencepiece"] def __init__( self , *_lowerCamelCase , **_lowerCamelCase ) -> Tuple: requires_backends(self , ["""sentencepiece"""] ) class _lowerCAmelCase ( metaclass=__snake_case ): """simple docstring""" lowerCamelCase = ["sentencepiece"] def __init__( self , *_lowerCamelCase , **_lowerCamelCase ) -> Dict: requires_backends(self , ["""sentencepiece"""] ) class _lowerCAmelCase ( metaclass=__snake_case ): """simple docstring""" lowerCamelCase = ["sentencepiece"] def __init__( self , *_lowerCamelCase , **_lowerCamelCase ) -> str: requires_backends(self , ["""sentencepiece"""] ) class _lowerCAmelCase ( metaclass=__snake_case ): """simple docstring""" lowerCamelCase = ["sentencepiece"] def __init__( self , *_lowerCamelCase , **_lowerCamelCase ) -> Dict: requires_backends(self , ["""sentencepiece"""] ) class _lowerCAmelCase ( metaclass=__snake_case ): """simple docstring""" lowerCamelCase = ["sentencepiece"] def __init__( self , *_lowerCamelCase , **_lowerCamelCase ) -> Optional[Any]: requires_backends(self , ["""sentencepiece"""] ) class _lowerCAmelCase ( metaclass=__snake_case ): """simple docstring""" lowerCamelCase = ["sentencepiece"] def __init__( self , *_lowerCamelCase , **_lowerCamelCase ) -> str: requires_backends(self , ["""sentencepiece"""] ) class _lowerCAmelCase ( metaclass=__snake_case ): """simple docstring""" lowerCamelCase = ["sentencepiece"] def __init__( self , *_lowerCamelCase , **_lowerCamelCase ) -> int: requires_backends(self , ["""sentencepiece"""] ) class _lowerCAmelCase ( metaclass=__snake_case ): """simple docstring""" lowerCamelCase = ["sentencepiece"] def __init__( self , *_lowerCamelCase , **_lowerCamelCase ) -> Dict: requires_backends(self , ["""sentencepiece"""] ) class _lowerCAmelCase ( metaclass=__snake_case ): """simple docstring""" lowerCamelCase = ["sentencepiece"] def __init__( self , *_lowerCamelCase , **_lowerCamelCase ) -> Optional[int]: requires_backends(self , ["""sentencepiece"""] ) class _lowerCAmelCase ( metaclass=__snake_case ): """simple docstring""" lowerCamelCase = ["sentencepiece"] def __init__( self , *_lowerCamelCase , **_lowerCamelCase ) -> str: requires_backends(self , ["""sentencepiece"""] ) class _lowerCAmelCase ( metaclass=__snake_case ): """simple docstring""" lowerCamelCase = ["sentencepiece"] def __init__( self , *_lowerCamelCase , **_lowerCamelCase ) -> List[str]: requires_backends(self , ["""sentencepiece"""] ) class _lowerCAmelCase ( metaclass=__snake_case ): """simple docstring""" lowerCamelCase = ["sentencepiece"] def __init__( self , *_lowerCamelCase , **_lowerCamelCase ) -> str: 
requires_backends(self , ["""sentencepiece"""] )
344
import os
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path

from huggingface_hub import HfFolder, delete_repo
from huggingface_hub.file_download import http_get
from requests.exceptions import HTTPError

from transformers import (
    AlbertTokenizer,
    AutoTokenizer,
    BertTokenizer,
    BertTokenizerFast,
    GPTaTokenizerFast,
    is_tokenizers_available,
)
from transformers.testing_utils import TOKEN, USER, is_staging_test, require_tokenizers
from transformers.tokenization_utils import Trie


sys.path.append(str(Path(__file__).parent.parent / '''utils'''))

from test_module.custom_tokenization import CustomTokenizer  # noqa E402


if is_tokenizers_available():
    from test_module.custom_tokenization_fast import CustomTokenizerFast


class _snake_case(unittest.TestCase):
    '''simple docstring'''

    def A__(self: int) -> str:
        # A mock response for an HTTP head request to emulate server down
        UpperCAmelCase_: List[str] = mock.Mock()
        UpperCAmelCase_: List[Any] = 500
        UpperCAmelCase_: Union[str, Any] = {}
        UpperCAmelCase_: Union[str, Any] = HTTPError
        UpperCAmelCase_: Any = {}

        # Download this model to make sure it's in the cache.
        UpperCAmelCase_: Union[str, Any] = BertTokenizer.from_pretrained("""hf-internal-testing/tiny-random-bert""")

        # Under the mock environment we get a 500 error when trying to reach the tokenizer.
        with mock.patch("""requests.Session.request""", return_value=lowerCamelCase_) as mock_head:
            UpperCAmelCase_: Any = BertTokenizer.from_pretrained("""hf-internal-testing/tiny-random-bert""")
            # This check we did call the fake head request
            mock_head.assert_called()

    @require_tokenizers
    def A__(self: str) -> int:
        # A mock response for an HTTP head request to emulate server down
        UpperCAmelCase_: str = mock.Mock()
        UpperCAmelCase_: Optional[int] = 500
        UpperCAmelCase_: int = {}
        UpperCAmelCase_: Union[str, Any] = HTTPError
        UpperCAmelCase_: List[Any] = {}

        # Download this model to make sure it's in the cache.
        UpperCAmelCase_: Optional[int] = GPTaTokenizerFast.from_pretrained("""gpt2""")

        # Under the mock environment we get a 500 error when trying to reach the tokenizer.
        with mock.patch("""requests.Session.request""", return_value=lowerCamelCase_) as mock_head:
            UpperCAmelCase_: Any = GPTaTokenizerFast.from_pretrained("""gpt2""")
            # This check we did call the fake head request
            mock_head.assert_called()

    def A__(self: str) -> Dict:
        # This test is for deprecated behavior and can be removed in v5
        try:
            UpperCAmelCase_: Any = tempfile.mktemp()
            with open(lowerCamelCase_, """wb""") as f:
                http_get("""https://huggingface.co/albert-base-v1/resolve/main/spiece.model""", lowerCamelCase_)

            UpperCAmelCase_: Tuple = AlbertTokenizer.from_pretrained(lowerCamelCase_)
        finally:
            os.remove(lowerCamelCase_)

        # Supporting this legacy load introduced a weird bug where the tokenizer would load local files if they are in
        # the current folder and have the right name.
        if os.path.isfile("""tokenizer.json"""):
            # We skip the test if the user has a `tokenizer.json` in this folder to avoid deleting it.
            return
        try:
            with open("""tokenizer.json""", """wb""") as f:
                http_get("""https://huggingface.co/hf-internal-testing/tiny-random-bert/blob/main/tokenizer.json""", lowerCamelCase_)
            UpperCAmelCase_: str = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-gpt2""")
            # The tiny random BERT has a vocab size of 1024, tiny gpt2 as a vocab size of 1000
            self.assertEqual(tokenizer.vocab_size, 1000)
            # Tokenizer should depend on the remote checkpoint, not the local tokenizer.json file.
        finally:
            os.remove("""tokenizer.json""")

    def A__(self: List[str]) -> Tuple:
        # This test is for deprecated behavior and can be removed in v5
        UpperCAmelCase_: str = AlbertTokenizer.from_pretrained("""https://huggingface.co/albert-base-v1/resolve/main/spiece.model""")


@is_staging_test
class _snake_case(unittest.TestCase):
    '''simple docstring'''

    A__: str = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "bla", "blou"]

    @classmethod
    def A__(cls: Dict) -> Optional[int]:
        UpperCAmelCase_: List[str] = TOKEN
        HfFolder.save_token(lowerCamelCase_)

    @classmethod
    def A__(cls: Optional[Any]) -> List[str]:
        try:
            delete_repo(token=cls._token, repo_id="""test-tokenizer""")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="""valid_org/test-tokenizer-org""")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="""test-dynamic-tokenizer""")
        except HTTPError:
            pass

    def A__(self: Any) -> Optional[int]:
        with tempfile.TemporaryDirectory() as tmp_dir:
            UpperCAmelCase_: Tuple = os.path.join(lowerCamelCase_, """vocab.txt""")
            with open(lowerCamelCase_, """w""", encoding="""utf-8""") as vocab_writer:
                vocab_writer.write("""""".join([x + """\n""" for x in self.vocab_tokens]))
            UpperCAmelCase_: List[Any] = BertTokenizer(lowerCamelCase_)

        tokenizer.push_to_hub("""test-tokenizer""", use_auth_token=self._token)

        UpperCAmelCase_: List[Any] = BertTokenizer.from_pretrained(F'''{USER}/test-tokenizer''')
        self.assertDictEqual(new_tokenizer.vocab, tokenizer.vocab)

        # Reset repo
        delete_repo(token=self._token, repo_id="""test-tokenizer""")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            tokenizer.save_pretrained(lowerCamelCase_, repo_id="""test-tokenizer""", push_to_hub=lowerCamelCase_, use_auth_token=self._token)

        UpperCAmelCase_: List[Any] = BertTokenizer.from_pretrained(F'''{USER}/test-tokenizer''')
        self.assertDictEqual(new_tokenizer.vocab, tokenizer.vocab)

    def A__(self: Optional[int]) -> Any:
        with tempfile.TemporaryDirectory() as tmp_dir:
            UpperCAmelCase_: List[Any] = os.path.join(lowerCamelCase_, """vocab.txt""")
            with open(lowerCamelCase_, """w""", encoding="""utf-8""") as vocab_writer:
                vocab_writer.write("""""".join([x + """\n""" for x in self.vocab_tokens]))
            UpperCAmelCase_: Dict = BertTokenizer(lowerCamelCase_)

        tokenizer.push_to_hub("""valid_org/test-tokenizer-org""", use_auth_token=self._token)

        UpperCAmelCase_: Dict = BertTokenizer.from_pretrained("""valid_org/test-tokenizer-org""")
        self.assertDictEqual(new_tokenizer.vocab, tokenizer.vocab)

        # Reset repo
        delete_repo(token=self._token, repo_id="""valid_org/test-tokenizer-org""")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            tokenizer.save_pretrained(
                lowerCamelCase_, repo_id="""valid_org/test-tokenizer-org""", push_to_hub=lowerCamelCase_, use_auth_token=self._token
            )

        UpperCAmelCase_: List[Any] = BertTokenizer.from_pretrained("""valid_org/test-tokenizer-org""")
        self.assertDictEqual(new_tokenizer.vocab, tokenizer.vocab)

    @require_tokenizers
    def A__(self: Optional[int]) -> Optional[Any]:
        CustomTokenizer.register_for_auto_class()

        with tempfile.TemporaryDirectory() as tmp_dir:
            UpperCAmelCase_: Any = os.path.join(lowerCamelCase_, """vocab.txt""")
            with open(lowerCamelCase_, """w""", encoding="""utf-8""") as vocab_writer:
                vocab_writer.write("""""".join([x + """\n""" for x in self.vocab_tokens]))
            UpperCAmelCase_: Optional[Any] = CustomTokenizer(lowerCamelCase_)

        # No fast custom tokenizer
        tokenizer.push_to_hub("""test-dynamic-tokenizer""", use_auth_token=self._token)

        UpperCAmelCase_: Optional[Any] = AutoTokenizer.from_pretrained(F'''{USER}/test-dynamic-tokenizer''', trust_remote_code=lowerCamelCase_)
        # Can't make an isinstance check because the new_model.config is from the CustomTokenizer class of a dynamic module
        self.assertEqual(tokenizer.__class__.__name__, """CustomTokenizer""")

        # Fast and slow custom tokenizer
        CustomTokenizerFast.register_for_auto_class()

        with tempfile.TemporaryDirectory() as tmp_dir:
            UpperCAmelCase_: List[str] = os.path.join(lowerCamelCase_, """vocab.txt""")
            with open(lowerCamelCase_, """w""", encoding="""utf-8""") as vocab_writer:
                vocab_writer.write("""""".join([x + """\n""" for x in self.vocab_tokens]))

            UpperCAmelCase_: str = BertTokenizerFast.from_pretrained(lowerCamelCase_)
            bert_tokenizer.save_pretrained(lowerCamelCase_)
            UpperCAmelCase_: List[str] = CustomTokenizerFast.from_pretrained(lowerCamelCase_)

        tokenizer.push_to_hub("""test-dynamic-tokenizer""", use_auth_token=self._token)

        UpperCAmelCase_: List[str] = AutoTokenizer.from_pretrained(F'''{USER}/test-dynamic-tokenizer''', trust_remote_code=lowerCamelCase_)
        # Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module
        self.assertEqual(tokenizer.__class__.__name__, """CustomTokenizerFast""")

        UpperCAmelCase_: List[str] = AutoTokenizer.from_pretrained(
            F'''{USER}/test-dynamic-tokenizer''', use_fast=lowerCamelCase_, trust_remote_code=lowerCamelCase_
        )
        # Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module
        self.assertEqual(tokenizer.__class__.__name__, """CustomTokenizer""")


class _snake_case(unittest.TestCase):
    '''simple docstring'''

    def A__(self: Optional[Any]) -> Any:
        UpperCAmelCase_: Any = Trie()
        trie.add("""Hello 友達""")
        self.assertEqual(trie.data, {"""H""": {"""e""": {"""l""": {"""l""": {"""o""": {""" """: {"""友""": {"""達""": {"""""": 1}}}}}}}}})
        trie.add("""Hello""")
        trie.data
        self.assertEqual(trie.data, {"""H""": {"""e""": {"""l""": {"""l""": {"""o""": {"""""": 1, """ """: {"""友""": {"""達""": {"""""": 1}}}}}}}}})

    def A__(self: Tuple) -> Optional[int]:
        UpperCAmelCase_: str = Trie()
        self.assertEqual(trie.split("""[CLS] This is a extra_id_100"""), ["""[CLS] This is a extra_id_100"""])
        trie.add("""[CLS]""")
        trie.add("""extra_id_1""")
        trie.add("""extra_id_100""")
        self.assertEqual(trie.split("""[CLS] This is a extra_id_100"""), ["""[CLS]""", """ This is a """, """extra_id_100"""])

    def A__(self: Optional[Any]) -> Optional[int]:
        UpperCAmelCase_: Dict = Trie()
        trie.add("""A""")
        self.assertEqual(trie.split("""ABC"""), ["""A""", """BC"""])
        self.assertEqual(trie.split("""BCA"""), ["""BC""", """A"""])

    def A__(self: Union[str, Any]) -> int:
        UpperCAmelCase_: List[str] = Trie()
        trie.add("""TOKEN]""")
        trie.add("""[SPECIAL_TOKEN]""")
        self.assertEqual(trie.split("""This is something [SPECIAL_TOKEN]"""), ["""This is something """, """[SPECIAL_TOKEN]"""])

    def A__(self: int) -> Union[str, Any]:
        UpperCAmelCase_: List[str] = Trie()
        trie.add("""A""")
        trie.add("""P""")
        trie.add("""[SPECIAL_TOKEN]""")
        self.assertEqual(trie.split("""This is something [SPECIAL_TOKEN]"""), ["""This is something """, """[SPECIAL_TOKEN]"""])

    def A__(self: int) -> List[str]:
        UpperCAmelCase_: int = Trie()
        trie.add("""AB""")
        trie.add("""B""")
        trie.add("""C""")
        self.assertEqual(trie.split("""ABC"""), ["""AB""", """C"""])

    def A__(self: str) -> Optional[int]:
        UpperCAmelCase_: Optional[Any] = Trie()
        trie.add("""ABC""")
        trie.add("""B""")
        trie.add("""CD""")
        self.assertEqual(trie.split("""ABCD"""), ["""ABC""", """D"""])

    def A__(self: List[Any]) -> Any:
        # Even if the offsets are wrong, we necessarily output correct string
        # parts.
        UpperCAmelCase_: Tuple = Trie()
        UpperCAmelCase_: Optional[Any] = trie.cut_text("""ABC""", [0, 0, 2, 1, 2, 3])
        self.assertEqual(lowerCamelCase_, ["""AB""", """C"""])
345
0
import unittest

from transformers import RoFormerTokenizer, RoFormerTokenizerFast
from transformers.testing_utils import require_rjieba, require_tokenizers

from ...test_tokenization_common import TokenizerTesterMixin


@require_rjieba
@require_tokenizers
class A(_UpperCAmelCase, unittest.TestCase):
    """simple docstring"""

    lowerCamelCase = RoFormerTokenizer
    lowerCamelCase = RoFormerTokenizerFast
    lowerCamelCase = True
    lowerCamelCase = True

    def snake_case__(self: Any) -> Tuple:
        '''simple docstring'''
        super().setUp()

    def snake_case__(self: Optional[Any], **lowercase_: Union[str, Any]) -> int:
        '''simple docstring'''
        return self.tokenizer_class.from_pretrained('junnyu/roformer_chinese_base', **lowercase_)

    def snake_case__(self: Any, **lowercase_: int) -> Union[str, Any]:
        '''simple docstring'''
        return self.rust_tokenizer_class.from_pretrained('junnyu/roformer_chinese_base', **lowercase_)

    def snake_case__(self: List[str]) -> Optional[int]:
        '''simple docstring'''
        A__ = '永和服装饰品有限公司,今天天气非常好'
        A__ = '永和 服装 饰品 有限公司 , 今 天 天 气 非常 好'
        return input_text, output_text

    def snake_case__(self: List[str]) -> Optional[Any]:
        '''simple docstring'''
        A__ = self.get_tokenizer()
        A__, A__ = self.get_chinese_input_output_texts()
        A__ = tokenizer.tokenize(lowercase_)
        self.assertListEqual(lowercase_, output_text.split())

        A__ = tokens + [tokenizer.unk_token]
        A__ = [22943, 21332, 34431, 45904, 117, 306, 1231, 1231, 2653, 33994, 1266, 100]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(lowercase_), lowercase_)

    def snake_case__(self: Dict) -> Optional[Any]:
        '''simple docstring'''
        A__ = self.get_rust_tokenizer()
        A__, A__ = self.get_chinese_input_output_texts()
        A__ = tokenizer.tokenize(lowercase_)
        self.assertListEqual(lowercase_, output_text.split())

        A__ = tokens + [tokenizer.unk_token]
        A__ = [22943, 21332, 34431, 45904, 117, 306, 1231, 1231, 2653, 33994, 1266, 100]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(lowercase_), lowercase_)

    def snake_case__(self: int) -> Optional[Any]:
        '''simple docstring'''
        pass

    def snake_case__(self: List[Any]) -> Union[str, Any]:
        '''simple docstring'''
        pass

    def snake_case__(self: int) -> Dict:
        '''simple docstring'''
        pass
282
import argparse
import datetime
import json
import time
import warnings
from logging import getLogger
from pathlib import Path
from typing import Dict, List

import torch
from tqdm import tqdm

from transformers import AutoModelForSeqaSeqLM, AutoTokenizer
from utils import calculate_bleu, calculate_rouge, chunks, parse_numeric_n_bool_cl_kwargs, use_task_specific_params


lowercase_ = getLogger(__name__)
lowercase_ = "cuda" if torch.cuda.is_available() else "cpu"


def _snake_case(
    SCREAMING_SNAKE_CASE__: List[str],
    SCREAMING_SNAKE_CASE__: str,
    SCREAMING_SNAKE_CASE__: str,
    SCREAMING_SNAKE_CASE__: int = 8,
    SCREAMING_SNAKE_CASE__: str = DEFAULT_DEVICE,
    SCREAMING_SNAKE_CASE__: Optional[Any] = False,
    SCREAMING_SNAKE_CASE__: Dict = "summarization",
    SCREAMING_SNAKE_CASE__: Any = None,
    **SCREAMING_SNAKE_CASE__: str,
) -> Dict:
    '''simple docstring'''
    A__ = Path(SCREAMING_SNAKE_CASE__).open('w', encoding='utf-8')
    A__ = str(SCREAMING_SNAKE_CASE__)
    A__ = AutoModelForSeqaSeqLM.from_pretrained(SCREAMING_SNAKE_CASE__).to(SCREAMING_SNAKE_CASE__)
    if fpaa:
        A__ = model.half()

    A__ = AutoTokenizer.from_pretrained(SCREAMING_SNAKE_CASE__)
    logger.info(f'Inferred tokenizer type: {tokenizer.__class__}')  # if this is wrong, check config.model_type.

    A__ = time.time()
    # update config with task specific params
    use_task_specific_params(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__)
    if prefix is None:
        A__ = prefix or getattr(model.config, 'prefix', '') or ''
    for examples_chunk in tqdm(list(chunks(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__))):
        A__ = [prefix + text for text in examples_chunk]
        A__ = tokenizer(SCREAMING_SNAKE_CASE__, return_tensors='pt', truncation=SCREAMING_SNAKE_CASE__, padding='longest').to(SCREAMING_SNAKE_CASE__)
        A__ = model.generate(
            input_ids=batch.input_ids,
            attention_mask=batch.attention_mask,
            **SCREAMING_SNAKE_CASE__,
        )
        A__ = tokenizer.batch_decode(SCREAMING_SNAKE_CASE__, skip_special_tokens=SCREAMING_SNAKE_CASE__, clean_up_tokenization_spaces=SCREAMING_SNAKE_CASE__)
        for hypothesis in dec:
            fout.write(hypothesis + '\n')
            fout.flush()
    fout.close()
    A__ = int(time.time() - start_time)  # seconds
    A__ = len(SCREAMING_SNAKE_CASE__)
    return {"n_obs": n_obs, "runtime": runtime, "seconds_per_sample": round(runtime / n_obs, 4)}


def _snake_case() -> Tuple:
    '''simple docstring'''
    return datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')


def _snake_case(SCREAMING_SNAKE_CASE__: Optional[int] = True) -> Dict:
    '''simple docstring'''
    A__ = argparse.ArgumentParser()
    parser.add_argument('model_name', type=SCREAMING_SNAKE_CASE__, help='like facebook/bart-large-cnn,t5-base, etc.')
    parser.add_argument('input_path', type=SCREAMING_SNAKE_CASE__, help='like cnn_dm/test.source')
    parser.add_argument('save_path', type=SCREAMING_SNAKE_CASE__, help='where to save summaries')
    parser.add_argument('--reference_path', type=SCREAMING_SNAKE_CASE__, required=SCREAMING_SNAKE_CASE__, help='like cnn_dm/test.target')
    parser.add_argument('--score_path', type=SCREAMING_SNAKE_CASE__, required=SCREAMING_SNAKE_CASE__, default='metrics.json', help='where to save metrics')
    parser.add_argument('--device', type=SCREAMING_SNAKE_CASE__, required=SCREAMING_SNAKE_CASE__, default=SCREAMING_SNAKE_CASE__, help='cuda, cuda:1, cpu etc.')
    parser.add_argument(
        '--prefix', type=SCREAMING_SNAKE_CASE__, required=SCREAMING_SNAKE_CASE__, default=SCREAMING_SNAKE_CASE__, help='will be added to the beginning of src examples'
    )
    parser.add_argument('--task', type=SCREAMING_SNAKE_CASE__, default='summarization', help='used for task_specific_params + metrics')
    parser.add_argument('--bs', type=SCREAMING_SNAKE_CASE__, default=8, required=SCREAMING_SNAKE_CASE__, help='batch size')
    parser.add_argument(
        '--n_obs', type=SCREAMING_SNAKE_CASE__, default=-1, required=SCREAMING_SNAKE_CASE__, help='How many observations. Defaults to all.'
    )
    parser.add_argument('--fp16', action='store_true')
    parser.add_argument('--dump-args', action='store_true', help='print the custom hparams with the results')
    parser.add_argument(
        '--info',
        nargs='?',
        type=SCREAMING_SNAKE_CASE__,
        const=datetime_now(),
        help=(
            'use in conjunction w/ --dump-args to print with the results whatever other info you\'d like, e.g.'
            ' lang=en-ru. If no value is passed, the current datetime string will be used.'
        ),
    )
    # Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate
    A__, A__ = parser.parse_known_args()
    A__ = parse_numeric_n_bool_cl_kwargs(SCREAMING_SNAKE_CASE__)
    if parsed_args and verbose:
        print(f'parsed the following generate kwargs: {parsed_args}')
    A__ = [' ' + x.rstrip() if 't5' in args.model_name else x.rstrip() for x in open(args.input_path).readlines()]
    if args.n_obs > 0:
        A__ = examples[: args.n_obs]
    Path(args.save_path).parent.mkdir(exist_ok=SCREAMING_SNAKE_CASE__)
    if args.reference_path is None and Path(args.score_path).exists():
        warnings.warn(f'score_path {args.score_path} will be overwritten unless you type ctrl-c.')
    if args.device == "cpu" and args.fpaa:
        # this mix leads to RuntimeError: "threshold_cpu" not implemented for 'Half'
        raise ValueError('Can\'t mix --fp16 and --device cpu')
    A__ = generate_summaries_or_translations(
        SCREAMING_SNAKE_CASE__,
        args.save_path,
        args.model_name,
        batch_size=args.bs,
        device=args.device,
        fpaa=args.fpaa,
        task=args.task,
        prefix=args.prefix,
        **SCREAMING_SNAKE_CASE__,
    )

    if args.reference_path is None:
        return {}

    # Compute scores
    A__ = calculate_bleu if 'translation' in args.task else calculate_rouge
    A__ = [x.rstrip() for x in open(args.save_path).readlines()]
    A__ = [x.rstrip() for x in open(args.reference_path).readlines()][: len(SCREAMING_SNAKE_CASE__)]
    A__ = score_fn(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__)
    scores.update(SCREAMING_SNAKE_CASE__)
    if args.dump_args:
        scores.update(SCREAMING_SNAKE_CASE__)
    if args.info:
        A__ = args.info
    if verbose:
        print(SCREAMING_SNAKE_CASE__)
    if args.score_path is not None:
        json.dump(SCREAMING_SNAKE_CASE__, open(args.score_path, 'w'))
    return scores


if __name__ == "__main__":
    # Usage for MT:
    # python run_eval.py MODEL_NAME $DATA_DIR/test.source $save_dir/test_translations.txt --reference_path $DATA_DIR/test.target --score_path $save_dir/test_bleu.json --task translation $@
    run_generate(verbose=True)
282
1
import argparse
import json
import os

import fairseq
import torch
from torch import nn

from transformers import (
    SpeechaTextaConfig,
    SpeechaTextaForCausalLM,
    SpeechaTextaTokenizer,
    SpeechEncoderDecoderConfig,
    SpeechEncoderDecoderModel,
    WavaVecaConfig,
    WavaVecaFeatureExtractor,
    WavaVecaModel,
    logging,
)


logging.set_verbosity_info()
__UpperCAmelCase = logging.get_logger(__name__)

__UpperCAmelCase = {
    "post_extract_proj": "feature_projection.projection",
    "encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
    "self_attn.k_proj": "encoder.layers.*.attention.k_proj",
    "self_attn.v_proj": "encoder.layers.*.attention.v_proj",
    "self_attn.q_proj": "encoder.layers.*.attention.q_proj",
    "self_attn.out_proj": "encoder.layers.*.attention.out_proj",
    "self_attn_layer_norm": "encoder.layers.*.layer_norm",
    "fc1": "encoder.layers.*.feed_forward.intermediate_dense",
    "fc2": "encoder.layers.*.feed_forward.output_dense",
    "final_layer_norm": "encoder.layers.*.final_layer_norm",
    "encoder.layer_norm": "encoder.layer_norm",
    "w2v_model.layer_norm": "feature_projection.layer_norm",
    "quantizer.weight_proj": "quantizer.weight_proj",
    "quantizer.vars": "quantizer.codevectors",
    "project_q": "project_q",
    "final_proj": "project_hid",
    "w2v_encoder.proj": "lm_head",
    "mask_emb": "masked_spec_embed",
}
__UpperCAmelCase = [
    "lm_head",
    "quantizer.weight_proj",
    "quantizer.codevectors",
    "project_q",
    "project_hid",
]


def A__(__lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase):
    for attribute in key.split('''.'''):
        SCREAMING_SNAKE_CASE_ = getattr(__lowerCamelCase, __lowerCamelCase)

    if weight_type is not None:
        SCREAMING_SNAKE_CASE_ = getattr(__lowerCamelCase, __lowerCamelCase).shape
    else:
        SCREAMING_SNAKE_CASE_ = hf_pointer.shape

    assert hf_shape == value.shape, (
        F'''Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'''
        F''' {value.shape} for {full_name}'''
    )

    if weight_type == "weight":
        SCREAMING_SNAKE_CASE_ = value
    elif weight_type == "weight_g":
        SCREAMING_SNAKE_CASE_ = value
    elif weight_type == "weight_v":
        SCREAMING_SNAKE_CASE_ = value
    elif weight_type == "bias":
        SCREAMING_SNAKE_CASE_ = value
    else:
        SCREAMING_SNAKE_CASE_ = value

    logger.info(F'''{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.''')


def A__(__lowerCamelCase, __lowerCamelCase):
    SCREAMING_SNAKE_CASE_ = []
    SCREAMING_SNAKE_CASE_ = fairseq_model.state_dict()

    SCREAMING_SNAKE_CASE_ = hf_model.feature_extractor

    # if encoder has different dim to decoder -> use proj_weight
    SCREAMING_SNAKE_CASE_ = None

    for name, value in fairseq_dict.items():
        SCREAMING_SNAKE_CASE_ = False
        if "conv_layers" in name:
            load_conv_layer(
                __lowerCamelCase,
                __lowerCamelCase,
                __lowerCamelCase,
                __lowerCamelCase,
                hf_model.config.feat_extract_norm == '''group''',
            )
            SCREAMING_SNAKE_CASE_ = True
        elif name.split('''.''')[0] == "proj":
            SCREAMING_SNAKE_CASE_ = fairseq_model.proj
            SCREAMING_SNAKE_CASE_ = True
        else:
            for key, mapped_key in MAPPING.items():
                if key in name or key.split('''w2v_model.''')[-1] == name.split('''.''')[0]:
                    SCREAMING_SNAKE_CASE_ = True
                    if "*" in mapped_key:
                        SCREAMING_SNAKE_CASE_ = name.split(__lowerCamelCase)[0].split('''.''')[-2]
                        SCREAMING_SNAKE_CASE_ = mapped_key.replace('''*''', __lowerCamelCase)
                    if "weight_g" in name:
                        SCREAMING_SNAKE_CASE_ = '''weight_g'''
                    elif "weight_v" in name:
                        SCREAMING_SNAKE_CASE_ = '''weight_v'''
                    elif "bias" in name:
                        SCREAMING_SNAKE_CASE_ = '''bias'''
                    elif "weight" in name:
                        SCREAMING_SNAKE_CASE_ = '''weight'''
                    else:
                        SCREAMING_SNAKE_CASE_ = None
                    set_recursively(__lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase)
                continue
        if not is_used:
            unused_weights.append(__lowerCamelCase)

    logger.warning(F'''Unused weights: {unused_weights}''')

    return proj_weight


def A__(__lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase):
    SCREAMING_SNAKE_CASE_ = full_name.split('''conv_layers.''')[-1]
    SCREAMING_SNAKE_CASE_ = name.split('''.''')
    SCREAMING_SNAKE_CASE_ = int(items[0])
    SCREAMING_SNAKE_CASE_ = int(items[1])

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                F'''{full_name} has size {value.shape}, but'''
                F''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'''
            )
            SCREAMING_SNAKE_CASE_ = value
            logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''')
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                F'''{full_name} has size {value.shape}, but'''
                F''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'''
            )
            SCREAMING_SNAKE_CASE_ = value
            logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''')
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                F'''{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was'''
                " found."
            )
            SCREAMING_SNAKE_CASE_ = value
            logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''')
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                F'''{full_name} has size {value.shape}, but'''
                F''' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.'''
            )
            SCREAMING_SNAKE_CASE_ = value
            logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''')
    else:
        unused_weights.append(__lowerCamelCase)


def A__(__lowerCamelCase):
    SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ = emb.weight.shape
    SCREAMING_SNAKE_CASE_ = nn.Linear(__lowerCamelCase, __lowerCamelCase, bias=__lowerCamelCase)
    SCREAMING_SNAKE_CASE_ = emb.weight.data
    return lin_layer


def A__(__lowerCamelCase):
    with open(__lowerCamelCase, '''r''', encoding='''utf-8''') as f:
        SCREAMING_SNAKE_CASE_ = f.readlines()
        SCREAMING_SNAKE_CASE_ = [line.split(''' ''')[0] for line in lines]

    SCREAMING_SNAKE_CASE_ = len(__lowerCamelCase)

    SCREAMING_SNAKE_CASE_ = {
        '''<s>''': 0,
        '''<pad>''': 1,
        '''</s>''': 2,
        '''<unk>''': 3,
    }

    vocab_dict.update(dict(zip(__lowerCamelCase, range(4, num_words + 4))))
    return vocab_dict


@torch.no_grad()
def A__(
    __lowerCamelCase,
    __lowerCamelCase,
    __lowerCamelCase,
    __lowerCamelCase,
    __lowerCamelCase,
    __lowerCamelCase,
    __lowerCamelCase,
):
    SCREAMING_SNAKE_CASE_ = WavaVecaConfig.from_pretrained(__lowerCamelCase)
    SCREAMING_SNAKE_CASE_ = SpeechaTextaConfig.from_pretrained(
        __lowerCamelCase, vocab_size=__lowerCamelCase, decoder_layers=__lowerCamelCase, do_stable_layer_norm=__lowerCamelCase
    )

    SCREAMING_SNAKE_CASE_ = WavaVecaFeatureExtractor(
        feature_size=1,
        sampling_rate=16000,
        padding_value=0,
        do_normalize=__lowerCamelCase,
        return_attention_mask=__lowerCamelCase,
    )

    SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
        [checkpoint_path], arg_overrides={'''data''': '''/'''.join(dict_path.split('''/''')[:-1])}
    )
    SCREAMING_SNAKE_CASE_ = model[0].eval()

    # set weights for wav2vec2 encoder
    SCREAMING_SNAKE_CASE_ = WavaVecaModel(__lowerCamelCase)
    SCREAMING_SNAKE_CASE_ = recursively_load_weights_wavaveca(model.encoder, __lowerCamelCase)

    SCREAMING_SNAKE_CASE_ = SpeechaTextaForCausalLM(__lowerCamelCase)
    SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict(), strict=__lowerCamelCase)

    # set output linear layer
    unexpected_keys.remove('''embed_out''')
    SCREAMING_SNAKE_CASE_ = nn.Parameter(model.decoder.embed_out.detach())

    # layer norm is init to identity matrix so leaving it is fine
    logger.warning(F'''The following keys are missing when loading the decoder weights: {missing_keys}''')
    logger.warning(F'''The following keys are unexpected when loading the decoder weights: {unexpected_keys}''')

    SCREAMING_SNAKE_CASE_ = SpeechEncoderDecoderModel(encoder=__lowerCamelCase, decoder=__lowerCamelCase)
    SCREAMING_SNAKE_CASE_ = False

    # add projection layer
    SCREAMING_SNAKE_CASE_ = nn.Parameter(projection_layer.weight)
    SCREAMING_SNAKE_CASE_ = nn.Parameter(projection_layer.bias)

    SCREAMING_SNAKE_CASE_ = create_vocab_dict(__lowerCamelCase)

    with open(os.path.join(__lowerCamelCase, '''vocab.json'''), '''w''') as fp:
        json.dump(__lowerCamelCase, __lowerCamelCase)

    SCREAMING_SNAKE_CASE_ = SpeechaTextaTokenizer(os.path.join(__lowerCamelCase, '''vocab.json'''))
    tokenizer.save_pretrained(__lowerCamelCase)

    SCREAMING_SNAKE_CASE_ = hf_wavavec.config.to_dict()
    SCREAMING_SNAKE_CASE_ = tokenizer.pad_token_id
    SCREAMING_SNAKE_CASE_ = tokenizer.bos_token_id
    SCREAMING_SNAKE_CASE_ = tokenizer.eos_token_id
    SCREAMING_SNAKE_CASE_ = '''speech_to_text_2'''
    SCREAMING_SNAKE_CASE_ = '''wav2vec2'''

    SCREAMING_SNAKE_CASE_ = SpeechEncoderDecoderConfig.from_dict(__lowerCamelCase)

    hf_wavavec.save_pretrained(__lowerCamelCase)
    feature_extractor.save_pretrained(__lowerCamelCase)


if __name__ == "__main__":
    __UpperCAmelCase = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
    parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
    parser.add_argument(
        "--encoder_config_path",
        default="facebook/wav2vec2-large-lv60",
        type=str,
        help="Path to hf encoder wav2vec2 checkpoint config",
    )
    parser.add_argument(
        "--decoder_config_path",
        default="facebook/s2t-small-mustc-en-fr-st",
        type=str,
        help="Path to hf decoder s2t checkpoint config",
    )
    parser.add_argument("--vocab_size", default=10224, type=int, help="Vocab size of decoder")
    parser.add_argument("--num_decoder_layers", default=7, type=int, help="Number of decoder layers")
    __UpperCAmelCase = parser.parse_args()
    convert_wavaveca_checkpoint(
        args.checkpoint_path,
        args.pytorch_dump_folder_path,
        args.dict_path,
        encoder_config_path=args.encoder_config_path,
        decoder_config_path=args.decoder_config_path,
        vocab_size=args.vocab_size,
        num_decoder_layers=args.num_decoder_layers,
    )
299
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_tokenizers_available,
    is_torch_available,
    is_vision_available,
)


__UpperCAmelCase = {
    "configuration_layoutlmv2": ["LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP", "LayoutLMv2Config"],
    "processing_layoutlmv2": ["LayoutLMv2Processor"],
    "tokenization_layoutlmv2": ["LayoutLMv2Tokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    __UpperCAmelCase = ["LayoutLMv2TokenizerFast"]

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    __UpperCAmelCase = ["LayoutLMv2FeatureExtractor"]
    __UpperCAmelCase = ["LayoutLMv2ImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    __UpperCAmelCase = [
        "LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST",
        "LayoutLMv2ForQuestionAnswering",
        "LayoutLMv2ForSequenceClassification",
        "LayoutLMv2ForTokenClassification",
        "LayoutLMv2Layer",
        "LayoutLMv2Model",
        "LayoutLMv2PreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_layoutlmva import LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP, LayoutLMvaConfig
    from .processing_layoutlmva import LayoutLMvaProcessor
    from .tokenization_layoutlmva import LayoutLMvaTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_layoutlmva_fast import LayoutLMvaTokenizerFast

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_layoutlmva import LayoutLMvaFeatureExtractor, LayoutLMvaImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_layoutlmva import (
            LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST,
            LayoutLMvaForQuestionAnswering,
            LayoutLMvaForSequenceClassification,
            LayoutLMvaForTokenClassification,
            LayoutLMvaLayer,
            LayoutLMvaModel,
            LayoutLMvaPreTrainedModel,
        )

else:
    import sys

    __UpperCAmelCase = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
299
1
"""simple docstring"""

import warnings
from typing import List

import numpy as np

from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import is_flax_available, is_tf_available, is_torch_available


class lowerCamelCase(A_):
    UpperCAmelCase__: List[str] = ["image_processor", "tokenizer"]
    UpperCAmelCase__: Tuple = "OwlViTImageProcessor"
    UpperCAmelCase__: Any = ("CLIPTokenizer", "CLIPTokenizerFast")

    def __init__(self: Dict, _A: Optional[Any] = None, _A: Any = None, **_A: Dict) -> Union[str, Any]:
        snake_case = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                _A,
            )
            snake_case = kwargs.pop("feature_extractor")

        snake_case = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(_A, _A)

    def __call__(self: List[str], _A: Optional[Any] = None, _A: int = None, _A: Optional[int] = None, _A: Optional[int] = "max_length", _A: Optional[Any] = "np", **_A: int) -> str:
        if text is None and query_images is None and images is None:
            raise ValueError(
                "You have to specify at least one text or query image or image. All three cannot be none."
            )

        if text is not None:
            if isinstance(_A, _A) or (isinstance(_A, _A) and not isinstance(text[0], _A)):
                snake_case = [self.tokenizer(_A, padding=_A, return_tensors=_A, **_A)]

            elif isinstance(_A, _A) and isinstance(text[0], _A):
                snake_case = []

                # Maximum number of queries across batch
                snake_case = max([len(_A) for t in text])

                # Pad all batch samples to max number of text queries
                for t in text:
                    if len(_A) != max_num_queries:
                        snake_case = t + [" "] * (max_num_queries - len(_A))

                    snake_case = self.tokenizer(_A, padding=_A, return_tensors=_A, **_A)
                    encodings.append(_A)
            else:
                raise TypeError("Input text should be a string, a list of strings or a nested list of strings")

            if return_tensors == "np":
                snake_case = np.concatenate([encoding["input_ids"] for encoding in encodings], axis=0)
                snake_case = np.concatenate([encoding["attention_mask"] for encoding in encodings], axis=0)

            elif return_tensors == "jax" and is_flax_available():
                import jax.numpy as jnp

                snake_case = jnp.concatenate([encoding["input_ids"] for encoding in encodings], axis=0)
                snake_case = jnp.concatenate([encoding["attention_mask"] for encoding in encodings], axis=0)

            elif return_tensors == "pt" and is_torch_available():
                import torch

                snake_case = torch.cat([encoding["input_ids"] for encoding in encodings], dim=0)
                snake_case = torch.cat([encoding["attention_mask"] for encoding in encodings], dim=0)

            elif return_tensors == "tf" and is_tf_available():
                import tensorflow as tf

                snake_case = tf.stack([encoding["input_ids"] for encoding in encodings], axis=0)
                snake_case = tf.stack([encoding["attention_mask"] for encoding in encodings], axis=0)

            else:
                raise ValueError("Target return tensor type could not be returned")

            snake_case = BatchEncoding()
            snake_case = input_ids
            snake_case = attention_mask

        if query_images is not None:
            snake_case = BatchEncoding()
            snake_case = self.image_processor(
                _A, return_tensors=_A, **_A
            ).pixel_values
            snake_case = query_pixel_values

        if images is not None:
            snake_case = self.image_processor(_A, return_tensors=_A, **_A)

        if text is not None and images is not None:
            snake_case = image_features.pixel_values
            return encoding
        elif query_images is not None and images is not None:
            snake_case = image_features.pixel_values
            return encoding
        elif text is not None or query_images is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**_A), tensor_type=_A)

    def UpperCAmelCase(self: List[Any], *_A: Optional[int], **_A: List[Any]) -> Any:
        return self.image_processor.post_process(*_A, **_A)

    def UpperCAmelCase(self: List[Any], *_A: Tuple, **_A: Any) -> Tuple:
        return self.image_processor.post_process_object_detection(*_A, **_A)

    def UpperCAmelCase(self: Dict, *_A: str, **_A: Optional[int]) -> int:
        return self.image_processor.post_process_image_guided_detection(*_A, **_A)

    def UpperCAmelCase(self: Any, *_A: Any, **_A: List[str]) -> int:
        return self.tokenizer.batch_decode(*_A, **_A)

    def UpperCAmelCase(self: int, *_A: Tuple, **_A: List[Any]) -> str:
        return self.tokenizer.decode(*_A, **_A)

    @property
    def UpperCAmelCase(self: Tuple) -> List[Any]:
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            _A,
        )
        return self.image_processor_class

    @property
    def UpperCAmelCase(self: int) -> Optional[int]:
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            _A,
        )
        return self.image_processor
359
from ....utils import logging


_A = logging.get_logger(__name__)


class lowerCamelCase(A_):
    def __init__(self: Tuple, config: Optional[int], num_labels: Tuple = None, modal_hidden_size: Union[str, Any] = 2048) -> List[Any]:
        # Mirror the wrapped config's attributes, then optionally override the label count.
        self.__dict__ = config.__dict__
        self.modal_hidden_size = modal_hidden_size
        if num_labels:
            self.num_labels = num_labels
137
0
"""simple docstring"""

import unittest

from transformers import DonutProcessor


_lowercase = '''naver-clova-ix/donut-base'''


class lowerCAmelCase_(unittest.TestCase):
    '''simple docstring'''

    def _SCREAMING_SNAKE_CASE(self: Any) -> Any:
        A = DonutProcessor.from_pretrained(A_)

    def _SCREAMING_SNAKE_CASE(self: List[Any]) -> Optional[int]:
        A = {
            'name': 'John Doe',
            'age': '99',
            'city': 'Atlanta',
            'state': 'GA',
            'zip': '30301',
            'phone': '123-4567',
            'nicknames': [{'nickname': 'Johnny'}, {'nickname': 'JD'}],
        }
        A = (
            '<s_name>John Doe</s_name><s_age>99</s_age><s_city>Atlanta</s_city>'
            '<s_state>GA</s_state><s_zip>30301</s_zip><s_phone>123-4567</s_phone>'
            '<s_nicknames><s_nickname>Johnny</s_nickname>'
            '<sep/><s_nickname>JD</s_nickname></s_nicknames>'
        )
        A = self.processor.tokenajson(A_)
        self.assertDictEqual(A_, A_)
74
from dataclasses import dataclass
from typing import Optional, Tuple, Union

import numpy as np
import torch

from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin


@dataclass
class UpperCAmelCase(__SCREAMING_SNAKE_CASE):
    '''simple docstring'''

    snake_case_ = 42
    snake_case_ = 42
    snake_case_ = None


class UpperCAmelCase(__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE):
    '''simple docstring'''

    snake_case_ = 2

    @register_to_config
    def __init__(
        self: str,
        A: float = 0.02,
        A: float = 100,
        A: float = 1.007,
        A: float = 80,
        A: float = 0.05,
        A: float = 50,
    ):
        # standard deviation of the initial noise distribution
        __A = sigma_max

        # setable values
        __A = None
        __A = None
        __A = None  # sigma(t_i)

    def UpperCamelCase_(self: str, A: torch.FloatTensor, A: Optional[int] = None):
        return sample

    def UpperCamelCase_(self: Dict, A: int, A: Union[str, torch.device] = None):
        __A = num_inference_steps
        __A = np.arange(0, self.num_inference_steps)[::-1].copy()
        __A = torch.from_numpy(A).to(A)
        __A = [
            (
                self.config.sigma_max**2 * (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1))
            )
            for i in self.timesteps
        ]
        __A = torch.tensor(A, dtype=torch.floataa, device=A)

    def UpperCamelCase_(self: Union[str, Any], A: torch.FloatTensor, A: float, A: Optional[torch.Generator] = None):
        if self.config.s_min <= sigma <= self.config.s_max:
            __A = min(self.config.s_churn / self.num_inference_steps, 2**0.5 - 1)
        else:
            __A = 0

        # sample eps ~ N(0, S_noise^2 * I)
        __A = self.config.s_noise * randn_tensor(sample.shape, generator=A).to(sample.device)
        __A = sigma + gamma * sigma
        __A = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps)

        return sample_hat, sigma_hat

    def UpperCamelCase_(
        self: Dict,
        A: torch.FloatTensor,
        A: float,
        A: float,
        A: torch.FloatTensor,
        A: bool = True,
    ):
        __A = sample_hat + sigma_hat * model_output
        __A = (sample_hat - pred_original_sample) / sigma_hat
        __A = sample_hat + (sigma_prev - sigma_hat) * derivative

        if not return_dict:
            return (sample_prev, derivative)

        return KarrasVeOutput(
            prev_sample=A, derivative=A, pred_original_sample=A
        )

    def UpperCamelCase_(
        self: Optional[int],
        A: torch.FloatTensor,
        A: float,
        A: float,
        A: torch.FloatTensor,
        A: torch.FloatTensor,
        A: torch.FloatTensor,
        A: bool = True,
    ):
        __A = sample_prev + sigma_prev * model_output
        __A = (sample_prev - pred_original_sample) / sigma_prev
        __A = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr)

        if not return_dict:
            return (sample_prev, derivative)

        return KarrasVeOutput(
            prev_sample=A, derivative=A, pred_original_sample=A
        )

    def UpperCamelCase_(self: List[Any], A: Dict, A: List[str], A: str):
        raise NotImplementedError()
15
0
'''simple docstring'''


def UpperCamelCase(a, a) -> list:
    '''simple docstring'''
    __magic_name__ = word.split()

    def justify(a, a, a) -> str:
        __magic_name__ = max_width - width
        __magic_name__ = len(UpperCAmelCase_)
        if len(UpperCAmelCase_) == 1:
            # if there is only word in line
            # just insert overall_spaces_count for the remainder of line
            return line[0] + " " * overall_spaces_count
        else:
            __magic_name__ = words_count - 1
            # num_spaces_between_words_list[i] : tells you to insert
            # num_spaces_between_words_list[i] spaces
            # after word on line[i]
            __magic_name__ = spaces_to_insert_between_words * [
                overall_spaces_count // spaces_to_insert_between_words
            ]
            __magic_name__ = (
                overall_spaces_count % spaces_to_insert_between_words
            )
            # distribute spaces via round robin to the left words
            for i in range(UpperCAmelCase_):
                num_spaces_between_words_list[i] += 1
            __magic_name__ = []
            for i in range(UpperCAmelCase_):
                # add the word
                aligned_words_list.append(line[i])
                # add the spaces to insert
                aligned_words_list.append(num_spaces_between_words_list[i] * ''' ''')
            # just add the last word to the sentence
            aligned_words_list.append(line[-1])
            # join the aligned words list to form a justified line
            return "".join(UpperCAmelCase_)

    __magic_name__ = []
    __magic_name__ = []
    __magic_name__ = 0
    for word in words:
        if width + len(UpperCAmelCase_) + len(UpperCAmelCase_) <= max_width:
            # keep adding words until we can fill out max_width
            # width = sum of length of all words (without overall_spaces_count)
            # len(word) = length of current word
            # len(line) = number of overall_spaces_count to insert between words
            line.append(UpperCAmelCase_)
            width += len(UpperCAmelCase_)
        else:
            # justify the line and add it to result
            answer.append(justify(UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_))
            # reset new line and new width
            __magic_name__ = [word], len(UpperCAmelCase_)
    __magic_name__ = max_width - width - len(UpperCAmelCase_)
    answer.append(''' '''.join(UpperCAmelCase_) + (remaining_spaces + 1) * ''' ''')
    return answer


if __name__ == "__main__":
    from doctest import testmod

    testmod()
357
'''simple docstring'''

import importlib
import inspect
import json
import os
import re
import shutil
import sys
from pathlib import Path
from typing import Dict, Optional, Union
from urllib import request

from huggingface_hub import HfFolder, cached_download, hf_hub_download, model_info
from packaging import version

from .. import __version__
from . import DIFFUSERS_DYNAMIC_MODULE_NAME, HF_MODULES_CACHE, logging


_lowerCAmelCase = (
    "https://raw.githubusercontent.com/huggingface/diffusers/{revision}/examples/community/{pipeline}.py"
)

_lowerCAmelCase = logging.get_logger(__name__)  # pylint: disable=invalid-name


def UpperCamelCase() -> Optional[Any]:
    '''simple docstring'''
    __magic_name__ = '''https://pypi.org/pypi/diffusers/json'''
    __magic_name__ = json.loads(request.urlopen(a).read())['''releases'''].keys()
    return sorted(a, key=lambda a: version.Version(a))


def UpperCamelCase() -> Any:
    '''simple docstring'''
    # This function has already been executed if HF_MODULES_CACHE already is in the Python path.
    if HF_MODULES_CACHE in sys.path:
        return

    sys.path.append(a)
    os.makedirs(a, exist_ok=a)
    __magic_name__ = Path(a) / '''__init__.py'''
    if not init_path.exists():
        init_path.touch()


def UpperCamelCase(a) -> Tuple:
    '''simple docstring'''
    init_hf_modules()
    __magic_name__ = Path(a) / name
    # If the parent module does not exist yet, recursively create it.
    if not dynamic_module_path.parent.exists():
        create_dynamic_module(dynamic_module_path.parent)
    os.makedirs(a, exist_ok=a)
    __magic_name__ = dynamic_module_path / '''__init__.py'''
    if not init_path.exists():
        init_path.touch()


def UpperCamelCase(a) -> List[str]:
    '''simple docstring'''
    with open(a, '''r''', encoding='''utf-8''') as f:
        __magic_name__ = f.read()

    # Imports of the form `import .xxx`
    __magic_name__ = re.findall('''^\s*import\s+\.(\S+)\s*$''', a, flags=re.MULTILINE)
    # Imports of the form `from .xxx import yyy`
    relative_imports += re.findall('''^\s*from\s+\.(\S+)\s+import''', a, flags=re.MULTILINE)
    # Unique-ify
    return list(set(a))


def UpperCamelCase(a) -> str:
    '''simple docstring'''
    __magic_name__ = False
    __magic_name__ = [module_file]
    __magic_name__ = []

    # Let's recurse through all relative imports
    while not no_change:
        __magic_name__ = []
        for f in files_to_check:
            new_imports.extend(get_relative_imports(a))

        __magic_name__ = Path(a).parent
        __magic_name__ = [str(module_path / m) for m in new_imports]
        __magic_name__ = [f for f in new_import_files if f not in all_relative_imports]
        __magic_name__ = [F'''{f}.py''' for f in new_import_files]

        __magic_name__ = len(a) == 0
        all_relative_imports.extend(a)

    return all_relative_imports


def UpperCamelCase(a) -> List[str]:
    '''simple docstring'''
    with open(a, '''r''', encoding='''utf-8''') as f:
        __magic_name__ = f.read()

    # Imports of the form `import xxx`
    __magic_name__ = re.findall('''^\s*import\s+(\S+)\s*$''', a, flags=re.MULTILINE)
    # Imports of the form `from xxx import yyy`
    imports += re.findall('''^\s*from\s+(\S+)\s+import''', a, flags=re.MULTILINE)
    # Only keep the top-level module
    __magic_name__ = [imp.split('''.''')[0] for imp in imports if not imp.startswith('''.''')]

    # Unique-ify and test we got them all
    __magic_name__ = list(set(a))
    __magic_name__ = []
    for imp in imports:
        try:
            importlib.import_module(a)
        except ImportError:
            missing_packages.append(a)

    if len(a) > 0:
        raise ImportError(
            '''This modeling file requires the following packages that were not found in your environment: '''
            F'''{", ".join(a)}. Run `pip install {" ".join(a)}`'''
        )

    return get_relative_imports(a)


def UpperCamelCase(a, a) -> Optional[int]:
    '''simple docstring'''
    __magic_name__ = module_path.replace(os.path.sep, '''.''')
    __magic_name__ = importlib.import_module(a)

    if class_name is None:
        return find_pipeline_class(a)
    return getattr(a, a)


def UpperCamelCase(a) -> Tuple:
    '''simple docstring'''
    from ..pipelines import DiffusionPipeline

    __magic_name__ = dict(inspect.getmembers(a, inspect.isclass))
    __magic_name__ = None
    for cls_name, cls in cls_members.items():
        if (
            cls_name != DiffusionPipeline.__name__
            and issubclass(cls, a)
            and cls.__module__.split('''.''')[0] != "diffusers"
        ):
            if pipeline_class is not None:
                raise ValueError(
                    F'''Multiple classes that inherit from {DiffusionPipeline.__name__} have been found:'''
                    F''' {pipeline_class.__name__}, and {cls_name}. Please make sure to define only one in'''
                    F''' {loaded_module}.'''
                )
            __magic_name__ = cls

    return pipeline_class


def UpperCamelCase(
    a,
    a,
    a = None,
    a = False,
    a = False,
    a = None,
    a = None,
    a = None,
    a = False,
) -> List[Any]:
    '''simple docstring'''
    __magic_name__ = str(a)
    __magic_name__ = os.path.join(a, a)
    if os.path.isfile(a):
        __magic_name__ = module_file_or_url
        __magic_name__ = '''local'''
    elif pretrained_model_name_or_path.count('''/''') == 0:
        __magic_name__ = get_diffusers_versions()
        # cut ".dev0"
        __magic_name__ = '''v''' + '''.'''.join(__version__.split('''.''')[:3])

        # retrieve github version that matches
        if revision is None:
            __magic_name__ = latest_version if latest_version[1:] in available_versions else '''main'''
            logger.info(F'''Defaulting to latest_version: {revision}.''')
        elif revision in available_versions:
            __magic_name__ = F'''v{revision}'''
        elif revision == "main":
            __magic_name__ = revision
        else:
            raise ValueError(
                F'''`custom_revision`: {revision} does not exist. Please make sure to choose one of'''
                F''' {", ".join(available_versions + ["main"])}.'''
            )

        # community pipeline on GitHub
        __magic_name__ = COMMUNITY_PIPELINES_URL.format(revision=a, pipeline=a)
        try:
            __magic_name__ = cached_download(
                a,
                cache_dir=a,
                force_download=a,
                proxies=a,
                resume_download=a,
                local_files_only=a,
                use_auth_token=a,
            )
            __magic_name__ = '''git'''
            __magic_name__ = pretrained_model_name_or_path + '''.py'''
        except EnvironmentError:
            logger.error(F'''Could not locate the {module_file} inside {pretrained_model_name_or_path}.''')
            raise
    else:
        try:
            # Load from URL or cache if already cached
            __magic_name__ = hf_hub_download(
                a,
                a,
                cache_dir=a,
                force_download=a,
                proxies=a,
                resume_download=a,
                local_files_only=a,
                use_auth_token=a,
            )
            __magic_name__ = os.path.join('''local''', '''--'''.join(pretrained_model_name_or_path.split('''/''')))
        except EnvironmentError:
            logger.error(F'''Could not locate the {module_file} inside {pretrained_model_name_or_path}.''')
            raise

    # Check we have all the requirements in our environment
    __magic_name__ = check_imports(a)

    # Now we move the module inside our cached dynamic modules.
    __magic_name__ = DIFFUSERS_DYNAMIC_MODULE_NAME + os.path.sep + submodule
    create_dynamic_module(a)
    __magic_name__ = Path(a) / full_submodule
    if submodule == "local" or submodule == "git":
        # We always copy local files (we could hash the file to see if there was a change, and give them the name of
        # that hash, to only copy when there is a modification but it seems overkill for now).
        # The only reason we do the copy is to avoid putting too many folders in sys.path.
        shutil.copy(a, submodule_path / module_file)
        for module_needed in modules_needed:
            __magic_name__ = F'''{module_needed}.py'''
            shutil.copy(os.path.join(a, a), submodule_path / module_needed)
    else:
        # Get the commit hash
        # TODO: we will get this info in the etag soon, so retrieve it from there and not here.
        if isinstance(a, a):
            __magic_name__ = use_auth_token
        elif use_auth_token is True:
            __magic_name__ = HfFolder.get_token()
        else:
            __magic_name__ = None

        __magic_name__ = model_info(a, revision=a, token=a).sha

        # The module file will end up being placed in a subfolder with the git hash of the repo. This way we get the
        # benefit of versioning.
        __magic_name__ = submodule_path / commit_hash
        __magic_name__ = full_submodule + os.path.sep + commit_hash
        create_dynamic_module(a)

        if not (submodule_path / module_file).exists():
            shutil.copy(a, submodule_path / module_file)
        # Make sure we also have every file with relative
        for module_needed in modules_needed:
            if not (submodule_path / module_needed).exists():
                get_cached_module_file(
                    a,
                    F'''{module_needed}.py''',
                    cache_dir=a,
                    force_download=a,
                    resume_download=a,
                    proxies=a,
                    use_auth_token=a,
                    revision=a,
                    local_files_only=a,
                )
    return os.path.join(a, a)


def UpperCamelCase(
    a,
    a,
    a = None,
    a = None,
    a = False,
    a = False,
    a = None,
    a = None,
    a = None,
    a = False,
    **a,
) -> List[Any]:
    '''simple docstring'''
    __magic_name__ = get_cached_module_file(
        a,
        a,
        cache_dir=a,
        force_download=a,
        resume_download=a,
        proxies=a,
        use_auth_token=a,
        revision=a,
        local_files_only=a,
    )
    return get_class_in_module(a, final_module.replace('''.py''', ''''''))
98
0
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


__a = {
    '''configuration_swinv2''': ['''SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''Swinv2Config'''],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    __a = [
        '''SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''Swinv2ForImageClassification''',
        '''Swinv2ForMaskedImageModeling''',
        '''Swinv2Model''',
        '''Swinv2PreTrainedModel''',
    ]

if TYPE_CHECKING:
    from .configuration_swinva import SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP, SwinvaConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_swinva import (
            SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST,
            SwinvaForImageClassification,
            SwinvaForMaskedImageModeling,
            SwinvaModel,
            SwinvaPreTrainedModel,
        )

else:
    import sys

    __a = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
337
from transformers import HfArgumentParser, TensorFlowBenchmark, TensorFlowBenchmarkArguments


def __lowercase() -> int:
    """simple docstring"""
    lowercase: Tuple = HfArgumentParser(_UpperCamelCase)
    lowercase: List[str] = parser.parse_args_into_dataclasses()[0]
    lowercase: Optional[int] = TensorFlowBenchmark(args=_UpperCamelCase)
    try:
        lowercase: Any = parser.parse_args_into_dataclasses()[0]
    except ValueError as e:
        lowercase: Optional[int] = '''Arg --no_{0} is no longer used, please use --no-{0} instead.'''
        lowercase: Any = ''' '''.join(str(_UpperCamelCase).split(''' ''')[:-1])
        lowercase: Any = ''''''
        lowercase: str = eval(str(_UpperCamelCase).split(''' ''')[-1])
        lowercase: List[str] = []
        for arg in depreciated_args:
            # arg[2:] removes '--'
            if arg[2:] in TensorFlowBenchmark.deprecated_args:
                # arg[5:] removes '--no_'
                full_error_msg += arg_error_msg.format(arg[5:])
            else:
                wrong_args.append(_UpperCamelCase)
        if len(_UpperCamelCase) > 0:
            lowercase: Union[str, Any] = full_error_msg + begin_error_msg + str(_UpperCamelCase)
            raise ValueError(_UpperCamelCase)
    benchmark.run()


if __name__ == "__main__":
    main()
337
1
import numpy as np
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
from tensorflow.keras.layers import LSTM, Dense
from tensorflow.keras.models import Sequential


if __name__ == "__main__":
    lowerCamelCase__: Tuple = pd.read_csv('sample_data.csv', header=None)
    lowerCamelCase__: Tuple = df.shape[:1][0]
    # If you're using some other dataset input the target column
    lowerCamelCase__: List[Any] = df.iloc[:, 1:2]
    lowerCamelCase__: str = actual_data.values.reshape(len_data, 1)
    lowerCamelCase__: int = MinMaxScaler().fit_transform(actual_data)
    lowerCamelCase__: Optional[int] = 10
    lowerCamelCase__: Dict = 5
    lowerCamelCase__: Optional[int] = 20
    lowerCamelCase__: List[str] = len_data - periods * look_back
    lowerCamelCase__: Any = actual_data[:division]
    lowerCamelCase__: List[str] = actual_data[division - look_back:]
    lowerCamelCase__, lowerCamelCase__: Optional[Any] = [], []
    lowerCamelCase__, lowerCamelCase__: Union[str, Any] = [], []

    for i in range(0, len(train_data) - forward_days - look_back + 1):
        train_x.append(train_data[i: i + look_back])
        train_y.append(train_data[i + look_back: i + look_back + forward_days])
    for i in range(0, len(test_data) - forward_days - look_back + 1):
        test_x.append(test_data[i: i + look_back])
        test_y.append(test_data[i + look_back: i + look_back + forward_days])

    lowerCamelCase__: List[Any] = np.array(train_x)
    lowerCamelCase__: Union[str, Any] = np.array(test_x)
    lowerCamelCase__: str = np.array([list(i.ravel()) for i in train_y])
    lowerCamelCase__: Optional[Any] = np.array([list(i.ravel()) for i in test_y])

    lowerCamelCase__: Tuple = Sequential()
    model.add(LSTM(128, input_shape=(look_back, 1), return_sequences=True))
    model.add(LSTM(64, input_shape=(128, 1)))
    model.add(Dense(forward_days))
    model.compile(loss='mean_squared_error', optimizer='adam')
    lowerCamelCase__: Union[str, Any] = model.fit(
        x_train, y_train, epochs=150, verbose=1, shuffle=True, batch_size=4
    )
    lowerCamelCase__: Dict = model.predict(x_test)
210
from packaging import version

from .import_utils import is_accelerate_available


if is_accelerate_available():
    import accelerate


def UpperCAmelCase_(__UpperCAmelCase: Optional[int]) -> int:
    if not is_accelerate_available():
        return method
    SCREAMING_SNAKE_CASE_ = version.parse(accelerate.__version__).base_version
    if version.parse(__UpperCAmelCase) < version.parse('0.17.0'):
        return method

    def wrapper(self: Optional[int], *__UpperCAmelCase: Optional[Any], **__UpperCAmelCase: Optional[Any]):
        if hasattr(self, '_hf_hook') and hasattr(self._hf_hook, 'pre_forward'):
            self._hf_hook.pre_forward(self)
        return method(self, *__UpperCAmelCase, **__UpperCAmelCase)

    return wrapper
210
1
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import torch

from ..models.auto import AutoModelForSequenceClassification, AutoTokenizer
from .base import PipelineTool


class SCREAMING_SNAKE_CASE__(UpperCAmelCase):
    '''simple docstring'''

    _UpperCAmelCase: List[Any] = "facebook/bart-large-mnli"
    _UpperCAmelCase: Tuple = (
        "This is a tool that classifies an English text using provided labels. It takes two inputs: `text`, which "
        "should be the text to classify, and `labels`, which should be the list of labels to use for classification. "
        "It returns the most likely label in the list of provided `labels` for the input text."
    )
    _UpperCAmelCase: Tuple = "text_classifier"
    _UpperCAmelCase: str = AutoTokenizer
    _UpperCAmelCase: Optional[int] = AutoModelForSequenceClassification
    _UpperCAmelCase: Any = ["text", ["text"]]
    _UpperCAmelCase: List[Any] = ["text"]

    def A(self: Optional[Any]):
        '''simple docstring'''
        super().setup()
        _snake_case = self.model.config
        _snake_case = -1
        for idx, label in config.idalabel.items():
            if label.lower().startswith('entail'):
                _snake_case = int(lowercase)
        if self.entailment_id == -1:
            raise ValueError('Could not determine the entailment ID from the model config, please pass it at init.')

    def A(self: List[Any], lowercase: str, lowercase: int):
        '''simple docstring'''
        _snake_case = labels
        return self.pre_processor(
            [text] * len(lowercase),
            [f'''This example is {label}''' for label in labels],
            return_tensors='pt',
            padding='max_length',
        )

    def A(self: Dict, lowercase: str):
        '''simple docstring'''
        _snake_case = outputs.logits
        _snake_case = torch.argmax(logits[:, 2]).item()
        return self._labels[label_id]
282
import warnings

from ...utils import logging
from .image_processing_chinese_clip import ChineseCLIPImageProcessor


_lowerCamelCase: Dict = logging.get_logger(__name__)


class SCREAMING_SNAKE_CASE__(UpperCAmelCase):
    '''simple docstring'''

    def __init__(self: Tuple, *lowercase: Optional[int], **lowercase: Any):
        '''simple docstring'''
        warnings.warn(
            'The class ChineseCLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'
            ' Please use ChineseCLIPImageProcessor instead.',
            lowercase,
        )
        super().__init__(*lowercase, **lowercase)
282
1
'''simple docstring'''

import gc
import unittest

import numpy as np
import torch

from diffusers import StableDiffusionKDiffusionPipeline
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu


enable_full_determinism()


@slow
@require_torch_gpu
class a_(unittest.TestCase):
    def A__(self) -> List[Any]:
        """simple docstring"""
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def A__(self) -> List[str]:
        """simple docstring"""
        UpperCamelCase = StableDiffusionKDiffusionPipeline.from_pretrained("""CompVis/stable-diffusion-v1-4""")
        UpperCamelCase = sd_pipe.to(_SCREAMING_SNAKE_CASE)
        sd_pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE)

        sd_pipe.set_scheduler("""sample_euler""")

        UpperCamelCase = """A painting of a squirrel eating a burger"""
        UpperCamelCase = torch.manual_seed(0)
        UpperCamelCase = sd_pipe([prompt], generator=_SCREAMING_SNAKE_CASE, guidance_scale=9.0, num_inference_steps=20, output_type="""np""")

        UpperCamelCase = output.images

        UpperCamelCase = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        UpperCamelCase = np.array([0.0447, 0.0492, 0.0468, 0.0408, 0.0383, 0.0408, 0.0354, 0.0380, 0.0339])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def A__(self) -> Optional[int]:
        """simple docstring"""
        UpperCamelCase = StableDiffusionKDiffusionPipeline.from_pretrained("""stabilityai/stable-diffusion-2-1-base""")
        UpperCamelCase = sd_pipe.to(_SCREAMING_SNAKE_CASE)
        sd_pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE)

        sd_pipe.set_scheduler("""sample_euler""")

        UpperCamelCase = """A painting of a squirrel eating a burger"""
        UpperCamelCase = torch.manual_seed(0)
        UpperCamelCase = sd_pipe([prompt], generator=_SCREAMING_SNAKE_CASE, guidance_scale=9.0, num_inference_steps=20, output_type="""np""")

        UpperCamelCase = output.images

        UpperCamelCase = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        UpperCamelCase = np.array([0.1237, 0.1320, 0.1438, 0.1359, 0.1390, 0.1132, 0.1277, 0.1175, 0.1112])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-1

    def A__(self) -> Optional[int]:
        """simple docstring"""
        UpperCamelCase = StableDiffusionKDiffusionPipeline.from_pretrained("""stabilityai/stable-diffusion-2-1-base""")
        UpperCamelCase = sd_pipe.to(_SCREAMING_SNAKE_CASE)
        sd_pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE)

        sd_pipe.set_scheduler("""sample_dpmpp_2m""")

        UpperCamelCase = """A painting of a squirrel eating a burger"""
        UpperCamelCase = torch.manual_seed(0)
        UpperCamelCase = sd_pipe(
            [prompt],
            generator=_SCREAMING_SNAKE_CASE,
            guidance_scale=7.5,
            num_inference_steps=15,
            output_type="""np""",
            use_karras_sigmas=_SCREAMING_SNAKE_CASE,
        )

        UpperCamelCase = output.images

        UpperCamelCase = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        UpperCamelCase = np.array(
            [0.11381689, 0.12112921, 0.1389457, 0.12549606, 0.1244964, 0.10831517, 0.11562866, 0.10867816, 0.10499048]
        )

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
356
'''simple docstring''' import collections import inspect import unittest from transformers import FocalNetConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_backbone_common import BackboneTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ( FocalNetBackbone, FocalNetForImageClassification, FocalNetForMaskedImageModeling, FocalNetModel, ) from transformers.models.focalnet.modeling_focalnet import FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class a_ : def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=13 , _SCREAMING_SNAKE_CASE=32 , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE=3 , _SCREAMING_SNAKE_CASE=16 , _SCREAMING_SNAKE_CASE=[32, 64, 128] , _SCREAMING_SNAKE_CASE=[1, 2, 1] , _SCREAMING_SNAKE_CASE=[2, 2, 4] , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE=2.0 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=0.0 , _SCREAMING_SNAKE_CASE=0.0 , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE="gelu" , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=0.0_2 , _SCREAMING_SNAKE_CASE=1e-5 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=10 , _SCREAMING_SNAKE_CASE=8 , _SCREAMING_SNAKE_CASE=["stage1", "stage2"] , _SCREAMING_SNAKE_CASE=[1, 2] , ) -> Optional[Any]: """simple docstring""" UpperCamelCase = parent UpperCamelCase = batch_size UpperCamelCase = image_size UpperCamelCase = patch_size UpperCamelCase = num_channels UpperCamelCase = embed_dim UpperCamelCase = hidden_sizes UpperCamelCase = depths UpperCamelCase = num_heads UpperCamelCase = window_size UpperCamelCase = mlp_ratio UpperCamelCase = qkv_bias UpperCamelCase = hidden_dropout_prob UpperCamelCase = attention_probs_dropout_prob UpperCamelCase = drop_path_rate UpperCamelCase = hidden_act UpperCamelCase = use_absolute_embeddings UpperCamelCase = patch_norm UpperCamelCase = layer_norm_eps UpperCamelCase = initializer_range UpperCamelCase = is_training UpperCamelCase = scope UpperCamelCase = use_labels UpperCamelCase = type_sequence_label_size UpperCamelCase = encoder_stride UpperCamelCase = out_features UpperCamelCase = out_indices def A__ ( self ) -> Any: """simple docstring""" UpperCamelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) UpperCamelCase = None if self.use_labels: UpperCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size ) UpperCamelCase = self.get_config() return config, pixel_values, labels def A__ ( self ) -> str: """simple docstring""" return FocalNetConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , hidden_sizes=self.hidden_sizes , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , 
patch_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , ) def A__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Tuple: """simple docstring""" UpperCamelCase = FocalNetModel(config=_SCREAMING_SNAKE_CASE ) model.to(_SCREAMING_SNAKE_CASE ) model.eval() UpperCamelCase = model(_SCREAMING_SNAKE_CASE ) UpperCamelCase = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1)) UpperCamelCase = int(config.embed_dim * 2 ** (len(config.depths ) - 1) ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) ) def A__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> List[Any]: """simple docstring""" UpperCamelCase = FocalNetBackbone(config=_SCREAMING_SNAKE_CASE ) model.to(_SCREAMING_SNAKE_CASE ) model.eval() UpperCamelCase = model(_SCREAMING_SNAKE_CASE ) # verify feature maps self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.image_size, 8, 8] ) # verify channels self.parent.assertEqual(len(model.channels ) , len(config.out_features ) ) self.parent.assertListEqual(model.channels , config.hidden_sizes[:-1] ) # verify backbone works with out_features=None UpperCamelCase = None UpperCamelCase = FocalNetBackbone(config=_SCREAMING_SNAKE_CASE ) model.to(_SCREAMING_SNAKE_CASE ) model.eval() UpperCamelCase = model(_SCREAMING_SNAKE_CASE ) # verify feature maps self.parent.assertEqual(len(result.feature_maps ) , 1 ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.image_size * 2, 4, 4] ) # verify channels self.parent.assertEqual(len(model.channels ) , 1 ) self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] ) def A__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> List[Any]: """simple docstring""" UpperCamelCase = FocalNetForMaskedImageModeling(config=_SCREAMING_SNAKE_CASE ) model.to(_SCREAMING_SNAKE_CASE ) model.eval() UpperCamelCase = model(_SCREAMING_SNAKE_CASE ) self.parent.assertEqual( result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) ) # test greyscale images UpperCamelCase = 1 UpperCamelCase = FocalNetForMaskedImageModeling(_SCREAMING_SNAKE_CASE ) model.to(_SCREAMING_SNAKE_CASE ) model.eval() UpperCamelCase = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) UpperCamelCase = model(_SCREAMING_SNAKE_CASE ) self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) ) def A__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> List[str]: """simple docstring""" UpperCamelCase = self.type_sequence_label_size UpperCamelCase = FocalNetForImageClassification(_SCREAMING_SNAKE_CASE ) model.to(_SCREAMING_SNAKE_CASE ) model.eval() UpperCamelCase = model(_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) # test greyscale images UpperCamelCase = 1 UpperCamelCase = FocalNetForImageClassification(_SCREAMING_SNAKE_CASE ) model.to(_SCREAMING_SNAKE_CASE ) model.eval() UpperCamelCase = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) UpperCamelCase
= model(_SCREAMING_SNAKE_CASE ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def A__ ( self ) -> List[str]: """simple docstring""" UpperCamelCase = self.prepare_config_and_inputs() UpperCamelCase ,UpperCamelCase ,UpperCamelCase = config_and_inputs UpperCamelCase = {"""pixel_values""": pixel_values} return config, inputs_dict @require_torch class a_ ( lowerCamelCase , lowerCamelCase , unittest.TestCase ): lowercase = ( ( FocalNetModel, FocalNetForImageClassification, FocalNetForMaskedImageModeling, FocalNetBackbone, ) if is_torch_available() else () ) lowercase = ( {"""feature-extraction""": FocalNetModel, """image-classification""": FocalNetForImageClassification} if is_torch_available() else {} ) lowercase = False lowercase = False lowercase = False lowercase = False lowercase = False def A__ ( self ) -> int: """simple docstring""" UpperCamelCase = FocalNetModelTester(self ) UpperCamelCase = ConfigTester(self , config_class=_SCREAMING_SNAKE_CASE , embed_dim=37 , has_text_modality=_SCREAMING_SNAKE_CASE ) def A__ ( self ) -> List[Any]: """simple docstring""" self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def A__ ( self ) -> Tuple: """simple docstring""" return def A__ ( self ) -> str: """simple docstring""" UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_SCREAMING_SNAKE_CASE ) def A__ ( self ) -> List[str]: """simple docstring""" UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_backbone(*_SCREAMING_SNAKE_CASE ) def A__ ( self ) -> Optional[Any]: """simple docstring""" UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_image_modeling(*_SCREAMING_SNAKE_CASE ) def A__ ( self ) -> Union[str, Any]: """simple docstring""" UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*_SCREAMING_SNAKE_CASE ) @unittest.skip(reason="""FocalNet does not use inputs_embeds""" ) def A__ ( self ) -> int: """simple docstring""" pass @unittest.skip(reason="""FocalNet does not use feedforward chunking""" ) def A__ ( self ) -> int: """simple docstring""" pass def A__ ( self ) -> str: """simple docstring""" UpperCamelCase ,UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes[:-1]: UpperCamelCase = model_class(_SCREAMING_SNAKE_CASE ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) UpperCamelCase = model.get_output_embeddings() self.assertTrue(x is None or isinstance(_SCREAMING_SNAKE_CASE , nn.Linear ) ) def A__ ( self ) -> List[Any]: """simple docstring""" UpperCamelCase ,UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes[:-1]: UpperCamelCase = model_class(_SCREAMING_SNAKE_CASE ) UpperCamelCase = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic UpperCamelCase = [*signature.parameters.keys()] UpperCamelCase = ["""pixel_values"""] self.assertListEqual(arg_names[:1] , 
_SCREAMING_SNAKE_CASE ) def A__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Dict: """simple docstring""" UpperCamelCase = model_class(_SCREAMING_SNAKE_CASE ) model.to(_SCREAMING_SNAKE_CASE ) model.eval() with torch.no_grad(): UpperCamelCase = model(**self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ) UpperCamelCase = outputs.hidden_states UpperCamelCase = getattr( self.model_tester , """expected_num_hidden_layers""" , len(self.model_tester.depths ) + 1 ) self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE ) # FocalNet has a different seq_length UpperCamelCase = ( config.patch_size if isinstance(config.patch_size , collections.abc.Iterable ) else (config.patch_size, config.patch_size) ) UpperCamelCase = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , ) UpperCamelCase = outputs.reshaped_hidden_states self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE ) UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase = reshaped_hidden_states[0].shape UpperCamelCase = ( reshaped_hidden_states[0].view(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , height * width ).permute(0 , 2 , 1 ) ) self.assertListEqual( list(reshaped_hidden_states.shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , ) def A__ ( self ) -> Optional[Any]: """simple docstring""" UpperCamelCase ,UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common() UpperCamelCase = ( self.model_tester.image_size if isinstance(self.model_tester.image_size , collections.abc.Iterable ) else (self.model_tester.image_size, self.model_tester.image_size) ) for model_class in self.all_model_classes[:-1]: UpperCamelCase = True self.check_hidden_states_output(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] UpperCamelCase = True self.check_hidden_states_output(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) def A__ ( self ) -> List[Any]: """simple docstring""" UpperCamelCase ,UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common() UpperCamelCase = 3 UpperCamelCase = ( self.model_tester.image_size if isinstance(self.model_tester.image_size , collections.abc.Iterable ) else (self.model_tester.image_size, self.model_tester.image_size) ) UpperCamelCase = ( config.patch_size if isinstance(config.patch_size , collections.abc.Iterable ) else (config.patch_size, config.patch_size) ) UpperCamelCase = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0]) UpperCamelCase = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1]) for model_class in self.all_model_classes[:-1]: UpperCamelCase = True self.check_hidden_states_output(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , (padded_height, padded_width) ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] UpperCamelCase = True self.check_hidden_states_output(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , (padded_height, padded_width) ) @slow def A__ ( self ) -> Union[str, Any]: """simple docstring""" for model_name in FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: UpperCamelCase = FocalNetModel.from_pretrained(_SCREAMING_SNAKE_CASE ) 
self.assertIsNotNone(_SCREAMING_SNAKE_CASE ) def A__ ( self ) -> Any: """simple docstring""" UpperCamelCase ,UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common() UpperCamelCase = _config_zero_init(_SCREAMING_SNAKE_CASE ) for model_class in self.all_model_classes: UpperCamelCase = model_class(config=_SCREAMING_SNAKE_CASE ) for name, param in model.named_parameters(): if "embeddings" not in name and param.requires_grad: self.assertIn( ((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=F"Parameter {name} of model {model_class} seems not properly initialized" , ) @require_vision @require_torch class a_ ( unittest.TestCase ): @cached_property def A__ ( self ) -> List[str]: """simple docstring""" return AutoImageProcessor.from_pretrained("""microsoft/focalnet-tiny""" ) if is_vision_available() else None @slow def A__ ( self ) -> str: """simple docstring""" UpperCamelCase = FocalNetForImageClassification.from_pretrained("""microsoft/focalnet-tiny""" ).to(_SCREAMING_SNAKE_CASE ) UpperCamelCase = self.default_image_processor UpperCamelCase = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) UpperCamelCase = image_processor(images=_SCREAMING_SNAKE_CASE , return_tensors="""pt""" ).to(_SCREAMING_SNAKE_CASE ) # forward pass with torch.no_grad(): UpperCamelCase = model(**_SCREAMING_SNAKE_CASE ) # verify the logits UpperCamelCase = torch.Size((1, 1000) ) self.assertEqual(outputs.logits.shape , _SCREAMING_SNAKE_CASE ) UpperCamelCase = torch.tensor([0.2_1_6_6, -0.4_3_6_8, 0.2_1_9_1] ).to(_SCREAMING_SNAKE_CASE ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , _SCREAMING_SNAKE_CASE , atol=1e-4 ) ) self.assertEqual(outputs.logits.argmax(dim=-1 ).item() , 281 ) @require_torch class a_ ( lowerCamelCase , unittest.TestCase ): lowercase = (FocalNetBackbone,) if is_torch_available() else () lowercase = FocalNetConfig lowercase = False def A__ ( self ) -> Union[str, Any]: """simple docstring""" UpperCamelCase = FocalNetModelTester(self )
183
0
import inspect import unittest import numpy as np from transformers import ViTConfig, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_configuration_common import ConfigTester from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor if is_flax_available(): import jax from transformers.models.vit.modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel class __lowerCAmelCase ( unittest.TestCase): def __init__( self: Tuple , _lowerCAmelCase: Optional[int] , _lowerCAmelCase: List[Any]=13 , _lowerCAmelCase: int=30 , _lowerCAmelCase: Tuple=2 , _lowerCAmelCase: List[Any]=3 , _lowerCAmelCase: List[str]=True , _lowerCAmelCase: int=True , _lowerCAmelCase: Any=32 , _lowerCAmelCase: Dict=5 , _lowerCAmelCase: Optional[Any]=4 , _lowerCAmelCase: Tuple=37 , _lowerCAmelCase: List[Any]="gelu" , _lowerCAmelCase: Optional[int]=0.1 , _lowerCAmelCase: Dict=0.1 , _lowerCAmelCase: Any=10 , _lowerCAmelCase: Any=0.02 , ): lowercase :List[Any] = parent lowercase :List[Any] = batch_size lowercase :Any = image_size lowercase :int = patch_size lowercase :str = num_channels lowercase :Union[str, Any] = is_training lowercase :Union[str, Any] = use_labels lowercase :List[Any] = hidden_size lowercase :List[str] = num_hidden_layers lowercase :Optional[int] = num_attention_heads lowercase :List[Any] = intermediate_size lowercase :Any = hidden_act lowercase :str = hidden_dropout_prob lowercase :str = attention_probs_dropout_prob lowercase :int = type_sequence_label_size lowercase :Optional[int] = initializer_range # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token) lowercase :List[Any] = (image_size // patch_size) ** 2 lowercase :List[Any] = num_patches + 1 def SCREAMING_SNAKE_CASE ( self: Optional[Any] ): lowercase :Union[str, Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) lowercase :Dict = ViTConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=_lowerCAmelCase , initializer_range=self.initializer_range , ) return config, pixel_values def SCREAMING_SNAKE_CASE ( self: int , _lowerCAmelCase: List[Any] , _lowerCAmelCase: List[str] ): lowercase :Tuple = FlaxViTModel(config=_lowerCAmelCase ) lowercase :List[str] = model(_lowerCAmelCase ) # expected sequence length = num_patches + 1 (we add 1 for the [CLS] token) lowercase :Tuple = (self.image_size, self.image_size) lowercase :Optional[int] = (self.patch_size, self.patch_size) lowercase :List[Any] = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, num_patches + 1, self.hidden_size) ) def SCREAMING_SNAKE_CASE ( self: Optional[int] , _lowerCAmelCase: int , _lowerCAmelCase: Optional[int] ): lowercase :Any = self.type_sequence_label_size lowercase :Dict = FlaxViTForImageClassification(config=_lowerCAmelCase ) lowercase :Optional[Any] = model(_lowerCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) # test greyscale images lowercase :Dict = 1 lowercase :Optional[Any] = FlaxViTForImageClassification(_lowerCAmelCase ) lowercase 
:Optional[Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) lowercase :Dict = model(_lowerCAmelCase ) def SCREAMING_SNAKE_CASE ( self: List[Any] ): lowercase :int = self.prepare_config_and_inputs() ( ( lowercase ) , ( lowercase ) , ) :Dict = config_and_inputs lowercase :Optional[int] = {"pixel_values": pixel_values} return config, inputs_dict @require_flax class __lowerCAmelCase ( A__ , unittest.TestCase): _a = (FlaxViTModel, FlaxViTForImageClassification) if is_flax_available() else () def SCREAMING_SNAKE_CASE ( self: Union[str, Any] ): lowercase :int = FlaxViTModelTester(self ) lowercase :Tuple = ConfigTester(self , config_class=_lowerCAmelCase , has_text_modality=_lowerCAmelCase , hidden_size=37 ) def SCREAMING_SNAKE_CASE ( self: Any ): self.config_tester.run_common_tests() def SCREAMING_SNAKE_CASE ( self: List[Any] ): lowercase :int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_lowerCAmelCase ) def SCREAMING_SNAKE_CASE ( self: Any ): lowercase :List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*_lowerCAmelCase ) def SCREAMING_SNAKE_CASE ( self: List[Any] ): lowercase , lowercase :Optional[int] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowercase :Optional[Any] = model_class(_lowerCAmelCase ) lowercase :Dict = inspect.signature(model.__call__ ) # signature.parameters is an OrderedDict => so arg_names order is deterministic lowercase :int = [*signature.parameters.keys()] lowercase :Any = ["pixel_values"] self.assertListEqual(arg_names[:1] , _lowerCAmelCase ) def SCREAMING_SNAKE_CASE ( self: str ): lowercase , lowercase :List[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): lowercase :List[Any] = self._prepare_for_class(_lowerCAmelCase , _lowerCAmelCase ) lowercase :Optional[Any] = model_class(_lowerCAmelCase ) @jax.jit def model_jitted(_lowerCAmelCase: Union[str, Any] , **_lowerCAmelCase: Tuple ): return model(pixel_values=_lowerCAmelCase , **_lowerCAmelCase ) with self.subTest("JIT Enabled" ): lowercase :int = model_jitted(**_lowerCAmelCase ).to_tuple() with self.subTest("JIT Disabled" ): with jax.disable_jit(): lowercase :List[Any] = model_jitted(**_lowerCAmelCase ).to_tuple() self.assertEqual(len(_lowerCAmelCase ) , len(_lowerCAmelCase ) ) for jitted_output, output in zip(_lowerCAmelCase , _lowerCAmelCase ): self.assertEqual(jitted_output.shape , output.shape ) @slow def SCREAMING_SNAKE_CASE ( self: int ): for model_class_name in self.all_model_classes: lowercase :List[str] = model_class_name.from_pretrained("google/vit-base-patch16-224" ) lowercase :Union[str, Any] = model(np.ones((1, 3, 2_24, 2_24) ) ) self.assertIsNotNone(_lowerCAmelCase )
236
import argparse
import logging
import sys
from unittest.mock import patch

import run_glue_deebert

from transformers.testing_utils import TestCasePlus, get_gpu_count, require_torch_non_multi_gpu, slow


logging.basicConfig(level=logging.DEBUG)

a_ : Tuple = logging.getLogger()


def lowerCamelCase__ ():
    SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
    parser.add_argument('-f')
    SCREAMING_SNAKE_CASE = parser.parse_args()
    return args.f


class _snake_case ( A__ ):
    def SCREAMING_SNAKE_CASE__ ( self) -> None:
        SCREAMING_SNAKE_CASE = logging.StreamHandler(sys.stdout)
        logger.addHandler(a)

    def SCREAMING_SNAKE_CASE__ ( self , a) -> str:
        SCREAMING_SNAKE_CASE = get_gpu_count()

        if n_gpu > 1:
            pass
            # XXX: doesn't quite work with n_gpu > 1 https://github.com/huggingface/transformers/issues/10560
            # script = f"{self.examples_dir_str}/research_projects/deebert/run_glue_deebert.py"
            # distributed_args = f"-m torch.distributed.launch --nproc_per_node={n_gpu} {script}".split()
            # cmd = [sys.executable] + distributed_args + args
            # execute_subprocess_async(cmd, env=self.get_env())
            # XXX: test the results - need to save them first into .json file
        else:
            args.insert(0 , 'run_glue_deebert.py')
            with patch.object(a , 'argv' , a):
                SCREAMING_SNAKE_CASE = run_glue_deebert.main()
                for value in result.values():
                    self.assertGreaterEqual(a , 0.6_66)

    @slow
    @require_torch_non_multi_gpu
    def SCREAMING_SNAKE_CASE__ ( self) -> Optional[Any]:
        SCREAMING_SNAKE_CASE = '\n --model_type roberta\n --model_name_or_path roberta-base\n --task_name MRPC\n --do_train\n --do_eval\n --do_lower_case\n --data_dir ./tests/fixtures/tests_samples/MRPC/\n --max_seq_length 128\n --per_gpu_eval_batch_size=1\n --per_gpu_train_batch_size=8\n --learning_rate 2e-4\n --num_train_epochs 3\n --overwrite_output_dir\n --seed 42\n --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --plot_data_dir ./examples/deebert/results/\n --save_steps 0\n --overwrite_cache\n --eval_after_first_stage\n '.split()
        self.run_and_check(a)

        SCREAMING_SNAKE_CASE = '\n --model_type roberta\n --model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --task_name MRPC\n --do_eval\n --do_lower_case\n --data_dir ./tests/fixtures/tests_samples/MRPC/\n --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --plot_data_dir ./examples/deebert/results/\n --max_seq_length 128\n --eval_each_highway\n --eval_highway\n --overwrite_cache\n --per_gpu_eval_batch_size=1\n '.split()
        self.run_and_check(a)

        SCREAMING_SNAKE_CASE = '\n --model_type roberta\n --model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --task_name MRPC\n --do_eval\n --do_lower_case\n --data_dir ./tests/fixtures/tests_samples/MRPC/\n --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --plot_data_dir ./examples/deebert/results/\n --max_seq_length 128\n --early_exit_entropy 0.1\n --eval_highway\n --overwrite_cache\n --per_gpu_eval_batch_size=1\n '.split()
        self.run_and_check(a)
137
0
'''simple docstring'''

__A ={str(digit): digit**5 for digit in range(10)}


def _UpperCamelCase ( UpperCamelCase__ ):
    return sum(DIGITS_FIFTH_POWER[digit] for digit in str(UpperCamelCase__ ) )


def _UpperCamelCase ( ):
    return sum(
        number
        for number in range(1_0_0_0 , 1_0_0_0_0_0_0 )
        if number == digits_fifth_powers_sum(UpperCamelCase__ )
    )


if __name__ == "__main__":
    print(solution())
283
'''simple docstring'''

from __future__ import annotations

import unittest

from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow


if is_tf_available():
    import tensorflow as tf

    from transformers import AutoTokenizer, TFAutoModelForSeqaSeqLM


@require_tf
@require_sentencepiece
@require_tokenizers
class _snake_case ( unittest.TestCase ):
    @slow
    def snake_case__ ( self):
        UpperCAmelCase__ : Optional[int] = TFAutoModelForSeqaSeqLM.from_pretrained("""google/mt5-small""")
        UpperCAmelCase__ : str = AutoTokenizer.from_pretrained("""google/mt5-small""")

        UpperCAmelCase__ : Optional[Any] = tokenizer("""Hello there""" , return_tensors="""tf""").input_ids
        UpperCAmelCase__ : Tuple = tokenizer("""Hi I am""" , return_tensors="""tf""").input_ids

        UpperCAmelCase__ : List[Any] = model(_lowerCamelCase , labels=_lowerCamelCase).loss
        UpperCAmelCase__ : Tuple = -tf.math.reduce_mean(_lowerCamelCase).numpy()

        UpperCAmelCase__ : Optional[int] = -21.228168
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 2e-4)
283
1
'''simple docstring''' import tempfile import torch from diffusers import IPNDMScheduler from .test_schedulers import SchedulerCommonTest class lowerCAmelCase__ ( lowerCamelCase_ ): lowerCAmelCase_ = (IPNDMScheduler,) lowerCAmelCase_ = (('''num_inference_steps''', 5_0),) def _snake_case ( self , **__SCREAMING_SNAKE_CASE ): """simple docstring""" lowercase_ : Optional[Any] = {'''num_train_timesteps''': 10_00} config.update(**__SCREAMING_SNAKE_CASE ) return config def _snake_case ( self , __SCREAMING_SNAKE_CASE=0 , **__SCREAMING_SNAKE_CASE ): """simple docstring""" lowercase_ : str = dict(self.forward_default_kwargs ) lowercase_ : List[Any] = kwargs.pop('''num_inference_steps''' , __SCREAMING_SNAKE_CASE ) lowercase_ : Optional[int] = self.dummy_sample lowercase_ : Optional[Any] = 0.1 * sample lowercase_ : List[Any] = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05] for scheduler_class in self.scheduler_classes: lowercase_ : Any = self.get_scheduler_config(**__SCREAMING_SNAKE_CASE ) lowercase_ : int = scheduler_class(**__SCREAMING_SNAKE_CASE ) scheduler.set_timesteps(__SCREAMING_SNAKE_CASE ) # copy over dummy past residuals lowercase_ : Optional[Any] = dummy_past_residuals[:] if time_step is None: lowercase_ : List[Any] = scheduler.timesteps[len(scheduler.timesteps ) // 2] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(__SCREAMING_SNAKE_CASE ) lowercase_ : Optional[Any] = scheduler_class.from_pretrained(__SCREAMING_SNAKE_CASE ) new_scheduler.set_timesteps(__SCREAMING_SNAKE_CASE ) # copy over dummy past residuals lowercase_ : str = dummy_past_residuals[:] lowercase_ : Optional[int] = scheduler.step(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ).prev_sample lowercase_ : List[Any] = new_scheduler.step(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" lowercase_ : List[str] = scheduler.step(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ).prev_sample lowercase_ : Dict = new_scheduler.step(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" def _snake_case ( self ): """simple docstring""" pass def _snake_case ( self , __SCREAMING_SNAKE_CASE=0 , **__SCREAMING_SNAKE_CASE ): """simple docstring""" lowercase_ : List[Any] = dict(self.forward_default_kwargs ) lowercase_ : Tuple = kwargs.pop('''num_inference_steps''' , __SCREAMING_SNAKE_CASE ) lowercase_ : Tuple = self.dummy_sample lowercase_ : Dict = 0.1 * sample lowercase_ : str = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05] for scheduler_class in self.scheduler_classes: lowercase_ : Optional[int] = self.get_scheduler_config() lowercase_ : Any = scheduler_class(**__SCREAMING_SNAKE_CASE ) scheduler.set_timesteps(__SCREAMING_SNAKE_CASE ) # copy over dummy past residuals (must be after setting timesteps) lowercase_ : List[Any] = dummy_past_residuals[:] if time_step is None: lowercase_ : Dict = scheduler.timesteps[len(scheduler.timesteps ) // 2] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(__SCREAMING_SNAKE_CASE ) lowercase_ : str = scheduler_class.from_pretrained(__SCREAMING_SNAKE_CASE ) # copy over dummy past residuals 
new_scheduler.set_timesteps(__SCREAMING_SNAKE_CASE ) # copy over dummy past residual (must be after setting timesteps) lowercase_ : Any = dummy_past_residuals[:] lowercase_ : Optional[Any] = scheduler.step(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ).prev_sample lowercase_ : Dict = new_scheduler.step(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" lowercase_ : int = scheduler.step(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ).prev_sample lowercase_ : int = new_scheduler.step(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" def _snake_case ( self , **__SCREAMING_SNAKE_CASE ): """simple docstring""" lowercase_ : str = self.scheduler_classes[0] lowercase_ : Optional[Any] = self.get_scheduler_config(**__SCREAMING_SNAKE_CASE ) lowercase_ : Dict = scheduler_class(**__SCREAMING_SNAKE_CASE ) lowercase_ : Dict = 10 lowercase_ : List[Any] = self.dummy_model() lowercase_ : Tuple = self.dummy_sample_deter scheduler.set_timesteps(__SCREAMING_SNAKE_CASE ) for i, t in enumerate(scheduler.timesteps ): lowercase_ : int = model(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) lowercase_ : int = scheduler.step(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ).prev_sample for i, t in enumerate(scheduler.timesteps ): lowercase_ : List[Any] = model(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) lowercase_ : Union[str, Any] = scheduler.step(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ).prev_sample return sample def _snake_case ( self ): """simple docstring""" lowercase_ : List[str] = dict(self.forward_default_kwargs ) lowercase_ : List[str] = kwargs.pop('''num_inference_steps''' , __SCREAMING_SNAKE_CASE ) for scheduler_class in self.scheduler_classes: lowercase_ : Dict = self.get_scheduler_config() lowercase_ : Dict = scheduler_class(**__SCREAMING_SNAKE_CASE ) lowercase_ : Union[str, Any] = self.dummy_sample lowercase_ : str = 0.1 * sample if num_inference_steps is not None and hasattr(__SCREAMING_SNAKE_CASE , '''set_timesteps''' ): scheduler.set_timesteps(__SCREAMING_SNAKE_CASE ) elif num_inference_steps is not None and not hasattr(__SCREAMING_SNAKE_CASE , '''set_timesteps''' ): lowercase_ : Any = num_inference_steps # copy over dummy past residuals (must be done after set_timesteps) lowercase_ : Any = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05] lowercase_ : int = dummy_past_residuals[:] lowercase_ : List[str] = scheduler.timesteps[5] lowercase_ : Optional[int] = scheduler.timesteps[6] lowercase_ : int = scheduler.step(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ).prev_sample lowercase_ : int = scheduler.step(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ).prev_sample self.assertEqual(output_a.shape , sample.shape ) self.assertEqual(output_a.shape , output_a.shape ) lowercase_ : Optional[int] = scheduler.step(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ).prev_sample lowercase_ : Tuple = scheduler.step(__SCREAMING_SNAKE_CASE , 
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ).prev_sample self.assertEqual(output_a.shape , sample.shape ) self.assertEqual(output_a.shape , output_a.shape ) def _snake_case ( self ): """simple docstring""" for timesteps in [1_00, 10_00]: self.check_over_configs(num_train_timesteps=__SCREAMING_SNAKE_CASE , time_step=__SCREAMING_SNAKE_CASE ) def _snake_case ( self ): """simple docstring""" for t, num_inference_steps in zip([1, 5, 10] , [10, 50, 1_00] ): self.check_over_forward(num_inference_steps=__SCREAMING_SNAKE_CASE , time_step=__SCREAMING_SNAKE_CASE ) def _snake_case ( self ): """simple docstring""" lowercase_ : Union[str, Any] = self.full_loop() lowercase_ : Optional[Any] = torch.mean(torch.abs(__SCREAMING_SNAKE_CASE ) ) assert abs(result_mean.item() - 2_54_05_29 ) < 10
93
"""simple docstring""" from __future__ import annotations from collections import namedtuple from dataclasses import dataclass @dataclass class snake_case : """simple docstring""" snake_case__ = 42 snake_case__ = None snake_case__ = None lowerCAmelCase__ : Union[str, Any] = namedtuple('CoinsDistribResult', 'moves excess') def a_ ( lowerCamelCase ): if root is None: return 0 # Validation def count_nodes(lowerCamelCase ) -> int: if node is None: return 0 return count_nodes(node.left ) + count_nodes(node.right ) + 1 def count_coins(lowerCamelCase ) -> int: if node is None: return 0 return count_coins(node.left ) + count_coins(node.right ) + node.data if count_nodes(lowerCamelCase ) != count_coins(lowerCamelCase ): raise ValueError('The nodes number should be same as the number of coins' ) # Main calculation def get_distrib(lowerCamelCase ) -> CoinsDistribResult: if node is None: return CoinsDistribResult(0 , 1 ) UpperCAmelCase__ , UpperCAmelCase__ = get_distrib(node.left ) UpperCAmelCase__ , UpperCAmelCase__ = get_distrib(node.right ) UpperCAmelCase__ = 1 - left_distrib_excess UpperCAmelCase__ = 1 - right_distrib_excess UpperCAmelCase__ = ( left_distrib_moves + right_distrib_moves + abs(lowerCamelCase ) + abs(lowerCamelCase ) ) UpperCAmelCase__ = node.data - coins_to_left - coins_to_right return CoinsDistribResult(lowerCamelCase , lowerCamelCase ) return get_distrib(lowerCamelCase )[0] if __name__ == "__main__": import doctest doctest.testmod()
98
0
'''simple docstring''' import argparse import json import requests import timm import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import AutoImageProcessor, SwinConfig, SwinForImageClassification def lowerCAmelCase_ ( snake_case__ ): '''simple docstring''' A : Dict = SwinConfig() A : List[Any] = swin_name.split('''_''' ) A : Tuple = name_split[1] A : Union[str, Any] = int(name_split[4] ) A : str = int(name_split[3][-1] ) if model_size == "tiny": A : Optional[int] = 96 A : Optional[Any] = (2, 2, 6, 2) A : Any = (3, 6, 12, 24) elif model_size == "small": A : Optional[int] = 96 A : str = (2, 2, 18, 2) A : Tuple = (3, 6, 12, 24) elif model_size == "base": A : int = 128 A : Optional[Any] = (2, 2, 18, 2) A : List[str] = (4, 8, 16, 32) else: A : Dict = 192 A : Optional[Any] = (2, 2, 18, 2) A : Optional[Any] = (6, 12, 24, 48) if "in22k" in swin_name: A : Dict = 2_1841 else: A : str = 1000 A : List[str] = '''huggingface/label-files''' A : Any = '''imagenet-1k-id2label.json''' A : Any = json.load(open(hf_hub_download(snake_case__ , snake_case__ , repo_type='''dataset''' ) , '''r''' ) ) A : str = {int(snake_case__ ): v for k, v in idalabel.items()} A : Tuple = idalabel A : Tuple = {v: k for k, v in idalabel.items()} A : Tuple = img_size A : Dict = num_classes A : Optional[Any] = embed_dim A : str = depths A : str = num_heads A : Optional[int] = window_size return config def lowerCAmelCase_ ( snake_case__ ): '''simple docstring''' if "patch_embed.proj" in name: A : Any = name.replace('''patch_embed.proj''' , '''embeddings.patch_embeddings.projection''' ) if "patch_embed.norm" in name: A : Tuple = name.replace('''patch_embed.norm''' , '''embeddings.norm''' ) if "layers" in name: A : Optional[int] = '''encoder.''' + name if "attn.proj" in name: A : List[str] = name.replace('''attn.proj''' , '''attention.output.dense''' ) if "attn" in name: A : List[str] = name.replace('''attn''' , '''attention.self''' ) if "norm1" in name: A : Any = name.replace('''norm1''' , '''layernorm_before''' ) if "norm2" in name: A : Tuple = name.replace('''norm2''' , '''layernorm_after''' ) if "mlp.fc1" in name: A : Dict = name.replace('''mlp.fc1''' , '''intermediate.dense''' ) if "mlp.fc2" in name: A : str = name.replace('''mlp.fc2''' , '''output.dense''' ) if name == "norm.weight": A : Tuple = '''layernorm.weight''' if name == "norm.bias": A : Tuple = '''layernorm.bias''' if "head" in name: A : Any = name.replace('''head''' , '''classifier''' ) else: A : List[Any] = '''swin.''' + name return name def lowerCAmelCase_ ( snake_case__ , snake_case__ ): '''simple docstring''' for key in orig_state_dict.copy().keys(): A : Dict = orig_state_dict.pop(snake_case__ ) if "mask" in key: continue elif "qkv" in key: A : Dict = key.split('''.''' ) A : Optional[int] = int(key_split[1] ) A : List[str] = int(key_split[3] ) A : Optional[int] = model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size if "weight" in key: A : Any = val[:dim, :] A : Dict = val[ dim : dim * 2, : ] A : List[str] = val[-dim:, :] else: A : Any = val[ :dim ] A : Optional[int] = val[ dim : dim * 2 ] A : Any = val[ -dim: ] else: A : str = val return orig_state_dict def lowerCAmelCase_ ( snake_case__ , snake_case__ ): '''simple docstring''' A : Tuple = timm.create_model(snake_case__ , pretrained=snake_case__ ) timm_model.eval() A : Optional[Any] = get_swin_config(snake_case__ ) A : Optional[int] = SwinForImageClassification(snake_case__ ) model.eval() A : List[str] = 
convert_state_dict(timm_model.state_dict() , snake_case__ ) model.load_state_dict(snake_case__ ) A : Optional[int] = '''http://images.cocodataset.org/val2017/000000039769.jpg''' A : Any = AutoImageProcessor.from_pretrained('''microsoft/{}'''.format(swin_name.replace('''_''' , '''-''' ) ) ) A : List[Any] = Image.open(requests.get(snake_case__ , stream=snake_case__ ).raw ) A : List[Any] = image_processor(images=snake_case__ , return_tensors='''pt''' ) A : Any = timm_model(inputs['''pixel_values'''] ) A : Optional[Any] = model(**snake_case__ ).logits assert torch.allclose(snake_case__ , snake_case__ , atol=1E-3 ) print(F'Saving model {swin_name} to {pytorch_dump_folder_path}' ) model.save_pretrained(snake_case__ ) print(F'Saving image processor to {pytorch_dump_folder_path}' ) image_processor.save_pretrained(snake_case__ ) if __name__ == "__main__": lowercase : Optional[Any] = argparse.ArgumentParser() # Required parameters parser.add_argument( '--swin_name', default='swin_tiny_patch4_window7_224', type=str, help='Name of the Swin timm model you\'d like to convert.', ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.' ) lowercase : int = parser.parse_args() convert_swin_checkpoint(args.swin_name, args.pytorch_dump_folder_path)
364
'''simple docstring'''

from __future__ import annotations

lowercase : Union[str, Any] = list[tuple[int, int]]

lowercase : Optional[Any] = [
    [0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0, 0],
    [1, 0, 1, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 1, 0, 0],
]

lowercase : Any = ([-1, 0], [0, -1], [1, 0], [0, 1])  # up, left, down, right


class A :
    def __init__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , ) -> List[Any]:
        """simple docstring"""
        A : int = pos_x
        A : Optional[Any] = pos_y
        A : Optional[Any] = (pos_y, pos_x)
        A : str = goal_x
        A : Optional[int] = goal_y
        A : List[Any] = g_cost
        A : str = parent
        A : str = self.calculate_heuristic()

    def __lowerCAmelCase ( self ) -> float:
        """simple docstring"""
        A : Optional[int] = abs(self.pos_x - self.goal_x )
        A : Optional[Any] = abs(self.pos_y - self.goal_y )
        return dx + dy

    def __lt__( self , SCREAMING_SNAKE_CASE ) -> bool:
        """simple docstring"""
        return self.f_cost < other.f_cost


class A :
    def __init__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Tuple:
        """simple docstring"""
        A : List[Any] = Node(start[1] , start[0] , goal[1] , goal[0] , 0 , SCREAMING_SNAKE_CASE )
        A : Tuple = Node(goal[1] , goal[0] , goal[1] , goal[0] , 99999 , SCREAMING_SNAKE_CASE )

        A : Optional[Any] = [self.start]
        A : list[Node] = []

        A : Tuple = False

    def __lowerCAmelCase ( self ) -> Path | None:
        """simple docstring"""
        while self.open_nodes:
            # Open Nodes are sorted using __lt__
            self.open_nodes.sort()
            A : Optional[int] = self.open_nodes.pop(0 )

            if current_node.pos == self.target.pos:
                A : Optional[int] = True
                return self.retrace_path(SCREAMING_SNAKE_CASE )

            self.closed_nodes.append(SCREAMING_SNAKE_CASE )
            A : Any = self.get_successors(SCREAMING_SNAKE_CASE )

            for child_node in successors:
                if child_node in self.closed_nodes:
                    continue

                if child_node not in self.open_nodes:
                    self.open_nodes.append(SCREAMING_SNAKE_CASE )
                else:
                    # retrieve the best current path
                    A : str = self.open_nodes.pop(self.open_nodes.index(SCREAMING_SNAKE_CASE ) )

                    if child_node.g_cost < better_node.g_cost:
                        self.open_nodes.append(SCREAMING_SNAKE_CASE )
                    else:
                        self.open_nodes.append(SCREAMING_SNAKE_CASE )

        if not self.reached:
            return [self.start.pos]
        return None

    def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE ) -> list[Node]:
        """simple docstring"""
        A : List[Any] = []
        for action in delta:
            A : List[str] = parent.pos_x + action[1]
            A : Dict = parent.pos_y + action[0]
            if not (0 <= pos_x <= len(grid[0] ) - 1 and 0 <= pos_y <= len(SCREAMING_SNAKE_CASE ) - 1):
                continue

            if grid[pos_y][pos_x] != 0:
                continue

            successors.append(
                Node(
                    SCREAMING_SNAKE_CASE ,
                    SCREAMING_SNAKE_CASE ,
                    self.target.pos_y ,
                    self.target.pos_x ,
                    parent.g_cost + 1 ,
                    SCREAMING_SNAKE_CASE ,
                ) )
        return successors

    def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE ) -> Path:
        """simple docstring"""
        A : int = node
        A : Union[str, Any] = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x) )
            A : int = current_node.parent
        path.reverse()
        return path


if __name__ == "__main__":
    lowercase : Tuple = (0, 0)
    lowercase : List[str] = (len(grid) - 1, len(grid[0]) - 1)
    for elem in grid:
        print(elem)

    print('------')

    lowercase : int = GreedyBestFirst(init, goal)
    lowercase : Union[str, Any] = greedy_bf.search()
    if path:
        for pos_x, pos_y in path:
            lowercase : Dict = 2

        for elem in grid:
            print(elem)
311
0
from typing import TYPE_CHECKING

from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


__a : int = {
    """configuration_trajectory_transformer""": [
        """TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""",
        """TrajectoryTransformerConfig""",
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    __a : Any = [
        """TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """TrajectoryTransformerModel""",
        """TrajectoryTransformerPreTrainedModel""",
        """load_tf_weights_in_trajectory_transformer""",
    ]


if TYPE_CHECKING:
    from .configuration_trajectory_transformer import (
        TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        TrajectoryTransformerConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_trajectory_transformer import (
            TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TrajectoryTransformerModel,
            TrajectoryTransformerPreTrainedModel,
            load_tf_weights_in_trajectory_transformer,
        )

else:
    import sys

    __a : int = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
210
from decimal import Decimal, getcontext
from math import ceil, factorial


def UpperCAmelCase ( lowercase ):
    """simple docstring"""
    if not isinstance(lowercase , lowercase ):
        raise TypeError('''Undefined for non-integers''' )
    elif precision < 1:
        raise ValueError('''Undefined for non-natural numbers''' )

    __lowercase = precision
    __lowercase = ceil(precision / 14 )
    __lowercase = 426880 * Decimal(10005 ).sqrt()
    __lowercase = 1
    __lowercase = 13591409
    __lowercase = Decimal(lowercase )
    for k in range(1 , lowercase ):
        __lowercase = factorial(6 * k ) // (factorial(3 * k ) * factorial(lowercase ) ** 3)
        linear_term += 545140134
        exponential_term *= -262537412640768000
        partial_sum += Decimal(multinomial_term * linear_term ) / exponential_term
    return str(constant_term / partial_sum )[:-1]


if __name__ == "__main__":
    __a : Optional[Any] = 5_0
    print(F'''The first {n} digits of pi is: {pi(n)}''')
210
1
import math


def UpperCamelCase ( snake_case__ : int = 100 ) -> int:
    UpperCamelCase : str = sum(i * i for i in range(1 , n + 1 ) )
    UpperCamelCase : Optional[int] = int(math.pow(sum(range(1 , n + 1 ) ) , 2 ) )
    return square_of_sum - sum_of_squares


if __name__ == "__main__":
    print(F"""{solution() = }""")
103
import argparse import requests import torch # pip3 install salesforce-lavis # I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis from lavis.models import load_model_and_preprocess from PIL import Image from transformers import ( AutoTokenizer, BlipaConfig, BlipaForConditionalGeneration, BlipaProcessor, BlipaVisionConfig, BlipImageProcessor, OPTConfig, TaConfig, ) from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD def UpperCamelCase ( ) -> Tuple: UpperCamelCase : Any = 'https://storage.googleapis.com/sfr-vision-language-research/LAVIS/assets/merlion.png' UpperCamelCase : List[str] = Image.open(requests.get(snake_case__ , stream=snake_case__ ).raw ).convert('RGB' ) return image def UpperCamelCase ( snake_case__ : int ) -> List[Any]: UpperCamelCase : Optional[int] = [] # fmt: off # vision encoder rename_keys.append(('visual_encoder.cls_token', 'vision_model.embeddings.class_embedding') ) rename_keys.append(('visual_encoder.pos_embed', 'vision_model.embeddings.position_embedding') ) rename_keys.append(('visual_encoder.patch_embed.proj.weight', 'vision_model.embeddings.patch_embedding.weight') ) rename_keys.append(('visual_encoder.patch_embed.proj.bias', 'vision_model.embeddings.patch_embedding.bias') ) rename_keys.append(('ln_vision.weight', 'vision_model.post_layernorm.weight') ) rename_keys.append(('ln_vision.bias', 'vision_model.post_layernorm.bias') ) for i in range(config.vision_config.num_hidden_layers ): rename_keys.append((F"""visual_encoder.blocks.{i}.norm1.weight""", F"""vision_model.encoder.layers.{i}.layer_norm1.weight""") ) rename_keys.append((F"""visual_encoder.blocks.{i}.norm1.bias""", F"""vision_model.encoder.layers.{i}.layer_norm1.bias""") ) rename_keys.append((F"""visual_encoder.blocks.{i}.norm2.weight""", F"""vision_model.encoder.layers.{i}.layer_norm2.weight""") ) rename_keys.append((F"""visual_encoder.blocks.{i}.norm2.bias""", F"""vision_model.encoder.layers.{i}.layer_norm2.bias""") ) rename_keys.append((F"""visual_encoder.blocks.{i}.attn.qkv.weight""", F"""vision_model.encoder.layers.{i}.self_attn.qkv.weight""") ) rename_keys.append((F"""visual_encoder.blocks.{i}.attn.proj.weight""", F"""vision_model.encoder.layers.{i}.self_attn.projection.weight""",) ) rename_keys.append((F"""visual_encoder.blocks.{i}.attn.proj.bias""", F"""vision_model.encoder.layers.{i}.self_attn.projection.bias""") ) rename_keys.append((F"""visual_encoder.blocks.{i}.mlp.fc1.weight""", F"""vision_model.encoder.layers.{i}.mlp.fc1.weight""") ) rename_keys.append((F"""visual_encoder.blocks.{i}.mlp.fc1.bias""", F"""vision_model.encoder.layers.{i}.mlp.fc1.bias""") ) rename_keys.append((F"""visual_encoder.blocks.{i}.mlp.fc2.weight""", F"""vision_model.encoder.layers.{i}.mlp.fc2.weight""") ) rename_keys.append((F"""visual_encoder.blocks.{i}.mlp.fc2.bias""", F"""vision_model.encoder.layers.{i}.mlp.fc2.bias""") ) # QFormer rename_keys.append(('Qformer.bert.embeddings.LayerNorm.weight', 'qformer.layernorm.weight') ) rename_keys.append(('Qformer.bert.embeddings.LayerNorm.bias', 'qformer.layernorm.bias') ) # fmt: on return rename_keys def UpperCamelCase ( snake_case__ : Optional[Any] , snake_case__ : Dict , snake_case__ : Optional[int] ) -> Optional[int]: UpperCamelCase : Dict = dct.pop(snake_case__ ) UpperCamelCase : str = val def UpperCamelCase ( snake_case__ : str , snake_case__ : Union[str, Any] ) -> Optional[int]: for i in range(config.vision_config.num_hidden_layers ): # read in original q and v biases 
UpperCamelCase : Optional[Any] = state_dict.pop(F"""visual_encoder.blocks.{i}.attn.q_bias""" ) UpperCamelCase : int = state_dict.pop(F"""visual_encoder.blocks.{i}.attn.v_bias""" ) # next, set bias in the state dict UpperCamelCase : int = torch.cat((q_bias, torch.zeros_like(snake_case__ , requires_grad=snake_case__ ), v_bias) ) UpperCamelCase : Tuple = qkv_bias def UpperCamelCase ( snake_case__ : Tuple , snake_case__ : Optional[Any] ) -> Dict: UpperCamelCase : str = 364 if 'coco' in model_name else 224 UpperCamelCase : Union[str, Any] = BlipaVisionConfig(image_size=snake_case__ ).to_dict() # make sure the models have proper bos_token_id and eos_token_id set (important for generation) # seems like flan-T5 models don't have bos_token_id properly set? if "opt-2.7b" in model_name: UpperCamelCase : List[Any] = OPTConfig.from_pretrained('facebook/opt-2.7b' , eos_token_id=snake_case__ ).to_dict() elif "opt-6.7b" in model_name: UpperCamelCase : int = OPTConfig.from_pretrained('facebook/opt-6.7b' , eos_token_id=snake_case__ ).to_dict() elif "t5-xl" in model_name: UpperCamelCase : List[str] = TaConfig.from_pretrained('google/flan-t5-xl' , dense_act_fn='gelu' , bos_token_id=1 ).to_dict() elif "t5-xxl" in model_name: UpperCamelCase : int = TaConfig.from_pretrained('google/flan-t5-xxl' , dense_act_fn='gelu' , bos_token_id=1 ).to_dict() UpperCamelCase : Any = BlipaConfig(vision_config=snake_case__ , text_config=snake_case__ ) return config, image_size @torch.no_grad() def UpperCamelCase ( snake_case__ : int , snake_case__ : Dict=None , snake_case__ : int=False ) -> List[Any]: UpperCamelCase : str = ( AutoTokenizer.from_pretrained('facebook/opt-2.7b' ) if 'opt' in model_name else AutoTokenizer.from_pretrained('google/flan-t5-xl' ) ) UpperCamelCase : int = tokenizer('\n' , add_special_tokens=snake_case__ ).input_ids[0] UpperCamelCase , UpperCamelCase : Union[str, Any] = get_blipa_config(snake_case__ , eos_token_id=snake_case__ ) UpperCamelCase : Dict = BlipaForConditionalGeneration(snake_case__ ).eval() UpperCamelCase : Optional[Any] = { 'blip2-opt-2.7b': ('blip2_opt', 'pretrain_opt2.7b'), 'blip2-opt-6.7b': ('blip2_opt', 'pretrain_opt6.7b'), 'blip2-opt-2.7b-coco': ('blip2_opt', 'caption_coco_opt2.7b'), 'blip2-opt-6.7b-coco': ('blip2_opt', 'caption_coco_opt6.7b'), 'blip2-flan-t5-xl': ('blip2_t5', 'pretrain_flant5xl'), 'blip2-flan-t5-xl-coco': ('blip2_t5', 'caption_coco_flant5xl'), 'blip2-flan-t5-xxl': ('blip2_t5', 'pretrain_flant5xxl'), } UpperCamelCase , UpperCamelCase : Optional[Any] = model_name_to_original[model_name] # load original model print('Loading original model...' ) UpperCamelCase : List[str] = 'cuda' if torch.cuda.is_available() else 'cpu' UpperCamelCase , UpperCamelCase , UpperCamelCase : Tuple = load_model_and_preprocess( name=snake_case__ , model_type=snake_case__ , is_eval=snake_case__ , device=snake_case__ ) original_model.eval() print('Done!' 
) # update state dict keys UpperCamelCase : List[Any] = original_model.state_dict() UpperCamelCase : Tuple = create_rename_keys(snake_case__ ) for src, dest in rename_keys: rename_key(snake_case__ , snake_case__ , snake_case__ ) # some keys can be renamed efficiently for key, val in state_dict.copy().items(): UpperCamelCase : Optional[Any] = state_dict.pop(snake_case__ ) if key.startswith('Qformer.bert' ): UpperCamelCase : List[str] = key.replace('Qformer.bert' , 'qformer' ) if "attention.self" in key: UpperCamelCase : Tuple = key.replace('self' , 'attention' ) if "opt_proj" in key: UpperCamelCase : Union[str, Any] = key.replace('opt_proj' , 'language_projection' ) if "t5_proj" in key: UpperCamelCase : Optional[Any] = key.replace('t5_proj' , 'language_projection' ) if key.startswith('opt' ): UpperCamelCase : Dict = key.replace('opt' , 'language' ) if key.startswith('t5' ): UpperCamelCase : Dict = key.replace('t5' , 'language' ) UpperCamelCase : Optional[int] = val # read in qv biases read_in_q_v_bias(snake_case__ , snake_case__ ) UpperCamelCase , UpperCamelCase : Any = hf_model.load_state_dict(snake_case__ , strict=snake_case__ ) assert len(snake_case__ ) == 0 assert unexpected_keys == ["qformer.embeddings.position_ids"] UpperCamelCase : List[str] = load_demo_image() UpperCamelCase : str = vis_processors['eval'](snake_case__ ).unsqueeze(0 ).to(snake_case__ ) UpperCamelCase : Any = tokenizer(['\n'] , return_tensors='pt' ).input_ids.to(snake_case__ ) # create processor UpperCamelCase : Optional[Any] = BlipImageProcessor( size={'height': image_size, 'width': image_size} , image_mean=snake_case__ , image_std=snake_case__ ) UpperCamelCase : Any = BlipaProcessor(image_processor=snake_case__ , tokenizer=snake_case__ ) UpperCamelCase : Optional[int] = processor(images=snake_case__ , return_tensors='pt' ).pixel_values.to(snake_case__ ) # make sure processor creates exact same pixel values assert torch.allclose(snake_case__ , snake_case__ ) original_model.to(snake_case__ ) hf_model.to(snake_case__ ) with torch.no_grad(): if "opt" in model_name: UpperCamelCase : Tuple = original_model({'image': original_pixel_values, 'text_input': ['']} ).logits UpperCamelCase : str = hf_model(snake_case__ , snake_case__ ).logits else: UpperCamelCase : Tuple = original_model( {'image': original_pixel_values, 'text_input': ['\n'], 'text_output': ['\n']} ).logits UpperCamelCase : List[Any] = input_ids.masked_fill(input_ids == tokenizer.pad_token_id , -100 ) UpperCamelCase : Optional[int] = hf_model(snake_case__ , snake_case__ , labels=snake_case__ ).logits assert original_logits.shape == logits.shape print('First values of original logits:' , original_logits[0, :3, :3] ) print('First values of HF logits:' , logits[0, :3, :3] ) # assert values if model_name == "blip2-flan-t5-xl": UpperCamelCase : List[str] = torch.tensor( [[-41.5850, -4.4440, -8.9922], [-47.4322, -5.9143, -1.7340]] , device=snake_case__ ) assert torch.allclose(logits[0, :3, :3] , snake_case__ , atol=1E-4 ) elif model_name == "blip2-flan-t5-xl-coco": UpperCamelCase : Union[str, Any] = torch.tensor( [[-57.0109, -9.8967, -12.6280], [-68.6578, -12.7191, -10.5065]] , device=snake_case__ ) else: # cast to same type UpperCamelCase : Optional[int] = logits.dtype assert torch.allclose(original_logits.to(snake_case__ ) , snake_case__ , atol=1E-2 ) print('Looks ok!' ) print('Generating a caption...' 
) UpperCamelCase : Optional[int] = '' UpperCamelCase : Union[str, Any] = tokenizer(snake_case__ , return_tensors='pt' ).input_ids.to(snake_case__ ) UpperCamelCase : str = original_model.generate({'image': original_pixel_values} ) UpperCamelCase : str = hf_model.generate( snake_case__ , snake_case__ , do_sample=snake_case__ , num_beams=5 , max_length=30 , min_length=1 , top_p=0.9 , repetition_penalty=1.0 , length_penalty=1.0 , temperature=1 , ) print('Original generation:' , snake_case__ ) UpperCamelCase : Optional[int] = input_ids.shape[1] UpperCamelCase : Union[str, Any] = processor.batch_decode(outputs[:, prompt_length:] , skip_special_tokens=snake_case__ ) UpperCamelCase : Dict = [text.strip() for text in output_text] print('HF generation:' , snake_case__ ) if pytorch_dump_folder_path is not None: processor.save_pretrained(snake_case__ ) hf_model.save_pretrained(snake_case__ ) if push_to_hub: processor.push_to_hub(F"""nielsr/{model_name}""" ) hf_model.push_to_hub(F"""nielsr/{model_name}""" ) if __name__ == "__main__": __UpperCAmelCase = argparse.ArgumentParser() __UpperCAmelCase = [ '''blip2-opt-2.7b''', '''blip2-opt-6.7b''', '''blip2-opt-2.7b-coco''', '''blip2-opt-6.7b-coco''', '''blip2-flan-t5-xl''', '''blip2-flan-t5-xl-coco''', '''blip2-flan-t5-xxl''', ] parser.add_argument( '''--model_name''', default='''blip2-opt-2.7b''', choices=choices, type=str, help='''Path to hf config.json of model to convert''', ) parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''') parser.add_argument( '''--push_to_hub''', action='''store_true''', help='''Whether to push the model and processor to the hub after converting''', ) __UpperCAmelCase = parser.parse_args() convert_blipa_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
103
1
import unittest

from transformers import load_tool

from .test_tools_common import ToolTesterMixin


a_ = '''
Hugging Face was founded in 2016 by French entrepreneurs Clément Delangue, Julien Chaumond, and Thomas Wolf originally as a company that developed a chatbot app targeted at teenagers.[2] After open-sourcing the model behind the chatbot, the company pivoted to focus on being a platform for machine learning.

In March 2021, Hugging Face raised $40 million in a Series B funding round.[3]

On April 28, 2021, the company launched the BigScience Research Workshop in collaboration with several other research groups to release an open large language model.[4] In 2022, the workshop concluded with the announcement of BLOOM, a multilingual large language model with 176 billion parameters.[5]
'''


class lowercase__ ( unittest.TestCase, __snake_case ):
    def UpperCAmelCase ( self )-> Optional[int]:
        '''simple docstring'''
        lowerCAmelCase__ = load_tool("text-question-answering" )
        self.tool.setup()
        lowerCAmelCase__ = load_tool("text-question-answering" , remote=__SCREAMING_SNAKE_CASE )

    def UpperCAmelCase ( self )-> Tuple:
        '''simple docstring'''
        lowerCAmelCase__ = self.tool(__SCREAMING_SNAKE_CASE , "What did Hugging Face do in April 2021?" )
        self.assertEqual(__SCREAMING_SNAKE_CASE , "launched the BigScience Research Workshop" )

    def UpperCAmelCase ( self )-> Optional[int]:
        '''simple docstring'''
        lowerCAmelCase__ = self.remote_tool(__SCREAMING_SNAKE_CASE , "What did Hugging Face do in April 2021?" )
        self.assertEqual(__SCREAMING_SNAKE_CASE , "launched the BigScience Research Workshop" )

    def UpperCAmelCase ( self )-> Optional[int]:
        '''simple docstring'''
        lowerCAmelCase__ = self.tool(text=__SCREAMING_SNAKE_CASE , question="What did Hugging Face do in April 2021?" )
        self.assertEqual(__SCREAMING_SNAKE_CASE , "launched the BigScience Research Workshop" )

    def UpperCAmelCase ( self )-> int:
        '''simple docstring'''
        lowerCAmelCase__ = self.remote_tool(text=__SCREAMING_SNAKE_CASE , question="What did Hugging Face do in April 2021?" )
        self.assertEqual(__SCREAMING_SNAKE_CASE , "launched the BigScience Research Workshop" )
340
"""simple docstring""" from cva import destroyAllWindows, imread, imshow, waitKey def lowerCamelCase__ ( _lowerCamelCase : Tuple ) -> Dict: # getting number of pixels in the image lowerCamelCase_ , lowerCamelCase_ = img.shape[0], img.shape[1] # converting each pixel's color to its negative for i in range(_lowerCamelCase ): for j in range(_lowerCamelCase ): lowerCamelCase_ = [255, 255, 255] - img[i][j] return img if __name__ == "__main__": # read original image _SCREAMING_SNAKE_CASE : List[Any] = imread('''image_data/lena.jpg''', 1) # convert to its negative _SCREAMING_SNAKE_CASE : List[Any] = convert_to_negative(img) # show result image imshow('''negative of original image''', img) waitKey(0) destroyAllWindows()
183
0
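The negative-image row above inverts each pixel with nested Python loops; the same transform is usually written as a single NumPy broadcast. A small sketch, assuming a uint8 image array:

# Vectorized pixel negative: broadcasting applies the subtraction to every
# channel of every pixel at once, with no explicit loops.
import numpy as np

def to_negative(img: np.ndarray) -> np.ndarray:
    return 255 - img

demo = np.array([[[0, 128, 255]]], dtype=np.uint8)
print(to_negative(demo))  # [[[255 127   0]]]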
'''simple docstring''' def __lowerCamelCase ( __snake_case : str, __snake_case : list[str] ) -> str: """simple docstring""" A__ : int ="""""" for word_or_phrase in separated: if not isinstance(__snake_case, __snake_case ): raise Exception("""join() accepts only strings to be joined""" ) joined += word_or_phrase + separator return joined.strip(__snake_case ) if __name__ == "__main__": from doctest import testmod testmod()
136
'''simple docstring''' import math __snake_case : List[Any] = 10 __snake_case : Dict = 7 __snake_case : str = BALLS_PER_COLOUR * NUM_COLOURS def __lowerCamelCase ( __snake_case : int = 20 ) -> str: """simple docstring""" A__ : Union[str, Any] =math.comb(__snake_case, __snake_case ) A__ : str =math.comb(NUM_BALLS - BALLS_PER_COLOUR, __snake_case ) A__ : Optional[int] =NUM_COLOURS * (1 - missing_colour / total) return f"{result:.9f}" if __name__ == "__main__": print(solution(20))
136
1
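The Project Euler row above computes the expected number of distinct colours seen among 20 draws. A cleaned-up sketch of that expectation, using linearity of expectation over the 7 colours (the names here are illustrative):

# For each colour, P(colour absent from the sample) = C(N - b, taken) / C(N, taken),
# so E[#colours seen] = C * (1 - that probability) by linearity of expectation.
import math

NUM_COLOURS, BALLS_PER_COLOUR = 7, 10
NUM_BALLS = NUM_COLOURS * BALLS_PER_COLOUR

def expected_colours(taken: int = 20) -> str:
    missing = math.comb(NUM_BALLS - BALLS_PER_COLOUR, taken) / math.comb(NUM_BALLS, taken)
    return f"{NUM_COLOURS * (1 - missing):.9f}"

print(expected_colours(20))  # roughly 6.82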
import json import os import unittest from transformers import BatchEncoding, LEDTokenizer, LEDTokenizerFast from transformers.models.led.tokenization_led import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers, require_torch from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class UpperCAmelCase_ ( UpperCamelCase , unittest.TestCase ): '''simple docstring''' __A : Dict = LEDTokenizer __A : List[Any] = LEDTokenizerFast __A : str = True def _snake_case ( self ): """simple docstring""" super().setUp() lowerCamelCase : str = [ "l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "\u0120", "\u0120l", "\u0120n", "\u0120lo", "\u0120low", "er", "\u0120lowest", "\u0120newer", "\u0120wider", "<unk>", ] lowerCamelCase : Union[str, Any] = dict(zip(__A , range(len(__A ) ) ) ) lowerCamelCase : Any = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""] lowerCamelCase : Dict = {"unk_token": "<unk>"} lowerCamelCase : Optional[int] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] ) lowerCamelCase : Dict = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] ) with open(self.vocab_file , "w" , encoding="utf-8" ) as fp: fp.write(json.dumps(__A ) + "\n" ) with open(self.merges_file , "w" , encoding="utf-8" ) as fp: fp.write("\n".join(__A ) ) def _snake_case ( self , **__A ): """simple docstring""" kwargs.update(self.special_tokens_map ) return self.tokenizer_class.from_pretrained(self.tmpdirname , **__A ) def _snake_case ( self , **__A ): """simple docstring""" kwargs.update(self.special_tokens_map ) return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **__A ) def _snake_case ( self , __A ): """simple docstring""" return "lower newer", "lower newer" @cached_property def _snake_case ( self ): """simple docstring""" return LEDTokenizer.from_pretrained("allenai/led-base-16384" ) @cached_property def _snake_case ( self ): """simple docstring""" return LEDTokenizerFast.from_pretrained("allenai/led-base-16384" ) @require_torch def _snake_case ( self ): """simple docstring""" lowerCamelCase : List[Any] = ["A long paragraph for summarization.", "Another paragraph for summarization."] lowerCamelCase : Union[str, Any] = [0, 250, 251, 1_7818, 13, 3_9186, 1938, 4, 2] for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: lowerCamelCase : int = tokenizer(__A , max_length=len(__A ) , padding=__A , return_tensors="pt" ) self.assertIsInstance(__A , __A ) self.assertEqual((2, 9) , batch.input_ids.shape ) self.assertEqual((2, 9) , batch.attention_mask.shape ) lowerCamelCase : Tuple = batch.input_ids.tolist()[0] self.assertListEqual(__A , __A ) @require_torch def _snake_case ( self ): """simple docstring""" lowerCamelCase : Optional[Any] = ["A long paragraph for summarization.", "Another paragraph for summarization."] for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: lowerCamelCase : Union[str, Any] = tokenizer(__A , padding=__A , return_tensors="pt" ) self.assertIn("input_ids" , __A ) self.assertIn("attention_mask" , __A ) self.assertNotIn("labels" , __A ) self.assertNotIn("decoder_attention_mask" , __A ) @require_torch def _snake_case ( self ): """simple docstring""" lowerCamelCase : Optional[Any] = [ "Summary of the text.", "Another summary.", ] for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: lowerCamelCase : str = tokenizer(text_target=__A , max_length=32 , padding="max_length" , return_tensors="pt" ) 
self.assertEqual(32 , targets["input_ids"].shape[1] ) @require_torch def _snake_case ( self ): """simple docstring""" for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: lowerCamelCase : Tuple = tokenizer( ["I am a small frog" * 1024, "I am a small frog"] , padding=__A , truncation=__A , return_tensors="pt" ) self.assertIsInstance(__A , __A ) self.assertEqual(batch.input_ids.shape , (2, 5122) ) @require_torch def _snake_case ( self ): """simple docstring""" lowerCamelCase : Optional[Any] = ["A long paragraph for summarization."] lowerCamelCase : str = [ "Summary of the text.", ] for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: lowerCamelCase : Union[str, Any] = tokenizer(__A , return_tensors="pt" ) lowerCamelCase : List[str] = tokenizer(text_target=__A , return_tensors="pt" ) lowerCamelCase : Tuple = inputs["input_ids"] lowerCamelCase : List[Any] = targets["input_ids"] self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item() ) self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item() ) self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item() ) self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item() ) @require_torch def _snake_case ( self ): """simple docstring""" for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: lowerCamelCase : Dict = ["Summary of the text.", "Another summary."] lowerCamelCase : Union[str, Any] = [[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, -1, -1]] lowerCamelCase : Tuple = tokenizer(__A , padding=__A ) lowerCamelCase : Union[str, Any] = [[0] * len(__A ) for x in encoded_output["input_ids"]] lowerCamelCase : Union[str, Any] = tokenizer.pad(__A ) self.assertSequenceEqual(outputs["global_attention_mask"] , __A ) def _snake_case ( self ): """simple docstring""" pass def _snake_case ( self ): """simple docstring""" for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})""" ): lowerCamelCase : Dict = self.rust_tokenizer_class.from_pretrained(__A , **__A ) lowerCamelCase : Union[str, Any] = self.tokenizer_class.from_pretrained(__A , **__A ) lowerCamelCase : Optional[int] = "A, <mask> AllenNLP sentence." lowerCamelCase : List[Any] = tokenizer_r.encode_plus(__A , add_special_tokens=__A , return_token_type_ids=__A ) lowerCamelCase : int = tokenizer_p.encode_plus(__A , add_special_tokens=__A , return_token_type_ids=__A ) self.assertEqual(sum(tokens_r["token_type_ids"] ) , sum(tokens_p["token_type_ids"] ) ) self.assertEqual( sum(tokens_r["attention_mask"] ) / len(tokens_r["attention_mask"] ) , sum(tokens_p["attention_mask"] ) / len(tokens_p["attention_mask"] ) , ) lowerCamelCase : Optional[int] = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"] ) lowerCamelCase : Optional[Any] = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"] ) self.assertSequenceEqual(tokens_p["input_ids"] , [0, 250, 6, 5_0264, 3823, 487, 2_1992, 3645, 4, 2] ) self.assertSequenceEqual(tokens_r["input_ids"] , [0, 250, 6, 5_0264, 3823, 487, 2_1992, 3645, 4, 2] ) self.assertSequenceEqual( __A , ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"] ) self.assertSequenceEqual( __A , ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"] )
283
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_torch_available, ) _snake_case = { '''configuration_vision_encoder_decoder''': ['''VisionEncoderDecoderConfig''', '''VisionEncoderDecoderOnnxConfig'''] } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _snake_case = ['''VisionEncoderDecoderModel'''] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _snake_case = ['''TFVisionEncoderDecoderModel'''] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _snake_case = ['''FlaxVisionEncoderDecoderModel'''] if TYPE_CHECKING: from .configuration_vision_encoder_decoder import VisionEncoderDecoderConfig, VisionEncoderDecoderOnnxConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_vision_encoder_decoder import VisionEncoderDecoderModel try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_vision_encoder_decoder import TFVisionEncoderDecoderModel try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_vision_encoder_decoder import FlaxVisionEncoderDecoderModel else: import sys _snake_case = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
283
1
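The vision-encoder-decoder row above registers a _LazyModule so the heavy framework-specific submodules are imported only on first attribute access. A minimal sketch of the same idea using module-level __getattr__ (PEP 562); the real class also guards optional dependencies and TYPE_CHECKING re-exports, and _import_structure below is a toy stand-in:

# Lazy imports via PEP 562: attribute lookups on the module trigger the real
# import only when the symbol is first requested.
import importlib

_import_structure = {"json": ["dumps"], "math": ["sqrt"]}

def __getattr__(name):
    for module_name, symbols in _import_structure.items():
        if name in symbols:
            return getattr(importlib.import_module(module_name), name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")

# When this lives in e.g. mymodule.py, `from mymodule import dumps` only
# imports json at that point.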
import enum import shutil import sys lowerCamelCase_ : int = shutil.get_terminal_size() lowerCamelCase_ : List[Any] = {"UP": "A", "DOWN": "B", "RIGHT": "C", "LEFT": "D"} class __A ( enum.Enum ): """simple docstring""" __lowerCAmelCase = 0 __lowerCAmelCase = 1 def _A ( lowercase , lowercase="" ): """simple docstring""" sys.stdout.write(str(lowerCamelCase__ ) + end ) sys.stdout.flush() def _A ( lowercase , lowercase , lowercase="" ): """simple docstring""" forceWrite(f'''\u001b[{color}m{content}\u001b[0m''' , lowerCamelCase__ ) def _A ( ): """simple docstring""" forceWrite('''\r''' ) def _A ( lowercase , lowercase ): """simple docstring""" forceWrite(f'''\033[{num_lines}{CURSOR_TO_CHAR[direction.upper()]}''' ) def _A ( ): """simple docstring""" forceWrite(''' ''' * TERMINAL_WIDTH ) reset_cursor() def _A ( ): """simple docstring""" reset_cursor() forceWrite('''-''' * TERMINAL_WIDTH )
350
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_tokenizers_available, is_torch_available, is_vision_available, ) lowerCamelCase_ : Union[str, Any] = {"""processing_layoutxlm""": ["""LayoutXLMProcessor"""]} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase_ : Optional[int] = ["""LayoutXLMTokenizer"""] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase_ : Dict = ["""LayoutXLMTokenizerFast"""] if TYPE_CHECKING: from .processing_layoutxlm import LayoutXLMProcessor try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_layoutxlm import LayoutXLMTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_layoutxlm_fast import LayoutXLMTokenizerFast else: import sys lowerCamelCase_ : Union[str, Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
215
0
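The terminal-helper row above moves the cursor by writing ANSI escape sequences. A self-contained sketch of that mechanism, assuming an ANSI-compatible terminal: the sequence "ESC[ n A/B/C/D" moves the cursor up/down/forward/back n cells.

import sys

CURSOR_TO_CHAR = {"UP": "A", "DOWN": "B", "RIGHT": "C", "LEFT": "D"}

def move_cursor(num_lines: int, direction: str) -> None:
    # Emit CSI <n> <letter>; flush so the move happens immediately.
    sys.stdout.write(f"\033[{num_lines}{CURSOR_TO_CHAR[direction.upper()]}")
    sys.stdout.flush()

move_cursor(1, "up")  # repositions the cursor; prints no visible text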
"""simple docstring""" def lowercase ( lowerCAmelCase__ : Dict = 1000 ) -> List[Any]: __a = 2**power __a = 0 while n: __a = r + n % 10, n // 10 return r if __name__ == "__main__": print(solution(int(str(input()).strip())))
45
'''simple docstring''' def lowercase ( __magic_name__ ): '''simple docstring''' if number < 0: raise ValueError("number must not be negative" ) return number & (number - 1) == 0 if __name__ == "__main__": import doctest doctest.testmod()
311
0
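The two rows above compute the digit sum of 2**power and test for powers of two. A runnable sketch of both, with distinct variable names so the divmod loop is visible:

def digit_sum_of_power(power: int = 1000) -> int:
    # Peel off the last decimal digit each iteration and accumulate it.
    n, r = 2 ** power, 0
    while n:
        r, n = r + n % 10, n // 10
    return r

def is_power_of_two(n: int) -> bool:
    # A power of two has exactly one set bit, so n & (n - 1) clears it to zero.
    return n > 0 and n & (n - 1) == 0

print(digit_sum_of_power(15))  # 26, since 2**15 = 32768
print(is_power_of_two(64), is_power_of_two(6))  # True False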
'''simple docstring''' from __future__ import annotations from collections.abc import Sequence from typing import Literal def SCREAMING_SNAKE_CASE__( _UpperCamelCase : str , _UpperCamelCase : str ) -> str | Literal[False]: '''simple docstring''' UpperCamelCase__ = list(_UpperCamelCase ) UpperCamelCase__ = list(_UpperCamelCase ) UpperCamelCase__ = 0 for i in range(len(_UpperCamelCase ) ): if lista[i] != lista[i]: count += 1 UpperCamelCase__ = "_" if count > 1: return False else: return "".join(_UpperCamelCase ) def SCREAMING_SNAKE_CASE__( _UpperCamelCase : list[str] ) -> list[str]: '''simple docstring''' UpperCamelCase__ = [] while True: UpperCamelCase__ = ["$"] * len(_UpperCamelCase ) UpperCamelCase__ = [] for i in range(len(_UpperCamelCase ) ): for j in range(i + 1 , len(_UpperCamelCase ) ): UpperCamelCase__ = compare_string(binary[i] , binary[j] ) if k is False: UpperCamelCase__ = "*" UpperCamelCase__ = "*" temp.append("X" ) for i in range(len(_UpperCamelCase ) ): if checka[i] == "$": pi.append(binary[i] ) if len(_UpperCamelCase ) == 0: return pi UpperCamelCase__ = list(set(_UpperCamelCase ) ) def SCREAMING_SNAKE_CASE__( _UpperCamelCase : int , _UpperCamelCase : Sequence[float] ) -> list[str]: '''simple docstring''' UpperCamelCase__ = [] for minterm in minterms: UpperCamelCase__ = "" for _ in range(_UpperCamelCase ): UpperCamelCase__ = str(minterm % 2 ) + string minterm //= 2 temp.append(_UpperCamelCase ) return temp def SCREAMING_SNAKE_CASE__( _UpperCamelCase : str , _UpperCamelCase : str , _UpperCamelCase : int ) -> bool: '''simple docstring''' UpperCamelCase__ = list(_UpperCamelCase ) UpperCamelCase__ = list(_UpperCamelCase ) UpperCamelCase__ = 0 for i in range(len(_UpperCamelCase ) ): if lista[i] != lista[i]: count_n += 1 return count_n == count def SCREAMING_SNAKE_CASE__( _UpperCamelCase : list[list[int]] , _UpperCamelCase : list[str] ) -> list[str]: '''simple docstring''' UpperCamelCase__ = [] UpperCamelCase__ = [0] * len(_UpperCamelCase ) for i in range(len(chart[0] ) ): UpperCamelCase__ = 0 UpperCamelCase__ = -1 for j in range(len(_UpperCamelCase ) ): if chart[j][i] == 1: count += 1 UpperCamelCase__ = j if count == 1: UpperCamelCase__ = 1 for i in range(len(_UpperCamelCase ) ): if select[i] == 1: for j in range(len(chart[0] ) ): if chart[i][j] == 1: for k in range(len(_UpperCamelCase ) ): UpperCamelCase__ = 0 temp.append(prime_implicants[i] ) while True: UpperCamelCase__ = 0 UpperCamelCase__ = -1 UpperCamelCase__ = 0 for i in range(len(_UpperCamelCase ) ): UpperCamelCase__ = chart[i].count(1 ) if count_n > max_n: UpperCamelCase__ = count_n UpperCamelCase__ = i if max_n == 0: return temp temp.append(prime_implicants[rem] ) for i in range(len(chart[0] ) ): if chart[rem][i] == 1: for j in range(len(_UpperCamelCase ) ): UpperCamelCase__ = 0 def SCREAMING_SNAKE_CASE__( _UpperCamelCase : list[str] , _UpperCamelCase : list[str] ) -> list[list[int]]: '''simple docstring''' UpperCamelCase__ = [[0 for x in range(len(_UpperCamelCase ) )] for x in range(len(_UpperCamelCase ) )] for i in range(len(_UpperCamelCase ) ): UpperCamelCase__ = prime_implicants[i].count("_" ) for j in range(len(_UpperCamelCase ) ): if is_for_table(prime_implicants[i] , binary[j] , _UpperCamelCase ): UpperCamelCase__ = 1 return chart def SCREAMING_SNAKE_CASE__( ) -> None: '''simple docstring''' UpperCamelCase__ = int(input("Enter the no. 
of variables\n" ) ) UpperCamelCase__ = [ float(_UpperCamelCase ) for x in input( "Enter the decimal representation of Minterms 'Spaces Separated'\n" ).split() ] UpperCamelCase__ = decimal_to_binary(_UpperCamelCase , _UpperCamelCase ) UpperCamelCase__ = check(_UpperCamelCase ) print("Prime Implicants are:" ) print(_UpperCamelCase ) UpperCamelCase__ = prime_implicant_chart(_UpperCamelCase , _UpperCamelCase ) UpperCamelCase__ = selection(_UpperCamelCase , _UpperCamelCase ) print("Essential Prime Implicants are:" ) print(_UpperCamelCase ) if __name__ == "__main__": import doctest doctest.testmod() main()
31
'''simple docstring''' from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging __lowercase: int = logging.get_logger(__name__) __lowercase: str = { "hustvl/yolos-small": "https://huggingface.co/hustvl/yolos-small/resolve/main/config.json", # See all YOLOS models at https://huggingface.co/models?filter=yolos } class UpperCAmelCase ( SCREAMING_SNAKE_CASE__): _lowerCamelCase : List[str] = 'yolos' def __init__( self : List[str], a_ : Optional[int]=768, a_ : Optional[int]=12, a_ : Any=12, a_ : List[str]=3072, a_ : Any="gelu", a_ : int=0.0, a_ : List[Any]=0.0, a_ : Dict=0.02, a_ : Optional[int]=1e-1_2, a_ : List[Any]=[512, 864], a_ : Any=16, a_ : Any=3, a_ : Tuple=True, a_ : List[str]=100, a_ : Union[str, Any]=True, a_ : Any=False, a_ : List[str]=1, a_ : Tuple=5, a_ : Union[str, Any]=2, a_ : int=5, a_ : Union[str, Any]=2, a_ : Dict=0.1, **a_ : Dict, ): """simple docstring""" super().__init__(**a_ ) UpperCamelCase__ = hidden_size UpperCamelCase__ = num_hidden_layers UpperCamelCase__ = num_attention_heads UpperCamelCase__ = intermediate_size UpperCamelCase__ = hidden_act UpperCamelCase__ = hidden_dropout_prob UpperCamelCase__ = attention_probs_dropout_prob UpperCamelCase__ = initializer_range UpperCamelCase__ = layer_norm_eps UpperCamelCase__ = image_size UpperCamelCase__ = patch_size UpperCamelCase__ = num_channels UpperCamelCase__ = qkv_bias UpperCamelCase__ = num_detection_tokens UpperCamelCase__ = use_mid_position_embeddings UpperCamelCase__ = auxiliary_loss # Hungarian matcher UpperCamelCase__ = class_cost UpperCamelCase__ = bbox_cost UpperCamelCase__ = giou_cost # Loss coefficients UpperCamelCase__ = bbox_loss_coefficient UpperCamelCase__ = giou_loss_coefficient UpperCamelCase__ = eos_coefficient class UpperCAmelCase ( SCREAMING_SNAKE_CASE__): _lowerCamelCase : Union[str, Any] = version.parse('1.11') @property def lowercase_ ( self : str ): """simple docstring""" return OrderedDict( [ ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}), ] ) @property def lowercase_ ( self : Tuple ): """simple docstring""" return 1e-4 @property def lowercase_ ( self : Optional[int] ): """simple docstring""" return 12
31
1
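The Quine-McCluskey row above merges minterms through compare_string, which replaces a single differing bit with an underscore. A compact sketch of that combining step (combine_implicants is an illustrative name):

def combine_implicants(a: str, b: str):
    # Two implicants merge only when they differ in exactly one position;
    # that position becomes a '_' don't-care in the combined implicant.
    diff = [i for i in range(len(a)) if a[i] != b[i]]
    if len(diff) != 1:
        return None  # not adjacent; cannot be merged
    i = diff[0]
    return a[:i] + "_" + a[i + 1:]

print(combine_implicants("0110", "0100"))  # 01_0
print(combine_implicants("0110", "1001"))  # None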
# Logistic Regression from scratch # In[62]: # In[63]: # importing all the required libraries import numpy as np from matplotlib import pyplot as plt from sklearn import datasets def UpperCamelCase( __UpperCamelCase : List[Any] ): return 1 / (1 + np.exp(-z )) def UpperCamelCase( __UpperCamelCase : List[str] ,__UpperCamelCase : Optional[int] ): return (-y * np.log(__UpperCamelCase ) - (1 - y) * np.log(1 - h )).mean() def UpperCamelCase( __UpperCamelCase : Optional[int] ,__UpperCamelCase : Dict ,__UpperCamelCase : Dict ): lowerCAmelCase_ : str = np.dot(__UpperCamelCase ,__UpperCamelCase ) return np.sum(y * scores - np.log(1 + np.exp(__UpperCamelCase ) ) ) def UpperCamelCase( __UpperCamelCase : Optional[Any] ,__UpperCamelCase : Optional[int] ,__UpperCamelCase : Optional[int] ,__UpperCamelCase : Optional[int]=70000 ): lowerCAmelCase_ : Tuple = np.zeros(x.shape[1] ) for iterations in range(__UpperCamelCase ): lowerCAmelCase_ : Any = np.dot(__UpperCamelCase ,__UpperCamelCase ) lowerCAmelCase_ : List[str] = sigmoid_function(__UpperCamelCase ) lowerCAmelCase_ : Optional[Any] = np.dot(x.T ,h - y ) / y.size lowerCAmelCase_ : Optional[int] = theta - alpha * gradient # updating the weights lowerCAmelCase_ : int = np.dot(__UpperCamelCase ,__UpperCamelCase ) lowerCAmelCase_ : Tuple = sigmoid_function(__UpperCamelCase ) lowerCAmelCase_ : Optional[int] = cost_function(__UpperCamelCase ,__UpperCamelCase ) if iterations % 100 == 0: print(f"""loss: {j} \t""" ) # printing the loss after every 100 iterations return theta # In[68]: if __name__ == "__main__": A__ : int = datasets.load_iris() A__ : Optional[Any] = iris.data[:, :2] A__ : Union[str, Any] = (iris.target != 0) * 1 A__ : List[str] = 0.1 A__ : List[str] = logistic_reg(alpha, x, y, max_iterations=7_0000) print('''theta: ''', theta) # printing the theta i.e our weights vector def UpperCamelCase( __UpperCamelCase : Tuple ): return sigmoid_function( np.dot(__UpperCamelCase ,__UpperCamelCase ) ) # predicting the value of probability from the logistic regression algorithm plt.figure(figsize=(10, 6)) plt.scatter(x[y == 0][:, 0], x[y == 0][:, 1], color='''b''', label='''0''') plt.scatter(x[y == 1][:, 0], x[y == 1][:, 1], color='''r''', label='''1''') ((A__) , (A__)) : Optional[int] = (x[:, 0].min(), x[:, 0].max()) ((A__) , (A__)) : Optional[Any] = (x[:, 1].min(), x[:, 1].max()) ((A__) , (A__)) : Optional[int] = np.meshgrid(np.linspace(xa_min, xa_max), np.linspace(xa_min, xa_max)) A__ : Tuple = np.c_[xxa.ravel(), xxa.ravel()] A__ : Optional[Any] = predict_prob(grid).reshape(xxa.shape) plt.contour(xxa, xxa, probs, [0.5], linewidths=1, colors='''black''') plt.legend() plt.show()
103
import argparse import os import re import packaging.version A__ : Dict = '''examples/''' A__ : Any = { '''examples''': (re.compile(R'''^check_min_version\("[^"]+"\)\s*$''', re.MULTILINE), '''check_min_version("VERSION")\n'''), '''init''': (re.compile(R'''^__version__\s+=\s+"([^"]+)"\s*$''', re.MULTILINE), '''__version__ = "VERSION"\n'''), '''setup''': (re.compile(R'''^(\s*)version\s*=\s*"[^"]+",''', re.MULTILINE), R'''\1version="VERSION",'''), '''doc''': (re.compile(R'''^(\s*)release\s*=\s*"[^"]+"$''', re.MULTILINE), '''release = "VERSION"\n'''), } A__ : Any = { '''init''': '''src/transformers/__init__.py''', '''setup''': '''setup.py''', } A__ : Any = '''README.md''' def UpperCamelCase( __UpperCamelCase : int ,__UpperCamelCase : List[Any] ,__UpperCamelCase : List[Any] ): with open(__UpperCamelCase ,'''r''' ,encoding='''utf-8''' ,newline='''\n''' ) as f: lowerCAmelCase_ : Tuple = f.read() lowerCAmelCase_ , lowerCAmelCase_ : Dict = REPLACE_PATTERNS[pattern] lowerCAmelCase_ : Tuple = replace.replace('''VERSION''' ,__UpperCamelCase ) lowerCAmelCase_ : Optional[int] = re_pattern.sub(__UpperCamelCase ,__UpperCamelCase ) with open(__UpperCamelCase ,'''w''' ,encoding='''utf-8''' ,newline='''\n''' ) as f: f.write(__UpperCamelCase ) def UpperCamelCase( __UpperCamelCase : Union[str, Any] ): for folder, directories, fnames in os.walk(__UpperCamelCase ): # Removing some of the folders with non-actively maintained examples from the walk if "research_projects" in directories: directories.remove('''research_projects''' ) if "legacy" in directories: directories.remove('''legacy''' ) for fname in fnames: if fname.endswith('''.py''' ): update_version_in_file(os.path.join(__UpperCamelCase ,__UpperCamelCase ) ,__UpperCamelCase ,pattern='''examples''' ) def UpperCamelCase( __UpperCamelCase : int ,__UpperCamelCase : List[Any]=False ): for pattern, fname in REPLACE_FILES.items(): update_version_in_file(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) if not patch: update_version_in_examples(__UpperCamelCase ) def UpperCamelCase( ): lowerCAmelCase_ : List[str] = '''🤗 Transformers currently provides the following architectures''' lowerCAmelCase_ : List[Any] = '''1. Want to contribute a new model?''' with open(__UpperCamelCase ,'''r''' ,encoding='''utf-8''' ,newline='''\n''' ) as f: lowerCAmelCase_ : Union[str, Any] = f.readlines() # Find the start of the list. lowerCAmelCase_ : int = 0 while not lines[start_index].startswith(_start_prompt ): start_index += 1 start_index += 1 lowerCAmelCase_ : str = start_index # Update the lines in the model list. 
while not lines[index].startswith(_end_prompt ): if lines[index].startswith('''1.''' ): lowerCAmelCase_ : int = lines[index].replace( '''https://huggingface.co/docs/transformers/main/model_doc''' ,'''https://huggingface.co/docs/transformers/model_doc''' ,) index += 1 with open(__UpperCamelCase ,'''w''' ,encoding='''utf-8''' ,newline='''\n''' ) as f: f.writelines(__UpperCamelCase ) def UpperCamelCase( ): with open(REPLACE_FILES['''init'''] ,'''r''' ) as f: lowerCAmelCase_ : Optional[Any] = f.read() lowerCAmelCase_ : Dict = REPLACE_PATTERNS['''init'''][0].search(__UpperCamelCase ).groups()[0] return packaging.version.parse(__UpperCamelCase ) def UpperCamelCase( __UpperCamelCase : Dict=False ): lowerCAmelCase_ : Union[str, Any] = get_version() if patch and default_version.is_devrelease: raise ValueError('''Can\'t create a patch version from the dev branch, checkout a released version!''' ) if default_version.is_devrelease: lowerCAmelCase_ : List[str] = default_version.base_version elif patch: lowerCAmelCase_ : int = f"""{default_version.major}.{default_version.minor}.{default_version.micro + 1}""" else: lowerCAmelCase_ : int = f"""{default_version.major}.{default_version.minor + 1}.0""" # Now let's ask nicely if that's the right one. lowerCAmelCase_ : Optional[Any] = input(f"""Which version are you releasing? [{default_version}]""" ) if len(__UpperCamelCase ) == 0: lowerCAmelCase_ : List[str] = default_version print(f"""Updating version to {version}.""" ) global_version_update(__UpperCamelCase ,patch=__UpperCamelCase ) if not patch: print('''Cleaning main README, don\'t forget to run `make fix-copies`.''' ) clean_main_ref_in_model_list() def UpperCamelCase( ): lowerCAmelCase_ : Any = get_version() lowerCAmelCase_ : int = f"""{current_version.major}.{current_version.minor + 1}.0.dev0""" lowerCAmelCase_ : Optional[Any] = current_version.base_version # Check with the user we got that right. lowerCAmelCase_ : Optional[Any] = input(f"""Which version are we developing now? [{dev_version}]""" ) if len(__UpperCamelCase ) == 0: lowerCAmelCase_ : int = dev_version print(f"""Updating version to {version}.""" ) global_version_update(__UpperCamelCase ) print('''Cleaning main README, don\'t forget to run `make fix-copies`.''' ) clean_main_ref_in_model_list() if __name__ == "__main__": A__ : Dict = argparse.ArgumentParser() parser.add_argument('''--post_release''', action='''store_true''', help='''Whether this is pre or post release.''') parser.add_argument('''--patch''', action='''store_true''', help='''Whether or not this is a patch release.''') A__ : Optional[int] = parser.parse_args() if not args.post_release: pre_release_work(patch=args.patch) elif args.patch: print('''Nothing to do after a patch :-)''') else: post_release_work()
103
1
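The logistic-regression row above runs plain gradient descent on the log-loss. A compact sketch of the same update rule with distinct names; alpha and the iteration count are illustrative choices, not the row's values:

import numpy as np

def sigmoid(z):
    return 1.0 / (1.0 + np.exp(-z))

def fit_logistic(x, y, alpha=0.1, iterations=1000):
    theta = np.zeros(x.shape[1])
    for _ in range(iterations):
        h = sigmoid(x @ theta)
        theta -= alpha * (x.T @ (h - y)) / y.size  # gradient of the log-loss
    return theta

# Toy data: first column is a bias feature, labels split on the second column.
x = np.array([[1.0, 0.5], [1.0, 1.5], [1.0, 3.0], [1.0, 4.0]])
y = np.array([0, 0, 1, 1])
print(fit_logistic(x, y))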
'''simple docstring''' import numpy as np import torch from torch.nn import CrossEntropyLoss from transformers import AutoModelForCausalLM, AutoTokenizer import datasets from datasets import logging lowerCAmelCase_ : List[Any] = '\\n\n' lowerCAmelCase_ : Optional[Any] = '\nPerplexity (PPL) is one of the most common metrics for evaluating language models.\nIt is defined as the exponentiated average negative log-likelihood of a sequence.\n\nFor more information, see https://huggingface.co/docs/transformers/perplexity\n' lowerCAmelCase_ : Optional[int] = '\nArgs:\n model_id (str): model used for calculating Perplexity\n NOTE: Perplexity can only be calculated for causal language models.\n This includes models such as gpt2, causal variations of bert,\n causal versions of t5, and more (the full list can be found\n in the AutoModelForCausalLM documentation here:\n https://huggingface.co/docs/transformers/master/en/model_doc/auto#transformers.AutoModelForCausalLM )\n\n input_texts (list of str): input text, each separate text snippet\n is one list entry.\n batch_size (int): the batch size to run texts through the model. Defaults to 16.\n add_start_token (bool): whether to add the start token to the texts,\n so the perplexity can include the probability of the first word. Defaults to True.\n device (str): device to run on, defaults to \'cuda\' when available\nReturns:\n perplexity: dictionary containing the perplexity scores for the texts\n in the input list, as well as the mean perplexity. If one of the input texts is\n longer than the max input length of the model, then it is truncated to the\n max length for the perplexity computation.\nExamples:\n Example 1:\n >>> perplexity = datasets.load_metric("perplexity")\n >>> input_texts = ["lorem ipsum", "Happy Birthday!", "Bienvenue"]\n >>> results = perplexity.compute(model_id=\'gpt2\',\n ... add_start_token=False,\n ... input_texts=input_texts) # doctest:+ELLIPSIS\n >>> print(list(results.keys()))\n [\'perplexities\', \'mean_perplexity\']\n >>> print(round(results["mean_perplexity"], 2))\n 78.22\n >>> print(round(results["perplexities"][0], 2))\n 11.11\n\n Example 2:\n >>> perplexity = datasets.load_metric("perplexity")\n >>> input_texts = datasets.load_dataset("wikitext",\n ... "wikitext-2-raw-v1",\n ... split="test")["text"][:50] # doctest:+ELLIPSIS\n [...]\n >>> input_texts = [s for s in input_texts if s!=\'\']\n >>> results = perplexity.compute(model_id=\'gpt2\',\n ... input_texts=input_texts) # doctest:+ELLIPSIS\n >>> print(list(results.keys()))\n [\'perplexities\', \'mean_perplexity\']\n >>> print(round(results["mean_perplexity"], 2))\n 60.35\n >>> print(round(results["perplexities"][0], 2))\n 81.12\n' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class __SCREAMING_SNAKE_CASE (datasets.Metric ): """simple docstring""" def UpperCamelCase__ ( self : Dict ): return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { "input_texts": datasets.Value("string" ), } ) , reference_urls=["https://huggingface.co/docs/transformers/perplexity"] , ) def UpperCamelCase__ ( self : List[str] , __a : Optional[int] , __a : Optional[int] , __a : int = 16 , __a : bool = True , __a : Dict=None ): if device is not None: assert device in ["gpu", "cpu", "cuda"], "device should be either gpu or cpu." 
if device == "gpu": _a = "cuda" else: _a = "cuda" if torch.cuda.is_available() else "cpu" _a = AutoModelForCausalLM.from_pretrained(__a ) _a = model.to(__a ) _a = AutoTokenizer.from_pretrained(__a ) # if batch_size > 1 (which generally leads to padding being required), and # if there is not an already assigned pad_token, assign an existing # special token to also be the padding token if tokenizer.pad_token is None and batch_size > 1: _a = list(tokenizer.special_tokens_map_extended.values() ) # check that the model already has at least one special token defined assert ( len(__a ) > 0 ), "If batch_size > 1, model must have at least one special token to use for padding. Please use a different model or set batch_size=1." # assign one of the special tokens to also be the pad token tokenizer.add_special_tokens({"pad_token": existing_special_tokens[0]} ) if add_start_token: # leave room for <BOS> token to be added: assert ( tokenizer.bos_token is not None ), "Input model must already have a BOS token if using add_start_token=True. Please use a different model, or set add_start_token=False" _a = model.config.max_length - 1 else: _a = model.config.max_length _a = tokenizer( __a , add_special_tokens=__a , padding=__a , truncation=__a , max_length=__a , return_tensors="pt" , return_attention_mask=__a , ).to(__a ) _a = encodings["input_ids"] _a = encodings["attention_mask"] # check that each input is long enough: if add_start_token: assert torch.all(torch.ge(attn_masks.sum(1 ) , 1 ) ), "Each input text must be at least one token long." else: assert torch.all( torch.ge(attn_masks.sum(1 ) , 2 ) ), "When add_start_token=False, each input text must be at least two tokens long. Run with add_start_token=True if inputting strings of only one token, and remove all empty input strings." _a = [] _a = CrossEntropyLoss(reduction="none" ) for start_index in logging.tqdm(range(0 , len(__a ) , __a ) ): _a = min(start_index + batch_size , len(__a ) ) _a = encoded_texts[start_index:end_index] _a = attn_masks[start_index:end_index] if add_start_token: _a = torch.tensor([[tokenizer.bos_token_id]] * encoded_batch.size(dim=0 ) ).to(__a ) _a = torch.cat([bos_tokens_tensor, encoded_batch] , dim=1 ) _a = torch.cat( [torch.ones(bos_tokens_tensor.size() , dtype=torch.intaa ).to(__a ), attn_mask] , dim=1 ) _a = encoded_batch with torch.no_grad(): _a = model(__a , attention_mask=__a ).logits _a = out_logits[..., :-1, :].contiguous() _a = labels[..., 1:].contiguous() _a = attn_mask[..., 1:].contiguous() _a = torch.expa( (loss_fct(shift_logits.transpose(1 , 2 ) , __a ) * shift_attention_mask_batch).sum(1 ) / shift_attention_mask_batch.sum(1 ) ) ppls += perplexity_batch.tolist() return {"perplexities": ppls, "mean_perplexity": np.mean(__a )}
363
'''simple docstring''' from typing import Union from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging from .base import PIPELINE_INIT_ARGS, Pipeline if is_vision_available(): from PIL import Image from ..image_utils import load_image if is_torch_available(): from ..models.auto.modeling_auto import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING lowerCAmelCase_ : Union[str, Any] = logging.get_logger(__name__) @add_end_docstrings(lowerCamelCase_ ) class __SCREAMING_SNAKE_CASE (lowerCamelCase_ ): """simple docstring""" def __init__( self : List[Any] , *__a : Optional[int] , **__a : List[str] ): super().__init__(*__a , **__a ) self.check_model_type(__a ) def UpperCamelCase__ ( self : Optional[Any] , __a : Dict=None , __a : int=None , __a : Optional[Any]=None , **__a : List[Any] ): _a , _a = {}, {} if padding is not None: _a = padding if truncation is not None: _a = truncation if top_k is not None: _a = top_k return preprocess_params, {}, postprocess_params def __call__( self : Union[str, Any] , __a : Union["Image.Image", str] , __a : str = None , **__a : Any ): if isinstance(__a , (Image.Image, str) ) and isinstance(__a , __a ): _a = {"image": image, "question": question} else: _a = image _a = super().__call__(__a , **__a ) return results def UpperCamelCase__ ( self : Tuple , __a : Tuple , __a : Optional[Any]=False , __a : List[Any]=False ): _a = load_image(inputs["image"] ) _a = self.tokenizer( inputs["question"] , return_tensors=self.framework , padding=__a , truncation=__a ) _a = self.image_processor(images=__a , return_tensors=self.framework ) model_inputs.update(__a ) return model_inputs def UpperCamelCase__ ( self : List[Any] , __a : List[str] ): _a = self.model(**__a ) return model_outputs def UpperCamelCase__ ( self : int , __a : Optional[int] , __a : Dict=5 ): if top_k > self.model.config.num_labels: _a = self.model.config.num_labels if self.framework == "pt": _a = model_outputs.logits.sigmoid()[0] _a , _a = probs.topk(__a ) else: raise ValueError(f'Unsupported framework: {self.framework}' ) _a = scores.tolist() _a = ids.tolist() return [{"score": score, "answer": self.model.config.idalabel[_id]} for score, _id in zip(__a , __a )]
346
0
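The perplexity metric row above exponentiates the mean per-token negative log-likelihood. A toy sketch of exactly that arithmetic, with hand-picked next-token probabilities standing in for model outputs:

import math

def perplexity(token_probs):
    # Perplexity = exp(mean NLL) over the sequence's per-token probabilities.
    nll = [-math.log(p) for p in token_probs]
    return math.exp(sum(nll) / len(nll))

print(perplexity([0.25, 0.5, 0.125]))  # 4.0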
"""simple docstring""" import torch from diffusers import CMStochasticIterativeScheduler from .test_schedulers import SchedulerCommonTest class SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase ): lowercase__ = (CMStochasticIterativeScheduler,) lowercase__ = 10 def _UpperCAmelCase ( self : Optional[int] , **lowerCAmelCase_ : str): """simple docstring""" lowercase_ = { """num_train_timesteps""": 2_0_1, """sigma_min""": 0.002, """sigma_max""": 80.0, } config.update(**lowerCAmelCase_) return config def _UpperCAmelCase ( self : Optional[int]): """simple docstring""" lowercase_ = 1_0 lowercase_ = self.get_scheduler_config() lowercase_ = self.scheduler_classes[0](**lowerCAmelCase_) scheduler.set_timesteps(lowerCAmelCase_) lowercase_ = scheduler.timesteps[0] lowercase_ = scheduler.timesteps[1] lowercase_ = self.dummy_sample lowercase_ = 0.1 * sample lowercase_ = scheduler.step(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_).prev_sample lowercase_ = scheduler.step(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_).prev_sample self.assertEqual(output_a.shape , sample.shape) self.assertEqual(output_a.shape , output_a.shape) def _UpperCAmelCase ( self : Optional[Any]): """simple docstring""" for timesteps in [1_0, 5_0, 1_0_0, 1_0_0_0]: self.check_over_configs(num_train_timesteps=lowerCAmelCase_) def _UpperCAmelCase ( self : Optional[Any]): """simple docstring""" for clip_denoised in [True, False]: self.check_over_configs(clip_denoised=lowerCAmelCase_) def _UpperCAmelCase ( self : List[Any]): """simple docstring""" lowercase_ = self.scheduler_classes[0] lowercase_ = self.get_scheduler_config() lowercase_ = scheduler_class(**lowerCAmelCase_) lowercase_ = 1 scheduler.set_timesteps(lowerCAmelCase_) lowercase_ = scheduler.timesteps lowercase_ = torch.manual_seed(0) lowercase_ = self.dummy_model() lowercase_ = self.dummy_sample_deter * scheduler.init_noise_sigma for i, t in enumerate(lowerCAmelCase_): # 1. scale model input lowercase_ = scheduler.scale_model_input(lowerCAmelCase_ , lowerCAmelCase_) # 2. predict noise residual lowercase_ = model(lowerCAmelCase_ , lowerCAmelCase_) # 3. predict previous sample x_t-1 lowercase_ = scheduler.step(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , generator=lowerCAmelCase_).prev_sample lowercase_ = pred_prev_sample lowercase_ = torch.sum(torch.abs(lowerCAmelCase_)) lowercase_ = torch.mean(torch.abs(lowerCAmelCase_)) assert abs(result_sum.item() - 192.7_614) < 1E-2 assert abs(result_mean.item() - 0.2_510) < 1E-3 def _UpperCAmelCase ( self : List[Any]): """simple docstring""" lowercase_ = self.scheduler_classes[0] lowercase_ = self.get_scheduler_config() lowercase_ = scheduler_class(**lowerCAmelCase_) lowercase_ = [1_0_6, 0] scheduler.set_timesteps(timesteps=lowerCAmelCase_) lowercase_ = scheduler.timesteps lowercase_ = torch.manual_seed(0) lowercase_ = self.dummy_model() lowercase_ = self.dummy_sample_deter * scheduler.init_noise_sigma for t in timesteps: # 1. scale model input lowercase_ = scheduler.scale_model_input(lowerCAmelCase_ , lowerCAmelCase_) # 2. predict noise residual lowercase_ = model(lowerCAmelCase_ , lowerCAmelCase_) # 3. 
predict previous sample x_t-1 lowercase_ = scheduler.step(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , generator=lowerCAmelCase_).prev_sample lowercase_ = pred_prev_sample lowercase_ = torch.sum(torch.abs(lowerCAmelCase_)) lowercase_ = torch.mean(torch.abs(lowerCAmelCase_)) assert abs(result_sum.item() - 347.6_357) < 1E-2 assert abs(result_mean.item() - 0.4_527) < 1E-3 def _UpperCAmelCase ( self : Union[str, Any]): """simple docstring""" lowercase_ = self.scheduler_classes[0] lowercase_ = self.get_scheduler_config() lowercase_ = scheduler_class(**lowerCAmelCase_) lowercase_ = [3_9, 3_0, 1_2, 1_5, 0] with self.assertRaises(lowerCAmelCase_ , msg="""`timesteps` must be in descending order."""): scheduler.set_timesteps(timesteps=lowerCAmelCase_) def _UpperCAmelCase ( self : Optional[Any]): """simple docstring""" lowercase_ = self.scheduler_classes[0] lowercase_ = self.get_scheduler_config() lowercase_ = scheduler_class(**lowerCAmelCase_) lowercase_ = [3_9, 3_0, 1_2, 1, 0] lowercase_ = len(lowerCAmelCase_) with self.assertRaises(lowerCAmelCase_ , msg="""Can only pass one of `num_inference_steps` or `timesteps`."""): scheduler.set_timesteps(num_inference_steps=lowerCAmelCase_ , timesteps=lowerCAmelCase_) def _UpperCAmelCase ( self : int): """simple docstring""" lowercase_ = self.scheduler_classes[0] lowercase_ = self.get_scheduler_config() lowercase_ = scheduler_class(**lowerCAmelCase_) lowercase_ = [scheduler.config.num_train_timesteps] with self.assertRaises( lowerCAmelCase_ , msg="""`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}""" , ): scheduler.set_timesteps(timesteps=lowerCAmelCase_)
136
"""simple docstring""" from typing import Dict, Optional import numpy as np import datasets UpperCAmelCase : Tuple = "\nIoU is the area of overlap between the predicted segmentation and the ground truth divided by the area of union\nbetween the predicted segmentation and the ground truth. For binary (two classes) or multi-class segmentation,\nthe mean IoU of the image is calculated by taking the IoU of each class and averaging them.\n" UpperCAmelCase : Optional[int] = "\nArgs:\n predictions (`List[ndarray]`):\n List of predicted segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.\n references (`List[ndarray]`):\n List of ground truth segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.\n num_labels (`int`):\n Number of classes (categories).\n ignore_index (`int`):\n Index that will be ignored during evaluation.\n nan_to_num (`int`, *optional*):\n If specified, NaN values will be replaced by the number defined by the user.\n label_map (`dict`, *optional*):\n If specified, dictionary mapping old label indices to new label indices.\n reduce_labels (`bool`, *optional*, defaults to `False`):\n Whether or not to reduce all label values of segmentation maps by 1. Usually used for datasets where 0 is used for background,\n and background itself is not included in all classes of a dataset (e.g. ADE20k). The background label will be replaced by 255.\n\nReturns:\n `Dict[str, float | ndarray]` comprising various elements:\n - *mean_iou* (`float`):\n Mean Intersection-over-Union (IoU averaged over all categories).\n - *mean_accuracy* (`float`):\n Mean accuracy (averaged over all categories).\n - *overall_accuracy* (`float`):\n Overall accuracy on all images.\n - *per_category_accuracy* (`ndarray` of shape `(num_labels,)`):\n Per category accuracy.\n - *per_category_iou* (`ndarray` of shape `(num_labels,)`):\n Per category IoU.\n\nExamples:\n\n >>> import numpy as np\n\n >>> mean_iou = datasets.load_metric(\"mean_iou\")\n\n >>> # suppose one has 3 different segmentation maps predicted\n >>> predicted_1 = np.array([[1, 2], [3, 4], [5, 255]])\n >>> actual_1 = np.array([[0, 3], [5, 4], [6, 255]])\n\n >>> predicted_2 = np.array([[2, 7], [9, 2], [3, 6]])\n >>> actual_2 = np.array([[1, 7], [9, 2], [3, 6]])\n\n >>> predicted_3 = np.array([[2, 2, 3], [8, 2, 4], [3, 255, 2]])\n >>> actual_3 = np.array([[1, 2, 2], [8, 2, 1], [3, 255, 1]])\n\n >>> predicted = [predicted_1, predicted_2, predicted_3]\n >>> ground_truth = [actual_1, actual_2, actual_3]\n\n >>> results = mean_iou.compute(predictions=predicted, references=ground_truth, num_labels=10, ignore_index=255, reduce_labels=False)\n >>> print(results) # doctest: +NORMALIZE_WHITESPACE\n {'mean_iou': 0.47750000000000004, 'mean_accuracy': 0.5916666666666666, 'overall_accuracy': 0.5263157894736842, 'per_category_iou': array([0. , 0. , 0.375, 0.4 , 0.5 , 0. , 0.5 , 1. , 1. , 1. ]), 'per_category_accuracy': array([0. , 0. , 0.75 , 0.66666667, 1. , 0. , 0.5 , 1. , 1. , 1. 
])}\n" UpperCAmelCase : List[str] = "\\n@software{MMSegmentation_Contributors_OpenMMLab_Semantic_Segmentation_2020,\nauthor = {{MMSegmentation Contributors}},\nlicense = {Apache-2.0},\nmonth = {7},\ntitle = {{OpenMMLab Semantic Segmentation Toolbox and Benchmark}},\nurl = {https://github.com/open-mmlab/mmsegmentation},\nyear = {2020}\n}" def _SCREAMING_SNAKE_CASE (__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = None , __lowerCAmelCase = False , ) -> Dict: '''simple docstring''' if label_map is not None: for old_id, new_id in label_map.items(): lowercase_ = new_id # turn into Numpy arrays lowercase_ = np.array(__lowerCAmelCase ) lowercase_ = np.array(__lowerCAmelCase ) if reduce_labels: lowercase_ = 2_55 lowercase_ = label - 1 lowercase_ = 2_55 lowercase_ = label != ignore_index lowercase_ = np.not_equal(__lowerCAmelCase , __lowerCAmelCase ) lowercase_ = pred_label[mask] lowercase_ = np.array(__lowerCAmelCase )[mask] lowercase_ = pred_label[pred_label == label] lowercase_ = np.histogram(__lowerCAmelCase , bins=__lowerCAmelCase , range=(0, num_labels - 1) )[0] lowercase_ = np.histogram(__lowerCAmelCase , bins=__lowerCAmelCase , range=(0, num_labels - 1) )[0] lowercase_ = np.histogram(__lowerCAmelCase , bins=__lowerCAmelCase , range=(0, num_labels - 1) )[0] lowercase_ = area_pred_label + area_label - area_intersect return area_intersect, area_union, area_pred_label, area_label def _SCREAMING_SNAKE_CASE (__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = None , __lowerCAmelCase = False , ) -> Optional[Any]: '''simple docstring''' lowercase_ = np.zeros((num_labels,) , dtype=np.floataa ) lowercase_ = np.zeros((num_labels,) , dtype=np.floataa ) lowercase_ = np.zeros((num_labels,) , dtype=np.floataa ) lowercase_ = np.zeros((num_labels,) , dtype=np.floataa ) for result, gt_seg_map in zip(__lowerCAmelCase , __lowerCAmelCase ): lowercase_ , lowercase_ , lowercase_ , lowercase_ = intersect_and_union( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) total_area_intersect += area_intersect total_area_union += area_union total_area_pred_label += area_pred_label total_area_label += area_label return total_area_intersect, total_area_union, total_area_pred_label, total_area_label def _SCREAMING_SNAKE_CASE (__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = None , __lowerCAmelCase = None , __lowerCAmelCase = False , ) -> Any: '''simple docstring''' lowercase_ , lowercase_ , lowercase_ , lowercase_ = total_intersect_and_union( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) # compute metrics lowercase_ = {} lowercase_ = total_area_intersect.sum() / total_area_label.sum() lowercase_ = total_area_intersect / total_area_union lowercase_ = total_area_intersect / total_area_label lowercase_ = np.nanmean(__lowerCAmelCase ) lowercase_ = np.nanmean(__lowerCAmelCase ) lowercase_ = all_acc lowercase_ = iou lowercase_ = acc if nan_to_num is not None: lowercase_ = {metric: np.nan_to_num(__lowerCAmelCase , nan=__lowerCAmelCase ) for metric, metric_value in metrics.items()} return metrics @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class SCREAMING_SNAKE_CASE__ ( datasets.Metric ): def _UpperCAmelCase ( self : Union[str, Any]): """simple docstring""" return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION 
, inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( # 1st Seq - height dim, 2nd - width dim { """predictions""": datasets.Sequence(datasets.Sequence(datasets.Value("""uint16"""))), """references""": datasets.Sequence(datasets.Sequence(datasets.Value("""uint16"""))), }) , reference_urls=[ """https://github.com/open-mmlab/mmsegmentation/blob/71c201b1813267d78764f306a297ca717827c4bf/mmseg/core/evaluation/metrics.py""" ] , ) def _UpperCAmelCase ( self : Tuple , lowerCAmelCase_ : Any , lowerCAmelCase_ : Dict , lowerCAmelCase_ : int , lowerCAmelCase_ : bool , lowerCAmelCase_ : Optional[int] = None , lowerCAmelCase_ : Optional[Dict[int, int]] = None , lowerCAmelCase_ : bool = False , ): """simple docstring""" lowercase_ = mean_iou( results=lowerCAmelCase_ , gt_seg_maps=lowerCAmelCase_ , num_labels=lowerCAmelCase_ , ignore_index=lowerCAmelCase_ , nan_to_num=lowerCAmelCase_ , label_map=lowerCAmelCase_ , reduce_labels=lowerCAmelCase_ , ) return iou_result
136
1
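The mean-IoU row above counts per-class intersections and unions with np.histogram. An equivalent, more explicit sketch using boolean masks (per_class_iou is an illustrative name):

import numpy as np

def per_class_iou(pred, label, num_labels):
    # For each class c, IoU = |pred==c AND label==c| / |pred==c OR label==c|.
    ious = []
    for c in range(num_labels):
        inter = np.sum((pred == c) & (label == c))
        union = np.sum((pred == c) | (label == c))
        ious.append(inter / union if union else float("nan"))
    return np.array(ious)

pred = np.array([[0, 1], [1, 1]])
label = np.array([[0, 1], [0, 1]])
print(per_class_iou(pred, label, 2))  # [0.5 0.66666667]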
'''simple docstring''' from __future__ import annotations _lowerCAmelCase = list[tuple[int, int]] _lowerCAmelCase = [ [0, 0, 0, 0, 0, 0, 0], [0, 1, 0, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles [0, 0, 0, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0, 0], [1, 0, 1, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 1, 0, 0], ] _lowerCAmelCase = ([-1, 0], [0, -1], [1, 0], [0, 1]) # up, left, down, right class lowerCAmelCase_: '''simple docstring''' def __init__( self ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ,) -> int: lowerCAmelCase__ : Union[str, Any] = pos_x lowerCAmelCase__ : Dict = pos_y lowerCAmelCase__ : Tuple = (pos_y, pos_x) lowerCAmelCase__ : Optional[Any] = goal_x lowerCAmelCase__ : Dict = goal_y lowerCAmelCase__ : List[Any] = g_cost lowerCAmelCase__ : Optional[int] = parent lowerCAmelCase__ : str = self.calculate_heuristic() def UpperCAmelCase_ ( self ) -> float: lowerCAmelCase__ : Dict = abs(self.pos_x - self.goal_x ) lowerCAmelCase__ : Tuple = abs(self.pos_y - self.goal_y ) return dx + dy def __lt__( self ,__UpperCAmelCase ) -> bool: return self.f_cost < other.f_cost class lowerCAmelCase_: '''simple docstring''' def __init__( self ,__UpperCAmelCase ,__UpperCAmelCase ) -> Optional[int]: lowerCAmelCase__ : Optional[int] = Node(start[1] ,start[0] ,goal[1] ,goal[0] ,0 ,__UpperCAmelCase ) lowerCAmelCase__ : List[str] = Node(goal[1] ,goal[0] ,goal[1] ,goal[0] ,9_9999 ,__UpperCAmelCase ) lowerCAmelCase__ : Optional[Any] = [self.start] lowerCAmelCase__ : list[Node] = [] lowerCAmelCase__ : Optional[int] = False def UpperCAmelCase_ ( self ) -> Path | None: while self.open_nodes: # Open Nodes are sorted using __lt__ self.open_nodes.sort() lowerCAmelCase__ : Union[str, Any] = self.open_nodes.pop(0 ) if current_node.pos == self.target.pos: lowerCAmelCase__ : Optional[Any] = True return self.retrace_path(__UpperCAmelCase ) self.closed_nodes.append(__UpperCAmelCase ) lowerCAmelCase__ : Any = self.get_successors(__UpperCAmelCase ) for child_node in successors: if child_node in self.closed_nodes: continue if child_node not in self.open_nodes: self.open_nodes.append(__UpperCAmelCase ) else: # retrieve the best current path lowerCAmelCase__ : Dict = self.open_nodes.pop(self.open_nodes.index(__UpperCAmelCase ) ) if child_node.g_cost < better_node.g_cost: self.open_nodes.append(__UpperCAmelCase ) else: self.open_nodes.append(__UpperCAmelCase ) if not self.reached: return [self.start.pos] return None def UpperCAmelCase_ ( self ,__UpperCAmelCase ) -> list[Node]: lowerCAmelCase__ : List[Any] = [] for action in delta: lowerCAmelCase__ : str = parent.pos_x + action[1] lowerCAmelCase__ : Optional[Any] = parent.pos_y + action[0] if not (0 <= pos_x <= len(grid[0] ) - 1 and 0 <= pos_y <= len(__UpperCAmelCase ) - 1): continue if grid[pos_y][pos_x] != 0: continue successors.append( Node( __UpperCAmelCase ,__UpperCAmelCase ,self.target.pos_y ,self.target.pos_x ,parent.g_cost + 1 ,__UpperCAmelCase ,) ) return successors def UpperCAmelCase_ ( self ,__UpperCAmelCase ) -> Path: lowerCAmelCase__ : List[Any] = node lowerCAmelCase__ : int = [] while current_node is not None: path.append((current_node.pos_y, current_node.pos_x) ) lowerCAmelCase__ : Dict = current_node.parent path.reverse() return path if __name__ == "__main__": _lowerCAmelCase = (0, 0) _lowerCAmelCase = (len(grid) - 1, len(grid[0]) - 1) for elem in grid: print(elem) print('''------''') _lowerCAmelCase = GreedyBestFirst(init, goal) _lowerCAmelCase = greedy_bf.search() if path: for pos_x, 
pos_y in path: _lowerCAmelCase = 2 for elem in grid: print(elem)
184
'''simple docstring''' import json import os from typing import Optional, Tuple import regex as re from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging _lowerCAmelCase = logging.get_logger(__name__) _lowerCAmelCase = { '''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', } _lowerCAmelCase = { '''vocab_file''': {'''ctrl''': '''https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-vocab.json'''}, '''merges_file''': {'''ctrl''': '''https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-merges.txt'''}, } _lowerCAmelCase = { '''ctrl''': 256, } _lowerCAmelCase = { '''Pregnancy''': 16_8629, '''Christianity''': 7675, '''Explain''': 10_6423, '''Fitness''': 6_3440, '''Saving''': 6_3163, '''Ask''': 2_7171, '''Ass''': 9_5985, '''Joke''': 16_3509, '''Questions''': 4_5622, '''Thoughts''': 4_9605, '''Retail''': 5_2342, '''Feminism''': 16_4338, '''Writing''': 1_1992, '''Atheism''': 19_2263, '''Netflix''': 4_8616, '''Computing''': 3_9639, '''Opinion''': 4_3213, '''Alone''': 4_4967, '''Funny''': 5_8917, '''Gaming''': 4_0358, '''Human''': 4088, '''India''': 1331, '''Joker''': 7_7138, '''Diet''': 3_6206, '''Legal''': 1_1859, '''Norman''': 4939, '''Tip''': 7_2689, '''Weight''': 5_2343, '''Movies''': 4_6273, '''Running''': 2_3425, '''Science''': 2090, '''Horror''': 3_7793, '''Confession''': 6_0572, '''Finance''': 1_2250, '''Politics''': 1_6360, '''Scary''': 19_1985, '''Support''': 1_2654, '''Technologies''': 3_2516, '''Teenage''': 6_6160, '''Event''': 3_2769, '''Learned''': 6_7460, '''Notion''': 18_2770, '''Wikipedia''': 3_7583, '''Books''': 6665, '''Extract''': 7_6050, '''Confessions''': 10_2701, '''Conspiracy''': 7_5932, '''Links''': 6_3674, '''Narcissus''': 15_0425, '''Relationship''': 5_4766, '''Relationships''': 13_4796, '''Reviews''': 4_1671, '''News''': 4256, '''Translation''': 2_6820, '''multilingual''': 12_8406, } def _SCREAMING_SNAKE_CASE ( UpperCamelCase ): """simple docstring""" lowerCAmelCase__ : Dict = set() lowerCAmelCase__ : List[str] = word[0] for char in word[1:]: pairs.add((prev_char, char) ) lowerCAmelCase__ : str = char lowerCAmelCase__ : int = set(UpperCamelCase ) return pairs class lowerCAmelCase_( SCREAMING_SNAKE_CASE_ ): '''simple docstring''' __lowercase : Optional[int] = VOCAB_FILES_NAMES __lowercase : Union[str, Any] = PRETRAINED_VOCAB_FILES_MAP __lowercase : List[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __lowercase : str = CONTROL_CODES def __init__( self ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase="<unk>" ,**__UpperCAmelCase ) -> Optional[Any]: super().__init__(unk_token=__UpperCAmelCase ,**__UpperCAmelCase ) with open(__UpperCAmelCase ,encoding="""utf-8""" ) as vocab_handle: lowerCAmelCase__ : List[Any] = json.load(__UpperCAmelCase ) lowerCAmelCase__ : str = {v: k for k, v in self.encoder.items()} with open(__UpperCAmelCase ,encoding="""utf-8""" ) as merges_handle: lowerCAmelCase__ : Any = merges_handle.read().split("""\n""" )[1:-1] lowerCAmelCase__ : Optional[Any] = [tuple(merge.split() ) for merge in merges] lowerCAmelCase__ : Tuple = dict(zip(__UpperCAmelCase ,range(len(__UpperCAmelCase ) ) ) ) lowerCAmelCase__ : int = {} @property def UpperCAmelCase_ ( self ) -> List[Any]: return len(self.encoder ) def UpperCAmelCase_ ( self ) -> Optional[int]: return dict(self.encoder ,**self.added_tokens_encoder ) def UpperCAmelCase_ ( self ,__UpperCAmelCase ) -> Optional[Any]: if token in self.cache: return self.cache[token] lowerCAmelCase__ : int = tuple(__UpperCAmelCase ) lowerCAmelCase__ : str = 
tuple(list(word[:-1] ) + [word[-1] + """</w>"""] ) lowerCAmelCase__ : Tuple = get_pairs(__UpperCAmelCase ) if not pairs: return token while True: lowerCAmelCase__ : Tuple = min(__UpperCAmelCase ,key=lambda __UpperCAmelCase : self.bpe_ranks.get(__UpperCAmelCase ,float("""inf""" ) ) ) if bigram not in self.bpe_ranks: break lowerCAmelCase__ , lowerCAmelCase__ : Optional[Any] = bigram lowerCAmelCase__ : Dict = [] lowerCAmelCase__ : int = 0 while i < len(__UpperCAmelCase ): try: lowerCAmelCase__ : Any = word.index(__UpperCAmelCase ,__UpperCAmelCase ) except ValueError: new_word.extend(word[i:] ) break else: new_word.extend(word[i:j] ) lowerCAmelCase__ : Any = j if word[i] == first and i < len(__UpperCAmelCase ) - 1 and word[i + 1] == second: new_word.append(first + second ) i += 2 else: new_word.append(word[i] ) i += 1 lowerCAmelCase__ : Any = tuple(__UpperCAmelCase ) lowerCAmelCase__ : Tuple = new_word if len(__UpperCAmelCase ) == 1: break else: lowerCAmelCase__ : List[str] = get_pairs(__UpperCAmelCase ) lowerCAmelCase__ : List[Any] = """@@ """.join(__UpperCAmelCase ) lowerCAmelCase__ : int = word[:-4] lowerCAmelCase__ : Optional[Any] = word return word def UpperCAmelCase_ ( self ,__UpperCAmelCase ) -> Union[str, Any]: lowerCAmelCase__ : List[Any] = [] lowerCAmelCase__ : Optional[int] = re.findall(R"""\S+\n?""" ,__UpperCAmelCase ) for token in words: split_tokens.extend(list(self.bpe(__UpperCAmelCase ).split(""" """ ) ) ) return split_tokens def UpperCAmelCase_ ( self ,__UpperCAmelCase ) -> Tuple: return self.encoder.get(__UpperCAmelCase ,self.encoder.get(self.unk_token ) ) def UpperCAmelCase_ ( self ,__UpperCAmelCase ) -> Optional[int]: return self.decoder.get(__UpperCAmelCase ,self.unk_token ) def UpperCAmelCase_ ( self ,__UpperCAmelCase ) -> Dict: lowerCAmelCase__ : Optional[int] = """ """.join(__UpperCAmelCase ).replace("""@@ """ ,"""""" ).strip() return out_string def UpperCAmelCase_ ( self ,__UpperCAmelCase ,__UpperCAmelCase = None ) -> Tuple[str]: if not os.path.isdir(__UpperCAmelCase ): logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" ) return lowerCAmelCase__ : Tuple = os.path.join( __UpperCAmelCase ,(filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] ) lowerCAmelCase__ : str = os.path.join( __UpperCAmelCase ,(filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""merges_file"""] ) with open(__UpperCAmelCase ,"""w""" ,encoding="""utf-8""" ) as f: f.write(json.dumps(self.encoder ,indent=2 ,sort_keys=__UpperCAmelCase ,ensure_ascii=__UpperCAmelCase ) + """\n""" ) lowerCAmelCase__ : Optional[int] = 0 with open(__UpperCAmelCase ,"""w""" ,encoding="""utf-8""" ) as writer: writer.write("""#version: 0.2\n""" ) for bpe_tokens, token_index in sorted(self.bpe_ranks.items() ,key=lambda __UpperCAmelCase : kv[1] ): if index != token_index: logger.warning( F"""Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.""" """ Please check that the tokenizer is not corrupted!""" ) lowerCAmelCase__ : List[str] = token_index writer.write(""" """.join(__UpperCAmelCase ) + """\n""" ) index += 1 return vocab_file, merge_file # def decode(self, token_ids, skip_special_tokens=False, clean_up_tokenization_spaces=True): # filtered_tokens = ' '.join(self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens)) # tokens_generated_so_far = re.sub('(@@ )', '', string=filtered_tokens) # tokens_generated_so_far = re.sub('(@@ ?$)', '', string=tokens_generated_so_far) # return 
''.join(tokens_generated_so_far)
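# --- Illustrative sketch: the BPE merge loop, reduced to essentials ---
# A minimal, self-contained version of the byte-pair-encoding loop that the
# tokenizer above implements. Function and variable names are illustrative;
# the real method additionally caches results, appends a "</w>" end-of-word
# marker before merging, and joins subwords with "@@ ".

def get_pairs_sketch(word):
    """Return the set of adjacent symbol pairs in `word` (a tuple of strings)."""
    return {(word[i], word[i + 1]) for i in range(len(word) - 1)}

def bpe_sketch(token, bpe_ranks):
    """Greedily merge the lowest-ranked adjacent pair until none is mergeable."""
    word = tuple(token)
    while len(word) > 1:
        pairs = get_pairs_sketch(word)
        # pick the adjacent pair with the best (lowest) merge rank
        bigram = min(pairs, key=lambda pair: bpe_ranks.get(pair, float("inf")))
        if bigram not in bpe_ranks:
            break  # no known merge applies any more
        first, second = bigram
        new_word, i = [], 0
        while i < len(word):
            if i < len(word) - 1 and (word[i], word[i + 1]) == bigram:
                new_word.append(first + second)  # apply the merge
                i += 2
            else:
                new_word.append(word[i])
                i += 1
        word = tuple(new_word)
    return " ".join(word)

# "l"+"o" merges first (rank 0), then "lo"+"w" (rank 1):
assert bpe_sketch("low", {("l", "o"): 0, ("lo", "w"): 1}) == "low"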
184
1
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging _a = logging.get_logger(__name__) _a = { 'google/realm-cc-news-pretrained-embedder': ( 'https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/config.json' ), 'google/realm-cc-news-pretrained-encoder': ( 'https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/config.json' ), 'google/realm-cc-news-pretrained-scorer': ( 'https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/config.json' ), 'google/realm-cc-news-pretrained-openqa': ( 'https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/config.json' ), 'google/realm-orqa-nq-openqa': 'https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/config.json', 'google/realm-orqa-nq-reader': 'https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/config.json', 'google/realm-orqa-wq-openqa': 'https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/config.json', 'google/realm-orqa-wq-reader': 'https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/config.json', # See all REALM models at https://huggingface.co/models?filter=realm } class A_ (lowercase__ ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : Union[str, Any] = """realm""" def __init__( self , lowercase_=3_0522 , lowercase_=768 , lowercase_=128 , lowercase_=12 , lowercase_=12 , lowercase_=8 , lowercase_=3072 , lowercase_="gelu_new" , lowercase_=0.1 , lowercase_=0.1 , lowercase_=512 , lowercase_=2 , lowercase_=0.02 , lowercase_=1E-1_2 , lowercase_=256 , lowercase_=10 , lowercase_=1E-3 , lowercase_=5 , lowercase_=320 , lowercase_=1335_3718 , lowercase_=5000 , lowercase_=1 , lowercase_=0 , lowercase_=2 , **lowercase_ , ): """simple docstring""" super().__init__(pad_token_id=lowercase_ , bos_token_id=lowercase_ , eos_token_id=lowercase_ , **lowercase_ ) # Common config UpperCAmelCase_ : List[Any] = vocab_size UpperCAmelCase_ : Dict = max_position_embeddings UpperCAmelCase_ : Union[str, Any] = hidden_size UpperCAmelCase_ : List[str] = retriever_proj_size UpperCAmelCase_ : Dict = num_hidden_layers UpperCAmelCase_ : Tuple = num_attention_heads UpperCAmelCase_ : Optional[int] = num_candidates UpperCAmelCase_ : List[str] = intermediate_size UpperCAmelCase_ : List[Any] = hidden_act UpperCAmelCase_ : Optional[int] = hidden_dropout_prob UpperCAmelCase_ : Optional[Any] = attention_probs_dropout_prob UpperCAmelCase_ : List[str] = initializer_range UpperCAmelCase_ : Optional[int] = type_vocab_size UpperCAmelCase_ : List[str] = layer_norm_eps # Reader config UpperCAmelCase_ : int = span_hidden_size UpperCAmelCase_ : Optional[Any] = max_span_width UpperCAmelCase_ : Dict = reader_layer_norm_eps UpperCAmelCase_ : Optional[int] = reader_beam_size UpperCAmelCase_ : Union[str, Any] = reader_seq_len # Retrieval config UpperCAmelCase_ : Union[str, Any] = num_block_records UpperCAmelCase_ : Optional[Any] = searcher_beam_size
61
'''simple docstring''' import unittest from transformers import load_tool from .test_tools_common import ToolTesterMixin class lowercase ( unittest.TestCase , _lowerCamelCase ): """simple docstring""" def _snake_case ( self ) -> Any: _UpperCAmelCase : int = load_tool("""text-classification""" ) self.tool.setup() _UpperCAmelCase : Tuple = load_tool("""text-classification""" ,remote=a_ ) def _snake_case ( self ) -> Union[str, Any]: _UpperCAmelCase : Tuple = self.tool("""That's quite cool""" ,["""positive""", """negative"""] ) self.assertEqual(a_ ,"""positive""" ) def _snake_case ( self ) -> Any: _UpperCAmelCase : Dict = self.remote_tool("""That's quite cool""" ,["""positive""", """negative"""] ) self.assertEqual(a_ ,"""positive""" ) def _snake_case ( self ) -> str: _UpperCAmelCase : List[Any] = self.tool(text="""That's quite cool""" ,labels=["""positive""", """negative"""] ) self.assertEqual(a_ ,"""positive""" ) def _snake_case ( self ) -> Union[str, Any]: _UpperCAmelCase : Any = self.remote_tool(text="""That's quite cool""" ,labels=["""positive""", """negative"""] ) self.assertEqual(a_ ,"""positive""" )
215
0
"""simple docstring""" from dataclasses import dataclass from typing import Dict, Optional, Union import torch import torch.nn.functional as F from torch import nn from ..configuration_utils import ConfigMixin, register_to_config from ..utils import BaseOutput from .attention import BasicTransformerBlock from .attention_processor import AttentionProcessor, AttnProcessor from .embeddings import TimestepEmbedding, Timesteps from .modeling_utils import ModelMixin @dataclass class SCREAMING_SNAKE_CASE_ ( __a ): """simple docstring""" __lowercase : torch.FloatTensor class SCREAMING_SNAKE_CASE_ ( __a , __a ): """simple docstring""" @register_to_config def __init__( self , lowerCAmelCase__ = 3_2 , lowerCAmelCase__ = 6_4 , lowerCAmelCase__ = 2_0 , lowerCAmelCase__ = 7_6_8 , lowerCAmelCase__=7_7 , lowerCAmelCase__=4 , lowerCAmelCase__ = 0.0 , lowerCAmelCase__ = "silu" , lowerCAmelCase__ = None , lowerCAmelCase__ = None , lowerCAmelCase__ = "linear" , lowerCAmelCase__ = "prd" , lowerCAmelCase__ = None , lowerCAmelCase__ = None , lowerCAmelCase__ = None , ): super().__init__() __SCREAMING_SNAKE_CASE = num_attention_heads __SCREAMING_SNAKE_CASE = attention_head_dim __SCREAMING_SNAKE_CASE = num_attention_heads * attention_head_dim __SCREAMING_SNAKE_CASE = additional_embeddings __SCREAMING_SNAKE_CASE = time_embed_dim or inner_dim __SCREAMING_SNAKE_CASE = embedding_proj_dim or embedding_dim __SCREAMING_SNAKE_CASE = clip_embed_dim or embedding_dim __SCREAMING_SNAKE_CASE = Timesteps(lowerCAmelCase__ , lowerCAmelCase__ , 0) __SCREAMING_SNAKE_CASE = TimestepEmbedding(lowerCAmelCase__ , lowerCAmelCase__ , out_dim=lowerCAmelCase__ , act_fn=lowerCAmelCase__) __SCREAMING_SNAKE_CASE = nn.Linear(lowerCAmelCase__ , lowerCAmelCase__) if embedding_proj_norm_type is None: __SCREAMING_SNAKE_CASE = None elif embedding_proj_norm_type == "layer": __SCREAMING_SNAKE_CASE = nn.LayerNorm(lowerCAmelCase__) else: raise ValueError(f"unsupported embedding_proj_norm_type: {embedding_proj_norm_type}") __SCREAMING_SNAKE_CASE = nn.Linear(lowerCAmelCase__ , lowerCAmelCase__) if encoder_hid_proj_type is None: __SCREAMING_SNAKE_CASE = None elif encoder_hid_proj_type == "linear": __SCREAMING_SNAKE_CASE = nn.Linear(lowerCAmelCase__ , lowerCAmelCase__) else: raise ValueError(f"unsupported encoder_hid_proj_type: {encoder_hid_proj_type}") __SCREAMING_SNAKE_CASE = nn.Parameter(torch.zeros(1 , num_embeddings + additional_embeddings , lowerCAmelCase__)) if added_emb_type == "prd": __SCREAMING_SNAKE_CASE = nn.Parameter(torch.zeros(1 , 1 , lowerCAmelCase__)) elif added_emb_type is None: __SCREAMING_SNAKE_CASE = None else: raise ValueError( f"`added_emb_type`: {added_emb_type} is not supported. Make sure to choose one of `'prd'` or `None`.") __SCREAMING_SNAKE_CASE = nn.ModuleList( [ BasicTransformerBlock( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , dropout=lowerCAmelCase__ , activation_fn="""gelu""" , attention_bias=lowerCAmelCase__ , ) for d in range(lowerCAmelCase__) ]) if norm_in_type == "layer": __SCREAMING_SNAKE_CASE = nn.LayerNorm(lowerCAmelCase__) elif norm_in_type is None: __SCREAMING_SNAKE_CASE = None else: raise ValueError(f"Unsupported norm_in_type: {norm_in_type}.") __SCREAMING_SNAKE_CASE = nn.LayerNorm(lowerCAmelCase__) __SCREAMING_SNAKE_CASE = nn.Linear(lowerCAmelCase__ , lowerCAmelCase__) __SCREAMING_SNAKE_CASE = torch.full( [num_embeddings + additional_embeddings, num_embeddings + additional_embeddings] , -1_00_00.0) causal_attention_mask.triu_(1) __SCREAMING_SNAKE_CASE = causal_attention_mask[None, ...] 
self.register_buffer("""causal_attention_mask""" , lowerCAmelCase__ , persistent=lowerCAmelCase__) __SCREAMING_SNAKE_CASE = nn.Parameter(torch.zeros(1 , lowerCAmelCase__)) __SCREAMING_SNAKE_CASE = nn.Parameter(torch.zeros(1 , lowerCAmelCase__)) @property # Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors def snake_case_ ( self): __SCREAMING_SNAKE_CASE = {} def fn_recursive_add_processors(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__): if hasattr(lowerCAmelCase__ , """set_processor"""): __SCREAMING_SNAKE_CASE = module.processor for sub_name, child in module.named_children(): fn_recursive_add_processors(f"{name}.{sub_name}" , lowerCAmelCase__ , lowerCAmelCase__) return processors for name, module in self.named_children(): fn_recursive_add_processors(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__) return processors def snake_case_ ( self , lowerCAmelCase__): __SCREAMING_SNAKE_CASE = len(self.attn_processors.keys()) if isinstance(lowerCAmelCase__ , lowerCAmelCase__) and len(lowerCAmelCase__) != count: raise ValueError( f"A dict of processors was passed, but the number of processors {len(lowerCAmelCase__)} does not match the" f" number of attention layers: {count}. Please make sure to pass {count} processor classes.") def fn_recursive_attn_processor(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__): if hasattr(lowerCAmelCase__ , """set_processor"""): if not isinstance(lowerCAmelCase__ , lowerCAmelCase__): module.set_processor(lowerCAmelCase__) else: module.set_processor(processor.pop(f"{name}.processor")) for sub_name, child in module.named_children(): fn_recursive_attn_processor(f"{name}.{sub_name}" , lowerCAmelCase__ , lowerCAmelCase__) for name, module in self.named_children(): fn_recursive_attn_processor(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__) def snake_case_ ( self): self.set_attn_processor(AttnProcessor()) def snake_case_ ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = None , lowerCAmelCase__ = None , lowerCAmelCase__ = True , ): __SCREAMING_SNAKE_CASE = hidden_states.shape[0] __SCREAMING_SNAKE_CASE = timestep if not torch.is_tensor(lowerCAmelCase__): __SCREAMING_SNAKE_CASE = torch.tensor([timesteps] , dtype=torch.long , device=hidden_states.device) elif torch.is_tensor(lowerCAmelCase__) and len(timesteps.shape) == 0: __SCREAMING_SNAKE_CASE = timesteps[None].to(hidden_states.device) # broadcast to batch dimension in a way that's compatible with ONNX/Core ML __SCREAMING_SNAKE_CASE = timesteps * torch.ones(lowerCAmelCase__ , dtype=timesteps.dtype , device=timesteps.device) __SCREAMING_SNAKE_CASE = self.time_proj(lowerCAmelCase__) # timesteps does not contain any weights and will always return f32 tensors # but time_embedding might be fp16, so we need to cast here. 
__SCREAMING_SNAKE_CASE = timesteps_projected.to(dtype=self.dtype) __SCREAMING_SNAKE_CASE = self.time_embedding(lowerCAmelCase__) if self.embedding_proj_norm is not None: __SCREAMING_SNAKE_CASE = self.embedding_proj_norm(lowerCAmelCase__) __SCREAMING_SNAKE_CASE = self.embedding_proj(lowerCAmelCase__) if self.encoder_hidden_states_proj is not None and encoder_hidden_states is not None: __SCREAMING_SNAKE_CASE = self.encoder_hidden_states_proj(lowerCAmelCase__) elif self.encoder_hidden_states_proj is not None and encoder_hidden_states is None: raise ValueError("""`encoder_hidden_states_proj` requires `encoder_hidden_states` to be set""") __SCREAMING_SNAKE_CASE = self.proj_in(lowerCAmelCase__) __SCREAMING_SNAKE_CASE = self.positional_embedding.to(hidden_states.dtype) __SCREAMING_SNAKE_CASE = [] __SCREAMING_SNAKE_CASE = 0 if encoder_hidden_states is not None: additional_embeds.append(lowerCAmelCase__) additional_embeddings_len += encoder_hidden_states.shape[1] if len(proj_embeddings.shape) == 2: __SCREAMING_SNAKE_CASE = proj_embeddings[:, None, :] if len(hidden_states.shape) == 2: __SCREAMING_SNAKE_CASE = hidden_states[:, None, :] __SCREAMING_SNAKE_CASE = additional_embeds + [ proj_embeddings, time_embeddings[:, None, :], hidden_states, ] if self.prd_embedding is not None: __SCREAMING_SNAKE_CASE = self.prd_embedding.to(hidden_states.dtype).expand(lowerCAmelCase__ , -1 , -1) additional_embeds.append(lowerCAmelCase__) __SCREAMING_SNAKE_CASE = torch.cat( lowerCAmelCase__ , dim=1 , ) # Allow positional_embedding to not include the `addtional_embeddings` and instead pad it with zeros for these additional tokens __SCREAMING_SNAKE_CASE = additional_embeddings_len + proj_embeddings.shape[1] + 1 if positional_embeddings.shape[1] < hidden_states.shape[1]: __SCREAMING_SNAKE_CASE = F.pad( lowerCAmelCase__ , ( 0, 0, additional_embeddings_len, self.prd_embedding.shape[1] if self.prd_embedding is not None else 0, ) , value=0.0 , ) __SCREAMING_SNAKE_CASE = hidden_states + positional_embeddings if attention_mask is not None: __SCREAMING_SNAKE_CASE = (1 - attention_mask.to(hidden_states.dtype)) * -1_00_00.0 __SCREAMING_SNAKE_CASE = F.pad(lowerCAmelCase__ , (0, self.additional_embeddings) , value=0.0) __SCREAMING_SNAKE_CASE = (attention_mask[:, None, :] + self.causal_attention_mask).to(hidden_states.dtype) __SCREAMING_SNAKE_CASE = attention_mask.repeat_interleave(self.config.num_attention_heads , dim=0) if self.norm_in is not None: __SCREAMING_SNAKE_CASE = self.norm_in(lowerCAmelCase__) for block in self.transformer_blocks: __SCREAMING_SNAKE_CASE = block(lowerCAmelCase__ , attention_mask=lowerCAmelCase__) __SCREAMING_SNAKE_CASE = self.norm_out(lowerCAmelCase__) if self.prd_embedding is not None: __SCREAMING_SNAKE_CASE = hidden_states[:, -1] else: __SCREAMING_SNAKE_CASE = hidden_states[:, additional_embeddings_len:] __SCREAMING_SNAKE_CASE = self.proj_to_clip_embeddings(lowerCAmelCase__) if not return_dict: return (predicted_image_embedding,) return PriorTransformerOutput(predicted_image_embedding=lowerCAmelCase__) def snake_case_ ( self , lowerCAmelCase__): __SCREAMING_SNAKE_CASE = (prior_latents * self.clip_std) + self.clip_mean return prior_latents
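# --- Illustrative sketch: the causal attention mask built above ---
# A small demonstration of what the `causal_attention_mask` buffer contains:
# an upper-triangular matrix of large negative values that, when added to
# attention scores, suppresses attention to future positions. Assumes torch
# is installed; the size 4 is illustrative.

import torch

n = 4
mask = torch.full((n, n), -10000.0)
mask.triu_(1)  # in place: keep -10000.0 strictly above the diagonal, zero the rest
# tensor([[     0., -10000., -10000., -10000.],
#         [     0.,      0., -10000., -10000.],
#         [     0.,      0.,      0., -10000.],
#         [     0.,      0.,      0.,      0.]])
print(mask)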
255
"""simple docstring""" import pickle import shutil import tempfile import unittest from transformers import SPIECE_UNDERLINE, XGLMTokenizer, XGLMTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin __magic_name__ = get_tests_dir("fixtures/test_sentencepiece.model") @require_sentencepiece @require_tokenizers class SCREAMING_SNAKE_CASE_ ( __a , unittest.TestCase ): """simple docstring""" __lowercase : Union[str, Any] = XGLMTokenizer __lowercase : int = XGLMTokenizerFast __lowercase : Optional[Any] = True __lowercase : str = True def snake_case_ ( self): super().setUp() # We have a SentencePiece fixture for testing __SCREAMING_SNAKE_CASE = XGLMTokenizer(lowerCAmelCase__ , keep_accents=lowerCAmelCase__) tokenizer.save_pretrained(self.tmpdirname) def snake_case_ ( self): __SCREAMING_SNAKE_CASE = """<pad>""" __SCREAMING_SNAKE_CASE = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowerCAmelCase__) , lowerCAmelCase__) self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowerCAmelCase__) , lowerCAmelCase__) def snake_case_ ( self): __SCREAMING_SNAKE_CASE = list(self.get_tokenizer().get_vocab().keys()) self.assertEqual(vocab_keys[0] , """<s>""") self.assertEqual(vocab_keys[1] , """<pad>""") self.assertEqual(len(lowerCAmelCase__) , 1_0_0_8) def snake_case_ ( self): self.assertEqual(self.get_tokenizer().vocab_size , 1_0_0_8) def snake_case_ ( self): __SCREAMING_SNAKE_CASE = XGLMTokenizer(lowerCAmelCase__ , keep_accents=lowerCAmelCase__) __SCREAMING_SNAKE_CASE = tokenizer.tokenize("""This is a test""") self.assertListEqual(lowerCAmelCase__ , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""]) self.assertListEqual( tokenizer.convert_tokens_to_ids(lowerCAmelCase__) , [value + tokenizer.fairseq_offset for value in [2_8_5, 4_6, 1_0, 1_7_0, 3_8_2]] , ) __SCREAMING_SNAKE_CASE = tokenizer.tokenize("""I was born in 92000, and this is falsé.""") self.assertListEqual( lowerCAmelCase__ , [ SPIECE_UNDERLINE + """I""", SPIECE_UNDERLINE + """was""", SPIECE_UNDERLINE + """b""", """or""", """n""", SPIECE_UNDERLINE + """in""", SPIECE_UNDERLINE + """""", """9""", """2""", """0""", """0""", """0""", """,""", SPIECE_UNDERLINE + """and""", SPIECE_UNDERLINE + """this""", SPIECE_UNDERLINE + """is""", SPIECE_UNDERLINE + """f""", """al""", """s""", """é""", """.""", ] , ) __SCREAMING_SNAKE_CASE = tokenizer.convert_tokens_to_ids(lowerCAmelCase__) self.assertListEqual( lowerCAmelCase__ , [ value + tokenizer.fairseq_offset for value in [8, 2_1, 8_4, 5_5, 2_4, 1_9, 7, 2, 6_0_2, 3_4_7, 3_4_7, 3_4_7, 3, 1_2, 6_6, 4_6, 7_2, 8_0, 6, 2, 4] ] , ) __SCREAMING_SNAKE_CASE = tokenizer.convert_ids_to_tokens(lowerCAmelCase__) self.assertListEqual( lowerCAmelCase__ , [ SPIECE_UNDERLINE + """I""", SPIECE_UNDERLINE + """was""", SPIECE_UNDERLINE + """b""", """or""", """n""", SPIECE_UNDERLINE + """in""", SPIECE_UNDERLINE + """""", """<unk>""", """2""", """0""", """0""", """0""", """,""", SPIECE_UNDERLINE + """and""", SPIECE_UNDERLINE + """this""", SPIECE_UNDERLINE + """is""", SPIECE_UNDERLINE + """f""", """al""", """s""", """<unk>""", """.""", ] , ) @cached_property def snake_case_ ( self): return XGLMTokenizer.from_pretrained("""facebook/xglm-564M""") def snake_case_ ( self): with tempfile.NamedTemporaryFile() as f: shutil.copyfile(lowerCAmelCase__ , f.name) __SCREAMING_SNAKE_CASE = XGLMTokenizer(f.name , keep_accents=lowerCAmelCase__) 
__SCREAMING_SNAKE_CASE = pickle.dumps(lowerCAmelCase__) pickle.loads(lowerCAmelCase__) def snake_case_ ( self): if not self.test_rust_tokenizer: return __SCREAMING_SNAKE_CASE = self.get_tokenizer() __SCREAMING_SNAKE_CASE = self.get_rust_tokenizer() __SCREAMING_SNAKE_CASE = """I was born in 92000, and this is falsé.""" __SCREAMING_SNAKE_CASE = tokenizer.tokenize(lowerCAmelCase__) __SCREAMING_SNAKE_CASE = rust_tokenizer.tokenize(lowerCAmelCase__) self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__) __SCREAMING_SNAKE_CASE = tokenizer.encode(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__) __SCREAMING_SNAKE_CASE = rust_tokenizer.encode(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__) self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__) __SCREAMING_SNAKE_CASE = self.get_rust_tokenizer() __SCREAMING_SNAKE_CASE = tokenizer.encode(lowerCAmelCase__) __SCREAMING_SNAKE_CASE = rust_tokenizer.encode(lowerCAmelCase__) self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__) @slow def snake_case_ ( self): __SCREAMING_SNAKE_CASE = """Hello World!""" __SCREAMING_SNAKE_CASE = [2, 3_1_2_2_7, 4_4_4_7, 3_5] self.assertListEqual(lowerCAmelCase__ , self.big_tokenizer.encode(lowerCAmelCase__)) @slow def snake_case_ ( self): __SCREAMING_SNAKE_CASE = ( """This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will""" """ add words that should not exsist and be tokenized to unk, such as saoneuhaoesuth""" ) # fmt: off __SCREAMING_SNAKE_CASE = [2, 1_0_1_8, 6_7, 1_1, 1_9_8_8, 2_6_1_7, 5_6_3_1, 2_7_8, 1_1, 3_4_0_7, 4_8, 7_1_6_3_0, 2_8_0_8_5, 4, 3_2_3_4, 1_5_7, 1_3, 6, 5, 6, 4, 3_5_2_6, 7_6_8, 1_5, 6_5_9, 5_7, 2_9_8, 3_9_8_3, 8_6_4, 1_2_9, 2_1, 6, 5, 1_3_6_7_5, 3_7_7, 6_5_2, 7_5_8_0, 1_0_3_4_1, 1_5_5, 2_8_1_7, 4_2_2, 1_6_6_6, 7, 1_6_7_4, 5_3, 1_1_3, 2_0_2_2_7_7, 1_7_8_9_2, 3_3, 6_0, 8_7, 4, 3_2_3_4, 1_5_7, 6_1, 2_6_6_7, 5_2_3_7_6, 1_9, 8_8, 2_3, 7_3_5] # fmt: on self.assertListEqual(lowerCAmelCase__ , self.big_tokenizer.encode(lowerCAmelCase__)) @slow def snake_case_ ( self): # fmt: off __SCREAMING_SNAKE_CASE = { """input_ids""": [[2, 1_0_8_8_2_5, 1_1_6_3, 1_5, 8_8_0_1_0, 4_7_3, 1_5_8_9_8, 1_5_7, 1_3_6_7_2, 1_8_5_7, 3_1_2, 8, 2_3_8_0_2_1, 1_1_6_3, 5_3, 1_3_6_7_2, 1_8_5_7, 3_1_2, 8, 5_3_2_8_3, 1_8_2_3_9_6, 8, 1_8_5_6_6, 1_6, 3_6_7_3_3, 4_1_0_1, 8, 2_3_0, 2_4_4_0_1_7, 1_2_2_5_5_3, 7, 1_5, 1_3_2_5_9_7, 4, 2_9_3, 1_2_5_1_1, 7_6_1_0, 4, 3_4_1_4, 1_3_2_5_9_7, 9, 4, 3_2_3_6_1, 3_6_2, 4, 7_3_4, 2_8_5_1_2, 3_2_5_6_9, 1_8, 4, 3_2_3_6_1, 2_6_0_9_6, 1_4_9_8_2, 7_3, 1_8_7_1_5, 2_1_4_3_3, 2_3_5_2_6_1, 1_5, 4_9_2, 1_2_4_2_7, 1_6, 5_3, 1_8_7_1_5, 2_1_4_3_3, 6_5_4_5_4, 1_5, 2_3_6_5_9, 5_6_3, 1_6, 2_7_8, 5_9_7, 2_8_4_3, 5_9_5, 7_9_3_1, 1_8_2_3_9_6, 6_4_1_8_6, 2_2, 8_8_6, 5_9_5, 1_3_2_9_8_1, 5_3, 2_5_5_4_0, 3_4_4_9, 4_3_9_8_2, 3_9_9_0_1, 5_9_5_1, 8_7_8, 3_3_0, 4, 2_7_6_9_4, 8_0_2_6_9, 3_1_2, 5_3, 6_5_1_7, 1_1_7_8_0, 6_1_1, 2_0_4_0_8, 5], [2, 6, 1_3_2_5_9_7, 6_7, 4_2_8_9_7, 3_3, 5_9_2, 8, 1_6_3_7_2_9, 2_5_5_4_0, 3_6_1, 1_3_6_9_9_7, 1_0_9_5_1_4, 1_7_3_2_3_0, 7, 5_0_1, 6_0, 1_0_2_9_1_3, 1_9_6, 5_6_3_1, 2_3_5, 6_3_2_4_3, 4_7_3, 6, 2_3_1_7_5_7, 7_4, 5_2_7_7, 7_9_0_5, 5_3, 3_0_9_5, 3_7_3_1_7, 2_2, 4_5_4, 1_8_3_8_7_4, 5], [2, 2_6_8, 3_1_2_9_8, 4_6_5_3_0, 6, 1_3_2_9_3_5, 4_3_8_3_1, 7, 5_9_7, 3_2, 2_4, 3_6_8_8, 9_8_6_5, 5]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] } # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=lowerCAmelCase__ , model_name="""facebook/xglm-564M""" , padding=lowerCAmelCase__ , )
255
1
'''simple docstring''' import os from pickle import UnpicklingError from typing import Dict, Tuple import jax import jax.numpy as jnp import numpy as np from flax.serialization import from_bytes from flax.traverse_util import flatten_dict, unflatten_dict import transformers from .utils import logging __SCREAMING_SNAKE_CASE : Optional[int] = logging.get_logger(__name__) def UpperCamelCase_ ( _UpperCAmelCase : Dict , _UpperCAmelCase : List[Any] , _UpperCAmelCase : List[Any] , _UpperCAmelCase : List[Any]=False ) -> str: """simple docstring""" try: import torch # noqa: F401 except ImportError: logger.error( "Loading a PyTorch model in Flax, requires both PyTorch and Flax to be installed. Please see" " https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation" " instructions." ) raise if not is_sharded: _UpperCAmelCase : Optional[int] = os.path.abspath(_UpperCAmelCase ) logger.info(F"""Loading PyTorch weights from {pt_path}""" ) _UpperCAmelCase : Tuple = torch.load(_UpperCAmelCase , map_location="cpu" ) logger.info(F"""PyTorch checkpoint contains {sum(t.numel() for t in pt_state_dict.values() ):,} parameters.""" ) _UpperCAmelCase : Tuple = convert_pytorch_state_dict_to_flax(_UpperCAmelCase , _UpperCAmelCase ) else: # model is sharded and pytorch_checkpoint_path already contains the list of .pt shard files _UpperCAmelCase : Optional[Any] = convert_pytorch_sharded_state_dict_to_flax(_UpperCAmelCase , _UpperCAmelCase ) return flax_state_dict def UpperCamelCase_ ( _UpperCAmelCase : Tuple[str] , _UpperCAmelCase : np.ndarray , _UpperCAmelCase : Dict[str, jnp.ndarray] , _UpperCAmelCase : str , ) -> (Tuple[str], np.ndarray): """simple docstring""" def is_key_or_prefix_key_in_dict(_UpperCAmelCase : Tuple[str] ) -> bool: return len(set(_UpperCAmelCase ) & {key, (model_prefix,) + key} ) > 0 # layer norm _UpperCAmelCase : int = pt_tuple_key[:-1] + ("scale",) if pt_tuple_key[-1] in ["weight", "gamma"] and is_key_or_prefix_key_in_dict(_UpperCAmelCase ): return renamed_pt_tuple_key, pt_tensor # batch norm layer mean _UpperCAmelCase : Tuple = pt_tuple_key[:-1] + ("mean",) if pt_tuple_key[-1] == "running_mean" and not is_key_or_prefix_key_in_dict(_UpperCAmelCase ): return renamed_pt_tuple_key, pt_tensor # batch norm layer var _UpperCAmelCase : List[str] = pt_tuple_key[:-1] + ("var",) if pt_tuple_key[-1] == "running_var" and not is_key_or_prefix_key_in_dict(_UpperCAmelCase ): return renamed_pt_tuple_key, pt_tensor # embedding _UpperCAmelCase : Optional[Any] = pt_tuple_key[:-1] + ("embedding",) if pt_tuple_key[-1] == "weight" and is_key_or_prefix_key_in_dict(_UpperCAmelCase ): return renamed_pt_tuple_key, pt_tensor # conv layer _UpperCAmelCase : int = pt_tuple_key[:-1] + ("kernel",) if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4 and not is_key_or_prefix_key_in_dict(_UpperCAmelCase ): _UpperCAmelCase : int = pt_tensor.transpose(2 , 3 , 1 , 0 ) return renamed_pt_tuple_key, pt_tensor # linear layer _UpperCAmelCase : Tuple = pt_tuple_key[:-1] + ("kernel",) if pt_tuple_key[-1] == "weight" and not is_key_or_prefix_key_in_dict(_UpperCAmelCase ): _UpperCAmelCase : Tuple = pt_tensor.T return renamed_pt_tuple_key, pt_tensor # old PyTorch layer norm weight _UpperCAmelCase : Any = pt_tuple_key[:-1] + ("weight",) if pt_tuple_key[-1] == "gamma": return renamed_pt_tuple_key, pt_tensor # old PyTorch layer norm bias _UpperCAmelCase : Tuple = pt_tuple_key[:-1] + ("bias",) if pt_tuple_key[-1] == "beta": return renamed_pt_tuple_key, pt_tensor # New `weight_norm` from 
https://github.com/huggingface/transformers/pull/24030 _UpperCAmelCase : List[str] = None if pt_tuple_key[-3::2] == ("parametrizations", "original0"): _UpperCAmelCase : List[str] = pt_tuple_key[-2] + "_g" elif pt_tuple_key[-3::2] == ("parametrizations", "original1"): _UpperCAmelCase : int = pt_tuple_key[-2] + "_v" if name is not None: _UpperCAmelCase : Tuple = pt_tuple_key[:-3] + (name,) return renamed_pt_tuple_key, pt_tensor return pt_tuple_key, pt_tensor def UpperCamelCase_ ( _UpperCAmelCase : Any , _UpperCAmelCase : List[Any] ) -> Tuple: """simple docstring""" _UpperCAmelCase : Optional[int] = {k: v.numpy() for k, v in pt_state_dict.items()} _UpperCAmelCase : Optional[Any] = flax_model.base_model_prefix # use params dict if the model contains batch norm layers if "params" in flax_model.params: _UpperCAmelCase : str = flax_model.params["params"] else: _UpperCAmelCase : Union[str, Any] = flax_model.params _UpperCAmelCase : List[Any] = flatten_dict(_UpperCAmelCase ) # add batch_stats keys,values to dict if "batch_stats" in flax_model.params: _UpperCAmelCase : Dict = flatten_dict(flax_model.params["batch_stats"] ) random_flax_state_dict.update(_UpperCAmelCase ) _UpperCAmelCase : str = {} _UpperCAmelCase : List[Any] = (model_prefix not in flax_model_params) and ( model_prefix in {k.split("." )[0] for k in pt_state_dict.keys()} ) _UpperCAmelCase : List[str] = (model_prefix in flax_model_params) and ( model_prefix not in {k.split("." )[0] for k in pt_state_dict.keys()} ) # Need to change some parameters name to match Flax names for pt_key, pt_tensor in pt_state_dict.items(): _UpperCAmelCase : int = tuple(pt_key.split("." ) ) # remove base model prefix if necessary _UpperCAmelCase : Any = pt_tuple_key[0] == model_prefix if load_model_with_head_into_base_model and has_base_model_prefix: _UpperCAmelCase : Tuple = pt_tuple_key[1:] # Correctly rename weight parameters _UpperCAmelCase , _UpperCAmelCase : List[Any] = rename_key_and_reshape_tensor( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) # add model prefix if necessary _UpperCAmelCase : Optional[int] = (model_prefix,) + flax_key in random_flax_state_dict if load_base_model_into_model_with_head and require_base_model_prefix: _UpperCAmelCase : Union[str, Any] = (model_prefix,) + flax_key if flax_key in random_flax_state_dict: if flax_tensor.shape != random_flax_state_dict[flax_key].shape: raise ValueError( F"""PyTorch checkpoint seems to be incorrect. 
Weight {pt_key} was expected to be of shape """ F"""{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.""" ) # add batch stats if the model contains batchnorm layers if "batch_stats" in flax_model.params: if "mean" in flax_key[-1] or "var" in flax_key[-1]: _UpperCAmelCase : List[str] = jnp.asarray(_UpperCAmelCase ) continue # remove num_batches_tracked key if "num_batches_tracked" in flax_key[-1]: flax_state_dict.pop(_UpperCAmelCase , _UpperCAmelCase ) continue # also add unexpected weight so that warning is thrown _UpperCAmelCase : Union[str, Any] = jnp.asarray(_UpperCAmelCase ) else: # also add unexpected weight so that warning is thrown _UpperCAmelCase : Optional[int] = jnp.asarray(_UpperCAmelCase ) return unflatten_dict(_UpperCAmelCase ) def UpperCamelCase_ ( _UpperCAmelCase : Tuple , _UpperCAmelCase : Optional[Any] ) -> Optional[int]: """simple docstring""" import torch # Load the index _UpperCAmelCase : Union[str, Any] = {} for shard_file in shard_filenames: # load using msgpack utils _UpperCAmelCase : List[str] = torch.load(_UpperCAmelCase ) _UpperCAmelCase : int = {k: v.numpy() for k, v in pt_state_dict.items()} _UpperCAmelCase : List[Any] = flax_model.base_model_prefix # use params dict if the model contains batch norm layers and then add batch_stats keys,values to dict if "batch_stats" in flax_model.params: _UpperCAmelCase : Optional[Any] = flax_model.params["params"] _UpperCAmelCase : List[Any] = flatten_dict(_UpperCAmelCase ) random_flax_state_dict.update(flatten_dict(flax_model.params["batch_stats"] ) ) else: _UpperCAmelCase : List[Any] = flax_model.params _UpperCAmelCase : List[str] = flatten_dict(_UpperCAmelCase ) _UpperCAmelCase : Optional[int] = (model_prefix not in flax_model_params) and ( model_prefix in {k.split("." )[0] for k in pt_state_dict.keys()} ) _UpperCAmelCase : int = (model_prefix in flax_model_params) and ( model_prefix not in {k.split("." )[0] for k in pt_state_dict.keys()} ) # Need to change some parameters name to match Flax names for pt_key, pt_tensor in pt_state_dict.items(): _UpperCAmelCase : int = tuple(pt_key.split("." ) ) # remove base model prefix if necessary _UpperCAmelCase : List[Any] = pt_tuple_key[0] == model_prefix if load_model_with_head_into_base_model and has_base_model_prefix: _UpperCAmelCase : str = pt_tuple_key[1:] # Correctly rename weight parameters _UpperCAmelCase , _UpperCAmelCase : Tuple = rename_key_and_reshape_tensor( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) # add model prefix if necessary _UpperCAmelCase : Tuple = (model_prefix,) + flax_key in random_flax_state_dict if load_base_model_into_model_with_head and require_base_model_prefix: _UpperCAmelCase : Tuple = (model_prefix,) + flax_key if flax_key in random_flax_state_dict: if flax_tensor.shape != random_flax_state_dict[flax_key].shape: raise ValueError( F"""PyTorch checkpoint seems to be incorrect. 
Weight {pt_key} was expected to be of shape """ F"""{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.""" ) # add batch stats if the model contains batchnorm layers if "batch_stats" in flax_model.params: if "mean" in flax_key[-1]: _UpperCAmelCase : Union[str, Any] = jnp.asarray(_UpperCAmelCase ) continue if "var" in flax_key[-1]: _UpperCAmelCase : str = jnp.asarray(_UpperCAmelCase ) continue # remove num_batches_tracked key if "num_batches_tracked" in flax_key[-1]: flax_state_dict.pop(_UpperCAmelCase , _UpperCAmelCase ) continue # also add unexpected weight so that warning is thrown _UpperCAmelCase : List[str] = jnp.asarray(_UpperCAmelCase ) else: # also add unexpected weight so that warning is thrown _UpperCAmelCase : Any = jnp.asarray(_UpperCAmelCase ) return unflatten_dict(_UpperCAmelCase ) def UpperCamelCase_ ( _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Any ) -> str: """simple docstring""" _UpperCAmelCase : List[Any] = os.path.abspath(_UpperCAmelCase ) logger.info(F"""Loading Flax weights from {flax_checkpoint_path}""" ) # import correct flax class _UpperCAmelCase : List[str] = getattr(_UpperCAmelCase , "Flax" + model.__class__.__name__ ) # load flax weight dict with open(_UpperCAmelCase , "rb" ) as state_f: try: _UpperCAmelCase : Dict = from_bytes(_UpperCAmelCase , state_f.read() ) except UnpicklingError: raise EnvironmentError(F"""Unable to convert {flax_checkpoint_path} to Flax deserializable object. """ ) return load_flax_weights_in_pytorch_model(_UpperCAmelCase , _UpperCAmelCase ) def UpperCamelCase_ ( _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : int ) -> int: """simple docstring""" try: import torch # noqa: F401 except ImportError: logger.error( "Loading a Flax weights in PyTorch, requires both PyTorch and Flax to be installed. Please see" " https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation" " instructions." ) raise # check if we have bf16 weights _UpperCAmelCase : List[str] = flatten_dict(jax.tree_util.tree_map(lambda _UpperCAmelCase : x.dtype == jnp.bfloataa , _UpperCAmelCase ) ).values() if any(_UpperCAmelCase ): # convert all weights to fp32 if the are bf16 since torch.from_numpy can-not handle bf16 # and bf16 is not fully supported in PT yet. logger.warning( "Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` " "before loading those in PyTorch model." ) _UpperCAmelCase : Optional[int] = jax.tree_util.tree_map( lambda _UpperCAmelCase : params.astype(np.floataa ) if params.dtype == jnp.bfloataa else params , _UpperCAmelCase ) _UpperCAmelCase : Tuple = flatten_dict(_UpperCAmelCase ) _UpperCAmelCase : List[str] = pt_model.state_dict() _UpperCAmelCase : Tuple = (pt_model.base_model_prefix in flax_state) and ( pt_model.base_model_prefix not in {k.split("." )[0] for k in pt_model_dict.keys()} ) _UpperCAmelCase : Union[str, Any] = (pt_model.base_model_prefix not in flax_state) and ( pt_model.base_model_prefix in {k.split("." 
)[0] for k in pt_model_dict.keys()} ) # keep track of unexpected & missing keys _UpperCAmelCase : List[Any] = [] _UpperCAmelCase : str = set(pt_model_dict.keys() ) for flax_key_tuple, flax_tensor in flax_state_dict.items(): _UpperCAmelCase : Tuple = flax_key_tuple[0] == pt_model.base_model_prefix _UpperCAmelCase : Optional[Any] = ".".join((pt_model.base_model_prefix,) + flax_key_tuple ) in pt_model_dict # adapt flax_key to prepare for loading from/to base model only if load_model_with_head_into_base_model and has_base_model_prefix: _UpperCAmelCase : str = flax_key_tuple[1:] elif load_base_model_into_model_with_head and require_base_model_prefix: _UpperCAmelCase : int = (pt_model.base_model_prefix,) + flax_key_tuple # rename flax weights to PyTorch format if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 4 and ".".join(_UpperCAmelCase ) not in pt_model_dict: # conv layer _UpperCAmelCase : int = flax_key_tuple[:-1] + ("weight",) _UpperCAmelCase : Optional[Any] = jnp.transpose(_UpperCAmelCase , (3, 2, 0, 1) ) elif flax_key_tuple[-1] == "kernel" and ".".join(_UpperCAmelCase ) not in pt_model_dict: # linear layer _UpperCAmelCase : List[Any] = flax_key_tuple[:-1] + ("weight",) _UpperCAmelCase : List[Any] = flax_tensor.T elif flax_key_tuple[-1] in ["scale", "embedding"]: _UpperCAmelCase : int = flax_key_tuple[:-1] + ("weight",) # adding batch stats from flax batch norm to pt elif "mean" in flax_key_tuple[-1]: _UpperCAmelCase : Optional[Any] = flax_key_tuple[:-1] + ("running_mean",) elif "var" in flax_key_tuple[-1]: _UpperCAmelCase : List[str] = flax_key_tuple[:-1] + ("running_var",) if "batch_stats" in flax_state: _UpperCAmelCase : Union[str, Any] = ".".join(flax_key_tuple[1:] ) # Remove the params/batch_stats header else: _UpperCAmelCase : Optional[Any] = ".".join(_UpperCAmelCase ) # We also need to look at `pt_model_dict` and see if there are keys requiring further transformation. _UpperCAmelCase : Optional[Any] = {} # New `weight_norm` from https://github.com/huggingface/transformers/pull/24030 for key in pt_model_dict: _UpperCAmelCase : Optional[int] = key.split("." ) _UpperCAmelCase : Tuple = None if key_components[-3::2] == ["parametrizations", "original0"]: _UpperCAmelCase : Union[str, Any] = key_components[-2] + "_g" elif key_components[-3::2] == ["parametrizations", "original1"]: _UpperCAmelCase : int = key_components[-2] + "_v" if name is not None: _UpperCAmelCase : List[str] = key_components[:-3] + [name] _UpperCAmelCase : Optional[Any] = ".".join(_UpperCAmelCase ) _UpperCAmelCase : Any = key if flax_key in special_pt_names: _UpperCAmelCase : Union[str, Any] = special_pt_names[flax_key] if flax_key in pt_model_dict: if flax_tensor.shape != pt_model_dict[flax_key].shape: raise ValueError( F"""Flax checkpoint seems to be incorrect. 
Weight {flax_key_tuple} was expected """ F"""to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}.""" ) else: # add weight to pytorch dict _UpperCAmelCase : Optional[Any] = np.asarray(_UpperCAmelCase ) if not isinstance(_UpperCAmelCase , np.ndarray ) else flax_tensor _UpperCAmelCase : int = torch.from_numpy(_UpperCAmelCase ) # remove from missing keys missing_keys.remove(_UpperCAmelCase ) else: # weight is not expected by PyTorch model unexpected_keys.append(_UpperCAmelCase ) pt_model.load_state_dict(_UpperCAmelCase ) # re-transform missing_keys to list _UpperCAmelCase : Optional[int] = list(_UpperCAmelCase ) if len(_UpperCAmelCase ) > 0: logger.warning( "Some weights of the Flax model were not used when initializing the PyTorch model" F""" {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing""" F""" {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture""" " (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This" F""" IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect""" " to be exactly identical (e.g. initializing a BertForSequenceClassification model from a" " FlaxBertForSequenceClassification model)." ) else: logger.warning(F"""All Flax model weights were used when initializing {pt_model.__class__.__name__}.\n""" ) if len(_UpperCAmelCase ) > 0: logger.warning( F"""Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly""" F""" initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to""" " use it for predictions and inference." ) else: logger.warning( F"""All the weights of {pt_model.__class__.__name__} were initialized from the Flax model.\n""" "If your task is similar to the task the model of the checkpoint was trained on, " F"""you can already use {pt_model.__class__.__name__} for predictions without further training.""" ) return pt_model
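# --- Illustrative sketch: the weight-layout changes applied above ---
# The two central tensor rewrites in the PyTorch<->Flax converter, shown with
# plain numpy so they run stand-alone. Shapes are illustrative.

import numpy as np

# conv kernels: PyTorch stores (out_channels, in_channels, kH, kW),
# Flax expects (kH, kW, in_channels, out_channels), hence axes (2, 3, 1, 0)
pt_kernel = np.zeros((8, 3, 5, 5))
flax_kernel = pt_kernel.transpose(2, 3, 1, 0)
assert flax_kernel.shape == (5, 5, 3, 8)

# linear layers only need a plain transpose: PyTorch (out, in) -> Flax (in, out)
pt_linear = np.zeros((16, 32))
assert pt_linear.T.shape == (32, 16)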
31
'''simple docstring''' import shutil import tempfile import unittest from unittest.mock import patch from transformers import ( DefaultFlowCallback, IntervalStrategy, PrinterCallback, ProgressCallback, Trainer, TrainerCallback, TrainingArguments, is_torch_available, ) from transformers.testing_utils import require_torch if is_torch_available(): from transformers.trainer import DEFAULT_CALLBACKS from .test_trainer import RegressionDataset, RegressionModelConfig, RegressionPreTrainedModel class lowerCamelCase_ (snake_case__ ): '''simple docstring''' def __init__( self : List[Any] ): _UpperCAmelCase : Union[str, Any] = [] def _A ( self : Any , A : Union[str, Any] , A : Optional[int] , A : List[str] , **A : Tuple ): self.events.append("on_init_end" ) def _A ( self : Any , A : str , A : List[Any] , A : List[Any] , **A : Tuple ): self.events.append("on_train_begin" ) def _A ( self : Tuple , A : List[str] , A : Tuple , A : int , **A : List[str] ): self.events.append("on_train_end" ) def _A ( self : Optional[Any] , A : Dict , A : Any , A : Optional[Any] , **A : List[Any] ): self.events.append("on_epoch_begin" ) def _A ( self : Optional[Any] , A : List[Any] , A : List[str] , A : Optional[int] , **A : Optional[int] ): self.events.append("on_epoch_end" ) def _A ( self : List[str] , A : Optional[int] , A : List[Any] , A : Union[str, Any] , **A : Any ): self.events.append("on_step_begin" ) def _A ( self : Tuple , A : Union[str, Any] , A : int , A : Optional[int] , **A : int ): self.events.append("on_step_end" ) def _A ( self : Optional[int] , A : Optional[Any] , A : Union[str, Any] , A : str , **A : Union[str, Any] ): self.events.append("on_evaluate" ) def _A ( self : Optional[Any] , A : Optional[int] , A : Dict , A : List[Any] , **A : Dict ): self.events.append("on_predict" ) def _A ( self : Dict , A : Dict , A : List[Any] , A : Dict , **A : str ): self.events.append("on_save" ) def _A ( self : Tuple , A : Optional[Any] , A : Union[str, Any] , A : Optional[int] , **A : Dict ): self.events.append("on_log" ) def _A ( self : Optional[int] , A : Optional[Any] , A : Tuple , A : Tuple , **A : List[str] ): self.events.append("on_prediction_step" ) @require_torch class lowerCamelCase_ (unittest.TestCase ): '''simple docstring''' def _A ( self : Optional[int] ): _UpperCAmelCase : Optional[Any] = tempfile.mkdtemp() def _A ( self : List[Any] ): shutil.rmtree(self.output_dir ) def _A ( self : Union[str, Any] , A : Optional[int]=0 , A : Optional[Any]=0 , A : Optional[Any]=64 , A : Dict=64 , A : Any=None , A : Tuple=False , **A : Optional[int] ): # disable_tqdm in TrainingArguments has a flaky default since it depends on the level of logging. We make sure # its set to False since the tests later on depend on its value. 
_UpperCAmelCase : str = RegressionDataset(length=A ) _UpperCAmelCase : Union[str, Any] = RegressionDataset(length=A ) _UpperCAmelCase : Any = RegressionModelConfig(a=A , b=A ) _UpperCAmelCase : List[Any] = RegressionPreTrainedModel(A ) _UpperCAmelCase : Dict = TrainingArguments(self.output_dir , disable_tqdm=A , report_to=[] , **A ) return Trainer( A , A , train_dataset=A , eval_dataset=A , callbacks=A , ) def _A ( self : str , A : List[str] , A : List[str] ): self.assertEqual(len(A ) , len(A ) ) # Order doesn't matter _UpperCAmelCase : Tuple = sorted(A , key=lambda A : cb.__name__ if isinstance(A , A ) else cb.__class__.__name__ ) _UpperCAmelCase : Any = sorted(A , key=lambda A : cb.__name__ if isinstance(A , A ) else cb.__class__.__name__ ) for cba, cba in zip(A , A ): if isinstance(A , A ) and isinstance(A , A ): self.assertEqual(A , A ) elif isinstance(A , A ) and not isinstance(A , A ): self.assertEqual(A , cba.__class__ ) elif not isinstance(A , A ) and isinstance(A , A ): self.assertEqual(cba.__class__ , A ) else: self.assertEqual(A , A ) def _A ( self : int , A : List[str] ): _UpperCAmelCase : List[str] = ["on_init_end", "on_train_begin"] _UpperCAmelCase : str = 0 _UpperCAmelCase : Optional[Any] = len(trainer.get_eval_dataloader() ) _UpperCAmelCase : Optional[int] = ["on_prediction_step"] * len(trainer.get_eval_dataloader() ) + ["on_log", "on_evaluate"] for _ in range(trainer.state.num_train_epochs ): expected_events.append("on_epoch_begin" ) for _ in range(A ): step += 1 expected_events += ["on_step_begin", "on_step_end"] if step % trainer.args.logging_steps == 0: expected_events.append("on_log" ) if trainer.args.evaluation_strategy == IntervalStrategy.STEPS and step % trainer.args.eval_steps == 0: expected_events += evaluation_events.copy() if step % trainer.args.save_steps == 0: expected_events.append("on_save" ) expected_events.append("on_epoch_end" ) if trainer.args.evaluation_strategy == IntervalStrategy.EPOCH: expected_events += evaluation_events.copy() expected_events += ["on_log", "on_train_end"] return expected_events def _A ( self : str ): _UpperCAmelCase : Any = self.get_trainer() _UpperCAmelCase : int = DEFAULT_CALLBACKS.copy() + [ProgressCallback] self.check_callbacks_equality(trainer.callback_handler.callbacks , A ) # Callbacks passed at init are added to the default callbacks _UpperCAmelCase : Optional[int] = self.get_trainer(callbacks=[MyTestTrainerCallback] ) expected_callbacks.append(A ) self.check_callbacks_equality(trainer.callback_handler.callbacks , A ) # TrainingArguments.disable_tqdm controls if use ProgressCallback or PrinterCallback _UpperCAmelCase : List[Any] = self.get_trainer(disable_tqdm=A ) _UpperCAmelCase : Tuple = DEFAULT_CALLBACKS.copy() + [PrinterCallback] self.check_callbacks_equality(trainer.callback_handler.callbacks , A ) def _A ( self : Optional[Any] ): _UpperCAmelCase : Dict = DEFAULT_CALLBACKS.copy() + [ProgressCallback] _UpperCAmelCase : Dict = self.get_trainer() # We can add, pop, or remove by class name trainer.remove_callback(A ) expected_callbacks.remove(A ) self.check_callbacks_equality(trainer.callback_handler.callbacks , A ) _UpperCAmelCase : Optional[Any] = self.get_trainer() _UpperCAmelCase : Any = trainer.pop_callback(A ) self.assertEqual(cb.__class__ , A ) self.check_callbacks_equality(trainer.callback_handler.callbacks , A ) trainer.add_callback(A ) expected_callbacks.insert(0 , A ) self.check_callbacks_equality(trainer.callback_handler.callbacks , A ) # We can also add, pop, or remove by instance _UpperCAmelCase : Union[str, 
Any] = self.get_trainer() _UpperCAmelCase : List[Any] = trainer.callback_handler.callbacks[0] trainer.remove_callback(A ) expected_callbacks.remove(A ) self.check_callbacks_equality(trainer.callback_handler.callbacks , A ) _UpperCAmelCase : List[Any] = self.get_trainer() _UpperCAmelCase : List[Any] = trainer.callback_handler.callbacks[0] _UpperCAmelCase : Union[str, Any] = trainer.pop_callback(A ) self.assertEqual(A , A ) self.check_callbacks_equality(trainer.callback_handler.callbacks , A ) trainer.add_callback(A ) expected_callbacks.insert(0 , A ) self.check_callbacks_equality(trainer.callback_handler.callbacks , A ) def _A ( self : Optional[Any] ): import warnings # XXX: for now ignore scatter_gather warnings in this test since it's not relevant to what's being tested warnings.simplefilter(action="ignore" , category=A ) _UpperCAmelCase : Optional[int] = self.get_trainer(callbacks=[MyTestTrainerCallback] ) trainer.train() _UpperCAmelCase : Union[str, Any] = trainer.callback_handler.callbacks[-2].events self.assertEqual(A , self.get_expected_events(A ) ) # Independent log/save/eval _UpperCAmelCase : Tuple = self.get_trainer(callbacks=[MyTestTrainerCallback] , logging_steps=5 ) trainer.train() _UpperCAmelCase : int = trainer.callback_handler.callbacks[-2].events self.assertEqual(A , self.get_expected_events(A ) ) _UpperCAmelCase : List[str] = self.get_trainer(callbacks=[MyTestTrainerCallback] , save_steps=5 ) trainer.train() _UpperCAmelCase : Tuple = trainer.callback_handler.callbacks[-2].events self.assertEqual(A , self.get_expected_events(A ) ) _UpperCAmelCase : int = self.get_trainer(callbacks=[MyTestTrainerCallback] , eval_steps=5 , evaluation_strategy="steps" ) trainer.train() _UpperCAmelCase : Optional[int] = trainer.callback_handler.callbacks[-2].events self.assertEqual(A , self.get_expected_events(A ) ) _UpperCAmelCase : Optional[int] = self.get_trainer(callbacks=[MyTestTrainerCallback] , evaluation_strategy="epoch" ) trainer.train() _UpperCAmelCase : int = trainer.callback_handler.callbacks[-2].events self.assertEqual(A , self.get_expected_events(A ) ) # A bit of everything _UpperCAmelCase : int = self.get_trainer( callbacks=[MyTestTrainerCallback] , logging_steps=3 , save_steps=10 , eval_steps=5 , evaluation_strategy="steps" , ) trainer.train() _UpperCAmelCase : Optional[int] = trainer.callback_handler.callbacks[-2].events self.assertEqual(A , self.get_expected_events(A ) ) # warning should be emitted for duplicated callbacks with patch("transformers.trainer_callback.logger.warning" ) as warn_mock: _UpperCAmelCase : Optional[Any] = self.get_trainer( callbacks=[MyTestTrainerCallback, MyTestTrainerCallback] , ) assert str(A ) in warn_mock.call_args[0][0]
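# --- Illustrative sketch: the callback event dispatch these tests exercise ---
# The pattern reduced to its essentials: a handler fans each named event out
# to every registered callback that implements the matching `on_*` hook.
# This is a simplified stand-in, not the actual `transformers` implementation.

class RecordingCallback:
    def __init__(self):
        self.events = []

    def on_train_begin(self, **kwargs):
        self.events.append("on_train_begin")

    def on_step_end(self, **kwargs):
        self.events.append("on_step_end")

class CallbackHandler:
    def __init__(self, callbacks):
        self.callbacks = list(callbacks)

    def fire(self, event, **kwargs):
        for cb in self.callbacks:
            hook = getattr(cb, event, None)  # callbacks may implement any subset of hooks
            if callable(hook):
                hook(**kwargs)

recorder = RecordingCallback()
handler = CallbackHandler([recorder])
handler.fire("on_train_begin")
handler.fire("on_step_end", step=1)
handler.fire("on_save")  # silently ignored: the callback has no such hook
assert recorder.events == ["on_train_begin", "on_step_end"]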
31
1
"""simple docstring""" import copy import os from collections import OrderedDict from typing import TYPE_CHECKING, Any, Dict, Mapping, Optional, Union if TYPE_CHECKING: from ...processing_utils import ProcessorMixin from ...utils import TensorType from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging lowerCamelCase_ = logging.get_logger(__name__) lowerCamelCase_ = { '''google/owlvit-base-patch32''': '''https://huggingface.co/google/owlvit-base-patch32/resolve/main/config.json''', '''google/owlvit-base-patch16''': '''https://huggingface.co/google/owlvit-base-patch16/resolve/main/config.json''', '''google/owlvit-large-patch14''': '''https://huggingface.co/google/owlvit-large-patch14/resolve/main/config.json''', } class UpperCamelCase_ (lowerCamelCase__ ): __magic_name__ = '''owlvit_text_model''' def __init__( self : Optional[Any] , lowerCAmelCase_ : Any=49_408 , lowerCAmelCase_ : Optional[Any]=512 , lowerCAmelCase_ : List[str]=2_048 , lowerCAmelCase_ : str=12 , lowerCAmelCase_ : List[Any]=8 , lowerCAmelCase_ : int=16 , lowerCAmelCase_ : Dict="quick_gelu" , lowerCAmelCase_ : List[Any]=1e-5 , lowerCAmelCase_ : Any=0.0 , lowerCAmelCase_ : Optional[int]=0.0_2 , lowerCAmelCase_ : int=1.0 , lowerCAmelCase_ : Tuple=0 , lowerCAmelCase_ : Dict=49_406 , lowerCAmelCase_ : Optional[int]=49_407 , **lowerCAmelCase_ : Dict , ) -> Optional[Any]: super().__init__(pad_token_id=lowerCAmelCase_ , bos_token_id=lowerCAmelCase_ , eos_token_id=lowerCAmelCase_ , **lowerCAmelCase_ ) UpperCAmelCase_ : int = vocab_size UpperCAmelCase_ : Optional[int] = hidden_size UpperCAmelCase_ : Dict = intermediate_size UpperCAmelCase_ : Any = num_hidden_layers UpperCAmelCase_ : int = num_attention_heads UpperCAmelCase_ : List[str] = max_position_embeddings UpperCAmelCase_ : Any = hidden_act UpperCAmelCase_ : str = layer_norm_eps UpperCAmelCase_ : List[Any] = attention_dropout UpperCAmelCase_ : List[str] = initializer_range UpperCAmelCase_ : List[str] = initializer_factor @classmethod def _SCREAMING_SNAKE_CASE ( cls : Optional[Any] , lowerCAmelCase_ : Union[str, os.PathLike] , **lowerCAmelCase_ : Dict ) -> Union[str, Any]: cls._set_token_in_kwargs(lowerCAmelCase_ ) UpperCAmelCase_ : str = cls.get_config_dict(lowerCAmelCase_ , **lowerCAmelCase_ ) # get the text config dict if we are loading from OwlViTConfig if config_dict.get("model_type" ) == "owlvit": UpperCAmelCase_ : Tuple = config_dict["text_config"] if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type: logger.warning( f"""You are using a model of type {config_dict['model_type']} to instantiate a model of type """ f"""{cls.model_type}. 
This is not supported for all configurations of models and can yield errors.""" ) return cls.from_dict(lowerCAmelCase_ , **lowerCAmelCase_ ) class UpperCamelCase_ (lowerCamelCase__ ): __magic_name__ = '''owlvit_vision_model''' def __init__( self : Union[str, Any] , lowerCAmelCase_ : Union[str, Any]=768 , lowerCAmelCase_ : Union[str, Any]=3_072 , lowerCAmelCase_ : str=12 , lowerCAmelCase_ : List[Any]=12 , lowerCAmelCase_ : Tuple=3 , lowerCAmelCase_ : str=768 , lowerCAmelCase_ : Union[str, Any]=32 , lowerCAmelCase_ : str="quick_gelu" , lowerCAmelCase_ : str=1e-5 , lowerCAmelCase_ : List[Any]=0.0 , lowerCAmelCase_ : List[Any]=0.0_2 , lowerCAmelCase_ : str=1.0 , **lowerCAmelCase_ : Any , ) -> Optional[int]: super().__init__(**lowerCAmelCase_ ) UpperCAmelCase_ : Dict = hidden_size UpperCAmelCase_ : Optional[int] = intermediate_size UpperCAmelCase_ : List[str] = num_hidden_layers UpperCAmelCase_ : str = num_attention_heads UpperCAmelCase_ : int = num_channels UpperCAmelCase_ : Union[str, Any] = image_size UpperCAmelCase_ : List[str] = patch_size UpperCAmelCase_ : Dict = hidden_act UpperCAmelCase_ : Any = layer_norm_eps UpperCAmelCase_ : List[Any] = attention_dropout UpperCAmelCase_ : Union[str, Any] = initializer_range UpperCAmelCase_ : Union[str, Any] = initializer_factor @classmethod def _SCREAMING_SNAKE_CASE ( cls : int , lowerCAmelCase_ : Union[str, os.PathLike] , **lowerCAmelCase_ : Optional[int] ) -> Union[str, Any]: cls._set_token_in_kwargs(lowerCAmelCase_ ) UpperCAmelCase_ : str = cls.get_config_dict(lowerCAmelCase_ , **lowerCAmelCase_ ) # get the vision config dict if we are loading from OwlViTConfig if config_dict.get("model_type" ) == "owlvit": UpperCAmelCase_ : List[Any] = config_dict["vision_config"] if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type: logger.warning( f"""You are using a model of type {config_dict['model_type']} to instantiate a model of type """ f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" ) return cls.from_dict(lowerCAmelCase_ , **lowerCAmelCase_ ) class UpperCamelCase_ (lowerCamelCase__ ): __magic_name__ = '''owlvit''' __magic_name__ = True def __init__( self : Optional[Any] , lowerCAmelCase_ : List[Any]=None , lowerCAmelCase_ : int=None , lowerCAmelCase_ : int=512 , lowerCAmelCase_ : int=2.6_5_9_2 , lowerCAmelCase_ : Optional[Any]=True , **lowerCAmelCase_ : List[Any] , ) -> Optional[int]: super().__init__(**lowerCAmelCase_ ) if text_config is None: UpperCAmelCase_ : Dict = {} logger.info("text_config is None. Initializing the OwlViTTextConfig with default values." ) if vision_config is None: UpperCAmelCase_ : Union[str, Any] = {} logger.info("vision_config is None. initializing the OwlViTVisionConfig with default values." 
) UpperCAmelCase_ : int = OwlViTTextConfig(**lowerCAmelCase_ ) UpperCAmelCase_ : Optional[int] = OwlViTVisionConfig(**lowerCAmelCase_ ) UpperCAmelCase_ : str = projection_dim UpperCAmelCase_ : str = logit_scale_init_value UpperCAmelCase_ : int = return_dict UpperCAmelCase_ : Optional[Any] = 1.0 @classmethod def _SCREAMING_SNAKE_CASE ( cls : int , lowerCAmelCase_ : Union[str, os.PathLike] , **lowerCAmelCase_ : Optional[Any] ) -> List[str]: cls._set_token_in_kwargs(lowerCAmelCase_ ) UpperCAmelCase_ : Optional[Any] = cls.get_config_dict(lowerCAmelCase_ , **lowerCAmelCase_ ) if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type: logger.warning( f"""You are using a model of type {config_dict['model_type']} to instantiate a model of type """ f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" ) return cls.from_dict(lowerCAmelCase_ , **lowerCAmelCase_ ) @classmethod def _SCREAMING_SNAKE_CASE ( cls : Dict , lowerCAmelCase_ : Dict , lowerCAmelCase_ : Dict , **lowerCAmelCase_ : Dict ) -> Optional[Any]: UpperCAmelCase_ : int = {} UpperCAmelCase_ : List[Any] = text_config UpperCAmelCase_ : Optional[int] = vision_config return cls.from_dict(lowerCAmelCase_ , **lowerCAmelCase_ ) def _SCREAMING_SNAKE_CASE ( self : Dict ) -> str: UpperCAmelCase_ : List[Any] = copy.deepcopy(self.__dict__ ) UpperCAmelCase_ : List[str] = self.text_config.to_dict() UpperCAmelCase_ : List[Any] = self.vision_config.to_dict() UpperCAmelCase_ : List[Any] = self.__class__.model_type return output class UpperCamelCase_ (lowerCamelCase__ ): @property def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[Any]: return OrderedDict( [ ("input_ids", {0: "batch", 1: "sequence"}), ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}), ("attention_mask", {0: "batch", 1: "sequence"}), ] ) @property def _SCREAMING_SNAKE_CASE ( self : int ) -> List[str]: return OrderedDict( [ ("logits_per_image", {0: "batch"}), ("logits_per_text", {0: "batch"}), ("text_embeds", {0: "batch"}), ("image_embeds", {0: "batch"}), ] ) @property def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Union[str, Any]: return 1e-4 def _SCREAMING_SNAKE_CASE ( self : List[Any] , lowerCAmelCase_ : "ProcessorMixin" , lowerCAmelCase_ : int = -1 , lowerCAmelCase_ : int = -1 , lowerCAmelCase_ : Optional["TensorType"] = None , ) -> List[str]: UpperCAmelCase_ : int = super().generate_dummy_inputs( processor.tokenizer , batch_size=lowerCAmelCase_ , seq_length=lowerCAmelCase_ , framework=lowerCAmelCase_ ) UpperCAmelCase_ : Dict = super().generate_dummy_inputs( processor.image_processor , batch_size=lowerCAmelCase_ , framework=lowerCAmelCase_ ) return {**text_input_dict, **image_input_dict} @property def _SCREAMING_SNAKE_CASE ( self : Any ) -> Optional[Any]: return 14
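# --- Illustrative sketch: the composite-config nesting pattern used above ---
# A parent config serializes its sub-configs as plain dicts and can be rebuilt
# from that dict, which is what `to_dict` / `from_text_vision_configs` amount
# to. All class names and fields here are illustrative, not the real API.

class SubConfig:
    def __init__(self, hidden_size=768):
        self.hidden_size = hidden_size

    def to_dict(self):
        return dict(self.__dict__)

class CompositeConfig:
    def __init__(self, text_config=None, vision_config=None):
        # missing sub-configs fall back to defaults, as the code above logs
        self.text_config = SubConfig(**(text_config or {}))
        self.vision_config = SubConfig(**(vision_config or {}))

    def to_dict(self):
        return {
            "text_config": self.text_config.to_dict(),
            "vision_config": self.vision_config.to_dict(),
        }

cfg = CompositeConfig(text_config={"hidden_size": 512})
rebuilt = CompositeConfig(**cfg.to_dict())  # round-trips through plain dicts
assert rebuilt.text_config.hidden_size == 512
assert rebuilt.vision_config.hidden_size == 768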
354
"""simple docstring""" def snake_case ( A__ ): UpperCAmelCase_ : Optional[Any] = 0 for ch in input_str: UpperCAmelCase_ : Tuple = ord(A__ ) UpperCAmelCase_ : Dict = pow(2 ,A__ ) # If we already turned on bit for current character's unicode if bitmap >> ch_unicode & 1 == 1: return False bitmap |= ch_bit_index_on return True if __name__ == "__main__": import doctest doctest.testmod()
253
0
"""simple docstring""" from pathlib import PurePosixPath from typing import Optional import fsspec from fsspec import AbstractFileSystem from huggingface_hub.hf_api import DatasetInfo from ..utils.file_utils import get_authentication_headers_for_url from ..utils.hub import hf_hub_url class A__ ( lowerCamelCase_ ): '''simple docstring''' SCREAMING_SNAKE_CASE = """""" SCREAMING_SNAKE_CASE = """hf-legacy""" # "hf://"" is reserved for hffs def __init__( self: Dict , _SCREAMING_SNAKE_CASE: Optional[DatasetInfo] = None , _SCREAMING_SNAKE_CASE: Optional[str] = None , **_SCREAMING_SNAKE_CASE: List[str] , ) -> List[str]: """simple docstring""" super().__init__(self , **_UpperCAmelCase) __lowerCAmelCase : Any = repo_info __lowerCAmelCase : int = token __lowerCAmelCase : Optional[Any] = None def _SCREAMING_SNAKE_CASE ( self: Optional[Any]) -> int: """simple docstring""" if self.dir_cache is None: __lowerCAmelCase : Dict = {} for hf_file in self.repo_info.siblings: # TODO(QL): add sizes __lowerCAmelCase : Any = { "name": hf_file.rfilename, "size": None, "type": "file", } self.dir_cache.update( { str(_UpperCAmelCase): {"name": str(_UpperCAmelCase), "size": None, "type": "directory"} for d in list(PurePosixPath(hf_file.rfilename).parents)[:-1] }) def _SCREAMING_SNAKE_CASE ( self: Union[str, Any] , _SCREAMING_SNAKE_CASE: str , _SCREAMING_SNAKE_CASE: str = "rb" , **_SCREAMING_SNAKE_CASE: Optional[Any] , ) -> Tuple: """simple docstring""" if not isinstance(self.repo_info , _UpperCAmelCase): raise NotImplementedError(F"""Open is only implemented for dataset repositories, but got {self.repo_info}""") __lowerCAmelCase : Dict = hf_hub_url(self.repo_info.id , _UpperCAmelCase , revision=self.repo_info.sha) return fsspec.open( _UpperCAmelCase , mode=_UpperCAmelCase , headers=get_authentication_headers_for_url(_UpperCAmelCase , use_auth_token=self.token) , client_kwargs={"trust_env": True} , ).open() def _SCREAMING_SNAKE_CASE ( self: List[str] , _SCREAMING_SNAKE_CASE: List[str] , **_SCREAMING_SNAKE_CASE: List[Any]) -> Dict: """simple docstring""" self._get_dirs() __lowerCAmelCase : str = self._strip_protocol(_UpperCAmelCase) if path in self.dir_cache: return self.dir_cache[path] else: raise FileNotFoundError(_UpperCAmelCase) def _SCREAMING_SNAKE_CASE ( self: Tuple , _SCREAMING_SNAKE_CASE: Tuple , _SCREAMING_SNAKE_CASE: Any=False , **_SCREAMING_SNAKE_CASE: Union[str, Any]) -> Optional[int]: """simple docstring""" self._get_dirs() __lowerCAmelCase : int = PurePosixPath(path.strip("/")) __lowerCAmelCase : Tuple = {} for p, f in self.dir_cache.items(): __lowerCAmelCase : Tuple = PurePosixPath(p.strip("/")) __lowerCAmelCase : Optional[Any] = p.parent if root == path: __lowerCAmelCase : List[str] = f __lowerCAmelCase : Dict = list(paths.values()) if detail: return out else: return sorted(f["name"] for f in out)
269
'''simple docstring'''

from dataclasses import dataclass
from typing import List, Optional, Union

import numpy as np
import torch

from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available


@dataclass
class lowerCAmelCase_(BaseOutput):
    '''simple docstring'''

    frames: Union[List[np.ndarray], torch.FloatTensor]


try:
    if not (is_transformers_available() and is_torch_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_torch_and_transformers_objects import *  # noqa F403
else:
    from .pipeline_text_to_video_synth import TextToVideoSDPipeline
    from .pipeline_text_to_video_synth_img2img import VideoToVideoSDPipeline  # noqa: F401
    from .pipeline_text_to_video_zero import TextToVideoZeroPipeline
346
0
'''simple docstring''' import argparse from pathlib import Path from transformers import AutoConfig, AutoTokenizer, RagConfig, RagSequenceForGeneration, RagTokenForGeneration def __A ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = None , lowerCAmelCase_ = None , lowerCAmelCase_ = None , ): if config_name_or_path is None: _UpperCAmelCase : List[Any] = """facebook/rag-token-base""" if model_type == """rag_token""" else """facebook/rag-sequence-base""" if generator_tokenizer_name_or_path is None: _UpperCAmelCase : str = generator_name_or_path if question_encoder_tokenizer_name_or_path is None: _UpperCAmelCase : Optional[int] = question_encoder_name_or_path _UpperCAmelCase : Tuple = RagTokenForGeneration if model_type == """rag_token""" else RagSequenceForGeneration # Save model. _UpperCAmelCase : List[Any] = RagConfig.from_pretrained(lowerCAmelCase_ ) _UpperCAmelCase : Union[str, Any] = AutoConfig.from_pretrained(lowerCAmelCase_ ) _UpperCAmelCase : List[str] = AutoConfig.from_pretrained(lowerCAmelCase_ ) _UpperCAmelCase : Dict = gen_config _UpperCAmelCase : int = question_encoder_config _UpperCAmelCase : Optional[Any] = model_class.from_pretrained_question_encoder_generator( lowerCAmelCase_ , lowerCAmelCase_ , config=lowerCAmelCase_ ) rag_model.save_pretrained(lowerCAmelCase_ ) # Sanity check. model_class.from_pretrained(lowerCAmelCase_ ) # Save tokenizers. _UpperCAmelCase : Dict = AutoTokenizer.from_pretrained(lowerCAmelCase_ ) gen_tokenizer.save_pretrained(dest_dir / """generator_tokenizer/""" ) _UpperCAmelCase : List[Any] = AutoTokenizer.from_pretrained(lowerCAmelCase_ ) question_encoder_tokenizer.save_pretrained(dest_dir / """question_encoder_tokenizer/""" ) if __name__ == "__main__": lowerCAmelCase_ : str = argparse.ArgumentParser() parser.add_argument( '''--model_type''', choices=['''rag_sequence''', '''rag_token'''], required=True, type=str, help='''RAG model type: rag_sequence, rag_token''', ) parser.add_argument('''--dest''', type=str, required=True, help='''Path to the output checkpoint directory.''') parser.add_argument('''--generator_name_or_path''', type=str, required=True, help='''Generator model identifier''') parser.add_argument( '''--question_encoder_name_or_path''', type=str, required=True, help='''Question encoder model identifier''' ) parser.add_argument( '''--generator_tokenizer_name_or_path''', type=str, help='''Generator tokenizer identifier, if not specified, resolves to ``generator_name_or_path``''', ) parser.add_argument( '''--question_encoder_tokenizer_name_or_path''', type=str, help='''Question encoder tokenizer identifier, if not specified, resolves to ``question_encoder_name_or_path``''', ) parser.add_argument( '''--config_name_or_path''', type=str, help=( '''Identifier of the model config to use, if not provided, resolves to a base config for a given''' ''' ``model_type``''' ), ) lowerCAmelCase_ : List[Any] = parser.parse_args() lowerCAmelCase_ : Tuple = Path(args.dest) dest_dir.mkdir(exist_ok=True) consolidate( args.model_type, args.generator_name_or_path, args.question_encoder_name_or_path, dest_dir, args.config_name_or_path, args.generator_tokenizer_name_or_path, args.question_encoder_tokenizer_name_or_path, )
170
'''simple docstring'''

import warnings

from ...utils import logging
from .image_processing_segformer import SegformerImageProcessor


lowerCAmelCase_ = logging.get_logger(__name__)


class __lowerCAmelCase(SegformerImageProcessor):
    def __init__(self, *lowerCAmelCase__, **lowerCAmelCase__):
        warnings.warn(
            "The class SegformerFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use SegformerImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*lowerCAmelCase__, **lowerCAmelCase__)
170
1
from __future__ import annotations


def lowercase_(nums: list[int]):
    """simple docstring"""
    if not nums:
        return 0
    max_including = nums[0]
    max_excluding = 0
    for num in nums[1:]:
        max_including, max_excluding = (
            max_excluding + num,
            max(max_including, max_excluding),
        )
    return max(max_including, max_excluding)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
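A quick worked example for the non-adjacent-sum recurrence above (illustrative, not from the original file): for [1, 2, 4, 7] the best non-adjacent pick is 2 + 7 = 9.

assert lowercase_([1, 2, 4, 7]) == 9
assert lowercase_([]) == 0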
184
import unittest from transformers import ( MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, TextClassificationPipeline, pipeline, ) from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow from .test_pipelines_common import ANY # These 2 model types require different inputs than those of the usual text models. A : Union[str, Any] = {"LayoutLMv2Config", "LayoutLMv3Config"} @is_pipeline_test class _lowercase ( unittest.TestCase): """simple docstring""" A__ = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING A__ = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING if model_mapping is not None: A__ = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP} if tf_model_mapping is not None: A__ = { config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP } @require_torch def lowerCAmelCase ( self : List[str] ): '''simple docstring''' lowerCamelCase__ : Tuple = pipeline( task="text-classification" , model="hf-internal-testing/tiny-random-distilbert" , framework="pt" ) lowerCamelCase__ : Dict = text_classifier("This is great !" ) self.assertEqual(nested_simplify(__lowerCamelCase ) , [{"label": "LABEL_0", "score": 0.5_0_4}] ) lowerCamelCase__ : List[str] = text_classifier("This is great !" , top_k=2 ) self.assertEqual( nested_simplify(__lowerCamelCase ) , [{"label": "LABEL_0", "score": 0.5_0_4}, {"label": "LABEL_1", "score": 0.4_9_6}] ) lowerCamelCase__ : Optional[int] = text_classifier(["This is great !", "This is bad"] , top_k=2 ) self.assertEqual( nested_simplify(__lowerCamelCase ) , [ [{"label": "LABEL_0", "score": 0.5_0_4}, {"label": "LABEL_1", "score": 0.4_9_6}], [{"label": "LABEL_0", "score": 0.5_0_4}, {"label": "LABEL_1", "score": 0.4_9_6}], ] , ) lowerCamelCase__ : Any = text_classifier("This is great !" , top_k=1 ) self.assertEqual(nested_simplify(__lowerCamelCase ) , [{"label": "LABEL_0", "score": 0.5_0_4}] ) # Legacy behavior lowerCamelCase__ : Dict = text_classifier("This is great !" , return_all_scores=__lowerCamelCase ) self.assertEqual(nested_simplify(__lowerCamelCase ) , [{"label": "LABEL_0", "score": 0.5_0_4}] ) lowerCamelCase__ : str = text_classifier("This is great !" , return_all_scores=__lowerCamelCase ) self.assertEqual( nested_simplify(__lowerCamelCase ) , [[{"label": "LABEL_0", "score": 0.5_0_4}, {"label": "LABEL_1", "score": 0.4_9_6}]] ) lowerCamelCase__ : Optional[Any] = text_classifier(["This is great !", "Something else"] , return_all_scores=__lowerCamelCase ) self.assertEqual( nested_simplify(__lowerCamelCase ) , [ [{"label": "LABEL_0", "score": 0.5_0_4}, {"label": "LABEL_1", "score": 0.4_9_6}], [{"label": "LABEL_0", "score": 0.5_0_4}, {"label": "LABEL_1", "score": 0.4_9_6}], ] , ) lowerCamelCase__ : Any = text_classifier(["This is great !", "Something else"] , return_all_scores=__lowerCamelCase ) self.assertEqual( nested_simplify(__lowerCamelCase ) , [ {"label": "LABEL_0", "score": 0.5_0_4}, {"label": "LABEL_0", "score": 0.5_0_4}, ] , ) @require_torch def lowerCAmelCase ( self : str ): '''simple docstring''' import torch lowerCamelCase__ : int = pipeline( task="text-classification" , model="hf-internal-testing/tiny-random-distilbert" , framework="pt" , device=torch.device("cpu" ) , ) lowerCamelCase__ : Any = text_classifier("This is great !" 
) self.assertEqual(nested_simplify(__lowerCamelCase ) , [{"label": "LABEL_0", "score": 0.5_0_4}] ) @require_tf def lowerCAmelCase ( self : int ): '''simple docstring''' lowerCamelCase__ : List[str] = pipeline( task="text-classification" , model="hf-internal-testing/tiny-random-distilbert" , framework="tf" ) lowerCamelCase__ : List[str] = text_classifier("This is great !" ) self.assertEqual(nested_simplify(__lowerCamelCase ) , [{"label": "LABEL_0", "score": 0.5_0_4}] ) @slow @require_torch def lowerCAmelCase ( self : List[Any] ): '''simple docstring''' lowerCamelCase__ : Optional[Any] = pipeline("text-classification" ) lowerCamelCase__ : List[str] = text_classifier("This is great !" ) self.assertEqual(nested_simplify(__lowerCamelCase ) , [{"label": "POSITIVE", "score": 1.0}] ) lowerCamelCase__ : Optional[int] = text_classifier("This is bad !" ) self.assertEqual(nested_simplify(__lowerCamelCase ) , [{"label": "NEGATIVE", "score": 1.0}] ) lowerCamelCase__ : Tuple = text_classifier("Birds are a type of animal" ) self.assertEqual(nested_simplify(__lowerCamelCase ) , [{"label": "POSITIVE", "score": 0.9_8_8}] ) @slow @require_tf def lowerCAmelCase ( self : List[Any] ): '''simple docstring''' lowerCamelCase__ : str = pipeline("text-classification" , framework="tf" ) lowerCamelCase__ : Optional[int] = text_classifier("This is great !" ) self.assertEqual(nested_simplify(__lowerCamelCase ) , [{"label": "POSITIVE", "score": 1.0}] ) lowerCamelCase__ : Optional[Any] = text_classifier("This is bad !" ) self.assertEqual(nested_simplify(__lowerCamelCase ) , [{"label": "NEGATIVE", "score": 1.0}] ) lowerCamelCase__ : Dict = text_classifier("Birds are a type of animal" ) self.assertEqual(nested_simplify(__lowerCamelCase ) , [{"label": "POSITIVE", "score": 0.9_8_8}] ) def lowerCAmelCase ( self : Tuple , __lowerCamelCase : Any , __lowerCamelCase : str , __lowerCamelCase : Optional[Any] ): '''simple docstring''' lowerCamelCase__ : Tuple = TextClassificationPipeline(model=__lowerCamelCase , tokenizer=__lowerCamelCase ) return text_classifier, ["HuggingFace is in", "This is another test"] def lowerCAmelCase ( self : Optional[Any] , __lowerCamelCase : Tuple , __lowerCamelCase : Any ): '''simple docstring''' lowerCamelCase__ : int = text_classifier.model # Small inputs because BartTokenizer tiny has maximum position embeddings = 22 lowerCamelCase__ : List[Any] = "HuggingFace is in" lowerCamelCase__ : Tuple = text_classifier(__lowerCamelCase ) self.assertEqual(nested_simplify(__lowerCamelCase ) , [{"label": ANY(__lowerCamelCase ), "score": ANY(__lowerCamelCase )}] ) self.assertTrue(outputs[0]["label"] in model.config.idalabel.values() ) lowerCamelCase__ : Optional[int] = ["HuggingFace is in ", "Paris is in France"] lowerCamelCase__ : Dict = text_classifier(__lowerCamelCase ) self.assertEqual( nested_simplify(__lowerCamelCase ) , [{"label": ANY(__lowerCamelCase ), "score": ANY(__lowerCamelCase )}, {"label": ANY(__lowerCamelCase ), "score": ANY(__lowerCamelCase )}] , ) self.assertTrue(outputs[0]["label"] in model.config.idalabel.values() ) self.assertTrue(outputs[1]["label"] in model.config.idalabel.values() ) # Forcing to get all results with `top_k=None` # This is NOT the legacy format lowerCamelCase__ : List[Any] = text_classifier(__lowerCamelCase , top_k=__lowerCamelCase ) lowerCamelCase__ : Union[str, Any] = len(model.config.idalabel.values() ) self.assertEqual( nested_simplify(__lowerCamelCase ) , [[{"label": ANY(__lowerCamelCase ), "score": ANY(__lowerCamelCase )}] * N, [{"label": ANY(__lowerCamelCase ), 
"score": ANY(__lowerCamelCase )}] * N] , ) lowerCamelCase__ : Optional[int] = {"text": "HuggingFace is in ", "text_pair": "Paris is in France"} lowerCamelCase__ : List[Any] = text_classifier(__lowerCamelCase ) self.assertEqual( nested_simplify(__lowerCamelCase ) , {"label": ANY(__lowerCamelCase ), "score": ANY(__lowerCamelCase )} , ) self.assertTrue(outputs["label"] in model.config.idalabel.values() ) # This might be used a text pair, but tokenizer + pipe interaction # makes it hard to understand that it's not using the pair properly # https://github.com/huggingface/transformers/issues/17305 # We disabled this usage instead as it was outputting wrong outputs. lowerCamelCase__ : Any = [["HuggingFace is in ", "Paris is in France"]] with self.assertRaises(__lowerCamelCase ): text_classifier(__lowerCamelCase ) # This used to be valid for doing text pairs # We're keeping it working because of backward compatibility lowerCamelCase__ : int = text_classifier([[["HuggingFace is in ", "Paris is in France"]]] ) self.assertEqual( nested_simplify(__lowerCamelCase ) , [{"label": ANY(__lowerCamelCase ), "score": ANY(__lowerCamelCase )}] , ) self.assertTrue(outputs[0]["label"] in model.config.idalabel.values() )
184
1
"""simple docstring""" import unittest import numpy as np from transformers import DistilBertConfig, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask if is_flax_available(): import jax.numpy as jnp from transformers.models.distilbert.modeling_flax_distilbert import ( FlaxDistilBertForMaskedLM, FlaxDistilBertForMultipleChoice, FlaxDistilBertForQuestionAnswering, FlaxDistilBertForSequenceClassification, FlaxDistilBertForTokenClassification, FlaxDistilBertModel, ) class __magic_name__ ( unittest.TestCase ): def __init__( self : str , snake_case__ : List[Any] , snake_case__ : Optional[Any]=1_3 , snake_case__ : List[Any]=7 , snake_case__ : Optional[Any]=True , snake_case__ : Optional[int]=True , snake_case__ : Dict=True , snake_case__ : Optional[Any]=True , snake_case__ : Optional[Any]=9_9 , snake_case__ : Union[str, Any]=3_2 , snake_case__ : Optional[int]=5 , snake_case__ : Optional[int]=4 , snake_case__ : Optional[int]=3_7 , snake_case__ : Optional[Any]="gelu" , snake_case__ : Any=0.1 , snake_case__ : int=0.1 , snake_case__ : int=5_1_2 , snake_case__ : Tuple=1_6 , snake_case__ : Optional[int]=2 , snake_case__ : Any=0.02 , snake_case__ : Dict=4 , ): '''simple docstring''' lowercase :Union[str, Any] = parent lowercase :Tuple = batch_size lowercase :Dict = seq_length lowercase :List[str] = is_training lowercase :int = use_attention_mask lowercase :str = use_token_type_ids lowercase :List[Any] = use_labels lowercase :Tuple = vocab_size lowercase :List[str] = hidden_size lowercase :str = num_hidden_layers lowercase :Optional[Any] = num_attention_heads lowercase :List[str] = intermediate_size lowercase :str = hidden_act lowercase :Optional[Any] = hidden_dropout_prob lowercase :Optional[Any] = attention_probs_dropout_prob lowercase :List[str] = max_position_embeddings lowercase :str = type_vocab_size lowercase :Union[str, Any] = type_sequence_label_size lowercase :Optional[int] = initializer_range lowercase :Tuple = num_choices def __snake_case ( self : Union[str, Any] ): '''simple docstring''' lowercase :List[str] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) lowercase :Optional[Any] = None if self.use_attention_mask: lowercase :Tuple = random_attention_mask([self.batch_size, self.seq_length] ) lowercase :List[Any] = DistilBertConfig( vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , tie_weights_=snake_case__ , ) return config, input_ids, attention_mask def __snake_case ( self : str ): '''simple docstring''' lowercase :int = self.prepare_config_and_inputs() lowercase :Union[str, Any] = config_and_inputs lowercase :Any = {'''input_ids''': input_ids, '''attention_mask''': attention_mask} return config, inputs_dict @require_flax class __magic_name__ ( __UpperCAmelCase , unittest.TestCase ): __A : Tuple = ( ( FlaxDistilBertModel, FlaxDistilBertForMaskedLM, FlaxDistilBertForMultipleChoice, FlaxDistilBertForQuestionAnswering, FlaxDistilBertForSequenceClassification, FlaxDistilBertForTokenClassification, FlaxDistilBertForQuestionAnswering, ) if is_flax_available() else () ) def __snake_case ( self : Optional[Any] ): '''simple docstring''' 
lowercase :int = FlaxDistilBertModelTester(self ) @slow def __snake_case ( self : Dict ): '''simple docstring''' for model_class_name in self.all_model_classes: lowercase :List[Any] = model_class_name.from_pretrained('''distilbert-base-uncased''' ) lowercase :Dict = model(np.ones((1, 1) ) ) self.assertIsNotNone(snake_case__ ) @require_flax class __magic_name__ ( unittest.TestCase ): @slow def __snake_case ( self : Dict ): '''simple docstring''' lowercase :Dict = FlaxDistilBertModel.from_pretrained('''distilbert-base-uncased''' ) lowercase :int = np.array([[0, 3_4_5, 2_3_2, 3_2_8, 7_4_0, 1_4_0, 1_6_9_5, 6_9, 6_0_7_8, 1_5_8_8, 2]] ) lowercase :List[str] = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] ) lowercase :str = model(snake_case__ , attention_mask=snake_case__ )[0] lowercase :int = (1, 1_1, 7_6_8) self.assertEqual(output.shape , snake_case__ ) lowercase :Tuple = np.array([[[-0.16_39, 0.32_99, 0.16_48], [-0.17_46, 0.32_89, 0.17_10], [-0.18_84, 0.33_57, 0.18_10]]] ) self.assertTrue(jnp.allclose(output[:, 1:4, 1:4] , snake_case__ , atol=1e-4 ) )
362
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices UpperCAmelCase = logging.get_logger(__name__) class __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase ): __A : Dict = "maskformer-swin" __A : Union[str, Any] = { "num_attention_heads": "num_heads", "num_hidden_layers": "num_layers", } def __init__( self : Dict , snake_case__ : Dict=2_2_4 , snake_case__ : Any=4 , snake_case__ : Dict=3 , snake_case__ : str=9_6 , snake_case__ : List[str]=[2, 2, 6, 2] , snake_case__ : Optional[int]=[3, 6, 1_2, 2_4] , snake_case__ : Optional[Any]=7 , snake_case__ : int=4.0 , snake_case__ : str=True , snake_case__ : Dict=0.0 , snake_case__ : List[Any]=0.0 , snake_case__ : Tuple=0.1 , snake_case__ : List[str]="gelu" , snake_case__ : Tuple=False , snake_case__ : int=0.02 , snake_case__ : Tuple=1e-5 , snake_case__ : Optional[int]=None , snake_case__ : Tuple=None , **snake_case__ : List[Any] , ): '''simple docstring''' super().__init__(**snake_case__ ) lowercase :Optional[int] = image_size lowercase :List[Any] = patch_size lowercase :Optional[Any] = num_channels lowercase :Union[str, Any] = embed_dim lowercase :Union[str, Any] = depths lowercase :List[Any] = len(snake_case__ ) lowercase :Optional[Any] = num_heads lowercase :Optional[Any] = window_size lowercase :Optional[int] = mlp_ratio lowercase :str = qkv_bias lowercase :int = hidden_dropout_prob lowercase :List[str] = attention_probs_dropout_prob lowercase :str = drop_path_rate lowercase :Optional[Any] = hidden_act lowercase :Tuple = use_absolute_embeddings lowercase :Union[str, Any] = layer_norm_eps lowercase :Any = initializer_range # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel # this indicates the channel dimension after the last stage of the model lowercase :Optional[int] = int(embed_dim * 2 ** (len(snake_case__ ) - 1) ) lowercase :Optional[int] = ['''stem'''] + [f"""stage{idx}""" for idx in range(1 , len(snake_case__ ) + 1 )] lowercase , lowercase :List[str] = get_aligned_output_features_output_indices( out_features=snake_case__ , out_indices=snake_case__ , stage_names=self.stage_names )
172
0
"""simple docstring""" from __future__ import annotations def lowercase__ ( _UpperCAmelCase , _UpperCAmelCase ) -> list[int]: '''simple docstring''' lowercase : Tuple = 0 lowercase : int = len(_UpperCAmelCase ) - 1 while i < j: if nums[i] + nums[j] == target: return [i, j] elif nums[i] + nums[j] < target: lowercase : int = i + 1 else: lowercase : List[Any] = j - 1 return [] if __name__ == "__main__": import doctest doctest.testmod() print(f'''{two_pointer([2, 7, 1_1, 1_5], 9) = }''')
255
"""simple docstring""" def lowercase__ ( ) -> str: '''simple docstring''' lowercase : List[str] = 0 for i in range(1 , 10_01 ): total += i**i return str(_UpperCAmelCase )[-10:] if __name__ == "__main__": print(solution())
255
1
from bisect import bisect
from itertools import accumulate


def frac_knapsack(vl, wt, w, n):
    '''simple docstring'''
    r = sorted(zip(vl, wt), key=lambda x: x[0] / x[1], reverse=True)
    vl, wt = [i[0] for i in r], [i[1] for i in r]
    acc = list(accumulate(wt))
    k = bisect(acc, w)
    return (
        0
        if k == 0
        else sum(vl[:k]) + (w - acc[k - 1]) * (vl[k]) / (wt[k])
        if k != n
        else sum(vl[:k])
    )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
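A classic illustration of the greedy fraction step, using the name given to the function above (the call is an added example, not from the original file): with values [60, 100, 120], weights [10, 20, 30] and capacity 50, the value/weight ratios are 6, 5 and 4, so the first two items are taken whole and 20/30 of the third is added.

assert frac_knapsack([60, 100, 120], [10, 20, 30], 50, 3) == 240.0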
295
import json import os from functools import lru_cache from typing import List, Optional, Tuple import regex as re from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging UpperCAmelCase_ = logging.get_logger(__name__) UpperCAmelCase_ = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt"""} # See all BART models at https://huggingface.co/models?filter=bart UpperCAmelCase_ = { """vocab_file""": { """facebook/bart-base""": """https://huggingface.co/facebook/bart-base/resolve/main/vocab.json""", """facebook/bart-large""": """https://huggingface.co/facebook/bart-large/resolve/main/vocab.json""", """facebook/bart-large-mnli""": """https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json""", """facebook/bart-large-cnn""": """https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json""", """facebook/bart-large-xsum""": """https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json""", """yjernite/bart_eli5""": """https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json""", }, """merges_file""": { """facebook/bart-base""": """https://huggingface.co/facebook/bart-base/resolve/main/merges.txt""", """facebook/bart-large""": """https://huggingface.co/facebook/bart-large/resolve/main/merges.txt""", """facebook/bart-large-mnli""": """https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt""", """facebook/bart-large-cnn""": """https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt""", """facebook/bart-large-xsum""": """https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt""", """yjernite/bart_eli5""": """https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt""", }, } UpperCAmelCase_ = { """facebook/bart-base""": 1024, """facebook/bart-large""": 1024, """facebook/bart-large-mnli""": 1024, """facebook/bart-large-cnn""": 1024, """facebook/bart-large-xsum""": 1024, """yjernite/bart_eli5""": 1024, } @lru_cache() def lowerCamelCase__ ( ) -> Tuple: '''simple docstring''' _snake_case = ( list(range(ord('!' 
) , ord('~' ) + 1 ) ) + list(range(ord('¡' ) , ord('¬' ) + 1 ) ) + list(range(ord('®' ) , ord('ÿ' ) + 1 ) ) ) _snake_case = bs[:] _snake_case = 0 for b in range(2**8 ): if b not in bs: bs.append(UpperCamelCase__ ) cs.append(2**8 + n ) n += 1 _snake_case = [chr(UpperCamelCase__ ) for n in cs] return dict(zip(UpperCamelCase__ , UpperCamelCase__ ) ) def lowerCamelCase__ ( UpperCamelCase__ : Tuple ) -> int: '''simple docstring''' _snake_case = set() _snake_case = word[0] for char in word[1:]: pairs.add((prev_char, char) ) _snake_case = char return pairs class UpperCamelCase_ ( _lowerCamelCase ): lowerCAmelCase_ = VOCAB_FILES_NAMES lowerCAmelCase_ = PRETRAINED_VOCAB_FILES_MAP lowerCAmelCase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowerCAmelCase_ = ['''input_ids''', '''attention_mask'''] def __init__( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_="replace" , lowerCAmelCase_="<s>" , lowerCAmelCase_="</s>" , lowerCAmelCase_="</s>" , lowerCAmelCase_="<s>" , lowerCAmelCase_="<unk>" , lowerCAmelCase_="<pad>" , lowerCAmelCase_="<mask>" , lowerCAmelCase_=False , **lowerCAmelCase_ , ) -> Tuple: _snake_case = AddedToken(lowerCAmelCase_ , lstrip=lowerCAmelCase_ , rstrip=lowerCAmelCase_ ) if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) else bos_token _snake_case = AddedToken(lowerCAmelCase_ , lstrip=lowerCAmelCase_ , rstrip=lowerCAmelCase_ ) if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) else eos_token _snake_case = AddedToken(lowerCAmelCase_ , lstrip=lowerCAmelCase_ , rstrip=lowerCAmelCase_ ) if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) else sep_token _snake_case = AddedToken(lowerCAmelCase_ , lstrip=lowerCAmelCase_ , rstrip=lowerCAmelCase_ ) if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) else cls_token _snake_case = AddedToken(lowerCAmelCase_ , lstrip=lowerCAmelCase_ , rstrip=lowerCAmelCase_ ) if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) else unk_token _snake_case = AddedToken(lowerCAmelCase_ , lstrip=lowerCAmelCase_ , rstrip=lowerCAmelCase_ ) if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) else pad_token # Mask token behave like a normal word, i.e. 
include the space before it _snake_case = AddedToken(lowerCAmelCase_ , lstrip=lowerCAmelCase_ , rstrip=lowerCAmelCase_ ) if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) else mask_token super().__init__( errors=lowerCAmelCase_ , bos_token=lowerCAmelCase_ , eos_token=lowerCAmelCase_ , unk_token=lowerCAmelCase_ , sep_token=lowerCAmelCase_ , cls_token=lowerCAmelCase_ , pad_token=lowerCAmelCase_ , mask_token=lowerCAmelCase_ , add_prefix_space=lowerCAmelCase_ , **lowerCAmelCase_ , ) with open(lowerCAmelCase_ , encoding='utf-8' ) as vocab_handle: _snake_case = json.load(lowerCAmelCase_ ) _snake_case = {v: k for k, v in self.encoder.items()} _snake_case = errors # how to handle errors in decoding _snake_case = bytes_to_unicode() _snake_case = {v: k for k, v in self.byte_encoder.items()} with open(lowerCAmelCase_ , encoding='utf-8' ) as merges_handle: _snake_case = merges_handle.read().split('\n' )[1:-1] _snake_case = [tuple(merge.split() ) for merge in bpe_merges] _snake_case = dict(zip(lowerCAmelCase_ , range(len(lowerCAmelCase_ ) ) ) ) _snake_case = {} _snake_case = add_prefix_space # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions _snake_case = re.compile(r'\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+' ) @property def lowerCAmelCase ( self ) -> Any: return len(self.encoder ) def lowerCAmelCase ( self ) -> Dict: return dict(self.encoder , **self.added_tokens_encoder ) def lowerCAmelCase ( self , lowerCAmelCase_ ) -> Dict: if token in self.cache: return self.cache[token] _snake_case = tuple(lowerCAmelCase_ ) _snake_case = get_pairs(lowerCAmelCase_ ) if not pairs: return token while True: _snake_case = min(lowerCAmelCase_ , key=lambda lowerCAmelCase_ : self.bpe_ranks.get(lowerCAmelCase_ , float('inf' ) ) ) if bigram not in self.bpe_ranks: break _snake_case , _snake_case = bigram _snake_case = [] _snake_case = 0 while i < len(lowerCAmelCase_ ): try: _snake_case = word.index(lowerCAmelCase_ , lowerCAmelCase_ ) except ValueError: new_word.extend(word[i:] ) break else: new_word.extend(word[i:j] ) _snake_case = j if word[i] == first and i < len(lowerCAmelCase_ ) - 1 and word[i + 1] == second: new_word.append(first + second ) i += 2 else: new_word.append(word[i] ) i += 1 _snake_case = tuple(lowerCAmelCase_ ) _snake_case = new_word if len(lowerCAmelCase_ ) == 1: break else: _snake_case = get_pairs(lowerCAmelCase_ ) _snake_case = ' '.join(lowerCAmelCase_ ) _snake_case = word return word def lowerCAmelCase ( self , lowerCAmelCase_ ) -> List[Any]: _snake_case = [] for token in re.findall(self.pat , lowerCAmelCase_ ): _snake_case = ''.join( self.byte_encoder[b] for b in token.encode('utf-8' ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case) bpe_tokens.extend(bpe_token for bpe_token in self.bpe(lowerCAmelCase_ ).split(' ' ) ) return bpe_tokens def lowerCAmelCase ( self , lowerCAmelCase_ ) -> str: return self.encoder.get(lowerCAmelCase_ , self.encoder.get(self.unk_token ) ) def lowerCAmelCase ( self , lowerCAmelCase_ ) -> Optional[int]: return self.decoder.get(lowerCAmelCase_ ) def lowerCAmelCase ( self , lowerCAmelCase_ ) -> Union[str, Any]: _snake_case = ''.join(lowerCAmelCase_ ) _snake_case = bytearray([self.byte_decoder[c] for c in text] ).decode('utf-8' , errors=self.errors ) return text def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ = None ) -> Tuple[str]: if not os.path.isdir(lowerCAmelCase_ ): logger.error(F'''Vocabulary path ({save_directory}) should 
be a directory''' ) return _snake_case = os.path.join( lowerCAmelCase_ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] ) _snake_case = os.path.join( lowerCAmelCase_ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file'] ) with open(lowerCAmelCase_ , 'w' , encoding='utf-8' ) as f: f.write(json.dumps(self.encoder , indent=2 , sort_keys=lowerCAmelCase_ , ensure_ascii=lowerCAmelCase_ ) + '\n' ) _snake_case = 0 with open(lowerCAmelCase_ , 'w' , encoding='utf-8' ) as writer: writer.write('#version: 0.2\n' ) for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda lowerCAmelCase_ : kv[1] ): if index != token_index: logger.warning( F'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.''' ' Please check that the tokenizer is not corrupted!' ) _snake_case = token_index writer.write(' '.join(lowerCAmelCase_ ) + '\n' ) index += 1 return vocab_file, merge_file def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ = None ) -> List[int]: if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] _snake_case = [self.cls_token_id] _snake_case = [self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ = None , lowerCAmelCase_ = False ) -> List[int]: if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=lowerCAmelCase_ , token_ids_a=lowerCAmelCase_ , already_has_special_tokens=lowerCAmelCase_ ) if token_ids_a is None: return [1] + ([0] * len(lowerCAmelCase_ )) + [1] return [1] + ([0] * len(lowerCAmelCase_ )) + [1, 1] + ([0] * len(lowerCAmelCase_ )) + [1] def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ = None ) -> List[int]: _snake_case = [self.sep_token_id] _snake_case = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_=False , **lowerCAmelCase_ ) -> str: _snake_case = kwargs.pop('add_prefix_space' , self.add_prefix_space ) if (is_split_into_words or add_prefix_space) and (len(lowerCAmelCase_ ) > 0 and not text[0].isspace()): _snake_case = ' ' + text return (text, kwargs)
295
1
"""simple docstring""" import os from shutil import copyfile from typing import List, Optional, Tuple from ...tokenization_utils import AddedToken from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_big_bird import BigBirdTokenizer else: A_ = None A_ = logging.get_logger(__name__) A_ = {'''vocab_file''': '''spiece.model''', '''tokenizer_file''': '''tokenizer.json'''} A_ = { '''vocab_file''': { '''google/bigbird-roberta-base''': '''https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model''', '''google/bigbird-roberta-large''': ( '''https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model''' ), '''google/bigbird-base-trivia-itc''': ( '''https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model''' ), }, '''tokenizer_file''': { '''google/bigbird-roberta-base''': ( '''https://huggingface.co/google/bigbird-roberta-base/resolve/main/tokenizer.json''' ), '''google/bigbird-roberta-large''': ( '''https://huggingface.co/google/bigbird-roberta-large/resolve/main/tokenizer.json''' ), '''google/bigbird-base-trivia-itc''': ( '''https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/tokenizer.json''' ), }, } A_ = { '''google/bigbird-roberta-base''': 40_96, '''google/bigbird-roberta-large''': 40_96, '''google/bigbird-base-trivia-itc''': 40_96, } A_ = '''▁''' class lowercase( __a ): '''simple docstring''' lowercase__ = VOCAB_FILES_NAMES lowercase__ = PRETRAINED_VOCAB_FILES_MAP lowercase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowercase__ = BigBirdTokenizer lowercase__ = ["input_ids", "attention_mask"] lowercase__ = [] def __init__( self: int, a_: Union[str, Any]=None, a_: Union[str, Any]=None, a_: Any="<unk>", a_: Tuple="<s>", a_: Tuple="</s>", a_: Dict="<pad>", a_: int="[SEP]", a_: Any="[MASK]", a_: str="[CLS]", **a_: Optional[Any], ): '''simple docstring''' _snake_case : Dict = AddedToken(a_, lstrip=a_, rstrip=a_ ) if isinstance(a_, a_ ) else bos_token _snake_case : Union[str, Any] = AddedToken(a_, lstrip=a_, rstrip=a_ ) if isinstance(a_, a_ ) else eos_token _snake_case : str = AddedToken(a_, lstrip=a_, rstrip=a_ ) if isinstance(a_, a_ ) else unk_token _snake_case : Any = AddedToken(a_, lstrip=a_, rstrip=a_ ) if isinstance(a_, a_ ) else pad_token _snake_case : Union[str, Any] = AddedToken(a_, lstrip=a_, rstrip=a_ ) if isinstance(a_, a_ ) else cls_token _snake_case : Optional[Any] = AddedToken(a_, lstrip=a_, rstrip=a_ ) if isinstance(a_, a_ ) else sep_token # Mask token behave like a normal word, i.e. 
include the space before it _snake_case : Optional[int] = AddedToken(a_, lstrip=a_, rstrip=a_ ) if isinstance(a_, a_ ) else mask_token super().__init__( a_, tokenizer_file=a_, bos_token=a_, eos_token=a_, unk_token=a_, sep_token=a_, pad_token=a_, cls_token=a_, mask_token=a_, **a_, ) _snake_case : Any = vocab_file _snake_case : Optional[int] = False if not self.vocab_file else True def UpperCamelCase_ ( self: int, a_: List[int], a_: Optional[List[int]] = None ): '''simple docstring''' _snake_case : Optional[int] = [self.sep_token_id] _snake_case : List[Any] = [self.cls_token_id] if token_ids_a is None: return cls + token_ids_a + sep return cls + token_ids_a + sep + token_ids_a + sep def UpperCamelCase_ ( self: Optional[int], a_: List[int], a_: Optional[List[int]] = None, a_: bool = False ): '''simple docstring''' if already_has_special_tokens: if token_ids_a is not None: raise ValueError( """You should not supply a second sequence if the provided sequence of """ """ids is already formatted with special tokens for the model.""" ) return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a] if token_ids_a is None: return [1] + ([0] * len(a_ )) + [1] return [1] + ([0] * len(a_ )) + [1] + ([0] * len(a_ )) + [1] def UpperCamelCase_ ( self: int, a_: List[int], a_: Optional[List[int]] = None ): '''simple docstring''' _snake_case : List[str] = [self.sep_token_id] _snake_case : Optional[int] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def UpperCamelCase_ ( self: List[Any], a_: str, a_: Optional[str] = None ): '''simple docstring''' if not self.can_save_slow_tokenizer: raise ValueError( """Your fast tokenizer does not have the necessary information to save the vocabulary for a slow """ """tokenizer.""" ) if not os.path.isdir(a_ ): logger.error(f"Vocabulary path ({save_directory}) should be a directory" ) return _snake_case : int = os.path.join( a_, (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(a_ ): copyfile(self.vocab_file, a_ ) return (out_vocab_file,)
64
def A_(word: str) -> str:
    """simple docstring"""
    return "".join(chr(ord(char) - 32) if "a" <= char <= "z" else char for char in word)


if __name__ == "__main__":
    from doctest import testmod

    testmod()
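Illustrative calls (the ASCII offset between 'a' and 'A' is 32, which is what the chr/ord arithmetic above exploits):

assert A_("hello world") == "HELLO WORLD"
assert A_("mixed CASE 123!") == "MIXED CASE 123!"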
253
0
"""simple docstring""" def SCREAMING_SNAKE_CASE ( _lowerCamelCase : int ) -> bool: return str(_lowerCamelCase ) == str(_lowerCamelCase )[::-1] def SCREAMING_SNAKE_CASE ( _lowerCamelCase : int ) -> int: return int(_lowerCamelCase ) + int(str(_lowerCamelCase )[::-1] ) def SCREAMING_SNAKE_CASE ( _lowerCamelCase : int = 10000 ) -> int: _lowerCAmelCase : List[Any] = [] for num in range(1 ,_lowerCamelCase ): _lowerCAmelCase : Dict = 0 _lowerCAmelCase : Optional[int] = num while iterations < 50: _lowerCAmelCase : int = sum_reverse(_lowerCamelCase ) iterations += 1 if is_palindrome(_lowerCamelCase ): break else: lychrel_nums.append(_lowerCamelCase ) return len(_lowerCamelCase ) if __name__ == "__main__": print(F"""{solution() = }""")
126
"""simple docstring""" import os from pathlib import Path import numpy as np import pytest from pack_dataset import pack_data_dir from parameterized import parameterized from save_len_file import save_len_file from torch.utils.data import DataLoader from transformers import AutoTokenizer from transformers.models.mbart.modeling_mbart import shift_tokens_right from transformers.testing_utils import TestCasePlus, slow from utils import FAIRSEQ_AVAILABLE, DistributedSortishSampler, LegacySeqaSeqDataset, SeqaSeqDataset _a : Optional[int] = 'bert-base-cased' _a : Optional[Any] = 'google/pegasus-xsum' _a : Union[str, Any] = [' Sam ate lunch today.', 'Sams lunch ingredients.'] _a : int = ['A very interesting story about what I ate for lunch.', 'Avocado, celery, turkey, coffee'] _a : Union[str, Any] = 'patrickvonplaten/t5-tiny-random' _a : Tuple = 'sshleifer/bart-tiny-random' _a : str = 'sshleifer/tiny-mbart' _a : Optional[int] = 'sshleifer/tiny-marian-en-de' def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Path ,_lowerCamelCase : list ) -> str: _lowerCAmelCase : List[Any] = """\n""".join(_lowerCamelCase ) Path(_lowerCamelCase ).open("""w""" ).writelines(_lowerCamelCase ) def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Optional[int] ) -> Union[str, Any]: for split in ["train", "val", "test"]: _dump_articles(os.path.join(_lowerCamelCase ,f"{split}.source" ) ,_lowerCamelCase ) _dump_articles(os.path.join(_lowerCamelCase ,f"{split}.target" ) ,_lowerCamelCase ) return tmp_dir class __A ( SCREAMING_SNAKE_CASE_ ): @parameterized.expand( [ MBART_TINY, MARIAN_TINY, T5_TINY, BART_TINY, PEGASUS_XSUM, ] , ) @slow def __A ( self , a__ ): _lowerCAmelCase : str = AutoTokenizer.from_pretrained(a__ ) _lowerCAmelCase : Any = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) _lowerCAmelCase : Union[str, Any] = max(len(tokenizer.encode(a__ ) ) for a in ARTICLES ) _lowerCAmelCase : Optional[int] = max(len(tokenizer.encode(a__ ) ) for a in SUMMARIES ) _lowerCAmelCase : str = 4 _lowerCAmelCase : Optional[int] = 8 assert max_len_target > max_src_len # Will be truncated assert max_len_source > max_src_len # Will be truncated _lowerCAmelCase , _lowerCAmelCase : Optional[int] = """ro_RO""", """de_DE""" # ignored for all but mbart, but never causes error. _lowerCAmelCase : Optional[int] = SeqaSeqDataset( a__ , data_dir=a__ , type_path="""train""" , max_source_length=a__ , max_target_length=a__ , src_lang=a__ , tgt_lang=a__ , ) _lowerCAmelCase : int = DataLoader(a__ , batch_size=2 , collate_fn=train_dataset.collate_fn ) for batch in dataloader: assert isinstance(a__ , a__ ) assert batch["attention_mask"].shape == batch["input_ids"].shape # show that articles were trimmed. 
assert batch["input_ids"].shape[1] == max_src_len # show that targets are the same len assert batch["labels"].shape[1] == max_tgt_len if tok_name != MBART_TINY: continue # check language codes in correct place _lowerCAmelCase : Any = shift_tokens_right(batch["""labels"""] , tokenizer.pad_token_id ) assert batch["decoder_input_ids"][0, 0].item() == tokenizer.lang_code_to_id[tgt_lang] assert batch["decoder_input_ids"][0, -1].item() == tokenizer.eos_token_id assert batch["input_ids"][0, -2].item() == tokenizer.eos_token_id assert batch["input_ids"][0, -1].item() == tokenizer.lang_code_to_id[src_lang] break # No need to test every batch @parameterized.expand([BART_TINY, BERT_BASE_CASED] ) def __A ( self , a__ ): _lowerCAmelCase : Any = AutoTokenizer.from_pretrained(a__ ) _lowerCAmelCase : Union[str, Any] = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) _lowerCAmelCase : Optional[int] = max(len(tokenizer.encode(a__ ) ) for a in ARTICLES ) _lowerCAmelCase : Any = max(len(tokenizer.encode(a__ ) ) for a in SUMMARIES ) _lowerCAmelCase : int = 4 _lowerCAmelCase : List[str] = LegacySeqaSeqDataset( a__ , data_dir=a__ , type_path="""train""" , max_source_length=20 , max_target_length=a__ , ) _lowerCAmelCase : List[Any] = DataLoader(a__ , batch_size=2 , collate_fn=train_dataset.collate_fn ) for batch in dataloader: assert batch["attention_mask"].shape == batch["input_ids"].shape # show that articles were trimmed. assert batch["input_ids"].shape[1] == max_len_source assert 20 >= batch["input_ids"].shape[1] # trimmed significantly # show that targets were truncated assert batch["labels"].shape[1] == trunc_target # Truncated assert max_len_target > trunc_target # Truncated break # No need to test every batch def __A ( self ): _lowerCAmelCase : Any = AutoTokenizer.from_pretrained("""facebook/mbart-large-cc25""" ) _lowerCAmelCase : List[Any] = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) ) _lowerCAmelCase : List[Any] = tmp_dir.joinpath("""train.source""" ).open().readlines() _lowerCAmelCase : str = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) ) pack_data_dir(a__ , a__ , 128 , a__ ) _lowerCAmelCase : List[Any] = {x.name for x in tmp_dir.iterdir()} _lowerCAmelCase : Tuple = {x.name for x in save_dir.iterdir()} _lowerCAmelCase : Union[str, Any] = save_dir.joinpath("""train.source""" ).open().readlines() # orig: [' Sam ate lunch today.\n', 'Sams lunch ingredients.'] # desired_packed: [' Sam ate lunch today.\n Sams lunch ingredients.'] assert len(a__ ) < len(a__ ) assert len(a__ ) == 1 assert len(packed_examples[0] ) == sum(len(a__ ) for x in orig_examples ) assert orig_paths == new_paths @pytest.mark.skipif(not FAIRSEQ_AVAILABLE , reason="""This test requires fairseq""" ) def __A ( self ): if not FAIRSEQ_AVAILABLE: return _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : Tuple = self._get_dataset(max_len=64 ) _lowerCAmelCase : Optional[int] = 64 _lowerCAmelCase : str = ds.make_dynamic_sampler(a__ , required_batch_size_multiple=a__ ) _lowerCAmelCase : int = [len(a__ ) for x in batch_sampler] assert len(set(a__ ) ) > 1 # it's not dynamic batch size if every batch is the same length assert sum(a__ ) == len(a__ ) # no dropped or added examples _lowerCAmelCase : List[str] = DataLoader(a__ , batch_sampler=a__ , collate_fn=ds.collate_fn , num_workers=2 ) _lowerCAmelCase : List[Any] = [] _lowerCAmelCase : Optional[int] = [] for batch in data_loader: _lowerCAmelCase : int = batch["""input_ids"""].shape _lowerCAmelCase : Union[str, Any] = src_shape[0] assert bs % 
required_batch_size_multiple == 0 or bs < required_batch_size_multiple _lowerCAmelCase : List[str] = np.product(batch["""input_ids"""].shape ) num_src_per_batch.append(a__ ) if num_src_tokens > (max_tokens * 1.1): failures.append(a__ ) assert num_src_per_batch[0] == max(a__ ) if failures: raise AssertionError(F"too many tokens in {len(a__ )} batches" ) def __A ( self ): _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : Dict = self._get_dataset(max_len=512 ) _lowerCAmelCase : int = 2 _lowerCAmelCase : List[str] = ds.make_sortish_sampler(a__ , shuffle=a__ ) _lowerCAmelCase : Dict = DataLoader(a__ , batch_size=a__ , collate_fn=ds.collate_fn , num_workers=2 ) _lowerCAmelCase : int = DataLoader(a__ , batch_size=a__ , collate_fn=ds.collate_fn , num_workers=2 , sampler=a__ ) _lowerCAmelCase : int = tokenizer.pad_token_id def count_pad_tokens(a__ , a__="input_ids" ): return [batch[k].eq(a__ ).sum().item() for batch in data_loader] assert sum(count_pad_tokens(a__ , k="""labels""" ) ) < sum(count_pad_tokens(a__ , k="""labels""" ) ) assert sum(count_pad_tokens(a__ ) ) < sum(count_pad_tokens(a__ ) ) assert len(a__ ) == len(a__ ) def __A ( self , a__=1000 , a__=128 ): if os.getenv("""USE_REAL_DATA""" , a__ ): _lowerCAmelCase : List[str] = """examples/seq2seq/wmt_en_ro""" _lowerCAmelCase : str = max_len * 2 * 64 if not Path(a__ ).joinpath("""train.len""" ).exists(): save_len_file(a__ , a__ ) else: _lowerCAmelCase : List[str] = """examples/seq2seq/test_data/wmt_en_ro""" _lowerCAmelCase : Dict = max_len * 4 save_len_file(a__ , a__ ) _lowerCAmelCase : Optional[Any] = AutoTokenizer.from_pretrained(a__ ) _lowerCAmelCase : Dict = SeqaSeqDataset( a__ , data_dir=a__ , type_path="""train""" , max_source_length=a__ , max_target_length=a__ , n_obs=a__ , ) return ds, max_tokens, tokenizer def __A ( self ): _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : Union[str, Any] = self._get_dataset() _lowerCAmelCase : Any = set(DistributedSortishSampler(a__ , 256 , num_replicas=2 , rank=0 , add_extra_examples=a__ ) ) _lowerCAmelCase : Optional[int] = set(DistributedSortishSampler(a__ , 256 , num_replicas=2 , rank=1 , add_extra_examples=a__ ) ) assert idsa.intersection(a__ ) == set() @parameterized.expand( [ MBART_TINY, MARIAN_TINY, T5_TINY, BART_TINY, PEGASUS_XSUM, ] , ) def __A ( self , a__ ): _lowerCAmelCase : int = AutoTokenizer.from_pretrained(a__ , use_fast=a__ ) if tok_name == MBART_TINY: _lowerCAmelCase : Dict = SeqaSeqDataset( a__ , data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) , type_path="""train""" , max_source_length=4 , max_target_length=8 , src_lang="""EN""" , tgt_lang="""FR""" , ) _lowerCAmelCase : Optional[Any] = train_dataset.dataset_kwargs assert "src_lang" in kwargs and "tgt_lang" in kwargs else: _lowerCAmelCase : List[Any] = SeqaSeqDataset( a__ , data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) , type_path="""train""" , max_source_length=4 , max_target_length=8 , ) _lowerCAmelCase : Tuple = train_dataset.dataset_kwargs assert "add_prefix_space" not in kwargs if tok_name != BART_TINY else "add_prefix_space" in kwargs assert len(a__ ) == 1 if tok_name == BART_TINY else len(a__ ) == 0
126
1
def solution(n: int = 4000000) -> int:
    """simple docstring"""
    fib = [0, 1]
    i = 0
    while fib[i] <= n:
        fib.append(fib[i] + fib[i + 1])
        if fib[i + 2] > n:
            break
        i += 1
    total = 0
    for j in range(len(fib) - 1):
        if fib[j] % 2 == 0:
            total += fib[j]
    return total


if __name__ == "__main__":
    print(f"{solution() = }")
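For the default limit this is Project Euler problem 2; the even Fibonacci terms below four million (2, 8, 34, ..., 3524578) sum to 4613732. Illustrative checks, not part of the original file:

assert solution(10) == 10       # 2 + 8
assert solution() == 4613732    # known result for the default limit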
170
import itertools
from dataclasses import dataclass
from typing import Optional

import pandas as pd
import pyarrow as pa

import datasets
from datasets.table import table_cast


@dataclass
class PandasConfig(datasets.BuilderConfig):
    """simple docstring"""

    features: Optional[datasets.Features] = None


class Pandas(datasets.ArrowBasedBuilder):
    """simple docstring"""

    BUILDER_CONFIG_CLASS = PandasConfig

    def _info(self):
        """simple docstring"""
        return datasets.DatasetInfo(features=self.config.features)

    def _split_generators(self, dl_manager):
        """simple docstring"""
        if not self.config.data_files:
            raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            files = data_files
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
        return splits

    def _cast_table(self, pa_table) -> pa.Table:
        """simple docstring"""
        if self.config.features is not None:
            # more expensive cast to support nested features with keys in a different order
            # allows str <-> int/float or str to Audio for example
            pa_table = table_cast(pa_table, self.config.features.arrow_schema)
        return pa_table

    def _generate_tables(self, files):
        """simple docstring"""
        for i, file in enumerate(itertools.chain.from_iterable(files)):
            with open(file, "rb") as f:
                pa_table = pa.Table.from_pandas(pd.read_pickle(f))
                yield i, self._cast_table(pa_table)
170
1
from ...utils import (
    OptionalDependencyNotAvailable,
    is_torch_available,
    is_transformers_available,
    is_transformers_version,
)


try:
    if not (is_transformers_available() and is_torch_available() and is_transformers_version(">=", "4.25.0")):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_torch_and_transformers_objects import (
        VersatileDiffusionDualGuidedPipeline,
        VersatileDiffusionImageVariationPipeline,
        VersatileDiffusionPipeline,
        VersatileDiffusionTextToImagePipeline,
    )
else:
    from .modeling_text_unet import UNetFlatConditionModel
    from .pipeline_versatile_diffusion import VersatileDiffusionPipeline
    from .pipeline_versatile_diffusion_dual_guided import VersatileDiffusionDualGuidedPipeline
    from .pipeline_versatile_diffusion_image_variation import VersatileDiffusionImageVariationPipeline
    from .pipeline_versatile_diffusion_text_to_image import VersatileDiffusionTextToImagePipeline
148
import unittest

from transformers import AutoTokenizer, NystromformerConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import (
        NystromformerForMaskedLM,
        NystromformerForMultipleChoice,
        NystromformerForQuestionAnswering,
        NystromformerForSequenceClassification,
        NystromformerForTokenClassification,
        NystromformerModel,
    )
    from transformers.models.nystromformer.modeling_nystromformer import NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST


class NystromformerModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return NystromformerConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = NystromformerModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = NystromformerForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = NystromformerForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = NystromformerForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = NystromformerForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = NystromformerForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict


@require_torch
class NystromformerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            NystromformerModel,
            NystromformerForMaskedLM,
            NystromformerForMultipleChoice,
            NystromformerForQuestionAnswering,
            NystromformerForSequenceClassification,
            NystromformerForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": NystromformerModel,
            "fill-mask": NystromformerForMaskedLM,
            "question-answering": NystromformerForQuestionAnswering,
            "text-classification": NystromformerForSequenceClassification,
            "token-classification": NystromformerForTokenClassification,
            "zero-shot": NystromformerForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_headmasking = False

    def setUp(self):
        self.model_tester = NystromformerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=NystromformerConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = NystromformerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


@require_torch
class NystromformerModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = NystromformerModel.from_pretrained("uw-madison/nystromformer-512")
        input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]])

        with torch.no_grad():
            output = model(input_ids)[0]

        expected_shape = torch.Size((1, 6, 768))
        self.assertEqual(output.shape, expected_shape)

        expected_slice = torch.tensor(
            [[[-0.4532, -0.0936, 0.5137], [-0.2676, 0.0628, 0.6186], [-0.3629, -0.1726, 0.4716]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_masked_lm_end_to_end(self):
        sentence = "the [MASK] of Belgium is Brussels"

        tokenizer = AutoTokenizer.from_pretrained("uw-madison/nystromformer-512")
        model = NystromformerForMaskedLM.from_pretrained("uw-madison/nystromformer-512")

        encoding = tokenizer(sentence, return_tensors="pt")

        with torch.no_grad():
            token_logits = model(encoding.input_ids).logits

        prediction = token_logits[:, 2, :].argmax(-1)[0]

        self.assertEqual(tokenizer.decode(prediction), "capital")
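# Usage sketch (illustrative, not part of the original file): inside a
# `transformers` checkout these tests would typically run via pytest, e.g.
#   pytest tests/models/nystromformer/test_modeling_nystromformer.py -k "masked_lm"
# (the exact path is an assumption inferred from the relative imports above).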
148
1
import math


def is_prime(number: int) -> bool:
    """Check primality in O(sqrt(n)) time using the 6k +/- 1 optimization."""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers and all multiples of 3 are not primes
        return False

    # All remaining primes are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def solution(nth: int = 10001) -> int:
    """Return the nth prime number."""
    try:
        nth = int(nth)
    except (TypeError, ValueError):
        raise TypeError("Parameter nth must be int or castable to int.") from None
    if nth <= 0:
        raise ValueError("Parameter nth must be greater than or equal to one.")

    primes: list[int] = []
    num = 2
    while len(primes) < nth:
        if is_prime(num):
            primes.append(num)
        num += 1
    return primes[-1]


if __name__ == "__main__":
    print(f"{solution() = }")
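# Quick sanity checks for the helpers above (an illustrative sketch, not part of
# the original solution): the first prime is 2 and the sixth is 13.
assert is_prime(13) and not is_prime(1)
assert solution(1) == 2
assert solution(6) == 13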
283
"""simple docstring""" import os import unicodedata from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import SPIECE_UNDERLINE, logging _a : str= logging.get_logger(__name__) _a : str= {"vocab_file": "spiece.model"} _a : Tuple= { "vocab_file": { "xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model", "xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model", } } _a : int= { "xlnet-base-cased": None, "xlnet-large-cased": None, } # Segments (not really needed) _a : Optional[int]= 0 _a : str= 1 _a : Tuple= 2 _a : str= 3 _a : Optional[Any]= 4 class UpperCamelCase ( lowercase ): UpperCAmelCase : Union[str, Any] = VOCAB_FILES_NAMES UpperCAmelCase : int = PRETRAINED_VOCAB_FILES_MAP UpperCAmelCase : Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES UpperCAmelCase : str = """left""" def __init__(self : List[Any] , _A : List[str] , _A : int=False , _A : Tuple=True , _A : Optional[Any]=False , _A : List[Any]="<s>" , _A : Dict="</s>" , _A : str="<unk>" , _A : Optional[Any]="<sep>" , _A : Optional[Any]="<pad>" , _A : Optional[Any]="<cls>" , _A : Dict="<mask>" , _A : List[Any]=["<eop>", "<eod>"] , _A : Optional[Dict[str, Any]] = None , **_A : List[str] , ) -> None: # Mask token behave like a normal word, i.e. include the space before it __snake_case : str = AddedToken(_A , lstrip=_A , rstrip=_A) if isinstance(_A , _A) else mask_token __snake_case : Dict = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( do_lower_case=_A , remove_space=_A , keep_accents=_A , bos_token=_A , eos_token=_A , unk_token=_A , sep_token=_A , pad_token=_A , cls_token=_A , mask_token=_A , additional_special_tokens=_A , sp_model_kwargs=self.sp_model_kwargs , **_A , ) __snake_case : Tuple = 3 __snake_case : Optional[int] = do_lower_case __snake_case : Union[str, Any] = remove_space __snake_case : Dict = keep_accents __snake_case : str = vocab_file __snake_case : List[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs) self.sp_model.Load(_A) @property def _lowercase (self : Dict) -> List[str]: return len(self.sp_model) def _lowercase (self : Dict) -> Union[str, Any]: __snake_case : str = {self.convert_ids_to_tokens(_A): i for i in range(self.vocab_size)} vocab.update(self.added_tokens_encoder) return vocab def __getstate__(self : Union[str, Any]) -> List[str]: __snake_case : Optional[Any] = self.__dict__.copy() __snake_case : Union[str, Any] = None return state def __setstate__(self : Union[str, Any] , _A : Optional[Any]) -> str: __snake_case : Optional[int] = d # for backward compatibility if not hasattr(self , 'sp_model_kwargs'): __snake_case : List[Any] = {} __snake_case : Any = spm.SentencePieceProcessor(**self.sp_model_kwargs) self.sp_model.Load(self.vocab_file) def _lowercase (self : Any , _A : Tuple) -> List[str]: if self.remove_space: __snake_case : List[Any] = ' '.join(inputs.strip().split()) else: __snake_case : Tuple = inputs __snake_case : int = outputs.replace('``' , '"').replace('\'\'' , '"') if not self.keep_accents: __snake_case : str = unicodedata.normalize('NFKD' , _A) __snake_case : Tuple = ''.join([c for c in outputs if not unicodedata.combining(_A)]) if self.do_lower_case: __snake_case : Union[str, Any] = outputs.lower() return outputs def _lowercase (self : List[Any] , _A : str) -> List[str]: __snake_case : int = self.preprocess_text(_A) __snake_case : Dict = self.sp_model.encode(_A , 
out_type=_A) __snake_case : Union[str, Any] = [] for piece in pieces: if len(_A) > 1 and piece[-1] == str(',') and piece[-2].isdigit(): __snake_case : List[str] = self.sp_model.EncodeAsPieces(piece[:-1].replace(_A , '')) if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE: if len(cur_pieces[0]) == 1: __snake_case : List[str] = cur_pieces[1:] else: __snake_case : Union[str, Any] = cur_pieces[0][1:] cur_pieces.append(piece[-1]) new_pieces.extend(_A) else: new_pieces.append(_A) return new_pieces def _lowercase (self : Union[str, Any] , _A : Union[str, Any]) -> Any: return self.sp_model.PieceToId(_A) def _lowercase (self : Tuple , _A : str) -> Optional[int]: return self.sp_model.IdToPiece(_A) def _lowercase (self : List[str] , _A : Dict) -> List[Any]: __snake_case : str = ''.join(_A).replace(_A , ' ').strip() return out_string def _lowercase (self : Dict , _A : List[int] , _A : bool = False , _A : bool = None , _A : bool = True , **_A : str , ) -> str: __snake_case : Tuple = kwargs.pop('use_source_tokenizer' , _A) __snake_case : Tuple = self.convert_ids_to_tokens(_A , skip_special_tokens=_A) # To avoid mixing byte-level and unicode for byte-level BPT # we need to build string separately for added tokens and byte-level tokens # cf. https://github.com/huggingface/transformers/issues/1133 __snake_case : List[str] = [] __snake_case : str = [] for token in filtered_tokens: if skip_special_tokens and token in self.all_special_ids: continue if token in self.added_tokens_encoder: if current_sub_text: sub_texts.append(self.convert_tokens_to_string(_A)) __snake_case : List[Any] = [] sub_texts.append(_A) else: current_sub_text.append(_A) if current_sub_text: sub_texts.append(self.convert_tokens_to_string(_A)) # Mimic the behavior of the Rust tokenizer: # By default, there are no spaces between special tokens __snake_case : Optional[int] = ''.join(_A) __snake_case : str = ( clean_up_tokenization_spaces if clean_up_tokenization_spaces is not None else self.clean_up_tokenization_spaces ) if clean_up_tokenization_spaces: __snake_case : str = self.clean_up_tokenization(_A) return clean_text else: return text def _lowercase (self : Dict , _A : List[int] , _A : Optional[List[int]] = None) -> List[int]: __snake_case : int = [self.sep_token_id] __snake_case : Any = [self.cls_token_id] if token_ids_a is None: return token_ids_a + sep + cls return token_ids_a + sep + token_ids_a + sep + cls def _lowercase (self : List[str] , _A : List[int] , _A : Optional[List[int]] = None , _A : bool = False) -> List[int]: if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=_A , token_ids_a=_A , already_has_special_tokens=_A) if token_ids_a is not None: return ([0] * len(_A)) + [1] + ([0] * len(_A)) + [1, 1] return ([0] * len(_A)) + [1, 1] def _lowercase (self : Dict , _A : List[int] , _A : Optional[List[int]] = None) -> List[int]: __snake_case : Tuple = [self.sep_token_id] __snake_case : Optional[int] = [2] if token_ids_a is None: return len(token_ids_a + sep) * [0] + cls_segment_id return len(token_ids_a + sep) * [0] + len(token_ids_a + sep) * [1] + cls_segment_id def _lowercase (self : Tuple , _A : str , _A : Optional[str] = None) -> Tuple[str]: if not os.path.isdir(_A): logger.error(f"Vocabulary path ({save_directory}) should be a directory") return __snake_case : str = os.path.join( _A , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file']) if os.path.abspath(self.vocab_file) != os.path.abspath(_A) and os.path.isfile(self.vocab_file): 
copyfile(self.vocab_file , _A) elif not os.path.isfile(self.vocab_file): with open(_A , 'wb') as fi: __snake_case : Tuple = self.sp_model.serialized_model_proto() fi.write(_A) return (out_vocab_file,)
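# Usage sketch (illustrative; assumes `transformers` and `sentencepiece` are
# installed and the checkpoint is reachable on the Hugging Face hub):
# from transformers import XLNetTokenizer
# tokenizer = XLNetTokenizer.from_pretrained("xlnet-base-cased")
# ids = tokenizer("Hello, world!")["input_ids"]
# tokenizer.decode(ids)  # round-trips the text, with <sep>/<cls> appended on the right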
172
0
"""simple docstring""" from argparse import ArgumentParser from .env import EnvironmentCommand def _SCREAMING_SNAKE_CASE ( ) -> List[Any]: A__ = ArgumentParser("Diffusers CLI tool" , usage="diffusers-cli <command> [<args>]" ) A__ = parser.add_subparsers(help="diffusers-cli command helpers" ) # Register commands EnvironmentCommand.register_subcommand(lowercase_ ) # Let's go A__ = parser.parse_args() if not hasattr(lowercase_ , "func" ): parser.print_help() exit(1 ) # Run A__ = args.func(lowercase_ ) service.run() if __name__ == "__main__": main()
230
"""simple docstring""" from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding class UpperCAmelCase_ ( A_ ): lowercase__ = ['''image_processor''', '''tokenizer'''] lowercase__ = '''AutoImageProcessor''' lowercase__ = '''AutoTokenizer''' def __init__( self : str , snake_case_ : Dict , snake_case_ : List[str] ) -> str: '''simple docstring''' super().__init__(snake_case_ , snake_case_ ) A__ = self.image_processor def __call__( self : int , snake_case_ : Any=None , snake_case_ : Any=None , snake_case_ : Union[str, Any]=None , **snake_case_ : Optional[int] ) -> Optional[int]: '''simple docstring''' if text is None and images is None: raise ValueError("You have to specify either text or images. Both cannot be none." ) if text is not None: A__ = self.tokenizer(snake_case_ , return_tensors=snake_case_ , **snake_case_ ) if images is not None: A__ = self.image_processor(snake_case_ , return_tensors=snake_case_ , **snake_case_ ) if text is not None and images is not None: A__ = image_features.pixel_values return encoding elif text is not None: return encoding else: return BatchEncoding(data=dict(**snake_case_ ) , tensor_type=snake_case_ ) def __magic_name__ ( self : Optional[int] , *snake_case_ : Union[str, Any] , **snake_case_ : List[Any] ) -> int: '''simple docstring''' return self.tokenizer.batch_decode(*snake_case_ , **snake_case_ ) def __magic_name__ ( self : List[str] , *snake_case_ : List[str] , **snake_case_ : Optional[int] ) -> Tuple: '''simple docstring''' return self.tokenizer.decode(*snake_case_ , **snake_case_ ) @property def __magic_name__ ( self : List[Any] ) -> List[Any]: '''simple docstring''' return ["input_ids", "attention_mask", "pixel_values"]
230
1