from __future__ import annotations


def binary_search(a_list: list[int], item: int) -> bool:
    if len(a_list) == 0:
        return False
    midpoint = len(a_list) // 2
    if a_list[midpoint] == item:
        return True
    if item < a_list[midpoint]:
        return binary_search(a_list[:midpoint], item)
    else:
        return binary_search(a_list[midpoint + 1 :], item)


if __name__ == "__main__":
    user_input = input("Enter numbers separated by comma:\n").strip()
    sequence = [int(item.strip()) for item in user_input.split(",")]
    target = int(input("Enter the number to be found in the list:\n").strip())
    not_str = "" if binary_search(sequence, target) else "not "
    print(f"{target} was {not_str}found in {sequence}")
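# Example (added): quick non-interactive checks of binary_search above.
# Binary search presumes the input list is already sorted in ascending order,
# which the interactive prompt does not enforce.
assert binary_search([1, 3, 5, 7, 9], 7) is True
assert binary_search([1, 3, 5, 7, 9], 4) is False
assert binary_search([], 4) is False  # empty input: nothing to find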
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple

import sentencepiece as spm

from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging


logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {"google/pegasus-xsum": "https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model"}
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/pegasus-xsum": 512,
}


class PegasusTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        pad_token="<pad>",
        eos_token="</s>",
        unk_token="<unk>",
        mask_token="<mask_2>",
        mask_token_sent="<mask_1>",
        additional_special_tokens=None,
        offset=103,
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        self.offset = offset
        if additional_special_tokens is not None:
            if not isinstance(additional_special_tokens, list):
                raise TypeError(
                    f"additional_special_tokens should be of type {type(list)}, but is"
                    f" {type(additional_special_tokens)}"
                )
            additional_special_tokens_extended = (
                ([mask_token_sent] + additional_special_tokens)
                if mask_token_sent not in additional_special_tokens and mask_token_sent is not None
                else additional_special_tokens
            )
            # fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken
            additional_special_tokens_extended += [
                f"<unk_{i}>" for i in range(len(additional_special_tokens_extended), self.offset - 1)
            ]
            if len(set(additional_special_tokens_extended)) != len(additional_special_tokens_extended):
                raise ValueError(
                    "Please make sure that the provided additional_special_tokens do not contain an incorrectly"
                    f" shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}."
                )
            additional_special_tokens = additional_special_tokens_extended
        else:
            additional_special_tokens = [mask_token_sent] if mask_token_sent is not None else []
            additional_special_tokens += [f"<unk_{i}>" for i in range(2, self.offset)]

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            eos_token=eos_token,
            unk_token=unk_token,
            mask_token=mask_token,
            pad_token=pad_token,
            mask_token_sent=mask_token_sent,
            offset=offset,
            additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )
        self.mask_token_sent = mask_token_sent
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

        # add special tokens to encoder dict
        self.encoder: Dict[int, str] = {
            0: self.pad_token,
            1: self.eos_token,
        }
        if self.mask_token_sent is not None:
            self.encoder.update(
                {
                    2: self.mask_token_sent,
                    3: self.mask_token,
                }
            )
        if self.offset > 0:
            # entries 2-104 are only used for pretraining and called <mask_1>, <mask_2>, unk_2, ...unk_102
            # mask_token_sent is already added to list -> so start at 1
            self.encoder.update({i + 3: additional_special_tokens[i] for i in range(1, self.offset - 1)})

        self.decoder: Dict[str, int] = {v: k for k, v in self.encoder.items()}

    @property
    def vocab_size(self) -> int:
        return len(self.sp_model) + self.offset

    def get_vocab(self) -> Dict[str, int]:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token: str) -> int:
        if token in self.decoder:
            return self.decoder[token]
        elif token in self.added_tokens_decoder:
            return self.added_tokens_decoder[token]
        sp_id = self.sp_model.piece_to_id(token)
        return sp_id + self.offset

    def _convert_id_to_token(self, index: int) -> str:
        if index in self.encoder:
            return self.encoder[index]
        elif index in self.added_tokens_encoder:
            return self.added_tokens_encoder[index]
        else:
            token = self.sp_model.IdToPiece(index - self.offset)
        return token

    def convert_tokens_to_string(self, tokens) -> str:
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def num_special_tokens_to_add(self, pair=False) -> int:
        return 1

    def _special_token_mask(self, seq: List[int]) -> List[int]:
        all_special_ids = set(self.all_special_ids)  # call it once instead of inside list comp
        all_special_ids.remove(self.unk_token_id)  # <unk> is only sometimes special
        return [1 if x in all_special_ids else 0 for x in seq]

    def get_special_tokens_mask(
        self, token_ids_0: List, token_ids_1: Optional[List] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return self._special_token_mask(token_ids_0)
        elif token_ids_1 is None:
            return self._special_token_mask(token_ids_0) + [1]
        else:
            return self._special_token_mask(token_ids_0 + token_ids_1) + [1]

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        if token_ids_1 is None:
            return token_ids_0 + [self.eos_token_id]
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return token_ids_0 + token_ids_1 + [self.eos_token_id]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)
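# Example (added): a minimal standalone sketch of the id shift implemented by
# _convert_token_to_id above. The piece table here is hypothetical; real ids come
# from the loaded spiece.model, and the first `offset` ids stay reserved for
# pad/eos/mask and the <unk_2>..<unk_102> placeholder tokens.
OFFSET = 103
FAKE_SP_PIECES = {"▁hello": 0, "▁world": 1}  # stand-in for a SentencePiece vocab


def to_model_id(piece: str) -> int:
    # mirrors `sp_id + self.offset` for non-special tokens
    return FAKE_SP_PIECES[piece] + OFFSET


assert to_model_id("▁hello") == 103  # sentencepiece id 0 -> model id 103
assert to_model_id("▁world") == 104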
import unittest

from transformers.utils.backbone_utils import (
    BackboneMixin,
    get_aligned_output_features_output_indices,
    verify_out_features_out_indices,
)


class BackboneUtilsTester(unittest.TestCase):
    def test_get_aligned_output_features_output_indices(self):
        stage_names = ["a", "b", "c"]

        # Defaults to last layer if both are None
        out_features, out_indices = get_aligned_output_features_output_indices(None, None, stage_names)
        self.assertEqual(out_features, ["c"])
        self.assertEqual(out_indices, [2])

        # Out indices set to match out features
        out_features, out_indices = get_aligned_output_features_output_indices(["a", "c"], None, stage_names)
        self.assertEqual(out_features, ["a", "c"])
        self.assertEqual(out_indices, [0, 2])

        # Out features set to match out indices
        out_features, out_indices = get_aligned_output_features_output_indices(None, [0, 2], stage_names)
        self.assertEqual(out_features, ["a", "c"])
        self.assertEqual(out_indices, [0, 2])

        # Out features selected from negative indices
        out_features, out_indices = get_aligned_output_features_output_indices(None, [-3, -1], stage_names)
        self.assertEqual(out_features, ["a", "c"])
        self.assertEqual(out_indices, [-3, -1])

    def test_verify_out_features_out_indices(self):
        # Stage names must be set
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["a", "b"], (0, 1), None)

        # Out features must be a list
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(("a", "b"), (0, 1), ["a", "b"])

        # Out features must be a subset of stage names
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["a", "b"], (0, 1), ["a"])

        # Out indices must be a list or tuple
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(None, 0, ["a", "b"])

        # Out indices must be a subset of stage names
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(None, (0, 1), ["a"])

        # Out features and out indices must be the same length
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["a", "b"], (0,), ["a", "b", "c"])

        # Out features should match out indices
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["a", "b"], (0, 2), ["a", "b", "c"])

        # Out features and out indices should be in order
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["b", "a"], (0, 1), ["a", "b"])

        # Check passes with valid inputs
        verify_out_features_out_indices(["a", "b", "d"], (0, 1, -1), ["a", "b", "c", "d"])

    def test_backbone_mixin(self):
        backbone = BackboneMixin()

        backbone.stage_names = ["a", "b", "c"]
        backbone._out_features = ["a", "c"]
        backbone._out_indices = [0, 2]

        # Check that the output features and indices are set correctly
        self.assertEqual(backbone.out_features, ["a", "c"])
        self.assertEqual(backbone.out_indices, [0, 2])

        # Check out features and indices are updated correctly
        backbone.out_features = ["a", "b"]
        self.assertEqual(backbone.out_features, ["a", "b"])
        self.assertEqual(backbone.out_indices, [0, 1])

        backbone.out_indices = [-3, -1]
        self.assertEqual(backbone.out_features, ["a", "c"])
        self.assertEqual(backbone.out_indices, [-3, -1])
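# Example (added): a simplified sketch of the feature/index alignment that the
# first test above exercises; an illustration only, not the library helper.
def align(out_features, out_indices, stage_names):
    if out_features is None and out_indices is None:
        return [stage_names[-1]], [len(stage_names) - 1]  # default: last stage only
    if out_indices is None:
        return out_features, [stage_names.index(name) for name in out_features]
    return [stage_names[idx] for idx in out_indices], out_indices


assert align(None, None, ["a", "b", "c"]) == (["c"], [2])
assert align(["a", "c"], None, ["a", "b", "c"]) == (["a", "c"], [0, 2])
assert align(None, [-3, -1], ["a", "b", "c"]) == (["a", "c"], [-3, -1])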
import copy
import inspect
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import VideoMAEConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING,
VideoMAEForPreTraining,
VideoMAEForVideoClassification,
VideoMAEModel,
)
from transformers.models.videomae.modeling_videomae import VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from transformers import VideoMAEImageProcessor
class VideoMAEModelTester:
def __init__( self : Optional[Any] , UpperCamelCase__ : List[str] , UpperCamelCase__ : Any=13 , UpperCamelCase__ : Tuple=10 , UpperCamelCase__ : List[str]=3 , UpperCamelCase__ : List[str]=2 , UpperCamelCase__ : int=2 , UpperCamelCase__ : Union[str, Any]=2 , UpperCamelCase__ : Dict=True , UpperCamelCase__ : Tuple=True , UpperCamelCase__ : Tuple=32 , UpperCamelCase__ : str=5 , UpperCamelCase__ : Union[str, Any]=4 , UpperCamelCase__ : Any=37 , UpperCamelCase__ : Any="gelu" , UpperCamelCase__ : Optional[int]=0.1 , UpperCamelCase__ : int=0.1 , UpperCamelCase__ : Optional[Any]=10 , UpperCamelCase__ : str=0.02 , UpperCamelCase__ : str=0.9 , UpperCamelCase__ : Any=None , ) -> Tuple:
"""simple docstring"""
snake_case : List[Any] = parent
snake_case : Tuple = batch_size
snake_case : str = image_size
snake_case : Tuple = num_channels
snake_case : List[Any] = patch_size
snake_case : Optional[Any] = tubelet_size
snake_case : Tuple = num_frames
snake_case : Optional[Any] = is_training
snake_case : Tuple = use_labels
snake_case : List[str] = hidden_size
snake_case : Any = num_hidden_layers
snake_case : int = num_attention_heads
snake_case : List[Any] = intermediate_size
snake_case : Tuple = hidden_act
snake_case : Tuple = hidden_dropout_prob
snake_case : int = attention_probs_dropout_prob
snake_case : Optional[Any] = type_sequence_label_size
snake_case : Optional[int] = initializer_range
snake_case : Any = mask_ratio
snake_case : Optional[int] = scope
# in VideoMAE, the number of tokens equals num_frames/tubelet_size * num_patches per frame
snake_case : Dict = (image_size // patch_size) ** 2
snake_case : Optional[int] = (num_frames // tubelet_size) * self.num_patches_per_frame
# use this variable to define bool_masked_pos
snake_case : Optional[int] = int(mask_ratio * self.seq_length )
def lowerCAmelCase ( self : Any ) -> Dict:
"""simple docstring"""
snake_case : Any = floats_tensor(
[self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size] )
snake_case : Tuple = None
if self.use_labels:
snake_case : int = ids_tensor([self.batch_size] , self.type_sequence_label_size )
snake_case : Dict = self.get_config()
return config, pixel_values, labels
def lowerCAmelCase ( self : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
return VideoMAEConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , num_frames=self.num_frames , tubelet_size=self.tubelet_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=UpperCamelCase__ , initializer_range=self.initializer_range , )
def lowerCAmelCase ( self : Dict , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Dict , UpperCamelCase__ : List[str] ) -> Optional[int]:
"""simple docstring"""
snake_case : Any = VideoMAEModel(config=UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
snake_case : Optional[Any] = model(UpperCamelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowerCAmelCase ( self : int , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Dict , UpperCamelCase__ : List[Any] ) -> str:
"""simple docstring"""
snake_case : Any = VideoMAEForPreTraining(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
# important: each video needs to have the same number of masked patches
# hence we define a single mask, which we then repeat for each example in the batch
snake_case : int = torch.ones((self.num_masks,) )
snake_case : List[str] = torch.cat([mask, torch.zeros(self.seq_length - mask.size(0 ) )] )
snake_case : Tuple = mask.expand(self.batch_size , -1 ).bool()
snake_case : str = model(UpperCamelCase__ , UpperCamelCase__ )
# model only returns predictions for masked patches
snake_case : Tuple = mask.sum().item()
snake_case : Dict = 3 * self.tubelet_size * self.patch_size**2
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_masked_patches, decoder_num_labels) )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class VideoMAEModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (VideoMAEModel, VideoMAEForPreTraining, VideoMAEForVideoClassification) if is_torch_available() else ()
    )
    pipeline_model_mapping = (
        {"feature-extraction": VideoMAEModel, "video-classification": VideoMAEForVideoClassification}
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False
    def setUp(self):
        self.model_tester = VideoMAEModelTester(self)
        self.config_tester = ConfigTester(self, config_class=VideoMAEConfig, has_text_modality=False, hidden_size=37)

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = copy.deepcopy(inputs_dict)

        if model_class == VideoMAEForPreTraining:
            # important: each video needs to have the same number of masked patches
            # hence we define a single mask, which we then repeat for each example in the batch
            mask = torch.ones((self.model_tester.num_masks,))
            mask = torch.cat([mask, torch.zeros(self.model_tester.seq_length - mask.size(0))])
            bool_masked_pos = mask.expand(self.model_tester.batch_size, -1).bool()
            inputs_dict["bool_masked_pos"] = bool_masked_pos.to(torch_device)

        if return_labels:
            if model_class in [
                *get_values(MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING),
            ]:
                inputs_dict["labels"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )

        return inputs_dict
def lowerCAmelCase ( self : List[Any] ) -> Optional[Any]:
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason='''VideoMAE does not use inputs_embeds''' )
def lowerCAmelCase ( self : int ) -> List[str]:
"""simple docstring"""
pass
def lowerCAmelCase ( self : str ) -> str:
"""simple docstring"""
snake_case ,snake_case : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case : Union[str, Any] = model_class(UpperCamelCase__ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
snake_case : str = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(UpperCamelCase__ , nn.Linear ) )
def lowerCAmelCase ( self : Union[str, Any] ) -> int:
"""simple docstring"""
snake_case ,snake_case : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case : int = model_class(UpperCamelCase__ )
snake_case : Dict = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
snake_case : str = [*signature.parameters.keys()]
snake_case : Tuple = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , UpperCamelCase__ )
def lowerCAmelCase ( self : int ) -> str:
"""simple docstring"""
snake_case : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCamelCase__ )
def lowerCAmelCase ( self : List[str] ) -> Any:
"""simple docstring"""
snake_case : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*UpperCamelCase__ )
@slow
def lowerCAmelCase ( self : List[str] ) -> List[Any]:
"""simple docstring"""
for model_name in VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
snake_case : int = VideoMAEModel.from_pretrained(UpperCamelCase__ )
self.assertIsNotNone(UpperCamelCase__ )
def lowerCAmelCase ( self : int ) -> int:
"""simple docstring"""
if not self.has_attentions:
pass
else:
snake_case ,snake_case : str = self.model_tester.prepare_config_and_inputs_for_common()
snake_case : Optional[int] = True
for model_class in self.all_model_classes:
snake_case : Union[str, Any] = self.model_tester.seq_length - self.model_tester.num_masks
snake_case : List[Any] = (
num_visible_patches if model_class == VideoMAEForPreTraining else self.model_tester.seq_length
)
snake_case : Dict = True
snake_case : List[str] = False
snake_case : Tuple = True
snake_case : List[str] = model_class(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
with torch.no_grad():
snake_case : List[Any] = model(**self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) )
snake_case : List[Any] = outputs.attentions
self.assertEqual(len(UpperCamelCase__ ) , self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
snake_case : Any = True
snake_case : Any = model_class(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
with torch.no_grad():
snake_case : Dict = model(**self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) )
snake_case : int = outputs.attentions
self.assertEqual(len(UpperCamelCase__ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
snake_case : Any = len(UpperCamelCase__ )
# Check attention is always last and order is fine
snake_case : Union[str, Any] = True
snake_case : Union[str, Any] = True
snake_case : str = model_class(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
with torch.no_grad():
snake_case : Optional[Any] = model(**self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) )
self.assertEqual(out_len + 1 , len(UpperCamelCase__ ) )
snake_case : Tuple = outputs.attentions
self.assertEqual(len(UpperCamelCase__ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
def lowerCAmelCase ( self : Optional[Any] ) -> str:
"""simple docstring"""
def check_hidden_states_output(UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : int , UpperCamelCase__ : List[Any] ):
snake_case : Union[str, Any] = model_class(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
with torch.no_grad():
snake_case : Union[str, Any] = model(**self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) )
snake_case : Union[str, Any] = outputs.hidden_states
snake_case : Optional[int] = self.model_tester.num_hidden_layers + 1
self.assertEqual(len(UpperCamelCase__ ) , UpperCamelCase__ )
snake_case : Optional[Any] = self.model_tester.seq_length - self.model_tester.num_masks
snake_case : int = num_visible_patches if model_class == VideoMAEForPreTraining else self.model_tester.seq_length
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , )
snake_case ,snake_case : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case : int = True
check_hidden_states_output(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
snake_case : List[str] = True
check_hidden_states_output(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def lowerCAmelCase ( self : List[str] ) -> int:
"""simple docstring"""
pass
def prepare_video():
    file = hf_hub_download(
        repo_id="hf-internal-testing/spaghetti-video", filename="eating_spaghetti.npy", repo_type="dataset"
    )
    video = np.load(file)
    return list(video)
@require_torch
@require_vision
class VideoMAEModelIntegrationTest(unittest.TestCase):
@cached_property
def lowerCAmelCase ( self : Tuple ) -> Union[str, Any]:
"""simple docstring"""
return (
VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] )
if is_vision_available()
else None
)
@slow
def lowerCAmelCase ( self : Dict ) -> str:
"""simple docstring"""
snake_case : Tuple = VideoMAEForVideoClassification.from_pretrained('''MCG-NJU/videomae-base-finetuned-kinetics''' ).to(
UpperCamelCase__ )
snake_case : str = self.default_image_processor
snake_case : Dict = prepare_video()
snake_case : int = image_processor(UpperCamelCase__ , return_tensors='''pt''' ).to(UpperCamelCase__ )
# forward pass
with torch.no_grad():
snake_case : int = model(**UpperCamelCase__ )
# verify the logits
snake_case : Optional[int] = torch.Size((1, 400) )
self.assertEqual(outputs.logits.shape , UpperCamelCase__ )
snake_case : Optional[Any] = torch.tensor([0.3_669, -0.0_688, -0.2_421] ).to(UpperCamelCase__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , UpperCamelCase__ , atol=1e-4 ) )
@slow
def lowerCAmelCase ( self : Dict ) -> Any:
"""simple docstring"""
snake_case : List[str] = VideoMAEForPreTraining.from_pretrained('''MCG-NJU/videomae-base-short''' ).to(UpperCamelCase__ )
snake_case : str = self.default_image_processor
snake_case : Tuple = prepare_video()
snake_case : List[Any] = image_processor(UpperCamelCase__ , return_tensors='''pt''' ).to(UpperCamelCase__ )
# add boolean mask, indicating which patches to mask
snake_case : str = hf_hub_download(repo_id='''hf-internal-testing/bool-masked-pos''' , filename='''bool_masked_pos.pt''' )
snake_case : Dict = torch.load(UpperCamelCase__ )
# forward pass
with torch.no_grad():
snake_case : Tuple = model(**UpperCamelCase__ )
# verify the logits
snake_case : str = torch.Size([1, 1408, 1536] )
snake_case : List[str] = torch.tensor(
[[0.7_994, 0.9_612, 0.8_508], [0.7_401, 0.8_958, 0.8_302], [0.5_862, 0.7_468, 0.7_325]] , device=UpperCamelCase__ )
self.assertEqual(outputs.logits.shape , UpperCamelCase__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , UpperCamelCase__ , atol=1e-4 ) )
# verify the loss (`config.norm_pix_loss` = `True`)
        expected_loss = torch.tensor([0.5142], device=torch_device)
        self.assertTrue(torch.allclose(outputs.loss, expected_loss, atol=1e-4))
        # verify the loss (`config.norm_pix_loss` = `False`)
        model = VideoMAEForPreTraining.from_pretrained(
            "MCG-NJU/videomae-base-short", norm_pix_loss=False
        ).to(torch_device)
        with torch.no_grad():
            outputs = model(**inputs)
        expected_loss = torch.tensor([0.6469], device=torch_device)
        self.assertTrue(torch.allclose(outputs.loss, expected_loss, atol=1e-4))
"""simple docstring"""
from __future__ import annotations
def lowerCamelCase ( _UpperCamelCase : float , _UpperCamelCase : float , _UpperCamelCase : float ) -> float:
'''simple docstring'''
if days_between_payments <= 0:
raise ValueError("""days_between_payments must be > 0""" )
if daily_interest_rate < 0:
raise ValueError("""daily_interest_rate must be >= 0""" )
if principal <= 0:
raise ValueError("""principal must be > 0""" )
return principal * daily_interest_rate * days_between_payments
def lowerCamelCase ( _UpperCamelCase : float , _UpperCamelCase : float , _UpperCamelCase : float , ) -> float:
'''simple docstring'''
if number_of_compounding_periods <= 0:
raise ValueError("""number_of_compounding_periods must be > 0""" )
if nominal_annual_interest_rate_percentage < 0:
raise ValueError("""nominal_annual_interest_rate_percentage must be >= 0""" )
if principal <= 0:
raise ValueError("""principal must be > 0""" )
return principal * (
(1 + nominal_annual_interest_rate_percentage) ** number_of_compounding_periods
- 1
)
def lowerCamelCase ( _UpperCamelCase : float , _UpperCamelCase : float , _UpperCamelCase : float , ) -> float:
'''simple docstring'''
if number_of_years <= 0:
raise ValueError("""number_of_years must be > 0""" )
if nominal_annual_percentage_rate < 0:
raise ValueError("""nominal_annual_percentage_rate must be >= 0""" )
if principal <= 0:
raise ValueError("""principal must be > 0""" )
return compound_interest(
__UpperCamelCase , nominal_annual_percentage_rate / 3_6_5 , number_of_years * 3_6_5 )
if __name__ == "__main__":
import doctest
doctest.testmod()
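# Example (added): the three helpers on concrete inputs; the commented values
# follow directly from the formulas above.
assert round(simple_interest(500, 0.01, 12), 2) == 60.0  # 500 * 0.01 * 12
assert round(compound_interest(10_000, 0.05, 3), 2) == 1576.25  # 10000 * (1.05**3 - 1)
assert 512 < apr_interest(10_000, 0.05, 1) < 513  # ~ 10000 * ((1 + 0.05/365)**365 - 1)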
import socket


def main() -> None:
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    host = socket.gethostname()
    port = 12312

    sock.connect((host, port))
    sock.send(b"Hello server!")

    with open("Received_file", "wb") as out_file:
        print("File opened")
        print("Receiving data...")
        while True:
            data = sock.recv(1024)
            if not data:
                break
            out_file.write(data)

    print("Successfully received the file")
    sock.close()
    print("Connection closed")


if __name__ == "__main__":
    main()
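# Example (added): a hypothetical counterpart server for testing the client
# above locally. The port and greeting mirror the client's assumptions; the
# served file name ("file_to_send") is illustrative only.
def serve_file(filename: str = "file_to_send") -> None:
    server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    server.bind((socket.gethostname(), 12312))  # same port the client connects to
    server.listen(1)
    conn, _addr = server.accept()
    print(conn.recv(1024))  # the client's b"Hello server!" greeting
    with open(filename, "rb") as in_file:
        while chunk := in_file.read(1024):  # stream in 1 KiB chunks until EOF
            conn.send(chunk)
    conn.close()
    server.close()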
import json
import os
import unittest
from transformers import BatchEncoding, MvpTokenizer, MvpTokenizerFast
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin, filter_roberta_detectors
@require_tokenizers
class TestTokenizationMvp(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MvpTokenizer
    rust_tokenizer_class = MvpTokenizerFast
    test_rust_tokenizer = True
    from_pretrained_filter = filter_roberta_detectors
def lowerCamelCase__ ( self ):
'''simple docstring'''
super().setUp()
__lowerCAmelCase = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""\u0120""",
"""\u0120l""",
"""\u0120n""",
"""\u0120lo""",
"""\u0120low""",
"""er""",
"""\u0120lowest""",
"""\u0120newer""",
"""\u0120wider""",
"""<unk>""",
]
__lowerCAmelCase = dict(zip(__SCREAMING_SNAKE_CASE,range(len(__SCREAMING_SNAKE_CASE ) ) ) )
__lowerCAmelCase = ["""#version: 0.2""", """\u0120 l""", """\u0120l o""", """\u0120lo w""", """e r""", """"""]
__lowerCAmelCase = {"""unk_token""": """<unk>"""}
__lowerCAmelCase = os.path.join(self.tmpdirname,VOCAB_FILES_NAMES["""vocab_file"""] )
__lowerCAmelCase = os.path.join(self.tmpdirname,VOCAB_FILES_NAMES["""merges_file"""] )
with open(self.vocab_file,"""w""",encoding="""utf-8""" ) as fp:
fp.write(json.dumps(__SCREAMING_SNAKE_CASE ) + """\n""" )
with open(self.merges_file,"""w""",encoding="""utf-8""" ) as fp:
fp.write("""\n""".join(__SCREAMING_SNAKE_CASE ) )
def lowerCamelCase__ ( self,**__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname,**__SCREAMING_SNAKE_CASE )
def lowerCamelCase__ ( self,**__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return self.rust_tokenizer_class.from_pretrained(self.tmpdirname,**__SCREAMING_SNAKE_CASE )
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
return "lower newer", "lower newer"
@cached_property
def lowerCamelCase__ ( self ):
'''simple docstring'''
return MvpTokenizer.from_pretrained("""RUCAIBox/mvp""" )
@cached_property
def lowerCamelCase__ ( self ):
'''simple docstring'''
return MvpTokenizerFast.from_pretrained("""RUCAIBox/mvp""" )
@require_torch
def lowerCamelCase__ ( self ):
'''simple docstring'''
__lowerCAmelCase = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""]
__lowerCAmelCase = [0, 2_50, 2_51, 1_78_18, 13, 3_91_86, 19_38, 4, 2]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
__lowerCAmelCase = tokenizer(__SCREAMING_SNAKE_CASE,max_length=len(__SCREAMING_SNAKE_CASE ),padding=__SCREAMING_SNAKE_CASE,return_tensors="""pt""" )
self.assertIsInstance(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE )
self.assertEqual((2, 9),batch.input_ids.shape )
self.assertEqual((2, 9),batch.attention_mask.shape )
__lowerCAmelCase = batch.input_ids.tolist()[0]
self.assertListEqual(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE )
# Test that special tokens are reset
@require_torch
def lowerCamelCase__ ( self ):
'''simple docstring'''
__lowerCAmelCase = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
__lowerCAmelCase = tokenizer(__SCREAMING_SNAKE_CASE,padding=__SCREAMING_SNAKE_CASE,return_tensors="""pt""" )
# check if input_ids are returned and no labels
self.assertIn("""input_ids""",__SCREAMING_SNAKE_CASE )
self.assertIn("""attention_mask""",__SCREAMING_SNAKE_CASE )
self.assertNotIn("""labels""",__SCREAMING_SNAKE_CASE )
self.assertNotIn("""decoder_attention_mask""",__SCREAMING_SNAKE_CASE )
@require_torch
def lowerCamelCase__ ( self ):
'''simple docstring'''
__lowerCAmelCase = [
"""Summary of the text.""",
"""Another summary.""",
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
__lowerCAmelCase = tokenizer(text_target=__SCREAMING_SNAKE_CASE,max_length=32,padding="""max_length""",return_tensors="""pt""" )
self.assertEqual(32,targets["""input_ids"""].shape[1] )
@require_torch
def lowerCamelCase__ ( self ):
'''simple docstring'''
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
__lowerCAmelCase = tokenizer(
["""I am a small frog""" * 10_24, """I am a small frog"""],padding=__SCREAMING_SNAKE_CASE,truncation=__SCREAMING_SNAKE_CASE,return_tensors="""pt""" )
self.assertIsInstance(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE )
self.assertEqual(batch.input_ids.shape,(2, 10_24) )
@require_torch
def lowerCamelCase__ ( self ):
'''simple docstring'''
__lowerCAmelCase = ["""A long paragraph for summarization."""]
__lowerCAmelCase = [
"""Summary of the text.""",
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
__lowerCAmelCase = tokenizer(__SCREAMING_SNAKE_CASE,text_target=__SCREAMING_SNAKE_CASE,return_tensors="""pt""" )
__lowerCAmelCase = inputs["""input_ids"""]
__lowerCAmelCase = inputs["""labels"""]
self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item() )
self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item() )
def lowerCamelCase__ ( self ):
'''simple docstring'''
pass
def lowerCamelCase__ ( self ):
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})' ):
__lowerCAmelCase = self.rust_tokenizer_class.from_pretrained(__SCREAMING_SNAKE_CASE,**__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = self.tokenizer_class.from_pretrained(__SCREAMING_SNAKE_CASE,**__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = """A, <mask> AllenNLP sentence."""
__lowerCAmelCase = tokenizer_r.encode_plus(__SCREAMING_SNAKE_CASE,add_special_tokens=__SCREAMING_SNAKE_CASE,return_token_type_ids=__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = tokenizer_p.encode_plus(__SCREAMING_SNAKE_CASE,add_special_tokens=__SCREAMING_SNAKE_CASE,return_token_type_ids=__SCREAMING_SNAKE_CASE )
# token_type_ids should put 0 everywhere
self.assertEqual(sum(tokens_r["""token_type_ids"""] ),sum(tokens_p["""token_type_ids"""] ) )
# attention_mask should put 1 everywhere, so sum over length should be 1
self.assertEqual(
sum(tokens_r["""attention_mask"""] ) / len(tokens_r["""attention_mask"""] ),sum(tokens_p["""attention_mask"""] ) / len(tokens_p["""attention_mask"""] ),)
__lowerCAmelCase = tokenizer_r.convert_ids_to_tokens(tokens_r["""input_ids"""] )
__lowerCAmelCase = tokenizer_p.convert_ids_to_tokens(tokens_p["""input_ids"""] )
# Rust correctly handles the space before the mask while python doesnt
self.assertSequenceEqual(tokens_p["""input_ids"""],[0, 2_50, 6, 5_02_64, 38_23, 4_87, 2_19_92, 36_45, 4, 2] )
self.assertSequenceEqual(tokens_r["""input_ids"""],[0, 2_50, 6, 5_02_64, 38_23, 4_87, 2_19_92, 36_45, 4, 2] )
self.assertSequenceEqual(
__SCREAMING_SNAKE_CASE,["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""] )
self.assertSequenceEqual(
__SCREAMING_SNAKE_CASE,["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""] )
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
    DDIMScheduler,
    KandinskyV22Img2ImgPipeline,
    KandinskyV22PriorPipeline,
    UNet2DConditionModel,
    VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu

from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference


enable_full_determinism()


class KandinskyV22Img2ImgPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyV22Img2ImgPipeline
    params = ["image_embeds", "negative_image_embeds", "image"]
    batch_params = [
        "image_embeds",
        "negative_image_embeds",
        "image",
    ]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "strength",
        "guidance_scale",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100

    @property
    def dummy_unet(self):
        torch.manual_seed(0)

        model_kwargs = {
            "in_channels": 4,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 8,
            "addition_embed_type": "image",
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
            "layers_per_block": 1,
            "encoder_hid_dim": self.text_embedder_hidden_size,
            "encoder_hid_dim_type": "image_proj",
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": None,
        }

        model = UNet2DConditionModel(**model_kwargs)
        return model

    @property
    def dummy_movq_kwargs(self):
        return {
            "block_out_channels": [32, 64],
            "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": [
                "AttnUpDecoderBlock2D",
                "UpDecoderBlock2D",
            ],
            "vq_embed_dim": 4,
        }

    @property
    def dummy_movq(self):
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model

    def get_dummy_components(self):
        unet = self.dummy_unet
        movq = self.dummy_movq

        ddim_config = {
            "num_train_timesteps": 1000,
            "beta_schedule": "linear",
            "beta_start": 0.00085,
            "beta_end": 0.012,
            "clip_sample": False,
            "set_alpha_to_one": False,
            "steps_offset": 0,
            "prediction_type": "epsilon",
            "thresholding": False,
        }

        scheduler = DDIMScheduler(**ddim_config)

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }

        return components

    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed + 1)).to(
            device
        )
        # create init_image
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((256, 256))

        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image": init_image,
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "generator": generator,
            "height": 64,
            "width": 64,
            "num_inference_steps": 10,
            "guidance_scale": 7.0,
            "strength": 0.2,
            "output_type": "np",
        }
        return inputs

    def test_kandinsky_img2img(self):
        device = "cpu"

        components = self.get_dummy_components()

        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)

        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images

        image_from_tuple = pipe(
            **self.get_dummy_inputs(device),
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array(
            [0.6199778, 0.63984406, 0.46145785, 0.62944984, 0.5622215, 0.47306132, 0.47441456, 0.4607606, 0.48719263]
        )

        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"


@slow
@require_torch_gpu
class KandinskyV22Img2ImgPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_kandinsky_img2img(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/kandinskyv22_img2img_frog.npy"
        )

        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png"
        )
        prompt = "A red cartoon frog, 4k"

        pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
        )
        pipe_prior.to(torch_device)

        pipeline = KandinskyV22Img2ImgPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16
        )
        pipeline = pipeline.to(torch_device)

        pipeline.set_progress_bar_config(disable=None)

        generator = torch.Generator(device="cpu").manual_seed(0)

        image_emb, zero_image_emb = pipe_prior(
            prompt,
            generator=generator,
            num_inference_steps=5,
            negative_prompt="",
        ).to_tuple()

        output = pipeline(
            image=init_image,
            image_embeds=image_emb,
            negative_image_embeds=zero_image_emb,
            generator=generator,
            num_inference_steps=100,
            height=768,
            width=768,
            strength=0.2,
            output_type="np",
        )

        image = output.images[0]

        assert image.shape == (768, 768, 3)

        assert_mean_pixel_difference(image, expected_image)
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_tf_available,
is_torch_available,
)
a = {
'configuration_speech_to_text': ['SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'Speech2TextConfig'],
'processing_speech_to_text': ['Speech2TextProcessor'],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a = ['Speech2TextTokenizer']
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a = ['Speech2TextFeatureExtractor']
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a = [
'TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFSpeech2TextForConditionalGeneration',
'TFSpeech2TextModel',
'TFSpeech2TextPreTrainedModel',
]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a = [
'SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST',
'Speech2TextForConditionalGeneration',
'Speech2TextModel',
'Speech2TextPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_speech_to_text import SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, SpeechaTextConfig
from .processing_speech_to_text import SpeechaTextProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_speech_to_text import SpeechaTextTokenizer
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_speech_to_text import SpeechaTextFeatureExtractor
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_speech_to_text import (
TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFSpeechaTextForConditionalGeneration,
TFSpeechaTextModel,
TFSpeechaTextPreTrainedModel,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_speech_to_text import (
SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
SpeechaTextForConditionalGeneration,
SpeechaTextModel,
SpeechaTextPreTrainedModel,
)
else:
import sys
a = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 155 |
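# Example (added): a toy sketch of the lazy-import pattern used above -- nothing
# heavy is imported until an attribute is first accessed. This is a conceptual
# illustration, not the actual transformers._LazyModule implementation.
import importlib
import types


class ToyLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # map each exported name to the submodule that defines it
        self._name_to_module = {v: k for k, values in import_structure.items() for v in values}

    def __getattr__(self, attr):
        # import the defining submodule only now, on first access
        module = importlib.import_module("." + self._name_to_module[attr], self.__name__)
        return getattr(module, attr)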
"""simple docstring"""
from math import ceil
from typing import List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import BatchFeature, SequenceFeatureExtractor
from ...utils import TensorType, logging
a = logging.get_logger(__name__)
class SCREAMING_SNAKE_CASE__ ( _a ):
_a = ['audio_values', 'audio_mask']
def __init__( self : Optional[int] , lowerCAmelCase : List[str]=2048 , lowerCAmelCase : List[Any]=1 , lowerCAmelCase : Optional[Any]=[16, 16] , lowerCAmelCase : Optional[Any]=128 , lowerCAmelCase : Union[str, Any]=4_4100 , lowerCAmelCase : Any=86 , lowerCAmelCase : List[Any]=2048 , lowerCAmelCase : List[str]=0.0 , **lowerCAmelCase : Any , ):
super().__init__(
feature_size=lowerCAmelCase , sampling_rate=lowerCAmelCase , padding_value=lowerCAmelCase , **lowerCAmelCase , )
lowerCAmelCase = spectrogram_length
lowerCAmelCase = num_channels
lowerCAmelCase = patch_size
lowerCAmelCase = feature_size // self.patch_size[1]
lowerCAmelCase = n_fft
lowerCAmelCase = sampling_rate // hop_length_to_sampling_rate
lowerCAmelCase = sampling_rate
lowerCAmelCase = padding_value
lowerCAmelCase = mel_filter_bank(
num_frequency_bins=1 + n_fft // 2 , num_mel_filters=lowerCAmelCase , min_frequency=0.0 , max_frequency=2_2050.0 , sampling_rate=lowerCAmelCase , norm="""slaney""" , mel_scale="""slaney""" , ).T
def __lowercase ( self : int , lowerCAmelCase : np.array ):
lowerCAmelCase = spectrogram(
lowerCAmelCase , window_function(self.n_fft , """hann""" ) , frame_length=self.n_fft , hop_length=self.hop_length , power=2.0 , mel_filters=self.mel_filters.T , log_mel="""dB""" , db_range=80.0 , )
lowerCAmelCase = log_spec[:, :-1]
lowerCAmelCase = log_spec - 20.0
lowerCAmelCase = np.clip(log_spec / 40.0 , -2.0 , 0.0 ) + 1.0
return log_spec
def __call__( self : Dict , lowerCAmelCase : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , lowerCAmelCase : Optional[Union[str, TensorType]] = None , lowerCAmelCase : Optional[bool] = True , lowerCAmelCase : Optional[int] = None , lowerCAmelCase : bool = False , lowerCAmelCase : bool = False , **lowerCAmelCase : Dict , ):
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
"""This feature extractor is set to support sampling rate"""
f''' of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled'''
f''' with {self.sampling_rate} and not {sampling_rate}.''' )
else:
logger.warning(
"""It is strongly recommended to pass the `sampling_rate` argument to this function. """
"""Failing to do so can result in silent errors that might be hard to debug.""" )
lowerCAmelCase = isinstance(lowerCAmelCase , np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(f'''Only mono-channel audio is supported for input to {self}''' )
lowerCAmelCase = is_batched_numpy or (
isinstance(lowerCAmelCase , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
lowerCAmelCase = [np.asarray([speech] , dtype=np.floataa ).T for speech in raw_speech]
elif not is_batched and not isinstance(lowerCAmelCase , np.ndarray ):
lowerCAmelCase = np.asarray(lowerCAmelCase , dtype=np.floataa )
elif isinstance(lowerCAmelCase , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
lowerCAmelCase = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
lowerCAmelCase = [np.asarray([raw_speech] ).T]
# Convert audio signals to log mel spectrograms, truncate by time axis
lowerCAmelCase = [
self._np_extract_fbank_features(waveform.squeeze() ).T[: self.spectrogram_length] for waveform in raw_speech
]
if isinstance(audio_features[0] , lowerCAmelCase ):
lowerCAmelCase = [np.asarray(lowerCAmelCase , dtype=np.floataa ) for feature in audio_features]
# Create audio attention mask
lowerCAmelCase = max(
[ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len for feature in audio_features] ) # The maximum number of audio patches in a batch
if return_attention_mask:
lowerCAmelCase = [
(ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [1]
+ (max_patch_len - ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [0]
for feature in audio_features
]
lowerCAmelCase = np.array(lowerCAmelCase ).astype(np.floataa )
# convert into correct format for padding
lowerCAmelCase = max_patch_len // self.freq_len * self.patch_size[0] # The maximum audio size in a batch
lowerCAmelCase = np.ones([len(lowerCAmelCase ), 1, max_time_len, self.feature_size] ).astype(np.floataa )
lowerCAmelCase = padded_audio_features * self.padding_value
for i in range(len(lowerCAmelCase ) ):
lowerCAmelCase = audio_features[i]
lowerCAmelCase = feature
# return as BatchFeature
if return_attention_mask:
lowerCAmelCase = {"""audio_values""": padded_audio_features, """audio_mask""": audio_mask}
else:
lowerCAmelCase = {"""audio_values""": padded_audio_features}
lowerCAmelCase = BatchFeature(data=lowerCAmelCase , tensor_type=lowerCAmelCase )
return encoded_inputs
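# Example (added): a usage sketch of the feature extractor above on synthetic
# audio. Shapes follow from the defaults (128 mel bins, 16x16 patches); treat
# the exact time dimension as implementation-defined.
#
#     extractor = TvltFeatureExtractor()
#     audio = np.sin(np.linspace(0, 440 * 2 * np.pi, 44_100)).astype(np.float32)
#     features = extractor(list(audio), sampling_rate=44_100, return_tensors="np")
#     features["audio_values"].shape  # (1, 1, padded_time, 128) log-mel spectrogram
#     features["audio_mask"].shape    # (1, num_patches): 1 = real patch, 0 = padding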
from __future__ import annotations


def p_series(nth_term: int, power: int) -> list[str]:
    """Return the first ``nth_term`` terms of the P-series 1 + 1/2^p + 1/3^p + ..."""
    if nth_term == "":
        return [""]
    nth_term = int(nth_term)
    power = int(power)
    series: list[str] = []
    for temp in range(int(nth_term)):
        series.append(f"1 / {pow(temp + 1, int(power))}" if series else "1")
    return series


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    nth_term = int(input("Enter the last number (nth term) of the P-Series"))
    power = int(input("Enter the power for P-Series"))
    print("Formula of P-Series => 1+1/2^p+1/3^p ..... 1/n^p")
    print(p_series(nth_term, power))
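# Example (added): non-interactive checks of p_series above.
assert p_series(5, 2) == ["1", "1 / 4", "1 / 9", "1 / 16", "1 / 25"]
assert p_series("", 1000) == [""]  # empty nth_term short-circuits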
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class LayoutLMvaImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        apply_ocr=True,
    ):
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.apply_ocr = apply_ocr

    def prepare_image_processor_dict(self):
        return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class LayoutLMvaImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = LayoutLMvaImageProcessor if is_pytesseract_available() else None

    def setUp(self):
        self.image_processor_tester = LayoutLMvaImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
def UpperCAmelCase_ ( self )-> Dict:
UpperCamelCase_ = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_lowercase , "do_resize" ) )
self.assertTrue(hasattr(_lowercase , "size" ) )
self.assertTrue(hasattr(_lowercase , "apply_ocr" ) )
def UpperCAmelCase_ ( self )-> List[Any]:
UpperCamelCase_ = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"height": 18, "width": 18} )
UpperCamelCase_ = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
self.assertEqual(image_processor.size , {"height": 42, "width": 42} )
def UpperCAmelCase_ ( self )-> Any:
pass
def UpperCAmelCase_ ( self )-> List[str]:
# Initialize image_processing
UpperCamelCase_ = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
UpperCamelCase_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowercase )
for image in image_inputs:
self.assertIsInstance(_lowercase , Image.Image )
# Test not batched input
UpperCamelCase_ = image_processing(image_inputs[0] , return_tensors="pt" )
self.assertEqual(
encoding.pixel_values.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) , )
self.assertIsInstance(encoding.words , _lowercase )
self.assertIsInstance(encoding.boxes , _lowercase )
# Test batched
UpperCamelCase_ = image_processing(_lowercase , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) , )
def UpperCAmelCase_ ( self )-> str:
# Initialize image_processing
UpperCamelCase_ = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
UpperCamelCase_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowercase , numpify=_lowercase )
for image in image_inputs:
self.assertIsInstance(_lowercase , np.ndarray )
# Test not batched input
UpperCamelCase_ = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) , )
# Test batched
UpperCamelCase_ = image_processing(_lowercase , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) , )
def UpperCAmelCase_ ( self )-> List[str]:
# Initialize image_processing
UpperCamelCase_ = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
UpperCamelCase_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowercase , torchify=_lowercase )
for image in image_inputs:
self.assertIsInstance(_lowercase , torch.Tensor )
# Test not batched input
UpperCamelCase_ = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) , )
# Test batched
UpperCamelCase_ = image_processing(_lowercase , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) , )
def UpperCAmelCase_ ( self )-> Any:
# with apply_OCR = True
UpperCamelCase_ = LayoutLMvaImageProcessor()
from datasets import load_dataset
UpperCamelCase_ = load_dataset("hf-internal-testing/fixtures_docvqa" , split="test" )
UpperCamelCase_ = Image.open(ds[0]["file"] ).convert("RGB" )
UpperCamelCase_ = image_processing(_lowercase , return_tensors="pt" )
self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) )
self.assertEqual(len(encoding.words ) , len(encoding.boxes ) )
# fmt: off
# the words and boxes were obtained with Tesseract 4.1.1
UpperCamelCase_ = [["11:14", "to", "11:39", "a.m", "11:39", "to", "11:44", "a.m.", "11:44", "a.m.", "to", "12:25", "p.m.", "12:25", "to", "12:58", "p.m.", "12:58", "to", "4:00", "p.m.", "2:00", "to", "5:00", "p.m.", "Coffee", "Break", "Coffee", "will", "be", "served", "for", "men", "and", "women", "in", "the", "lobby", "adjacent", "to", "exhibit", "area.", "Please", "move", "into", "exhibit", "area.", "(Exhibits", "Open)", "TRRF", "GENERAL", "SESSION", "(PART", "|)", "Presiding:", "Lee", "A.", "Waller", "TRRF", "Vice", "President", "“Introductory", "Remarks”", "Lee", "A.", "Waller,", "TRRF", "Vice", "Presi-", "dent", "Individual", "Interviews", "with", "TRRF", "Public", "Board", "Members", "and", "Sci-", "entific", "Advisory", "Council", "Mem-", "bers", "Conducted", "by", "TRRF", "Treasurer", "Philip", "G.", "Kuehn", "to", "get", "answers", "which", "the", "public", "refrigerated", "warehousing", "industry", "is", "looking", "for.", "Plus", "questions", "from", "the", "floor.", "Dr.", "Emil", "M.", "Mrak,", "University", "of", "Cal-", "ifornia,", "Chairman,", "TRRF", "Board;", "Sam", "R.", "Cecil,", "University", "of", "Georgia", "College", "of", "Agriculture;", "Dr.", "Stanley", "Charm,", "Tufts", "University", "School", "of", "Medicine;", "Dr.", "Robert", "H.", "Cotton,", "ITT", "Continental", "Baking", "Company;", "Dr.", "Owen", "Fennema,", "University", "of", "Wis-", "consin;", "Dr.", "Robert", "E.", "Hardenburg,", "USDA.", "Questions", "and", "Answers", "Exhibits", "Open", "Capt.", "Jack", "Stoney", "Room", "TRRF", "Scientific", "Advisory", "Council", "Meeting", "Ballroom", "Foyer"]] # noqa: E231
UpperCamelCase_ = [[[141, 57, 214, 69], [228, 58, 252, 69], [141, 75, 216, 88], [230, 79, 280, 88], [142, 260, 218, 273], [230, 261, 255, 273], [143, 279, 218, 290], [231, 282, 290, 291], [143, 342, 218, 354], [231, 345, 289, 355], [202, 362, 227, 373], [143, 379, 220, 392], [231, 382, 291, 394], [144, 714, 220, 726], [231, 715, 256, 726], [144, 732, 220, 745], [232, 736, 291, 747], [144, 769, 218, 782], [231, 770, 256, 782], [141, 788, 202, 801], [215, 791, 274, 804], [143, 826, 204, 838], [215, 826, 240, 838], [142, 844, 202, 857], [215, 847, 274, 859], [334, 57, 427, 69], [440, 57, 522, 69], [369, 75, 461, 88], [469, 75, 516, 88], [528, 76, 562, 88], [570, 76, 667, 88], [675, 75, 711, 87], [721, 79, 778, 88], [789, 75, 840, 88], [369, 97, 470, 107], [484, 94, 507, 106], [518, 94, 562, 107], [576, 94, 655, 110], [668, 94, 792, 109], [804, 95, 829, 107], [369, 113, 465, 125], [477, 116, 547, 125], [562, 113, 658, 125], [671, 116, 748, 125], [761, 113, 811, 125], [369, 131, 465, 143], [477, 133, 548, 143], [563, 130, 698, 145], [710, 130, 802, 146], [336, 171, 412, 183], [423, 171, 572, 183], [582, 170, 716, 184], [728, 171, 817, 187], [829, 171, 844, 186], [338, 197, 482, 212], [507, 196, 557, 209], [569, 196, 595, 208], [610, 196, 702, 209], [505, 214, 583, 226], [595, 214, 656, 227], [670, 215, 807, 227], [335, 259, 543, 274], [556, 259, 708, 272], [372, 279, 422, 291], [435, 279, 460, 291], [474, 279, 574, 292], [587, 278, 664, 291], [676, 278, 738, 291], [751, 279, 834, 291], [372, 298, 434, 310], [335, 341, 483, 354], [497, 341, 655, 354], [667, 341, 728, 354], [740, 341, 825, 354], [335, 360, 430, 372], [442, 360, 534, 372], [545, 359, 687, 372], [697, 360, 754, 372], [765, 360, 823, 373], [334, 378, 428, 391], [440, 378, 577, 394], [590, 378, 705, 391], [720, 378, 801, 391], [334, 397, 400, 409], [370, 416, 529, 429], [544, 416, 576, 432], [587, 416, 665, 428], [677, 416, 814, 429], [372, 435, 452, 450], [465, 434, 495, 447], [511, 434, 600, 447], [611, 436, 637, 447], [649, 436, 694, 451], [705, 438, 824, 447], [369, 453, 452, 466], [464, 454, 509, 466], [522, 453, 611, 469], [625, 453, 792, 469], [370, 472, 556, 488], [570, 472, 684, 487], [697, 472, 718, 485], [732, 472, 835, 488], [369, 490, 411, 503], [425, 490, 484, 503], [496, 490, 635, 506], [645, 490, 707, 503], [718, 491, 761, 503], [771, 490, 840, 503], [336, 510, 374, 521], [388, 510, 447, 522], [460, 510, 489, 521], [503, 510, 580, 522], [592, 509, 736, 525], [745, 509, 770, 522], [781, 509, 840, 522], [338, 528, 434, 541], [448, 528, 596, 541], [609, 527, 687, 540], [700, 528, 792, 541], [336, 546, 397, 559], [407, 546, 431, 559], [443, 546, 525, 560], [537, 546, 680, 562], [688, 546, 714, 559], [722, 546, 837, 562], [336, 565, 449, 581], [461, 565, 485, 577], [497, 565, 665, 581], [681, 565, 718, 577], [732, 565, 837, 580], [337, 584, 438, 597], [452, 583, 521, 596], [535, 584, 677, 599], [690, 583, 787, 596], [801, 583, 825, 596], [338, 602, 478, 615], [492, 602, 530, 614], [543, 602, 638, 615], [650, 602, 676, 614], [688, 602, 788, 615], [802, 602, 843, 614], [337, 621, 502, 633], [516, 621, 615, 637], [629, 621, 774, 636], [789, 621, 827, 633], [337, 639, 418, 652], [432, 640, 571, 653], [587, 639, 731, 655], [743, 639, 769, 652], [780, 639, 841, 652], [338, 658, 440, 673], [455, 658, 491, 670], [508, 658, 602, 671], [616, 658, 638, 670], [654, 658, 835, 674], [337, 677, 429, 689], [337, 714, 482, 726], [495, 714, 548, 726], [561, 714, 683, 726], [338, 770, 461, 782], [474, 769, 554, 785], [489, 788, 562, 803], 
[576, 788, 643, 801], [656, 787, 751, 804], [764, 788, 844, 801], [334, 825, 421, 838], [430, 824, 574, 838], [584, 824, 723, 841], [335, 844, 450, 857], [464, 843, 583, 860], [628, 862, 755, 875], [769, 861, 848, 878]]] # noqa: E231
# fmt: on
self.assertListEqual(encoding.words , _lowercase )
self.assertListEqual(encoding.boxes , _lowercase )
# with apply_OCR = False
UpperCamelCase_ = LayoutLMvaImageProcessor(apply_ocr=_lowercase )
UpperCamelCase_ = image_processing(_lowercase , return_tensors="pt" )
self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) )
| 60 | 0 |
import math
from typing import Dict, Iterable, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
get_image_size,
is_torch_available,
is_torch_tensor,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_torch_available():
import torch
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)
def get_resize_output_image_size(
    input_image: np.ndarray, output_size: Union[int, Iterable[int]], keep_aspect_ratio: bool, multiple: int
) -> Tuple[int, int]:
    """Compute the (height, width) to resize to, optionally keeping the aspect ratio
    and constraining each side to a multiple of `multiple`."""

    def constraint_to_multiple_of(val, multiple, min_val=0, max_val=None):
        x = round(val / multiple) * multiple

        if max_val is not None and x > max_val:
            x = math.floor(val / multiple) * multiple

        if x < min_val:
            x = math.ceil(val / multiple) * multiple

        return x

    output_size = (output_size, output_size) if isinstance(output_size, int) else output_size

    input_height, input_width = get_image_size(input_image)
    output_height, output_width = output_size

    # determine new height and width
    scale_height = output_height / input_height
    scale_width = output_width / input_width

    if keep_aspect_ratio:
        # scale as little as possible: keep whichever scale factor is closest to 1
        if abs(1 - scale_width) < abs(1 - scale_height):
            # fit width
            scale_height = scale_width
        else:
            # fit height
            scale_width = scale_height

    new_height = constraint_to_multiple_of(scale_height * input_height, multiple=multiple)
    new_width = constraint_to_multiple_of(scale_width * input_width, multiple=multiple)

    return (new_height, new_width)
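# Minimal standalone sketch of the sizing rule above (the numbers are
# illustrative): fit a 480x640 image into 384x384, keep the aspect ratio by
# picking the scale factor closest to 1, then snap each side to a multiple of 32.
if __name__ == "__main__":
    scale_height, scale_width = 384 / 480, 384 / 640
    scale = scale_width if abs(1 - scale_width) < abs(1 - scale_height) else scale_height
    print(round(scale * 480 / 32) * 32, round(scale * 640 / 32) * 32)  # 384 512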
class DPTImageProcessor(BaseImageProcessor):
    r"""Constructs a DPT image processor."""

    model_input_names = ["pixel_values"]

    def __init__(self, do_resize: bool = True, size: Dict[str, int] = None, resample: PILImageResampling = PILImageResampling.BILINEAR, keep_aspect_ratio: bool = False, ensure_multiple_of: int = 1, do_rescale: bool = True, rescale_factor: Union[int, float] = 1 / 255, do_normalize: bool = True, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, **kwargs) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 384, "width": 384}
        size = get_size_dict(size)
        self.do_resize = do_resize
        self.size = size
        self.keep_aspect_ratio = keep_aspect_ratio
        self.ensure_multiple_of = ensure_multiple_of
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(self, image: np.ndarray, size: Dict[str, int], keep_aspect_ratio: bool = False, ensure_multiple_of: int = 1, resample: PILImageResampling = PILImageResampling.BICUBIC, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}")
        output_size = get_resize_output_image_size(
            image,
            output_size=(size["height"], size["width"]),
            keep_aspect_ratio=keep_aspect_ratio,
            multiple=ensure_multiple_of,
        )
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def rescale(self, image: np.ndarray, scale: Union[int, float], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image: np.ndarray, mean: Union[float, List[float]], std: Union[float, List[float]], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(self, images: ImageInput, do_resize: bool = None, size: int = None, keep_aspect_ratio: bool = None, ensure_multiple_of: int = None, resample: PILImageResampling = None, do_rescale: bool = None, rescale_factor: float = None, do_normalize: bool = None, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, return_tensors: Optional[Union[str, TensorType]] = None, data_format: ChannelDimension = ChannelDimension.FIRST, **kwargs) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size)
        keep_aspect_ratio = keep_aspect_ratio if keep_aspect_ratio is not None else self.keep_aspect_ratio
        ensure_multiple_of = ensure_multiple_of if ensure_multiple_of is not None else self.ensure_multiple_of
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        # note: the parentheses matter here; without them `do_resize and size is None`
        # binds first and the check misfires
        if do_resize and (size is None or resample is None):
            raise ValueError("Size and resample must be specified if do_resize is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [
                self.resize(
                    image=image,
                    size=size,
                    keep_aspect_ratio=keep_aspect_ratio,
                    ensure_multiple_of=ensure_multiple_of,
                    resample=resample,
                )
                for image in images
            ]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)

    def post_process_semantic_segmentation(self, outputs, target_sizes: List[Tuple] = None):
        logits = outputs.logits

        # Resize logits and compute semantic segmentation maps
        if target_sizes is not None:
            if len(logits) != len(target_sizes):
                raise ValueError(
                    "Make sure that you pass in as many target sizes as the batch dimension of the logits"
                )

            if is_torch_tensor(target_sizes):
                target_sizes = target_sizes.numpy()

            semantic_segmentation = []

            for idx in range(len(logits)):
                resized_logits = torch.nn.functional.interpolate(
                    logits[idx].unsqueeze(dim=0), size=target_sizes[idx], mode="bilinear", align_corners=False
                )
                semantic_map = resized_logits[0].argmax(dim=0)
                semantic_segmentation.append(semantic_map)
        else:
            semantic_segmentation = logits.argmax(dim=1)
            semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]

        return semantic_segmentation
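# Minimal sketch of the argmax-based post-processing above on dummy logits
# (the shapes are illustrative, not tied to any real checkpoint):
if __name__ == "__main__" and is_torch_available():
    dummy_logits = torch.randn(1, 19, 12, 16)  # (batch, num_classes, height, width)
    upsampled = torch.nn.functional.interpolate(dummy_logits, size=(480, 640), mode="bilinear", align_corners=False)
    print(upsampled[0].argmax(dim=0).shape)  # torch.Size([480, 640]) map of class ids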
| 117 |
from pathlib import Path

import cv2
import numpy as np
from matplotlib import pyplot as plt


def get_rotation(img: np.ndarray, pt1: np.ndarray, pt2: np.ndarray, rows: int, cols: int) -> np.ndarray:
    """Get image rotation: warp `img` with the affine map that sends pt1 onto pt2."""
    matrix = cv2.getAffineTransform(pt1, pt2)
    return cv2.warpAffine(img, matrix, (rows, cols))


# read original image
image = cv2.imread(
    str(Path(__file__).resolve().parent.parent / "image_data" / "lena.jpg")
)
# turn image in gray scale value
gray_img = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
# get image shape
img_rows, img_cols = gray_img.shape

# set different points to rotate image
pts1 = np.array([[50, 50], [200, 50], [50, 200]], np.float32)
pts2 = np.array([[10, 100], [200, 50], [100, 250]], np.float32)
pts3 = np.array([[50, 50], [150, 50], [120, 200]], np.float32)
pts4 = np.array([[10, 100], [80, 50], [180, 250]], np.float32)

# add all rotated images in a list
images = [
    gray_img,
    get_rotation(gray_img, pts1, pts2, img_rows, img_cols),
    get_rotation(gray_img, pts2, pts3, img_rows, img_cols),
    get_rotation(gray_img, pts3, pts4, img_rows, img_cols),
]

# plot different image rotations
fig = plt.figure(1)
titles = ["Original", "Rotation 1", "Rotation 2", "Rotation 3"]
for i, image in enumerate(images):
    plt.subplot(2, 2, i + 1), plt.imshow(image, "gray")
    plt.title(titles[i])
    plt.axis("off")
plt.subplots_adjust(left=0.0, bottom=0.05, right=1.0, top=0.95)
plt.show()
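# Sanity-check sketch for the affine mapping used above: sending three points
# onto themselves must yield the identity matrix [[1, 0, 0], [0, 1, 0]].
identity_pts = np.array([[0, 0], [1, 0], [0, 1]], np.float32)
print(cv2.getAffineTransform(identity_pts, identity_pts))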
| 117 | 1 |
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DiffusionPipeline,
EulerDiscreteScheduler,
StableDiffusionXLImgaImgPipeline,
UNetaDConditionModel,
)
from diffusers.utils import floats_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class StableDiffusionXLImgaImgPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
    pipeline_class = StableDiffusionXLImgaImgPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"}
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
def __magic_name__ ( self : Optional[int] ):
"""simple docstring"""
torch.manual_seed(0 )
_A: Union[str, Any] = UNetaDConditionModel(
block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , attention_head_dim=(2, 4) , use_linear_projection=lowerCAmelCase_ , addition_embed_type='''text_time''' , addition_time_embed_dim=8 , transformer_layers_per_block=(1, 2) , projection_class_embeddings_input_dim=8_0 , cross_attention_dim=6_4 , )
_A: Tuple = EulerDiscreteScheduler(
beta_start=0.00085 , beta_end=0.012 , steps_offset=1 , beta_schedule='''scaled_linear''' , timestep_spacing='''leading''' , )
torch.manual_seed(0 )
_A: str = AutoencoderKL(
block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , sample_size=1_2_8 , )
torch.manual_seed(0 )
_A: Optional[Any] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , hidden_act='''gelu''' , projection_dim=3_2 , )
_A: Dict = CLIPTextModel(lowerCAmelCase_ )
_A: Union[str, Any] = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' , local_files_only=lowerCAmelCase_ )
_A: Dict = CLIPTextModelWithProjection(lowerCAmelCase_ )
_A: Optional[Any] = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' , local_files_only=lowerCAmelCase_ )
_A: List[str] = {
'''unet''': unet,
'''scheduler''': scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''text_encoder_2''': text_encoder_a,
'''tokenizer_2''': tokenizer_a,
# "safety_checker": None,
# "feature_extractor": None,
}
return components
def __magic_name__ ( self : Optional[Any] , lowerCAmelCase_ : Any , lowerCAmelCase_ : str=0 ):
"""simple docstring"""
_A: int = floats_tensor((1, 3, 3_2, 3_2) , rng=random.Random(lowerCAmelCase_ ) ).to(lowerCAmelCase_ )
_A: Optional[Any] = image / 2 + 0.5
if str(lowerCAmelCase_ ).startswith('''mps''' ):
_A: List[str] = torch.manual_seed(lowerCAmelCase_ )
else:
_A: List[str] = torch.Generator(device=lowerCAmelCase_ ).manual_seed(lowerCAmelCase_ )
_A: Union[str, Any] = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''image''': image,
'''generator''': generator,
'''num_inference_steps''': 2,
'''guidance_scale''': 5.0,
'''output_type''': '''numpy''',
'''strength''': 0.75,
}
return inputs
def __magic_name__ ( self : Dict ):
"""simple docstring"""
_A: Optional[int] = '''cpu''' # ensure determinism for the device-dependent torch.Generator
_A: List[Any] = self.get_dummy_components()
_A: List[str] = StableDiffusionXLImgaImgPipeline(**lowerCAmelCase_ )
_A: Optional[Any] = sd_pipe.to(lowerCAmelCase_ )
sd_pipe.set_progress_bar_config(disable=lowerCAmelCase_ )
_A: str = self.get_dummy_inputs(lowerCAmelCase_ )
_A: Dict = sd_pipe(**lowerCAmelCase_ ).images
_A: str = image[0, -3:, -3:, -1]
assert image.shape == (1, 3_2, 3_2, 3)
_A: Any = np.array([0.4656, 0.4840, 0.4439, 0.6698, 0.5574, 0.4524, 0.5799, 0.5943, 0.5165] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def __magic_name__ ( self : str ):
"""simple docstring"""
super().test_attention_slicing_forward_pass(expected_max_diff=3e-3 )
def __magic_name__ ( self : List[str] ):
"""simple docstring"""
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
def __magic_name__ ( self : Optional[Any] ):
"""simple docstring"""
pass
def __magic_name__ ( self : Union[str, Any] ):
"""simple docstring"""
_A: Tuple = self.get_dummy_components()
_A: Optional[int] = StableDiffusionXLImgaImgPipeline(**lowerCAmelCase_ )
_A: Dict = sd_pipe.to(lowerCAmelCase_ )
_A: Optional[Any] = sd_pipe.to(lowerCAmelCase_ )
sd_pipe.set_progress_bar_config(disable=lowerCAmelCase_ )
# forward without prompt embeds
_A: str = self.get_dummy_inputs(lowerCAmelCase_ )
_A: int = 3 * ['''this is a negative prompt''']
_A: Union[str, Any] = negative_prompt
_A: Dict = 3 * [inputs['''prompt''']]
_A: List[str] = sd_pipe(**lowerCAmelCase_ )
_A: Optional[Any] = output.images[0, -3:, -3:, -1]
# forward with prompt embeds
_A: Optional[Any] = self.get_dummy_inputs(lowerCAmelCase_ )
_A: List[Any] = 3 * ['''this is a negative prompt''']
_A: Any = 3 * [inputs.pop('''prompt''' )]
(
_A
): Tuple = sd_pipe.encode_prompt(lowerCAmelCase_ , negative_prompt=lowerCAmelCase_ )
_A: List[Any] = sd_pipe(
**lowerCAmelCase_ , prompt_embeds=lowerCAmelCase_ , negative_prompt_embeds=lowerCAmelCase_ , pooled_prompt_embeds=lowerCAmelCase_ , negative_pooled_prompt_embeds=lowerCAmelCase_ , )
_A: List[str] = output.images[0, -3:, -3:, -1]
# make sure that it's equal
assert np.abs(image_slice_a.flatten() - image_slice_a.flatten() ).max() < 1e-4
@slow
@require_torch_gpu
class StableDiffusionXLImgaImgPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __magic_name__ ( self : List[str] , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : str="cpu" , lowerCAmelCase_ : Tuple=torch.floataa , lowerCAmelCase_ : Dict=0 ):
"""simple docstring"""
_A: Tuple = torch.Generator(device=lowerCAmelCase_ ).manual_seed(lowerCAmelCase_ )
_A: Any = np.random.RandomState(lowerCAmelCase_ ).standard_normal((1, 4, 6_4, 6_4) )
_A: Union[str, Any] = torch.from_numpy(lowerCAmelCase_ ).to(device=lowerCAmelCase_ , dtype=lowerCAmelCase_ )
_A: Any = {
'''prompt''': '''a photograph of an astronaut riding a horse''',
'''latents''': latents,
'''generator''': generator,
'''num_inference_steps''': 3,
'''guidance_scale''': 7.5,
'''output_type''': '''numpy''',
}
return inputs
def __magic_name__ ( self : Dict ):
"""simple docstring"""
_A: Optional[int] = DiffusionPipeline.from_pretrained('''stabilityai/stable-diffusion-2-base''' )
pipe.to(lowerCAmelCase_ )
pipe.set_progress_bar_config(disable=lowerCAmelCase_ )
_A: Dict = self.get_inputs(lowerCAmelCase_ )
_A: List[Any] = pipe(**lowerCAmelCase_ ).images
_A: List[Any] = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 5_1_2, 5_1_2, 3)
_A: str = np.array([0.49493, 0.47896, 0.40798, 0.54214, 0.53212, 0.48202, 0.47656, 0.46329, 0.48506] )
assert np.abs(image_slice - expected_slice ).max() < 7e-3
| 360 |
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class BlipProcessor(ProcessorMixin):
    r"""Constructs a BLIP processor which wraps a BERT tokenizer and a BLIP image processor into a single processor."""

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "BlipImageProcessor"
    tokenizer_class = ("BertTokenizer", "BertTokenizerFast")

    def __init__(self, image_processor, tokenizer):
        tokenizer.return_token_type_ids = False
        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(self, images: ImageInput = None, text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None, add_special_tokens: bool = True, padding: Union[bool, str, PaddingStrategy] = False, truncation: Union[bool, str, TruncationStrategy] = None, max_length: Optional[int] = None, stride: int = 0, pad_to_multiple_of: Optional[int] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_token_type_ids: bool = False, return_length: bool = False, verbose: bool = True, return_tensors: Optional[Union[str, TensorType]] = None, **kwargs) -> BatchEncoding:
        if images is None and text is None:
            raise ValueError("You have to specify either images or text.")

        # Get only text
        if images is None:
            self.current_processor = self.tokenizer
            text_encoding = self.tokenizer(text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs)
            return text_encoding

        # add pixel_values
        encoding_image_processor = self.image_processor(images, return_tensors=return_tensors)

        if text is not None:
            text_encoding = self.tokenizer(text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs)
        else:
            text_encoding = None

        if text_encoding is not None:
            encoding_image_processor.update(text_encoding)

        return encoding_image_processor

    def batch_decode(self, *args, **kwargs):
        """Forwards all arguments to the tokenizer's batch_decode."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forwards all arguments to the tokenizer's decode."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
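# Illustrative usage sketch (the checkpoint name and image path are examples,
# not guaranteed by this module):
#   from PIL import Image
#   processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
#   inputs = processor(images=Image.open("photo.jpg"), text="a photography of", return_tensors="pt")
#   sorted(inputs.keys())  # pixel_values plus the merged tokenizer outputs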
| 301 | 0 |
def binary_insertion_sort(collection: list) -> list:
    """
    Sorts a list in ascending order, using binary search to find each insert position.

    >>> binary_insertion_sort([5, 2, 4, 1, 3])
    [1, 2, 3, 4, 5]
    """
    n = len(collection)
    for i in range(1, n):
        val = collection[i]
        low = 0
        high = i - 1

        while low <= high:
            mid = (low + high) // 2
            if val < collection[mid]:
                high = mid - 1
            else:
                low = mid + 1
        for j in range(i, low, -1):
            collection[j] = collection[j - 1]
        collection[low] = val
    return collection


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(binary_insertion_sort(unsorted))
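# Note: the binary search above only reduces the number of *comparisons* to
# O(n log n); the element shifts still make the sort O(n^2) in the worst case.
# Quick sanity check: binary_insertion_sort([5, 2, 4, 1, 3]) -> [1, 2, 3, 4, 5]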
| 83 |
""" XLM-ProphetNet model configuration"""

from typing import Callable, Optional, Union

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

XLM_PROPHETNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/xprophetnet-large-wiki100-cased": (
        "https://huggingface.co/microsoft/xprophetnet-large-wiki100-cased/resolve/main/config.json"
    ),
}


class XLMProphetNetConfig(PretrainedConfig):
    model_type = "xlm-prophetnet"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "num_attention_heads": "num_encoder_attention_heads",
    }

    def __init__(self, activation_dropout: Optional[float] = 0.1, activation_function: Optional[Union[str, Callable]] = "gelu", vocab_size: Optional[int] = 30522, hidden_size: Optional[int] = 1024, encoder_ffn_dim: Optional[int] = 4096, num_encoder_layers: Optional[int] = 12, num_encoder_attention_heads: Optional[int] = 16, decoder_ffn_dim: Optional[int] = 4096, num_decoder_layers: Optional[int] = 12, num_decoder_attention_heads: Optional[int] = 16, attention_dropout: Optional[float] = 0.1, dropout: Optional[float] = 0.1, max_position_embeddings: Optional[int] = 512, init_std: Optional[float] = 0.02, is_encoder_decoder: Optional[bool] = True, add_cross_attention: Optional[bool] = True, decoder_start_token_id: Optional[int] = 0, ngram: Optional[int] = 2, num_buckets: Optional[int] = 32, relative_max_distance: Optional[int] = 128, disable_ngram_loss: Optional[bool] = False, eps: Optional[float] = 0.0, use_cache: Optional[bool] = True, pad_token_id: Optional[int] = 0, bos_token_id: Optional[int] = 1, eos_token_id: Optional[int] = 2, **kwargs):
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.encoder_ffn_dim = encoder_ffn_dim
        self.num_encoder_layers = num_encoder_layers
        self.num_encoder_attention_heads = num_encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.num_decoder_layers = num_decoder_layers
        self.num_decoder_attention_heads = num_decoder_attention_heads
        self.max_position_embeddings = max_position_embeddings
        self.init_std = init_std  # Normal(0, this parameter)
        self.activation_function = activation_function

        # parameters for xlmprophetnet
        self.ngram = ngram
        self.num_buckets = num_buckets
        self.relative_max_distance = relative_max_distance
        self.disable_ngram_loss = disable_ngram_loss
        self.eps = eps

        # 3 Types of Dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.dropout = dropout

        self.use_cache = use_cache

        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, is_encoder_decoder=is_encoder_decoder, add_cross_attention=add_cross_attention, decoder_start_token_id=decoder_start_token_id, **kwargs)

    @property
    def num_hidden_layers(self) -> int:
        return self.num_encoder_layers + self.num_decoder_layers

    @num_hidden_layers.setter
    def num_hidden_layers(self, value):
        raise NotImplementedError(
            "This model does not support the setting of `num_hidden_layers`. Please set `num_encoder_layers` and"
            " `num_decoder_layers`."
        )
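# Illustrative usage sketch (the values are examples):
#   config = XLMProphetNetConfig(num_encoder_layers=6, num_decoder_layers=6)
#   config.num_hidden_layers      # -> 12, via the read-only property above
#   config.num_hidden_layers = 8  # raises NotImplementedError, via the setter above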
| 83 | 1 |
def hamming_distance(string1: str, string2: str) -> int:
    """
    Number of positions at which two equal-length strings differ.

    >>> hamming_distance("python", "python")
    0
    >>> hamming_distance("karolin", "kathrin")
    3
    """
    if len(string1) != len(string2):
        raise ValueError("String lengths must match!")
    count = 0
    for char1, char2 in zip(string1, string2):
        if char1 != char2:
            count += 1
    return count


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 343 |
from random import randint
from tempfile import TemporaryFile

import numpy as np


def _in_place_quick_sort(a, start, end):
    count = 0
    if start < end:
        pivot = randint(start, end)
        temp = a[end]
        a[end] = a[pivot]
        a[pivot] = temp

        p, count = _in_place_partition(a, start, end)
        count += _in_place_quick_sort(a, start, p - 1)
        count += _in_place_quick_sort(a, p + 1, end)
    return count


def _in_place_partition(a, start, end):
    count = 0
    pivot = randint(start, end)
    temp = a[end]
    a[end] = a[pivot]
    a[pivot] = temp

    new_pivot_index = start - 1
    for index in range(start, end):
        count += 1
        if a[index] < a[end]:  # check if current val is less than pivot value
            new_pivot_index = new_pivot_index + 1
            temp = a[new_pivot_index]
            a[new_pivot_index] = a[index]
            a[index] = temp

    temp = a[new_pivot_index + 1]
    a[new_pivot_index + 1] = a[end]
    a[end] = temp
    return new_pivot_index + 1, count


outfile = TemporaryFile()
p = 100  # 100 elements are to be sorted
mu, sigma = 0, 1  # mean and standard deviation
X = np.random.normal(mu, sigma, p)
np.save(outfile, X)
print("The array is")
print(X)

outfile.seek(0)  # using the same array
M = np.load(outfile)
r = len(M) - 1
z = _in_place_quick_sort(M, 0, r)
print(
    "No of Comparisons for 100 elements selected from a standard normal distribution"
    " is :"
)
print(z)
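# Deterministic mini-demo of the two functions above, independent of the random
# data generated at module import (comparison counts vary with the random pivots):
#   data = [3, 1, 4, 1, 5, 9, 2, 6]
#   _in_place_quick_sort(data, 0, len(data) - 1)
#   data == [1, 1, 2, 3, 4, 5, 6, 9]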
| 343 | 1 |
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
StableDiffusionSAGPipeline,
UNetaDConditionModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class StableDiffusionSAGPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
    pipeline_class = StableDiffusionSAGPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    test_cpu_offload = False

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNetaDConditionModel(block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32)
        scheduler = DDIMScheduler(beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False)
        torch.manual_seed(0)
        vae = AutoencoderKL(block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4)
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000)
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": ".",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 1.0,
            "sag_scale": 1.0,
            "output_type": "numpy",
        }
        return inputs

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)


@slow
@require_torch_gpu
class StableDiffusionSAGPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_diffusion_1(self):
        sag_pipe = StableDiffusionSAGPipeline.from_pretrained("CompVis/stable-diffusion-v1-4")
        sag_pipe = sag_pipe.to(torch_device)
        sag_pipe.set_progress_bar_config(disable=None)

        prompt = "."
        generator = torch.manual_seed(0)
        output = sag_pipe(
            [prompt], generator=generator, guidance_scale=7.5, sag_scale=1.0, num_inference_steps=20, output_type="np"
        )

        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.1568, 0.1738, 0.1695, 0.1693, 0.1507, 0.1705, 0.1547, 0.1751, 0.1949])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-2

    def test_stable_diffusion_2(self):
        sag_pipe = StableDiffusionSAGPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base")
        sag_pipe = sag_pipe.to(torch_device)
        sag_pipe.set_progress_bar_config(disable=None)

        prompt = "."
        generator = torch.manual_seed(0)
        output = sag_pipe(
            [prompt], generator=generator, guidance_scale=7.5, sag_scale=1.0, num_inference_steps=20, output_type="np"
        )

        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3459, 0.2876, 0.2537, 0.3002, 0.2671, 0.2160, 0.3026, 0.2262, 0.2371])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-2

    def test_stable_diffusion_2_non_square(self):
        sag_pipe = StableDiffusionSAGPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base")
        sag_pipe = sag_pipe.to(torch_device)
        sag_pipe.set_progress_bar_config(disable=None)

        prompt = "."
        generator = torch.manual_seed(0)
        output = sag_pipe(
            [prompt],
            width=768,
            height=512,
            generator=generator,
            guidance_scale=7.5,
            sag_scale=1.0,
            num_inference_steps=20,
            output_type="np",
        )

        image = output.images

        assert image.shape == (1, 512, 768, 3)
| 46 |
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = {
"A": ".-", "B": "-...", "C": "-.-.", "D": "-..", "E": ".", "F": "..-.", "G": "--.",
"H": "....", "I": "..", "J": ".---", "K": "-.-", "L": ".-..", "M": "--", "N": "-.",
"O": "---", "P": ".--.", "Q": "--.-", "R": ".-.", "S": "...", "T": "-", "U": "..-",
"V": "...-", "W": ".--", "X": "-..-", "Y": "-.--", "Z": "--..", "1": ".----",
"2": "..---", "3": "...--", "4": "....-", "5": ".....", "6": "-....", "7": "--...",
"8": "---..", "9": "----.", "0": "-----", "&": ".-...", "@": ".--.-.",
":": "---...", ",": "--..--", ".": ".-.-.-", "'": ".----.", "\"": ".-..-.",
"?": "..--..", "/": "-..-.", "=": "-...-", "+": ".-.-.", "-": "-....-",
"(": "-.--.", ")": "-.--.-", "!": "-.-.--", " ": "/"
} # Exclamation mark is not in ITU-R recommendation
# fmt: on
SCREAMING_SNAKE_CASE__ = {value: key for key, value in MORSE_CODE_DICT.items()}
def UpperCAmelCase__ ( SCREAMING_SNAKE_CASE : str ):
'''simple docstring'''
return " ".join(MORSE_CODE_DICT[char] for char in message.upper() )
def UpperCAmelCase__ ( SCREAMING_SNAKE_CASE : str ):
'''simple docstring'''
return "".join(REVERSE_DICT[char] for char in message.split() )
def UpperCAmelCase__ ( ):
'''simple docstring'''
lowerCAmelCase = """Morse code here!"""
print(SCREAMING_SNAKE_CASE )
lowerCAmelCase = encrypt(SCREAMING_SNAKE_CASE )
print(SCREAMING_SNAKE_CASE )
lowerCAmelCase = decrypt(SCREAMING_SNAKE_CASE )
print(SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
main()
| 46 | 1 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from diffusers import (
DDIMScheduler,
KandinskyVaaControlnetPipeline,
KandinskyVaaPriorPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyVaaControlnetPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyVaaControlnetPipeline
    params = ["image_embeds", "negative_image_embeds", "hint"]
    batch_params = ["image_embeds", "negative_image_embeds", "hint"]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "latents",
        "guidance_scale",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False
@property
def __UpperCAmelCase ( self ) -> Optional[Any]:
return 32
@property
def __UpperCAmelCase ( self ) -> Any:
return 32
@property
def __UpperCAmelCase ( self ) -> Optional[int]:
return self.time_input_dim
@property
def __UpperCAmelCase ( self ) -> Any:
return self.time_input_dim * 4
@property
def __UpperCAmelCase ( self ) -> int:
return 1_00
@property
def __UpperCAmelCase ( self ) -> Tuple:
torch.manual_seed(0 )
_a = {
'in_channels': 8,
# Out channels is double in channels because predicts mean and variance
'out_channels': 8,
'addition_embed_type': 'image_hint',
'down_block_types': ('ResnetDownsampleBlock2D', 'SimpleCrossAttnDownBlock2D'),
'up_block_types': ('SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'),
'mid_block_type': 'UNetMidBlock2DSimpleCrossAttn',
'block_out_channels': (self.block_out_channels_a, self.block_out_channels_a * 2),
'layers_per_block': 1,
'encoder_hid_dim': self.text_embedder_hidden_size,
'encoder_hid_dim_type': 'image_proj',
'cross_attention_dim': self.cross_attention_dim,
'attention_head_dim': 4,
'resnet_time_scale_shift': 'scale_shift',
'class_embed_type': None,
}
_a = UNetaDConditionModel(**__magic_name__ )
return model
@property
def __UpperCAmelCase ( self ) -> int:
return {
"block_out_channels": [32, 32, 64, 64],
"down_block_types": [
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"AttnDownEncoderBlock2D",
],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
"vq_embed_dim": 4,
}
@property
def __UpperCAmelCase ( self ) -> Optional[Any]:
torch.manual_seed(0 )
_a = VQModel(**self.dummy_movq_kwargs )
return model
def __UpperCAmelCase ( self ) -> str:
_a = self.dummy_unet
_a = self.dummy_movq
_a = DDIMScheduler(
num_train_timesteps=10_00 , beta_schedule='linear' , beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , clip_sample=__magic_name__ , set_alpha_to_one=__magic_name__ , steps_offset=1 , prediction_type='epsilon' , thresholding=__magic_name__ , )
_a = {
'unet': unet,
'scheduler': scheduler,
'movq': movq,
}
return components
def __UpperCAmelCase ( self , __magic_name__ , __magic_name__=0 ) -> Dict:
_a = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(__magic_name__ ) ).to(__magic_name__ )
_a = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
__magic_name__ )
# create hint
_a = floats_tensor((1, 3, 64, 64) , rng=random.Random(__magic_name__ ) ).to(__magic_name__ )
if str(__magic_name__ ).startswith('mps' ):
_a = torch.manual_seed(__magic_name__ )
else:
_a = torch.Generator(device=__magic_name__ ).manual_seed(__magic_name__ )
_a = {
'image_embeds': image_embeds,
'negative_image_embeds': negative_image_embeds,
'hint': hint,
'generator': generator,
'height': 64,
'width': 64,
'guidance_scale': 4.0,
'num_inference_steps': 2,
'output_type': 'np',
}
return inputs
def __UpperCAmelCase ( self ) -> str:
_a = 'cpu'
_a = self.get_dummy_components()
_a = self.pipeline_class(**__magic_name__ )
_a = pipe.to(__magic_name__ )
pipe.set_progress_bar_config(disable=__magic_name__ )
_a = pipe(**self.get_dummy_inputs(__magic_name__ ) )
_a = output.images
_a = pipe(
**self.get_dummy_inputs(__magic_name__ ) , return_dict=__magic_name__ , )[0]
_a = image[0, -3:, -3:, -1]
_a = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
_a = np.array(
[0.6_9_5_9_8_2_6, 0.8_6_8_2_7_9, 0.7_5_5_8_0_9_2, 0.6_8_7_6_9_4_6_7, 0.8_5_8_0_5_8_0_4, 0.6_5_9_7_7_4_9_6, 0.4_4_8_8_5_3_0_2, 0.5_9_5_9_1_1_1, 0.4_2_5_1_5_9_5] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
), f' expected_slice {expected_slice}, but got {image_slice.flatten()}'
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
), f' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'
@slow
@require_torch_gpu
class KandinskyVaaControlnetPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __UpperCAmelCase ( self ) -> List[str]:
_a = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/kandinskyv22/kandinskyv22_controlnet_robotcat_fp16.npy' )
_a = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/kandinskyv22/hint_image_cat.png' )
_a = torch.from_numpy(np.array(__magic_name__ ) ).float() / 2_5_5.0
_a = hint.permute(2 , 0 , 1 ).unsqueeze(0 )
_a = KandinskyVaaPriorPipeline.from_pretrained(
'kandinsky-community/kandinsky-2-2-prior' , torch_dtype=torch.floataa )
pipe_prior.to(__magic_name__ )
_a = KandinskyVaaControlnetPipeline.from_pretrained(
'kandinsky-community/kandinsky-2-2-controlnet-depth' , torch_dtype=torch.floataa )
_a = pipeline.to(__magic_name__ )
pipeline.set_progress_bar_config(disable=__magic_name__ )
_a = 'A robot, 4k photo'
_a = torch.Generator(device='cuda' ).manual_seed(0 )
_a = pipe_prior(
__magic_name__ , generator=__magic_name__ , num_inference_steps=5 , negative_prompt='' , ).to_tuple()
_a = torch.Generator(device='cuda' ).manual_seed(0 )
_a = pipeline(
image_embeds=__magic_name__ , negative_image_embeds=__magic_name__ , hint=__magic_name__ , generator=__magic_name__ , num_inference_steps=1_00 , output_type='np' , )
_a = output.images[0]
assert image.shape == (5_12, 5_12, 3)
assert_mean_pixel_difference(__magic_name__ , __magic_name__ )
| 356 |
def climb_stairs(number_of_steps: int) -> int:
    """
    Distinct ways to climb a staircase taking 1 or 2 steps at a time.

    >>> climb_stairs(3)
    3
    >>> climb_stairs(1)
    1
    """
    assert (
        isinstance(number_of_steps, int) and number_of_steps > 0
    ), f"number_of_steps needs to be a positive integer, your input {number_of_steps}"
    if number_of_steps == 1:
        return 1
    previous, current = 1, 1
    for _ in range(number_of_steps - 1):
        current, previous = current + previous, current
    return current
if __name__ == "__main__":
import doctest
doctest.testmod()
| 104 | 0 |
from __future__ import annotations
def get_valid_pos(position: tuple[int, int], n: int) -> list[tuple[int, int]]:
    """
    Find all the valid positions a knight can move to from the current position.

    >>> get_valid_pos((1, 3), 4)
    [(2, 1), (0, 1), (3, 2)]
    """
    y, x = position
    positions = [
        (y + 1, x + 2),
        (y - 1, x + 2),
        (y + 1, x - 2),
        (y - 1, x - 2),
        (y + 2, x + 1),
        (y + 2, x - 1),
        (y - 2, x + 1),
        (y - 2, x - 1),
    ]
    permissible_positions = []

    for inner_position in positions:
        y_test, x_test = inner_position
        if 0 <= y_test < n and 0 <= x_test < n:
            permissible_positions.append(inner_position)

    return permissible_positions


def is_complete(board: list[list[int]]) -> bool:
    """
    Check if the board (matrix) has been completely filled with non-zero values.

    >>> is_complete([[1]])
    True
    >>> is_complete([[1, 2], [3, 0]])
    False
    """
    return not any(elem == 0 for row in board for elem in row)


def open_knight_tour_helper(board: list[list[int]], pos: tuple[int, int], curr: int) -> bool:
    """Helper function to solve the knight tour problem by backtracking."""
    if is_complete(board):
        return True

    for position in get_valid_pos(pos, len(board)):
        y, x = position

        if board[y][x] == 0:
            board[y][x] = curr + 1
            if open_knight_tour_helper(board, position, curr + 1):
                return True
            board[y][x] = 0

    return False


def open_knight_tour(n: int) -> list[list[int]]:
    """
    Find a solution for the knight tour problem for a board of size n.
    Raises ValueError if the tour cannot be performed for the given size.

    >>> open_knight_tour(1)
    [[1]]
    """
    board = [[0 for i in range(n)] for j in range(n)]

    for i in range(n):
        for j in range(n):
            board[i][j] = 1
            if open_knight_tour_helper(board, (i, j), 1):
                return board
            board[i][j] = 0

    msg = f"Open Knight Tour cannot be performed on a board of size {n}"
    raise ValueError(msg)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 39 |
"""simple docstring"""
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import numpy as np
from utils_multiple_choice import MultipleChoiceDataset, Split, processors
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
logger = logging.getLogger(__name__)
def simple_accuracy(preds, labels):
    return (preds == labels).mean()


@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
    """

    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )


@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    task_name: str = field(metadata={"help": "The name of the task to train on: " + ", ".join(processors.keys())})
    data_dir: str = field(metadata={"help": "Should contain the data files for the task."})
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(default=False, metadata={"help": "Overwrite the cached training and evaluation sets"})
def _snake_case ( ):
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
lowerCAmelCase : str = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
lowerCAmelCase, lowerCAmelCase, lowerCAmelCase : Optional[int] = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f'''Output directory ({training_args.output_dir}) already exists and is not empty. Use'''
''' --overwrite_output_dir to overcome.''' )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
'''Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s''' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fpaa , )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info('''Training/evaluation parameters %s''' , _snake_case )
# Set seed
set_seed(training_args.seed )
try:
lowerCAmelCase : Tuple = processors[data_args.task_name]()
lowerCAmelCase : Any = processor.get_labels()
lowerCAmelCase : Union[str, Any] = len(_snake_case )
except KeyError:
raise ValueError('''Task not found: %s''' % (data_args.task_name) )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
lowerCAmelCase : List[Any] = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=_snake_case , finetuning_task=data_args.task_name , cache_dir=model_args.cache_dir , )
lowerCAmelCase : Optional[Any] = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
lowerCAmelCase : List[str] = AutoModelForMultipleChoice.from_pretrained(
model_args.model_name_or_path , from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) , config=_snake_case , cache_dir=model_args.cache_dir , )
# Get datasets
lowerCAmelCase : Dict = (
MultipleChoiceDataset(
data_dir=data_args.data_dir , tokenizer=_snake_case , task=data_args.task_name , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.train , )
if training_args.do_train
else None
)
lowerCAmelCase : Any = (
MultipleChoiceDataset(
data_dir=data_args.data_dir , tokenizer=_snake_case , task=data_args.task_name , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.dev , )
if training_args.do_eval
else None
)
def compute_metrics(_snake_case : EvalPrediction ) -> Dict:
lowerCAmelCase : int = np.argmax(p.predictions , axis=1 )
return {"acc": simple_accuracy(_snake_case , p.label_ids )}
# Data collator
lowerCAmelCase : List[Any] = DataCollatorWithPadding(_snake_case , pad_to_multiple_of=8 ) if training_args.fpaa else None
# Initialize our Trainer
lowerCAmelCase : Union[str, Any] = Trainer(
model=_snake_case , args=_snake_case , train_dataset=_snake_case , eval_dataset=_snake_case , compute_metrics=_snake_case , data_collator=_snake_case , )
# Training
if training_args.do_train:
trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_master():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
    results = {}
    if training_args.do_eval:
        logger.info('''*** Evaluate ***''' )
        result = trainer.evaluate()
        output_eval_file = os.path.join(training_args.output_dir , '''eval_results.txt''' )
        if trainer.is_world_master():
            with open(output_eval_file , '''w''' ) as writer:
                logger.info('''***** Eval results *****''' )
                for key, value in result.items():
                    logger.info(''' %s = %s''' , key , value )
                    writer.write('''%s = %s\n''' % (key, value) )
                results.update(result )
    return results
def _mp_fn(index ):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
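# Illustrative invocation (my own sketch; the script name, paths and flag values are
# assumptions, not taken from this file):
#   python run_multiple_choice.py --task_name swag --model_name_or_path bert-base-uncased \
#       --data_dir ./data/swag --output_dir ./out --do_train --do_eval --max_seq_length 80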
| 60 | 0 |
def solution( limit : int = 1_000_000 ) -> int:
    """Project Euler 72: count the reduced proper fractions n/d with d <= limit,
    i.e. the sum of Euler's totient phi(n) for 2 <= n <= limit."""
    primes = set(range(3 , limit , 2 ) )
    primes.add(2 )
    for p in range(3 , limit , 2 ):
        if p not in primes:
            continue
        primes.difference_update(set(range(p * p , limit , p ) ) )
    phi = [float(n ) for n in range(limit + 1 )]
    for p in primes:
        for n in range(p , limit + 1 , p ):
            phi[n] *= 1 - 1 / p
    return int(sum(phi[2:] ) )
if __name__ == "__main__":
print(f'''{solution() = }''')
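# Sanity check (my own example, not from the original file): for limit = 8 the totient sum
# phi(2) + ... + phi(8) = 1 + 2 + 2 + 4 + 2 + 6 + 4 = 21, matching the 21 reduced proper
# fractions with denominator <= 8, so solution(8) should return 21.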
| 350 |
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class ViltProcessor ( ProcessorMixin ):
"""simple docstring"""
UpperCAmelCase__ : List[str] = ["image_processor", "tokenizer"]
UpperCAmelCase__ : str = "ViltImageProcessor"
UpperCAmelCase__ : Union[str, Any] = ("BertTokenizer", "BertTokenizerFast")
    def __init__( self , image_processor=None , tokenizer=None , **kwargs ) -> Any:
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                '''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
                ''' instead.''' , FutureWarning , )
            feature_extractor = kwargs.pop('''feature_extractor''' )
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError('''You need to specify an `image_processor`.''' )
        if tokenizer is None:
            raise ValueError('''You need to specify a `tokenizer`.''' )
        super().__init__(image_processor , tokenizer )
        self.current_processor = self.image_processor
    def __call__( self , images , text = None , add_special_tokens = True , padding = False , truncation = None , max_length = None , stride = 0 , pad_to_multiple_of = None , return_token_type_ids = None , return_attention_mask = None , return_overflowing_tokens = False , return_special_tokens_mask = False , return_offsets_mapping = False , return_length = False , verbose = True , return_tensors = None , **kwargs , ) -> BatchEncoding:
        encoding = self.tokenizer(
            text=text , add_special_tokens=add_special_tokens , padding=padding , truncation=truncation , max_length=max_length , stride=stride , pad_to_multiple_of=pad_to_multiple_of , return_token_type_ids=return_token_type_ids , return_attention_mask=return_attention_mask , return_overflowing_tokens=return_overflowing_tokens , return_special_tokens_mask=return_special_tokens_mask , return_offsets_mapping=return_offsets_mapping , return_length=return_length , verbose=verbose , return_tensors=return_tensors , **kwargs , )
        # add pixel_values + pixel_mask
        encoding_image_processor = self.image_processor(images , return_tensors=return_tensors )
        encoding.update(encoding_image_processor )
        return encoding
    def batch_decode( self , *args , **kwargs ) -> Optional[Any]:
        return self.tokenizer.batch_decode(*args , **kwargs )
    def decode( self , *args , **kwargs ) -> str:
        return self.tokenizer.decode(*args , **kwargs )
    @property
    def model_input_names( self ) -> Optional[int]:
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
    @property
    def feature_extractor_class( self ) -> Optional[Any]:
        warnings.warn(
            '''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''' , FutureWarning , )
        return self.image_processor_class
    @property
    def feature_extractor( self ) -> Any:
        warnings.warn(
            '''`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.''' , FutureWarning , )
        return self.image_processor
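# Illustrative usage (my own sketch; the checkpoint id is an assumption, not from this file):
#   processor = ViltProcessor.from_pretrained("dandelin/vilt-b32-finetuned-vqa")
#   encoding = processor(images=image, text="How many cats are there?", return_tensors="pt")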
| 15 | 0 |
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"""configuration_informer""": [
"""INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""InformerConfig""",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_informer"""] = [
"""INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""InformerForPrediction""",
"""InformerModel""",
"""InformerPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_informer import INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, InformerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_informer import (
INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
InformerForPrediction,
InformerModel,
InformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
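# Behavior note (my description, not from the original file): at runtime this module becomes a
# `_LazyModule`, so each attribute listed in `_import_structure` is only imported on first
# access; the `is_torch_available()` guard decides whether the model classes are listed at all.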
| 307 |
"""simple docstring"""
import math
def is_prime (number ):
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
    # All prime numbers greater than 3 are of the form 6k +/- 1
    for i in range(5 , int(math.sqrt(number ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
def solution (ratio = 0.1 ):
    j = 3
    primes = 3
    while primes / (2 * j - 1) >= ratio:
        for i in range(j * j + j + 1 , (j + 2) * (j + 2) , j + 1 ):
            primes += is_prime(i )
        j += 2
    return j
if __name__ == "__main__":
import doctest
doctest.testmod()
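# Context (my note, not from the original file): this solves Project Euler 58 -- `solution()`
# returns the side length of the square spiral at which the prime ratio along both diagonals
# first drops below `ratio`; for the default 0.1 the published answer is 26241.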
| 301 | 0 |
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextModel,
CLIPTokenizer,
WhisperForConditionalGeneration,
WhisperProcessor,
)
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.utils import logging
logger = logging.get_logger(__name__) # pylint: disable=invalid-name
class SpeechToImagePipeline ( DiffusionPipeline ):
"""simple docstring"""
    def __init__( self , speech_model , speech_processor , vae , text_encoder , tokenizer , unet , scheduler , safety_checker , feature_extractor , ):
super().__init__()
if safety_checker is None:
logger.warning(
F'''You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure'''
''' that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered'''
''' results in services or applications open to the public. Both the diffusers team and Hugging Face'''
''' strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling'''
''' it only for use-cases that involve analyzing network behavior or auditing its results. For more'''
''' information, please have a look at https://github.com/huggingface/diffusers/pull/254 .''' )
        self.register_modules(
            speech_model=speech_model , speech_processor=speech_processor , vae=vae , text_encoder=text_encoder , tokenizer=tokenizer , unet=unet , scheduler=scheduler , feature_extractor=feature_extractor , )
def a_ ( self , __snake_case = "auto" ):
if slice_size == "auto":
snake_case = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(__snake_case )
def a_ ( self ):
self.enable_attention_slicing(__snake_case )
@torch.no_grad()
    def __call__( self , audio , sampling_rate=1_6_0_0_0 , height = 5_1_2 , width = 5_1_2 , num_inference_steps = 5_0 , guidance_scale = 7.5 , negative_prompt = None , num_images_per_prompt = 1 , eta = 0.0 , generator = None , latents = None , output_type = "pil" , return_dict = True , callback = None , callback_steps = 1 , **kwargs , ):
        inputs = self.speech_processor.feature_extractor(
            audio , return_tensors='''pt''' , sampling_rate=sampling_rate ).input_features.to(self.device )
        predicted_ids = self.speech_model.generate(inputs , max_length=4_8_0_0_0_0 )
        prompt = self.speech_processor.tokenizer.batch_decode(predicted_ids , skip_special_tokens=True , normalize=True )[
            0
        ]
        if isinstance(prompt , str ):
            batch_size = 1
        elif isinstance(prompt , list ):
            batch_size = len(prompt )
        else:
            raise ValueError(F'''`prompt` has to be of type `str` or `list` but is {type(prompt )}''' )
        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(F'''`height` and `width` have to be divisible by 8 but are {height} and {width}.''' )
        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(callback_steps , int ) or callback_steps <= 0)
        ):
            raise ValueError(
                F'''`callback_steps` has to be a positive integer but is {callback_steps} of type'''
                F''' {type(callback_steps )}.''' )
        # get prompt text embeddings
        text_inputs = self.tokenizer(
            prompt , padding='''max_length''' , max_length=self.tokenizer.model_max_length , return_tensors='''pt''' , )
        text_input_ids = text_inputs.input_ids
        if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
            removed_text = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
            logger.warning(
                '''The following part of your input was truncated because CLIP can only handle sequences up to'''
                F''' {self.tokenizer.model_max_length} tokens: {removed_text}''' )
            text_input_ids = text_input_ids[:, : self.tokenizer.model_max_length]
        text_embeddings = self.text_encoder(text_input_ids.to(self.device ) )[0]
        # duplicate text embeddings for each generation per prompt, using mps friendly method
        bs_embed , seq_len , _ = text_embeddings.shape
        text_embeddings = text_embeddings.repeat(1 , num_images_per_prompt , 1 )
        text_embeddings = text_embeddings.view(bs_embed * num_images_per_prompt , seq_len , -1 )
        # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
        # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
        # corresponds to doing no classifier free guidance.
        do_classifier_free_guidance = guidance_scale > 1.0
        # get unconditional embeddings for classifier free guidance
        if do_classifier_free_guidance:
            uncond_tokens: List[str]
            if negative_prompt is None:
                uncond_tokens = [''''''] * batch_size
            elif type(prompt ) is not type(negative_prompt ):
                raise TypeError(
                    F'''`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt )} !='''
                    F''' {type(prompt )}.''' )
            elif isinstance(negative_prompt , str ):
                uncond_tokens = [negative_prompt]
            elif batch_size != len(negative_prompt ):
                raise ValueError(
                    F'''`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt )}, but `prompt`:'''
                    F''' {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches'''
                    ''' the batch size of `prompt`.''' )
            else:
                uncond_tokens = negative_prompt
            max_length = text_input_ids.shape[-1]
            uncond_input = self.tokenizer(
                uncond_tokens , padding='''max_length''' , max_length=max_length , truncation=True , return_tensors='''pt''' , )
            uncond_embeddings = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
            # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
            seq_len = uncond_embeddings.shape[1]
            uncond_embeddings = uncond_embeddings.repeat(1 , num_images_per_prompt , 1 )
            uncond_embeddings = uncond_embeddings.view(batch_size * num_images_per_prompt , seq_len , -1 )
            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            text_embeddings = torch.cat([uncond_embeddings, text_embeddings] )
        # get the initial random noise unless the user supplied it
        # Unlike in other pipelines, latents need to be generated in the target device
        # for 1-to-1 results reproducibility with the CompVis implementation.
        # However this currently doesn't work in `mps`.
        latents_shape = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
        latents_dtype = text_embeddings.dtype
        if latents is None:
            if self.device.type == "mps":
                # randn does not exist on mps
                latents = torch.randn(latents_shape , generator=generator , device='''cpu''' , dtype=latents_dtype ).to(
                    self.device )
            else:
                latents = torch.randn(latents_shape , generator=generator , device=self.device , dtype=latents_dtype )
        else:
            if latents.shape != latents_shape:
                raise ValueError(F'''Unexpected latents shape, got {latents.shape}, expected {latents_shape}''' )
            latents = latents.to(self.device )
        # set timesteps
        self.scheduler.set_timesteps(num_inference_steps )
        # Some schedulers like PNDM have timesteps as arrays
        # It's more optimized to move all timesteps to correct device beforehand
        timesteps_tensor = self.scheduler.timesteps.to(self.device )
        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma
        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]
        accepts_eta = '''eta''' in set(inspect.signature(self.scheduler.step ).parameters.keys() )
        extra_step_kwargs = {}
        if accepts_eta:
            extra_step_kwargs['''eta'''] = eta
        for i, t in enumerate(self.progress_bar(timesteps_tensor ) ):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
            latent_model_input = self.scheduler.scale_model_input(latent_model_input , t )
            # predict the noise residual
            noise_pred = self.unet(latent_model_input , t , encoder_hidden_states=text_embeddings ).sample
            # perform guidance
            if do_classifier_free_guidance:
                noise_pred_uncond , noise_pred_text = noise_pred.chunk(2 )
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred , t , latents , **extra_step_kwargs ).prev_sample
            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                callback(i , t , latents )
        latents = 1 / 0.1_8215 * latents
        image = self.vae.decode(latents ).sample
        image = (image / 2 + 0.5).clamp(0 , 1 )
        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        image = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image )
        if not return_dict:
            return image
        return StableDiffusionPipelineOutput(images=image , nsfw_content_detected=None )
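# Illustrative wiring (my own sketch; the Whisper checkpoint id and the component variables are
# assumptions, not from this file):
#   pipe = SpeechToImagePipeline(
#       speech_model=WhisperForConditionalGeneration.from_pretrained("openai/whisper-small"),
#       speech_processor=WhisperProcessor.from_pretrained("openai/whisper-small"),
#       vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, unet=unet,
#       scheduler=scheduler, safety_checker=None, feature_extractor=feature_extractor)
#   image = pipe(audio_array, sampling_rate=16_000).images[0]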
| 213 |
import unittest
from transformers import GPTNeoXJapaneseConfig, is_torch_available
from transformers.models.gpt_neox_japanese.tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import GPTNeoXJapaneseForCausalLM, GPTNeoXJapaneseModel
class GPTNeoXJapaneseModelTester :
"""simple docstring"""
    def __init__( self , parent , batch_size=1_3 , seq_length=7 , is_training=True , use_input_mask=True , use_token_type_ids=True , use_labels=True , vocab_size=9_9 , hidden_size=3_2 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_multiple_size=4 , hidden_act="gelu" , hidden_dropout=0.0 , attention_dropout=0.1 , weight_tying=True , max_position_embeddings=5_1_2 , type_vocab_size=1_6 , type_sequence_label_size=2 , initializer_range=0.02 , num_labels=3 , num_choices=4 , scope=None , ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_multiple_size = intermediate_multiple_size
        self.hidden_act = hidden_act
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.weight_tying = weight_tying
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs( self ):
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        token_labels = None
        if self.use_labels:
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
        config = self.get_config()
        return config, input_ids, input_mask, token_labels
    def get_config( self ):
        return GPTNeoXJapaneseConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_multiple_size=self.intermediate_multiple_size , hidden_act=self.hidden_act , hidden_dropout=self.hidden_dropout , attention_dropout=self.attention_dropout , weight_tying=self.weight_tying , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=False , initializer_range=self.initializer_range , )
    def prepare_config_and_inputs_for_decoder( self ):
        config , input_ids , input_mask , token_labels = self.prepare_config_and_inputs()
        config.is_decoder = True
        return config, input_ids, input_mask, token_labels
    def create_and_check_model( self , config , input_ids , input_mask ):
        model = GPTNeoXJapaneseModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask )
        result = model(input_ids )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_model_as_decoder( self , config , input_ids , input_mask ):
        config.add_cross_attention = True
        model = GPTNeoXJapaneseModel(config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_for_causal_lm( self , config , input_ids , input_mask , token_labels ):
        model = GPTNeoXJapaneseForCausalLM(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , labels=token_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    def create_and_check_decoder_model_past_large_inputs( self , config , input_ids , input_mask ):
        config.is_decoder = True
        model = GPTNeoXJapaneseForCausalLM(config=config )
        model.to(torch_device )
        model.eval()
        # first forward pass
        outputs = model(input_ids , attention_mask=input_mask , use_cache=True )
        past_key_values = outputs.past_key_values
        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3) , config.vocab_size )
        next_mask = ids_tensor((self.batch_size, 3) , vocab_size=2 )
        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens] , dim=-1 )
        next_attention_mask = torch.cat([input_mask, next_mask] , dim=-1 )
        output_from_no_past = model(next_input_ids , attention_mask=next_attention_mask , output_hidden_states=True )
        output_from_no_past = output_from_no_past['''hidden_states'''][0]
        output_from_past = model(
            next_input_ids , attention_mask=next_attention_mask , past_key_values=past_key_values , output_hidden_states=True , )['''hidden_states'''][0]
        # select random slice
        random_slice_idx = ids_tensor((1,) , output_from_past.shape[-1] ).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()
        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice , output_from_no_past_slice , atol=1E-3 ) )
    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        config , input_ids , input_mask , token_labels = config_and_inputs
        inputs_dict = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
        return config, inputs_dict
@require_torch
class GPTNeoXJapaneseModelTest ( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    """simple docstring"""
    all_model_classes = (GPTNeoXJapaneseModel, GPTNeoXJapaneseForCausalLM) if is_torch_available() else ()
    all_generative_model_classes = (GPTNeoXJapaneseForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {'feature-extraction': GPTNeoXJapaneseModel, 'text-generation': GPTNeoXJapaneseForCausalLM}
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_missing_keys = False
    test_model_parallel = False
    test_head_masking = False
    def setUp( self ):
        self.model_tester = GPTNeoXJapaneseModelTester(self )
        self.config_tester = ConfigTester(self , config_class=GPTNeoXJapaneseConfig , hidden_size=3_7 )
    def test_config( self ):
        self.config_tester.run_common_tests()
    def test_model( self ):
        config , input_ids , input_mask , token_labels = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(config , input_ids , input_mask )
    def test_model_as_decoder( self ):
        config , input_ids , input_mask , token_labels = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(config , input_ids , input_mask )
    def test_model_as_decoder_with_default_input_mask( self ):
        # This regression test was failing with PyTorch < 1.3
        config , input_ids , input_mask , token_labels = self.model_tester.prepare_config_and_inputs_for_decoder()
        input_mask = None
        self.model_tester.create_and_check_model_as_decoder(config , input_ids , input_mask )
    def test_decoder_model_past_large_inputs( self ):
        config , input_ids , input_mask , token_labels = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_decoder_model_past_large_inputs(config , input_ids , input_mask )
    def test_model_for_causal_lm( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_causal_lm(*config_and_inputs )
@slow
    def test_generation( self ):
        model_id = '''abeja/gpt-neox-japanese-2.7b'''
        prompts = ['''データサイエンティストとは、''', '''100年後に必要とされる会社は、''', '''フルリモートの環境で働くために必要なことは、''', '''国境の長いトンネルを抜けると''', '''美味しい日本食といえば、''']
        EXPECTED_OUTPUTS = [
'''データサイエンティストとは、データを分析し、ビジネスに役立つ知見を導き出す専門家のことです。''',
'''100年後に必要とされる会社は、「人」が中心の会社です。''',
'''フルリモートの環境で働くために必要なことは、「自分の時間をコントロールする」ことです。''',
'''国境の長いトンネルを抜けると、そこは雪国だった。''',
'''美味しい日本食といえば、やっぱりお寿司ですよね。''',
]
        tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained(model_id )
        model = GPTNeoXJapaneseForCausalLM.from_pretrained(model_id )
        predicted_outputs = []
        for prompt in prompts:
            input_ids = tokenizer(prompt , return_tensors='''pt''' ).input_ids
            generated_ids = model.generate(input_ids , max_length=5_0 )
            generated_string = tokenizer.batch_decode(generated_ids , skip_special_tokens=True )
            predicted_outputs += generated_string
        self.assertListEqual(predicted_outputs , EXPECTED_OUTPUTS )
| 213 | 1 |
def hamming_distance( string1 , string2 ) -> int:
    '''Return the Hamming distance between two equal-length strings.'''
    if len(string1 ) != len(string2 ):
        raise ValueError("""String lengths must match!""" )
    count = 0
    for char1, char2 in zip(string1 , string2 ):
        if char1 != char2:
            count += 1
    return count
if __name__ == "__main__":
import doctest
doctest.testmod()
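# Worked example (mine, not from the original module): hamming_distance("karolin", "kathrin")
# returns 3, since the strings differ at indices 2, 3 and 4.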
| 343 |
import inspect
import unittest
from transformers import BitConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import BitBackbone, BitForImageClassification, BitImageProcessor, BitModel
from transformers.models.bit.modeling_bit import BIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class BitModelTester :
    def __init__( self , parent , batch_size=3 , image_size=32 , num_channels=3 , embeddings_size=10 , hidden_sizes=[8, 16, 32, 64] , depths=[1, 1, 2, 1] , is_training=True , use_labels=True , hidden_act="relu" , num_labels=3 , scope=None , out_features=["stage2", "stage3", "stage4"] , out_indices=[2, 3, 4] , num_groups=1 , ):
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(depths )
        self.out_features = out_features
        self.out_indices = out_indices
        self.num_groups = num_groups
    def prepare_config_and_inputs( self ):
        """simple docstring"""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.num_labels )
        config = self.get_config()
        return config, pixel_values, labels
    def get_config( self ):
"""simple docstring"""
return BitConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , out_features=self.out_features , out_indices=self.out_indices , num_groups=self.num_groups , )
    def create_and_check_model( self , config , pixel_values , labels ):
        """simple docstring"""
        model = BitModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
    def create_and_check_for_image_classification( self , config , pixel_values , labels ):
        """simple docstring"""
        config.num_labels = self.num_labels
        model = BitForImageClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values , labels=labels )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def create_and_check_backbone( self , config , pixel_values , labels ):
        """simple docstring"""
        model = BitBackbone(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] )
# verify backbone works with out_features=None
        config.out_features = None
        model = BitBackbone(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
    def prepare_config_and_inputs_for_common( self ):
        """simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        config , pixel_values , labels = config_and_inputs
        inputs_dict = {"""pixel_values""": pixel_values}
        return config, inputs_dict
@require_torch
class BitModelTest ( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (BitModel, BitForImageClassification, BitBackbone) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"""feature-extraction""": BitModel, """image-classification""": BitForImageClassification}
        if is_torch_available()
        else {}
    )
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
    def setUp( self ):
        """simple docstring"""
        self.model_tester = BitModelTester(self )
        self.config_tester = ConfigTester(self , config_class=BitConfig , has_text_modality=False )
    def test_config( self ):
"""simple docstring"""
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
    def create_and_test_config_common_properties( self ):
        """simple docstring"""
        return
    @unittest.skip(reason="""Bit does not output attentions""" )
    def test_attention_outputs( self ):
        """simple docstring"""
        pass
    @unittest.skip(reason="""Bit does not use inputs_embeds""" )
    def test_inputs_embeds( self ):
        """simple docstring"""
        pass
    @unittest.skip(reason="""Bit does not support input and output embeddings""" )
    def test_model_common_attributes( self ):
        """simple docstring"""
        pass
    def test_forward_signature( self ):
        """simple docstring"""
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["""pixel_values"""]
            self.assertListEqual(arg_names[:1] , expected_arg_names )
    def test_model( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def test_backbone( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*config_and_inputs )
    def test_initialization( self ):
        """simple docstring"""
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config=config )
            for name, module in model.named_modules():
                if isinstance(module , (nn.BatchNorm2d, nn.GroupNorm) ):
self.assertTrue(
torch.all(module.weight == 1 ) , msg=f"""Parameter {name} of model {model_class} seems not properly initialized""" , )
self.assertTrue(
torch.all(module.bias == 0 ) , msg=f"""Parameter {name} of model {model_class} seems not properly initialized""" , )
    def test_hidden_states_output( self ):
        """simple docstring"""
        def check_hidden_states_output(inputs_dict , config , model_class ):
            model = model_class(config )
            model.to(torch_device )
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict , model_class ) )
            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states ) , expected_num_stages + 1 )
            # Bit's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        layers_type = ["""preactivation""", """bottleneck"""]
        for model_class in self.all_model_classes:
            for layer_type in layers_type:
                config.layer_type = layer_type
                inputs_dict["""output_hidden_states"""] = True
                check_hidden_states_output(inputs_dict , config , model_class )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict , config , model_class )
@unittest.skip(reason="""Bit does not use feedforward chunking""" )
def lowerCamelCase_ ( self : List[str] ):
"""simple docstring"""
pass
def lowerCamelCase_ ( self : str ):
"""simple docstring"""
UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCamelCase_ )
@slow
def lowerCamelCase_ ( self : int ):
"""simple docstring"""
for model_name in BIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCamelCase = BitModel.from_pretrained(lowerCamelCase_ )
self.assertIsNotNone(lowerCamelCase_ )
def prepare_img( ) -> Any:
    '''simple docstring'''
    image = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
    return image
@require_torch
@require_vision
class BitModelIntegrationTest ( unittest.TestCase ):
    @cached_property
    def default_image_processor( self ):
        """simple docstring"""
        return (
            BitImageProcessor.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None
        )
@slow
    def test_inference_image_classification_head( self ):
        """simple docstring"""
        model = BitForImageClassification.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(torch_device )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors="""pt""" ).to(torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )
        # verify the logits
        expected_shape = torch.Size((1, 1000) )
        self.assertEqual(outputs.logits.shape , expected_shape )
        expected_slice = torch.tensor([[-0.6_5_2_6, -0.5_2_6_3, -1.4_3_9_8]] ).to(torch_device )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , expected_slice , atol=1E-4 ) )
@require_torch
class BitBackboneTest ( BackboneTesterMixin , unittest.TestCase ):
    all_model_classes = (BitBackbone,) if is_torch_available() else ()
    config_class = BitConfig
    has_attentions = False
    def setUp( self ):
        """simple docstring"""
        self.model_tester = BitModelTester(self )
| 343 | 1 |
"""simple docstring"""
from typing import Dict
from .base import GenericTensor, Pipeline
class FeatureExtractionPipeline (Pipeline):
    '''simple docstring'''
    def _sanitize_parameters( self , truncation=None , tokenize_kwargs=None , return_tensors=None , **kwargs ) -> Any:
        """simple docstring"""
        if tokenize_kwargs is None:
            tokenize_kwargs = {}
        if truncation is not None:
            if "truncation" in tokenize_kwargs:
                raise ValueError(
                    """truncation parameter defined twice (given as keyword argument as well as in tokenize_kwargs)""" )
            tokenize_kwargs['''truncation'''] = truncation
        preprocess_params = tokenize_kwargs
        postprocess_params = {}
        if return_tensors is not None:
            postprocess_params['''return_tensors'''] = return_tensors
        return preprocess_params, {}, postprocess_params
    def preprocess( self , inputs , **tokenize_kwargs ) -> Dict[str, GenericTensor]:
        """simple docstring"""
        return_tensors = self.framework
        model_inputs = self.tokenizer(inputs , return_tensors=return_tensors , **tokenize_kwargs )
        return model_inputs
    def _forward( self , model_inputs ) -> Any:
        """simple docstring"""
        model_outputs = self.model(**model_inputs )
        return model_outputs
    def postprocess( self , model_outputs , return_tensors=False ) -> str:
        """simple docstring"""
        if return_tensors:
            return model_outputs[0]
        if self.framework == "pt":
            return model_outputs[0].tolist()
        elif self.framework == "tf":
            return model_outputs[0].numpy().tolist()
    def __call__( self , *args , **kwargs ) -> int:
        """simple docstring"""
        return super().__call__(*args , **kwargs )
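# Illustrative usage (my own sketch; the checkpoint id is an assumption, not from this file):
#   from transformers import pipeline
#   extractor = pipeline("feature-extraction", model="distilbert-base-uncased")
#   features = extractor("This is a test")  # nested list of shape [1, seq_len, hidden_size]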
| 56 |
"""simple docstring"""
def infix_2_postfix (infix ) -> str:
    stack = []
    post_fix = []
    priority = {
        """^""": 3,
        """*""": 2,
        """/""": 2,
        """%""": 2,
        """+""": 1,
        """-""": 1,
    } # Priority of each operator
    print_width = len(infix ) if (len(infix ) > 7) else 7
    # Print table header for output
    print(
        """Symbol""".center(8 ) , """Stack""".center(print_width ) , """Postfix""".center(print_width ) , sep=""" | """ , )
    print("""-""" * (print_width * 3 + 7) )
    for x in infix:
        if x.isalpha() or x.isdigit():
            post_fix.append(x ) # if x is Alphabet / Digit, add it to Postfix
        elif x == "(":
            stack.append(x ) # if x is "(" push to Stack
        elif x == ")": # if x is ")" pop stack until "(" is encountered
            while stack[-1] != "(":
                post_fix.append(stack.pop() ) # Pop stack & add the content to Postfix
            stack.pop()
        else:
            if len(stack ) == 0:
                stack.append(x ) # If stack is empty, push x to stack
            else: # while priority of x is not > priority of element in the stack
                while len(stack ) > 0 and priority[x] <= priority[stack[-1]]:
                    post_fix.append(stack.pop() ) # pop stack & add to Postfix
                stack.append(x ) # push x to stack
        print(
            x.center(8 ) , ("""""".join(stack )).ljust(print_width ) , ("""""".join(post_fix )).ljust(print_width ) , sep=""" | """ , ) # Output in tabular format
    while len(stack ) > 0: # while stack is not empty
        post_fix.append(stack.pop() ) # pop stack & add to Postfix
        print(
            """ """.center(8 ) , ("""""".join(stack )).ljust(print_width ) , ("""""".join(post_fix )).ljust(print_width ) , sep=""" | """ , ) # Output in tabular format
    return "".join(post_fix ) # return Postfix as str
def infix_2_prefix (infix ) -> str:
    infix = list(infix[::-1] ) # reverse the infix equation
    for i in range(len(infix ) ):
        if infix[i] == "(":
            infix[i] = """)""" # change "(" to ")"
        elif infix[i] == ")":
            infix[i] = """(""" # change ")" to "("
    return (infix_2_postfix("""""".join(infix ) ))[
        ::-1
    ] # call infix_2_postfix on Infix, return reverse of Postfix
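# Worked example (mine, not from the original module): for "a+b*c" the function reverses the
# input to "c*b+a", computes its postfix form "cb*a+", and reverses again, so
# infix_2_prefix("a+b*c") returns "+a*bc".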
if __name__ == "__main__":
    Infix = input("\nEnter an Infix Equation = ")  # Input an Infix equation
    Infix = "".join(Infix.split())  # Remove spaces from the input
print("\n\t", Infix, "(Infix) -> ", infix_2_prefix(Infix), "(Prefix)")
| 56 | 1 |
'''simple docstring'''
import os
import socket
from contextlib import contextmanager
import torch
from ..commands.config.default import write_basic_config # noqa: F401
from ..state import PartialState
from .dataclasses import DistributedType
from .imports import is_deepspeed_available, is_tpu_available
from .transformer_engine import convert_model
from .versions import is_torch_version
if is_deepspeed_available():
from deepspeed import DeepSpeedEngine
if is_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
def is_compiled_module( module ) -> Dict:
    """simple docstring"""
    if is_torch_version('<' , '2.0.0' ) or not hasattr(torch , '_dynamo' ):
        return False
    return isinstance(module , torch._dynamo.eval_frame.OptimizedModule )
def extract_model_from_parallel( model , keep_fpaa_wrapper : bool = True ) -> Any:
    """simple docstring"""
    options = (torch.nn.parallel.DistributedDataParallel, torch.nn.DataParallel)
    is_compiled = is_compiled_module(model )
    if is_compiled:
        compiled_model = model
        model = model._orig_mod
    if is_deepspeed_available():
        options += (DeepSpeedEngine,)
    while isinstance(model , options ):
        model = model.module
    if not keep_fpaa_wrapper:
        forward = getattr(model , 'forward' )
        original_forward = model.__dict__.pop('_original_forward' , None )
        if original_forward is not None:
            while hasattr(forward , '__wrapped__' ):
                forward = forward.__wrapped__
                if forward == original_forward:
                    break
            model.forward = forward
        if getattr(model , '_converted_to_transformer_engine' , False ):
            convert_model(model , to_transformer_engine=False )
    if is_compiled:
        compiled_model._orig_mod = model
        model = compiled_model
    return model
def wait_for_everyone( ) -> Optional[Any]:
"""simple docstring"""
PartialState().wait_for_everyone()
def save( obj , f ) -> Any:
    """simple docstring"""
    if PartialState().distributed_type == DistributedType.TPU:
        xm.save(obj , f )
    elif PartialState().local_process_index == 0:
        torch.save(obj , f )
@contextmanager
def patch_environment( **kwargs ) -> int:
    """simple docstring"""
    for key, value in kwargs.items():
        os.environ[key.upper()] = str(value )
yield
for key in kwargs:
if key.upper() in os.environ:
del os.environ[key.upper()]
def get_pretty_name( obj ) -> Any:
    """simple docstring"""
    if not hasattr(obj , '__qualname__' ) and not hasattr(obj , '__name__' ):
        obj = getattr(obj , '__class__' , obj )
    if hasattr(obj , '__qualname__' ):
        return obj.__qualname__
    if hasattr(obj , '__name__' ):
        return obj.__name__
    return str(obj )
def merge_dicts( source , destination ) -> str:
    """simple docstring"""
    for key, value in source.items():
        if isinstance(value , dict ):
            node = destination.setdefault(key , {} )
            merge_dicts(value , node )
        else:
            destination[key] = value
    return destination
def is_port_in_use( port : int = None ) -> bool:
    """simple docstring"""
    if port is None:
        port = 2_95_00
with socket.socket(socket.AF_INET , socket.SOCK_STREAM ) as s:
return s.connect_ex(('localhost', port) ) == 0
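# Illustrative usage of patch_environment (my own sketch, not from the original module):
#   with patch_environment(master_port="29501"):
#       ...  # os.environ["MASTER_PORT"] == "29501" inside the block; removed again on exit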
| 47 |
'''simple docstring'''
NUMBERS_PLUS_LETTER = '''Input must be a string of 8 numbers plus letter'''
LOOKUP_LETTERS = '''TRWAGMYFPDXBNJZSQVHLCKE'''
def _A ( spanish_id ):
    """simple docstring"""
    if not isinstance(spanish_id , str ):
        msg = F"Expected string as input, found {type(spanish_id ).__name__}"
        raise TypeError(msg )
    spanish_id_clean = spanish_id.replace('''-''' , '''''' ).upper()
    if len(spanish_id_clean ) != 9:
        raise ValueError(NUMBERS_PLUS_LETTER )
    try:
        number = int(spanish_id_clean[0:8] )
        letter = spanish_id_clean[8]
    except ValueError as ex:
        raise ValueError(NUMBERS_PLUS_LETTER ) from ex
    if letter.isdigit():
        raise ValueError(NUMBERS_PLUS_LETTER )
    return letter == LOOKUP_LETTERS[number % 23]
if __name__ == "__main__":
import doctest
doctest.testmod()
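# Worked example (mine, not from the original module): for "12345678Z",
# 12345678 % 23 == 14 and LOOKUP_LETTERS[14] == "Z", so _A("12345678Z") returns True.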
| 104 | 0 |
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import MaMaaaTokenizer, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
)
from transformers.utils import is_sentencepiece_available
if is_sentencepiece_available():
from transformers.models.mam_aaa.tokenization_mam_aaa import VOCAB_FILES_NAMES, save_json
from ...test_tokenization_common import TokenizerTesterMixin
if is_sentencepiece_available():
    SAMPLE_VOCAB = get_tests_dir("""fixtures/test_sentencepiece.model""")
if is_torch_available():
from transformers.models.mam_aaa.modeling_mam_aaa import shift_tokens_right
EN_CODE = 128_022
FR_CODE = 128_028
@require_sentencepiece
class M2M100TokenizationTest ( TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = MaMaaaTokenizer
    test_rust_tokenizer = False
    test_seq2seq = False
    test_sentencepiece = True
    def setUp( self ) -> Union[str, Any]:
        '''simple docstring'''
        super().setUp()
        vocab = ["""</s>""", """<unk>""", """▁This""", """▁is""", """▁a""", """▁t""", """est""", """\u0120""", """<pad>"""]
        vocab_tokens = dict(zip(vocab, range(len(vocab ) ) ) )
        save_dir = Path(self.tmpdirname )
        save_json(vocab_tokens, save_dir / VOCAB_FILES_NAMES['''vocab_file'''] )
        if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists():
            copyfile(SAMPLE_VOCAB, save_dir / VOCAB_FILES_NAMES['''spm_file'''] )
        tokenizer = MaMaaaTokenizer.from_pretrained(self.tmpdirname )
        tokenizer.save_pretrained(self.tmpdirname )
    def get_tokenizer( self, **kwargs ) -> Optional[int]:
        '''simple docstring'''
        return MaMaaaTokenizer.from_pretrained(self.tmpdirname, **kwargs )
    def get_input_output_texts( self, tokenizer ) -> int:
'''simple docstring'''
return (
"This is a test",
"This is a test",
)
    def test_convert_token_and_id( self ) -> Tuple:
        '''simple docstring'''
        token = """</s>"""
        token_id = 0
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token ), token_id )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id ), token )
    def test_get_vocab( self ) -> Optional[int]:
        '''simple docstring'''
        tokenizer = self.get_tokenizer()
        vocab_keys = list(tokenizer.get_vocab().keys() )
        self.assertEqual(vocab_keys[0], '''</s>''' )
        self.assertEqual(vocab_keys[1], '''<unk>''' )
        self.assertEqual(vocab_keys[-1], '''<s>''' )
        self.assertEqual(len(vocab_keys ), tokenizer.vocab_size + len(tokenizer.get_added_vocab() ) )
@unittest.skip('''Skip this test while all models are still to be uploaded.''' )
    def test_pretrained_model_lists( self ) -> Tuple:
'''simple docstring'''
pass
    def test_full_tokenizer( self ) -> Tuple:
        '''simple docstring'''
        tokenizer = self.get_tokenizer()
        tokens = tokenizer.tokenize('''This is a test''' )
        self.assertListEqual(tokens, ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens ), [2, 3, 4, 5, 6], )
        back_tokens = tokenizer.convert_ids_to_tokens([2, 3, 4, 5, 6] )
        self.assertListEqual(back_tokens, ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
        text = tokenizer.convert_tokens_to_string(tokens )
        self.assertEqual(text, '''This is a test''' )
@slow
    def test_tokenizer_integration( self ) -> Any:
'''simple docstring'''
_UpperCamelCase : Any = {"""input_ids""": [[1_2_8_0_2_2, 1_1_0_1_0_8, 3_9_7, 1_1, 3_8_2_7_2, 2_2_4_7, 1_2_4_8_1_1, 2_8_5, 1_8_1_0_5, 1_5_8_6, 2_0_7, 7, 3_9_5_3_4, 4_4_2_8, 3_9_7, 1_0_1_9, 1_8_1_0_5, 1_5_8_6, 2_0_7, 7, 4_1_3_3_7, 1_6_7_8_6, 2_4_1, 7, 2_0_2_1_4, 1_7, 1_2_5_6_9_0, 1_0_3_9_8, 7, 4_4_3_7_8, 5_8_0_6_9, 6_8_3_4_2, 7_7_9_8, 7_3_4_3, 1_1, 2_9_9, 3_3_3_1_0, 4, 1_5_8, 3_7_3_5_0, 9_4_0_7_7, 4_5_6_9, 2_9_9, 3_3_3_1_0, 9_0, 4, 5_2_8_4_0, 2_9_0, 4, 3_1_2_7_0, 1_1_2, 2_9_9, 6_8_2, 4, 5_2_8_4_0, 3_9_9_5_3, 1_4_0_7_9, 1_9_3, 5_2_5_1_9, 9_0_8_9_4, 1_7_8_9_4, 1_2_0_6_9_7, 1_1, 4_0_4_4_5, 5_5_1, 1_7, 1_0_1_9, 5_2_5_1_9, 9_0_8_9_4, 1_7_7_5_6, 9_6_3, 1_1, 4_0_4_4_5, 4_8_0, 1_7, 9_7_9_2, 1_1_2_0, 5_1_7_3, 1_3_9_3, 6_2_4_0, 1_6_7_8_6, 2_4_1, 1_2_0_9_9_6, 2_8, 1_2_4_5, 1_3_9_3, 1_1_8_2_4_0, 1_1_1_2_3, 1_0_1_9, 9_3_6_1_2, 2_6_9_1, 1_0_6_1_8, 9_8_0_5_8, 1_2_0_4_0_9, 1_9_2_8, 2_7_9, 4, 4_0_6_8_3, 3_6_7, 1_7_8, 2_0_7, 1_0_1_9, 1_0_3, 1_0_3_1_2_1, 5_0_6, 6_5_2_9_6, 5, 2], [1_2_8_0_2_2, 2_1_2_1_7, 3_6_7, 1_1_7, 1_2_5_4_5_0, 1_2_8, 7_1_9, 7, 7_3_0_8, 4_0, 9_3_6_1_2, 1_2_6_6_9, 1_1_1_6, 1_6_7_0_4, 7_1, 1_7_7_8_5, 3_6_9_9, 1_5_5_9_2, 3_5, 1_4_4, 9_5_8_4, 2_4_1, 1_1_9_4_3, 7_1_3, 9_5_0, 7_9_9, 2_2_4_7, 8_8_4_2_7, 1_5_0, 1_4_9, 1_1_8_8_1_3, 1_2_0_7_0_6, 1_0_1_9, 1_0_6_9_0_6, 8_1_5_1_8, 2_8, 1_2_2_4, 2_2_7_9_9, 3_9_7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1_2_8_0_2_2, 1_6_5_8, 1_2_3_3_1_1, 5_1_5_5, 5_5_7_8, 4_7_2_2, 2_7_9, 1_4_9_4_7, 2_3_6_6, 1_1_2_0, 1_1_9_7, 1_4, 1_3_4_8, 9_2_3_2, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
            expected_encoding=_UpperCamelCase, model_name='''facebook/m2m100_418M''', revision='''c168bae485c864188cf9aa0e4108b0b6934dc91e''', )
@require_torch
@require_sentencepiece
@require_tokenizers
class M2M100TokenizerIntegrationTest ( unittest.TestCase ):
    checkpoint_name = 'facebook/m2m100_418M'
    src_text = [
        'In my opinion, there are two levels of response from the French government.',
        'NSA Affair Emphasizes Complete Lack of Debate on Intelligence',
    ]
    tgt_text = [
        'Selon moi, il y a deux niveaux de réponse de la part du gouvernement français.',
        'L\'affaire NSA souligne l\'absence totale de débat sur le renseignement',
    ]
    # fmt: off
    expected_src_tokens = [EN_CODE, 593, 1949, 11_5781, 4, 7_1586, 4234, 6_0633, 12_6233, 432, 12_3808, 1_5592, 1197, 11_7132, 12_0618, 5, 2]
@classmethod
    def setUpClass( cls ) -> Optional[int]:
        '''simple docstring'''
        cls.tokenizer : MaMaaaTokenizer = MaMaaaTokenizer.from_pretrained(
            cls.checkpoint_name, src_lang='''en''', tgt_lang='''fr''' )
        cls.pad_token_id = 1
        return cls
    def test_language_codes( self ) -> List[Any]:
'''simple docstring'''
self.assertEqual(self.tokenizer.get_lang_id('''ar''' ), 1_2_8_0_0_6 )
self.assertEqual(self.tokenizer.get_lang_id('''en''' ), 1_2_8_0_2_2 )
self.assertEqual(self.tokenizer.get_lang_id('''ro''' ), 1_2_8_0_7_6 )
self.assertEqual(self.tokenizer.get_lang_id('''mr''' ), 1_2_8_0_6_3 )
    def test_get_vocab( self ) -> Optional[Any]:
        '''simple docstring'''
        vocab = self.tokenizer.get_vocab()
        self.assertEqual(len(vocab ), self.tokenizer.vocab_size )
        self.assertEqual(vocab['''<unk>'''], 3 )
        self.assertIn(self.tokenizer.get_lang_token('''en''' ), vocab )
    def test_tokenizer_batch_encode_plus( self ) -> List[Any]:
        '''simple docstring'''
        self.tokenizer.src_lang = """en"""
        ids = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
        self.assertListEqual(self.expected_src_tokens, ids )
    def test_tokenizer_decode_ignores_language_codes( self ) -> Tuple:
        '''simple docstring'''
        self.assertIn(FR_CODE, self.tokenizer.all_special_ids )
        # fmt: off
        generated_ids = [FR_CODE, 5_3_6_4, 8_2, 8_6_4_2, 4, 2_9_4, 4_7, 8, 1_4_0_2_8, 1_3_6, 3_2_8_6, 9_7_0_6, 6, 9_0_7_9_7, 6, 1_4_4_0_1_2, 1_6_2, 8_8_1_2_8, 3_0_0_6_1, 5, 2]
        # fmt: on
        result = self.tokenizer.decode(generated_ids, skip_special_tokens=True )
        expected_french = self.tokenizer.decode(generated_ids[1:], skip_special_tokens=True )
        self.assertEqual(result, expected_french )
        self.assertNotIn(self.tokenizer.eos_token, result )
    def test_special_tokens_unaffacted_by_save_load( self ) -> List[Any]:
        '''simple docstring'''
        tmpdirname = tempfile.mkdtemp()
        original_special_tokens = self.tokenizer.lang_token_to_id
        self.tokenizer.save_pretrained(tmpdirname )
        new_tok = MaMaaaTokenizer.from_pretrained(tmpdirname )
        self.assertDictEqual(new_tok.lang_token_to_id, original_special_tokens )
@require_torch
    def test_batch_fairseq_parity( self ) -> List[str]:
        '''simple docstring'''
        self.tokenizer.src_lang = """en"""
        self.tokenizer.tgt_lang = """fr"""
        batch = self.tokenizer(self.src_text, text_target=self.tgt_text, padding=True, return_tensors='''pt''' )
        batch['''decoder_input_ids'''] = shift_tokens_right(
            batch['''labels'''], self.tokenizer.pad_token_id, self.tokenizer.eos_token_id )
        for k in batch:
            batch[k] = batch[k].tolist()
# batch = {k: v.tolist() for k,v in batch.items()}
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
# batch.decoder_inputs_ids[0][0] ==
assert batch.input_ids[1][0] == EN_CODE
assert batch.input_ids[1][-1] == 2
assert batch.labels[1][0] == FR_CODE
assert batch.labels[1][-1] == 2
assert batch.decoder_input_ids[1][:2] == [2, FR_CODE]
@require_torch
    def test_src_lang_setter( self ) -> Optional[Any]:
        '''simple docstring'''
        self.tokenizer.src_lang = """mr"""
        self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id('''mr''' )] )
        self.assertListEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id] )
        self.tokenizer.src_lang = """zh"""
        self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id('''zh''' )] )
        self.assertListEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id] )
    @require_torch
    def test_tokenizer_target_mode( self ) -> Dict:
        '''simple docstring'''
        self.tokenizer.tgt_lang = """mr"""
        self.tokenizer._switch_to_target_mode()
        self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id('''mr''' )] )
        self.assertListEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id] )
        self.tokenizer._switch_to_input_mode()
        self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id(self.tokenizer.src_lang )] )
        self.tokenizer.tgt_lang = """zh"""
self.tokenizer._switch_to_target_mode()
self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id('''zh''' )] )
self.assertListEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id] )
self.tokenizer._switch_to_input_mode()
self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id(self.tokenizer.src_lang )] )
@require_torch
    def test_tokenizer_translation( self ) -> Dict:
        '''simple docstring'''
        inputs = self.tokenizer._build_translation_inputs('''A test''', return_tensors='''pt''', src_lang='''en''', tgt_lang='''ar''' )
        self.assertEqual(
            nested_simplify(inputs ), {
# en_XX, A, test, EOS
'''input_ids''': [[1_2_8_0_2_2, 5_8, 4_1_8_3, 2]],
'''attention_mask''': [[1, 1, 1, 1]],
# ar_AR
'''forced_bos_token_id''': 1_2_8_0_0_6,
}, )
| 360 |
"""simple docstring"""
import warnings
from diffusers import StableDiffusionInpaintPipeline as StableDiffusionInpaintPipeline # noqa F401
warnings.warn(
"""The `inpainting.py` script is outdated. Please use directly `from diffusers import"""
""" StableDiffusionInpaintPipeline` instead."""
)
| 128 | 0 |
"""simple docstring"""
import unittest
import numpy as np
from transformers import DistilBertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.distilbert.modeling_flax_distilbert import (
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertModel,
)
class FlaxDistilBertModelTester ( unittest.TestCase ):
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_attention_mask=True , use_token_type_ids=True , use_labels=True , vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , num_choices=4 , ) -> List[Any]:
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices
    def prepare_config_and_inputs( self : Tuple ) -> Optional[Any]:
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length] )
        config = DistilBertConfig(
            vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , tie_weights_=True , )
return config, input_ids, attention_mask
    def prepare_config_and_inputs_for_common( self : Optional[int] ) -> Union[str, Any]:
        config_and_inputs = self.prepare_config_and_inputs()
        config , input_ids , attention_mask = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'attention_mask': attention_mask}
return config, inputs_dict
@require_flax
class FlaxDistilBertModelTest( FlaxModelTesterMixin , unittest.TestCase ):
    all_model_classes = (
(
FlaxDistilBertModel,
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertForQuestionAnswering,
)
if is_flax_available()
else ()
)
    def setUp( self : int ) -> int:
        self.model_tester = FlaxDistilBertModelTester(self )
@slow
def UpperCamelCase ( self : Union[str, Any] ) -> int:
for model_class_name in self.all_model_classes:
lowerCamelCase_ = model_class_name.from_pretrained('distilbert-base-uncased' )
lowerCamelCase_ = model(np.ones((1, 1) ) )
self.assertIsNotNone(__SCREAMING_SNAKE_CASE )
@require_flax
class FlaxDistilBertModelIntegrationTest( unittest.TestCase ):
@slow
def UpperCamelCase ( self : str ) -> int:
lowerCamelCase_ = FlaxDistilBertModel.from_pretrained('distilbert-base-uncased' )
lowerCamelCase_ = np.array([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] )
lowerCamelCase_ = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
lowerCamelCase_ = model(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE )[0]
lowerCamelCase_ = (1, 11, 768)
self.assertEqual(output.shape , __SCREAMING_SNAKE_CASE )
lowerCamelCase_ = np.array([[[-0.1_639, 0.3_299, 0.1_648], [-0.1_746, 0.3_289, 0.1_710], [-0.1_884, 0.3_357, 0.1_810]]] )
self.assertTrue(jnp.allclose(output[:, 1:4, 1:4] , __SCREAMING_SNAKE_CASE , atol=1e-4 ) )
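# A minimal smoke test in the same spirit as the integration check above, but on the
# tester's tiny hyperparameters so no checkpoint download is needed. The config values
# are the tester defaults, not a released model; purely illustrative.
def tiny_flax_distilbert_smoke():
    config = DistilBertConfig(vocab_size=99 , dim=32 , n_layers=5 , n_heads=4 , hidden_dim=37 )
    model = FlaxDistilBertModel(config )
    output = model(jnp.ones((1, 7) , dtype='i4' ) )[0]
    return output.shape  # (batch_size, seq_length, dim) == (1, 7, 32)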
| 183 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
SCREAMING_SNAKE_CASE :Optional[Any] = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE :List[Any] = {
'hustvl/yolos-small': 'https://huggingface.co/hustvl/yolos-small/resolve/main/config.json',
# See all YOLOS models at https://huggingface.co/models?filter=yolos
}
class YolosConfig ( PretrainedConfig ):
'''simple docstring'''
snake_case_ = "yolos"
    def __init__( self : Any ,hidden_size=7_68 ,num_hidden_layers=12 ,num_attention_heads=12 ,intermediate_size=30_72 ,hidden_act="gelu" ,hidden_dropout_prob=0.0 ,attention_probs_dropout_prob=0.0 ,initializer_range=0.02 ,layer_norm_eps=1E-12 ,image_size=[5_12, 8_64] ,patch_size=16 ,num_channels=3 ,qkv_bias=True ,num_detection_tokens=1_00 ,use_mid_position_embeddings=True ,auxiliary_loss=False ,class_cost=1 ,bbox_cost=5 ,giou_cost=2 ,bbox_loss_coefficient=5 ,giou_loss_coefficient=2 ,eos_coefficient=0.1 ,**kwargs ,):
        super().__init__(**kwargs )
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.num_detection_tokens = num_detection_tokens
        self.use_mid_position_embeddings = use_mid_position_embeddings
        self.auxiliary_loss = auxiliary_loss
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
class YolosOnnxConfig ( OnnxConfig ):
'''simple docstring'''
    torch_onnx_minimum_version = version.parse("1.11" )
    @property
    def inputs ( self : str ):
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ] )
    @property
    def atol_for_validation ( self : List[Any] ):
        return 1E-4
    @property
    def default_onnx_opset ( self : Optional[Any] ):
        return 12
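# A short, hedged usage sketch for the two classes above: build a detection config
# with a couple of overrides and inspect the ONNX export metadata. All values come
# from the defaults in the signatures; nothing external is assumed.
def yolos_config_demo():
    config = YolosConfig(num_detection_tokens=1_00 , auxiliary_loss=True )
    onnx_config = YolosOnnxConfig(config )
    assert "pixel_values" in onnx_config.inputs
    return config.num_detection_tokens, onnx_config.atol_for_validation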
| 15 | 0 |
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
StableDiffusionSAGPipeline,
UNetaDConditionModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class StableDiffusionSAGPipelineFastTests( PipelineLatentTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    '''simple docstring'''
    pipeline_class = StableDiffusionSAGPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    test_cpu_offload = False
    def get_dummy_components( self : int):
'''simple docstring'''
torch.manual_seed(0)
        unet = UNetaDConditionModel(
            block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , )
        scheduler = DDIMScheduler(
            beta_start=0.00_085 , beta_end=0.012 , beta_schedule='scaled_linear' , clip_sample=False , set_alpha_to_one=False , )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip')
lowerCAmelCase__ = {
'unet': unet,
'scheduler': scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'safety_checker': None,
'feature_extractor': None,
}
return components
    def get_dummy_inputs( self : Dict , device , seed=0):
        '''simple docstring'''
        if str(device).startswith('mps'):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
'prompt': '.',
'generator': generator,
'num_inference_steps': 2,
'guidance_scale': 1.0,
'sag_scale': 1.0,
'output_type': 'numpy',
}
return inputs
    def test_inference_batch_single_identical( self : Optional[Any]):
'''simple docstring'''
super().test_inference_batch_single_identical(expected_max_diff=3e-3)
@slow
@require_torch_gpu
class StableDiffusionSAGPipelineIntegrationTests( unittest.TestCase ):
    '''simple docstring'''
    def tearDown( self : int):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __snake_case ( self : List[Any]):
'''simple docstring'''
        sag_pipe = StableDiffusionSAGPipeline.from_pretrained('CompVis/stable-diffusion-v1-4')
        sag_pipe = sag_pipe.to(torch_device)
        sag_pipe.set_progress_bar_config(disable=None)
        prompt = '.'
        generator = torch.manual_seed(0)
        output = sag_pipe(
            [prompt] , generator=generator , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type='np')
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.1_568, 0.1_738, 0.1_695, 0.1_693, 0.1_507, 0.1_705, 0.1_547, 0.1_751, 0.1_949])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-2
def __snake_case ( self : Tuple):
'''simple docstring'''
        sag_pipe = StableDiffusionSAGPipeline.from_pretrained('stabilityai/stable-diffusion-2-1-base')
        sag_pipe = sag_pipe.to(torch_device)
        sag_pipe.set_progress_bar_config(disable=None)
        prompt = '.'
        generator = torch.manual_seed(0)
        output = sag_pipe(
            [prompt] , generator=generator , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type='np')
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3_459, 0.2_876, 0.2_537, 0.3_002, 0.2_671, 0.2_160, 0.3_026, 0.2_262, 0.2_371])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-2
def __snake_case ( self : Dict):
'''simple docstring'''
        sag_pipe = StableDiffusionSAGPipeline.from_pretrained('stabilityai/stable-diffusion-2-1-base')
        sag_pipe = sag_pipe.to(torch_device)
        sag_pipe.set_progress_bar_config(disable=None)
        prompt = '.'
        generator = torch.manual_seed(0)
        output = sag_pipe(
            [prompt] , width=768 , height=512 , generator=generator , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type='np' , )
        image = output.images
assert image.shape == (1, 512, 768, 3)
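# A compact, hedged sketch of how the fast-test harness above drives the pipeline end
# to end: tiny components in, a (1, H, W, 3) numpy image out. Instantiating a TestCase
# outside a runner is a shortcut that recent Python versions tolerate; it assumes the
# helper methods behave like their upstream counterparts and runs on CPU only.
def run_sag_fast_smoke():
    case = StableDiffusionSAGPipelineFastTests()
    pipe = StableDiffusionSAGPipeline(**case.get_dummy_components())
    pipe.set_progress_bar_config(disable=None)
    return pipe(**case.get_dummy_inputs('cpu')).images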
| 358 |
from typing import Optional, Tuple, Union
import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict
from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_ad_blocks_flax import (
FlaxCrossAttnDownBlockaD,
FlaxCrossAttnUpBlockaD,
FlaxDownBlockaD,
FlaxUNetMidBlockaDCrossAttn,
FlaxUpBlockaD,
)
@flax.struct.dataclass
class FlaxUNetaDConditionOutput( BaseOutput ):
    '''simple docstring'''
    sample: jnp.ndarray
@flax_register_to_config
class FlaxUNetaDConditionModel( nn.Module , FlaxModelMixin , ConfigMixin ):
    '''simple docstring'''
    sample_size: int = 32
    in_channels: int = 4
    out_channels: int = 4
    down_block_types: Tuple[str] = (
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "DownBlock2D",
    )
    up_block_types: Tuple[str] = ("UpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D")
    only_cross_attention: Union[bool, Tuple[bool]] = False
    block_out_channels: Tuple[int] = (320, 640, 1_280, 1_280)
    layers_per_block: int = 2
    attention_head_dim: Union[int, Tuple[int]] = 8
    num_attention_heads: Optional[Union[int, Tuple[int]]] = None
    cross_attention_dim: int = 1_280
    dropout: float = 0.0
    use_linear_projection: bool = False
    dtype: jnp.dtype = jnp.floataa
    flip_sin_to_cos: bool = True
    freq_shift: int = 0
    use_memory_efficient_attention: bool = False
    def init_weights( self : Optional[int] , rng : jax.random.KeyArray):
        '''simple docstring'''
        sample_shape = (1, self.in_channels, self.sample_size, self.sample_size)
        sample = jnp.zeros(sample_shape , dtype=jnp.floataa)
        timesteps = jnp.ones((1,) , dtype=jnp.intaa)
        encoder_hidden_states = jnp.zeros((1, 1, self.cross_attention_dim) , dtype=jnp.floataa)
        params_rng , dropout_rng = jax.random.split(rng)
        rngs = {'params': params_rng, 'dropout': dropout_rng}
        return self.init(rngs , sample , timesteps , encoder_hidden_states)["params"]
    def setup( self : Union[str, Any]):
'''simple docstring'''
lowerCAmelCase__ = self.block_out_channels
lowerCAmelCase__ = block_out_channels[0] * 4
if self.num_attention_heads is not None:
raise ValueError(
'At the moment it is not possible to define the number of attention heads via `num_attention_heads` because of a naming issue as described in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131. Passing `num_attention_heads` will only be supported in diffusers v0.19.')
# If `num_attention_heads` is not defined (which is the case for most models)
# it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
# The reason for this behavior is to correct for incorrectly named variables that were introduced
# when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
# Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
# which is why we correct for the naming here.
lowerCAmelCase__ = self.num_attention_heads or self.attention_head_dim
# input
lowerCAmelCase__ = nn.Conv(
block_out_channels[0] , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
# time
lowerCAmelCase__ = FlaxTimesteps(
block_out_channels[0] , flip_sin_to_cos=self.flip_sin_to_cos , freq_shift=self.config.freq_shift)
lowerCAmelCase__ = FlaxTimestepEmbedding(lowercase__ , dtype=self.dtype)
lowerCAmelCase__ = self.only_cross_attention
if isinstance(lowercase__ , lowercase__):
lowerCAmelCase__ = (only_cross_attention,) * len(self.down_block_types)
if isinstance(lowercase__ , lowercase__):
lowerCAmelCase__ = (num_attention_heads,) * len(self.down_block_types)
# down
lowerCAmelCase__ = []
lowerCAmelCase__ = block_out_channels[0]
for i, down_block_type in enumerate(self.down_block_types):
lowerCAmelCase__ = output_channel
lowerCAmelCase__ = block_out_channels[i]
lowerCAmelCase__ = i == len(lowercase__) - 1
if down_block_type == "CrossAttnDownBlock2D":
lowerCAmelCase__ = FlaxCrossAttnDownBlockaD(
in_channels=lowercase__ , out_channels=lowercase__ , dropout=self.dropout , num_layers=self.layers_per_block , num_attention_heads=num_attention_heads[i] , add_downsample=not is_final_block , use_linear_projection=self.use_linear_projection , only_cross_attention=only_cross_attention[i] , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
else:
lowerCAmelCase__ = FlaxDownBlockaD(
in_channels=lowercase__ , out_channels=lowercase__ , dropout=self.dropout , num_layers=self.layers_per_block , add_downsample=not is_final_block , dtype=self.dtype , )
down_blocks.append(lowercase__)
lowerCAmelCase__ = down_blocks
# mid
lowerCAmelCase__ = FlaxUNetMidBlockaDCrossAttn(
in_channels=block_out_channels[-1] , dropout=self.dropout , num_attention_heads=num_attention_heads[-1] , use_linear_projection=self.use_linear_projection , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
# up
lowerCAmelCase__ = []
lowerCAmelCase__ = list(reversed(lowercase__))
lowerCAmelCase__ = list(reversed(lowercase__))
lowerCAmelCase__ = list(reversed(lowercase__))
lowerCAmelCase__ = reversed_block_out_channels[0]
for i, up_block_type in enumerate(self.up_block_types):
lowerCAmelCase__ = output_channel
lowerCAmelCase__ = reversed_block_out_channels[i]
lowerCAmelCase__ = reversed_block_out_channels[min(i + 1 , len(lowercase__) - 1)]
lowerCAmelCase__ = i == len(lowercase__) - 1
if up_block_type == "CrossAttnUpBlock2D":
lowerCAmelCase__ = FlaxCrossAttnUpBlockaD(
in_channels=lowercase__ , out_channels=lowercase__ , prev_output_channel=lowercase__ , num_layers=self.layers_per_block + 1 , num_attention_heads=reversed_num_attention_heads[i] , add_upsample=not is_final_block , dropout=self.dropout , use_linear_projection=self.use_linear_projection , only_cross_attention=only_cross_attention[i] , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
else:
lowerCAmelCase__ = FlaxUpBlockaD(
in_channels=lowercase__ , out_channels=lowercase__ , prev_output_channel=lowercase__ , num_layers=self.layers_per_block + 1 , add_upsample=not is_final_block , dropout=self.dropout , dtype=self.dtype , )
up_blocks.append(lowercase__)
lowerCAmelCase__ = output_channel
lowerCAmelCase__ = up_blocks
# out
lowerCAmelCase__ = nn.GroupNorm(num_groups=32 , epsilon=1e-5)
lowerCAmelCase__ = nn.Conv(
self.out_channels , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
def __call__( self : Optional[Any] , lowercase__ : Optional[int] , lowercase__ : List[str] , lowercase__ : int , lowercase__ : List[Any]=None , lowercase__ : Union[str, Any]=None , lowercase__ : bool = True , lowercase__ : bool = False , ):
'''simple docstring'''
if not isinstance(lowercase__ , jnp.ndarray):
lowerCAmelCase__ = jnp.array([timesteps] , dtype=jnp.intaa)
elif isinstance(lowercase__ , jnp.ndarray) and len(timesteps.shape) == 0:
lowerCAmelCase__ = timesteps.astype(dtype=jnp.floataa)
lowerCAmelCase__ = jnp.expand_dims(lowercase__ , 0)
lowerCAmelCase__ = self.time_proj(lowercase__)
lowerCAmelCase__ = self.time_embedding(lowercase__)
# 2. pre-process
lowerCAmelCase__ = jnp.transpose(lowercase__ , (0, 2, 3, 1))
lowerCAmelCase__ = self.conv_in(lowercase__)
# 3. down
lowerCAmelCase__ = (sample,)
for down_block in self.down_blocks:
if isinstance(lowercase__ , lowercase__):
lowerCAmelCase__ , lowerCAmelCase__ = down_block(lowercase__ , lowercase__ , lowercase__ , deterministic=not train)
else:
lowerCAmelCase__ , lowerCAmelCase__ = down_block(lowercase__ , lowercase__ , deterministic=not train)
down_block_res_samples += res_samples
if down_block_additional_residuals is not None:
lowerCAmelCase__ = ()
for down_block_res_sample, down_block_additional_residual in zip(
lowercase__ , lowercase__):
down_block_res_sample += down_block_additional_residual
new_down_block_res_samples += (down_block_res_sample,)
lowerCAmelCase__ = new_down_block_res_samples
# 4. mid
lowerCAmelCase__ = self.mid_block(lowercase__ , lowercase__ , lowercase__ , deterministic=not train)
if mid_block_additional_residual is not None:
sample += mid_block_additional_residual
# 5. up
for up_block in self.up_blocks:
lowerCAmelCase__ = down_block_res_samples[-(self.layers_per_block + 1) :]
lowerCAmelCase__ = down_block_res_samples[: -(self.layers_per_block + 1)]
if isinstance(lowercase__ , lowercase__):
lowerCAmelCase__ = up_block(
lowercase__ , temb=lowercase__ , encoder_hidden_states=lowercase__ , res_hidden_states_tuple=lowercase__ , deterministic=not train , )
else:
lowerCAmelCase__ = up_block(lowercase__ , temb=lowercase__ , res_hidden_states_tuple=lowercase__ , deterministic=not train)
# 6. post-process
lowerCAmelCase__ = self.conv_norm_out(lowercase__)
lowerCAmelCase__ = nn.silu(lowercase__)
lowerCAmelCase__ = self.conv_out(lowercase__)
lowerCAmelCase__ = jnp.transpose(lowercase__ , (0, 3, 1, 2))
if not return_dict:
return (sample,)
        return FlaxUNetaDConditionOutput(sample=sample)
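# A minimal, hedged sketch of initializing the Flax UNet above. Block types and
# channel counts are shrunk so the two lists stay the same length; it assumes the
# mangled `setup` wires the submodules as the upstream module does.
import jax as _jax
def flax_unet_init_demo():
    unet = FlaxUNetaDConditionModel(
        sample_size=16 , block_out_channels=(32, 64) , layers_per_block=1 , attention_head_dim=4 , cross_attention_dim=32 , down_block_types=('CrossAttnDownBlock2D', 'DownBlock2D') , up_block_types=('UpBlock2D', 'CrossAttnUpBlock2D') , )
    rng = _jax.random.PRNGKey(0)
    return unet.init_weights(rng)  # FrozenDict of parameters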
| 119 | 0 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModelWithProjection,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import (
DiffusionPipeline,
UnCLIPImageVariationPipeline,
UnCLIPScheduler,
UNetaDConditionModel,
UNetaDModel,
)
from diffusers.pipelines.unclip.text_proj import UnCLIPTextProjModel
from diffusers.utils import floats_tensor, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, load_image, require_torch_gpu, skip_mps
from ..pipeline_params import IMAGE_VARIATION_BATCH_PARAMS, IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class UnCLIPImageVariationPipelineFastTests( PipelineTesterMixin , unittest.TestCase ):
    pipeline_class = UnCLIPImageVariationPipeline
    params = IMAGE_VARIATION_PARAMS - {'height', 'width', 'guidance_scale'}
    batch_params = IMAGE_VARIATION_BATCH_PARAMS
    required_optional_params = [
        'generator',
        'return_dict',
        'decoder_num_inference_steps',
        'super_res_num_inference_steps',
    ]
    test_xformers_attention = False
@property
    def text_embedder_hidden_size( self ) -> Union[str, Any]:
        '''simple docstring'''
        return 32
    @property
    def time_input_dim( self ) -> Union[str, Any]:
        '''simple docstring'''
        return 32
    @property
    def block_out_channels_a( self ) -> Optional[int]:
        '''simple docstring'''
        return self.time_input_dim
    @property
    def time_embed_dim( self ) -> Dict:
        '''simple docstring'''
        return self.time_input_dim * 4
    @property
    def cross_attention_dim( self ) -> Any:
        '''simple docstring'''
        return 100
    @property
    def dummy_tokenizer( self ) -> Any:
        '''simple docstring'''
        tokenizer = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
        return tokenizer
@property
    def dummy_text_encoder( self ) -> int:
        '''simple docstring'''
        torch.manual_seed(0 )
        config = CLIPTextConfig(
            bos_token_id=0 ,eos_token_id=2 ,hidden_size=self.text_embedder_hidden_size ,projection_dim=self.text_embedder_hidden_size ,intermediate_size=37 ,layer_norm_eps=1e-05 ,num_attention_heads=4 ,num_hidden_layers=5 ,pad_token_id=1 ,vocab_size=1000 ,)
        return CLIPTextModelWithProjection(config )
@property
    def dummy_image_encoder( self ) -> List[str]:
        '''simple docstring'''
        torch.manual_seed(0 )
        config = CLIPVisionConfig(
            hidden_size=self.text_embedder_hidden_size ,projection_dim=self.text_embedder_hidden_size ,num_hidden_layers=5 ,num_attention_heads=4 ,image_size=32 ,intermediate_size=37 ,patch_size=1 ,)
        return CLIPVisionModelWithProjection(config )
@property
    def dummy_text_proj( self ) -> Dict:
        '''simple docstring'''
        torch.manual_seed(0 )
        model_kwargs = {
            'clip_embeddings_dim': self.text_embedder_hidden_size,
            'time_embed_dim': self.time_embed_dim,
            'cross_attention_dim': self.cross_attention_dim,
        }
        model = UnCLIPTextProjModel(**model_kwargs )
return model
@property
    def dummy_decoder( self ) -> Tuple:
'''simple docstring'''
torch.manual_seed(0 )
        model_kwargs = {
'sample_size': 32,
# RGB in channels
'in_channels': 3,
# Out channels is double in channels because predicts mean and variance
'out_channels': 6,
'down_block_types': ('ResnetDownsampleBlock2D', 'SimpleCrossAttnDownBlock2D'),
'up_block_types': ('SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'),
'mid_block_type': 'UNetMidBlock2DSimpleCrossAttn',
'block_out_channels': (self.block_out_channels_a, self.block_out_channels_a * 2),
'layers_per_block': 1,
'cross_attention_dim': self.cross_attention_dim,
'attention_head_dim': 4,
'resnet_time_scale_shift': 'scale_shift',
'class_embed_type': 'identity',
}
        model = UNetaDConditionModel(**model_kwargs )
return model
@property
    def dummy_super_res_kwargs( self ) -> List[str]:
'''simple docstring'''
return {
"sample_size": 64,
"layers_per_block": 1,
"down_block_types": ("ResnetDownsampleBlock2D", "ResnetDownsampleBlock2D"),
"up_block_types": ("ResnetUpsampleBlock2D", "ResnetUpsampleBlock2D"),
"block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2),
"in_channels": 6,
"out_channels": 3,
}
@property
    def dummy_super_res_first( self ) -> Optional[int]:
        '''simple docstring'''
        torch.manual_seed(0 )
        model = UNetaDModel(**self.dummy_super_res_kwargs )
        return model
@property
    def dummy_super_res_last( self ) -> Tuple:
        '''simple docstring'''
        torch.manual_seed(1 )
        model = UNetaDModel(**self.dummy_super_res_kwargs )
        return model
    def get_dummy_components( self ) -> Optional[Any]:
        '''simple docstring'''
        decoder = self.dummy_decoder
        text_proj = self.dummy_text_proj
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        super_res_first = self.dummy_super_res_first
        super_res_last = self.dummy_super_res_last
        decoder_scheduler = UnCLIPScheduler(
            variance_type='learned_range' ,prediction_type='epsilon' ,num_train_timesteps=1000 ,)
        super_res_scheduler = UnCLIPScheduler(
            variance_type='fixed_small_log' ,prediction_type='epsilon' ,num_train_timesteps=1000 ,)
        feature_extractor = CLIPImageProcessor(crop_size=32 ,size=32 )
        image_encoder = self.dummy_image_encoder
return {
"decoder": decoder,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"text_proj": text_proj,
"feature_extractor": feature_extractor,
"image_encoder": image_encoder,
"super_res_first": super_res_first,
"super_res_last": super_res_last,
"decoder_scheduler": decoder_scheduler,
"super_res_scheduler": super_res_scheduler,
}
    def get_dummy_inputs( self ,device ,seed=0 ,pil_image=True ) -> Optional[Any]:
        '''simple docstring'''
        input_image = floats_tensor((1, 3, 32, 32) ,rng=random.Random(seed ) ).to(device )
        if str(device ).startswith('mps' ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        if pil_image:
            input_image = input_image * 0.5 + 0.5
            input_image = input_image.clamp(0 ,1 )
            input_image = input_image.cpu().permute(0 ,2 ,3 ,1 ).float().numpy()
            input_image = DiffusionPipeline.numpy_to_pil(input_image )[0]
return {
"image": input_image,
"generator": generator,
"decoder_num_inference_steps": 2,
"super_res_num_inference_steps": 2,
"output_type": "np",
}
def _UpperCAmelCase ( self ) -> Union[str, Any]:
'''simple docstring'''
lowercase_ : List[str] = 'cpu'
lowercase_ : Dict = self.get_dummy_components()
lowercase_ : Union[str, Any] = self.pipeline_class(**__UpperCamelCase )
lowercase_ : Optional[Any] = pipe.to(__UpperCamelCase )
pipe.set_progress_bar_config(disable=__UpperCamelCase )
lowercase_ : List[Any] = self.get_dummy_inputs(__UpperCamelCase ,pil_image=__UpperCamelCase )
lowercase_ : Optional[int] = pipe(**__UpperCamelCase )
lowercase_ : List[Any] = output.images
lowercase_ : int = self.get_dummy_inputs(__UpperCamelCase ,pil_image=__UpperCamelCase )
lowercase_ : str = pipe(
**__UpperCamelCase ,return_dict=__UpperCamelCase ,)[0]
lowercase_ : List[str] = image[0, -3:, -3:, -1]
lowercase_ : int = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowercase_ : Any = np.array(
[
0.9997,
0.0002,
0.9997,
0.9997,
0.9969,
0.0023,
0.9997,
0.9969,
0.9970,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
def _UpperCAmelCase ( self ) -> Optional[Any]:
'''simple docstring'''
lowercase_ : Union[str, Any] = 'cpu'
lowercase_ : Any = self.get_dummy_components()
lowercase_ : Any = self.pipeline_class(**__UpperCamelCase )
lowercase_ : str = pipe.to(__UpperCamelCase )
pipe.set_progress_bar_config(disable=__UpperCamelCase )
lowercase_ : str = self.get_dummy_inputs(__UpperCamelCase ,pil_image=__UpperCamelCase )
lowercase_ : Any = pipe(**__UpperCamelCase )
lowercase_ : List[str] = output.images
lowercase_ : Union[str, Any] = self.get_dummy_inputs(__UpperCamelCase ,pil_image=__UpperCamelCase )
lowercase_ : Tuple = pipe(
**__UpperCamelCase ,return_dict=__UpperCamelCase ,)[0]
lowercase_ : Optional[int] = image[0, -3:, -3:, -1]
lowercase_ : Optional[Any] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowercase_ : Any = np.array([0.9997, 0.0003, 0.9997, 0.9997, 0.9970, 0.0024, 0.9997, 0.9971, 0.9971] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
def _UpperCAmelCase ( self ) -> Optional[int]:
'''simple docstring'''
lowercase_ : Tuple = 'cpu'
lowercase_ : Optional[Any] = self.get_dummy_components()
lowercase_ : Optional[int] = self.pipeline_class(**__UpperCamelCase )
lowercase_ : Optional[int] = pipe.to(__UpperCamelCase )
pipe.set_progress_bar_config(disable=__UpperCamelCase )
lowercase_ : str = self.get_dummy_inputs(__UpperCamelCase ,pil_image=__UpperCamelCase )
lowercase_ : Any = [
pipeline_inputs['image'],
pipeline_inputs['image'],
]
lowercase_ : Optional[Any] = pipe(**__UpperCamelCase )
lowercase_ : Tuple = output.images
lowercase_ : Dict = self.get_dummy_inputs(__UpperCamelCase ,pil_image=__UpperCamelCase )
lowercase_ : Any = [
tuple_pipeline_inputs['image'],
tuple_pipeline_inputs['image'],
]
lowercase_ : List[Any] = pipe(
**__UpperCamelCase ,return_dict=__UpperCamelCase ,)[0]
lowercase_ : Optional[Any] = image[0, -3:, -3:, -1]
lowercase_ : Optional[Any] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (2, 64, 64, 3)
lowercase_ : Optional[Any] = np.array(
[
0.9997,
0.9989,
0.0008,
0.0021,
0.9960,
0.0018,
0.0014,
0.0002,
0.9933,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
def _UpperCAmelCase ( self ) -> List[str]:
'''simple docstring'''
lowercase_ : Tuple = torch.device('cpu' )
        class DummyScheduler :
            init_noise_sigma = 1
lowercase_ : Any = self.get_dummy_components()
lowercase_ : Optional[int] = self.pipeline_class(**__UpperCamelCase )
lowercase_ : Optional[int] = pipe.to(__UpperCamelCase )
pipe.set_progress_bar_config(disable=__UpperCamelCase )
lowercase_ : Tuple = torch.Generator(device=__UpperCamelCase ).manual_seed(0 )
lowercase_ : Optional[Any] = pipe.decoder.dtype
lowercase_ : Tuple = 1
lowercase_ : Dict = (
batch_size,
pipe.decoder.config.in_channels,
pipe.decoder.config.sample_size,
pipe.decoder.config.sample_size,
)
lowercase_ : str = pipe.prepare_latents(
__UpperCamelCase ,dtype=__UpperCamelCase ,device=__UpperCamelCase ,generator=__UpperCamelCase ,latents=__UpperCamelCase ,scheduler=DummyScheduler() )
lowercase_ : Any = (
batch_size,
pipe.super_res_first.config.in_channels // 2,
pipe.super_res_first.config.sample_size,
pipe.super_res_first.config.sample_size,
)
lowercase_ : List[str] = pipe.prepare_latents(
__UpperCamelCase ,dtype=__UpperCamelCase ,device=__UpperCamelCase ,generator=__UpperCamelCase ,latents=__UpperCamelCase ,scheduler=DummyScheduler() )
lowercase_ : Union[str, Any] = self.get_dummy_inputs(__UpperCamelCase ,pil_image=__UpperCamelCase )
lowercase_ : int = pipe(
**__UpperCamelCase ,decoder_latents=__UpperCamelCase ,super_res_latents=__UpperCamelCase ).images
lowercase_ : Optional[Any] = self.get_dummy_inputs(__UpperCamelCase ,pil_image=__UpperCamelCase )
# Don't pass image, instead pass embedding
lowercase_ : Union[str, Any] = pipeline_inputs.pop('image' )
lowercase_ : Union[str, Any] = pipe.image_encoder(__UpperCamelCase ).image_embeds
lowercase_ : Tuple = pipe(
**__UpperCamelCase ,decoder_latents=__UpperCamelCase ,super_res_latents=__UpperCamelCase ,image_embeddings=__UpperCamelCase ,).images
# make sure passing text embeddings manually is identical
assert np.abs(img_out_a - img_out_a ).max() < 1e-4
@skip_mps
def _UpperCAmelCase ( self ) -> List[Any]:
'''simple docstring'''
lowercase_ : Dict = torch_device == 'cpu'
# Check is relaxed because there is not a torch 2.0 sliced attention added kv processor
lowercase_ : Any = 1e-2
self._test_attention_slicing_forward_pass(
test_max_difference=__UpperCamelCase ,expected_max_diff=__UpperCamelCase )
@skip_mps
def _UpperCAmelCase ( self ) -> int:
'''simple docstring'''
lowercase_ : Optional[int] = torch_device == 'cpu'
lowercase_ : List[Any] = True
lowercase_ : str = [
'decoder_num_inference_steps',
'super_res_num_inference_steps',
]
self._test_inference_batch_single_identical(
test_max_difference=__UpperCamelCase ,relax_max_difference=__UpperCamelCase ,additional_params_copy_to_batched_inputs=__UpperCamelCase ,)
def _UpperCAmelCase ( self ) -> Any:
'''simple docstring'''
lowercase_ : Tuple = [
'decoder_num_inference_steps',
'super_res_num_inference_steps',
]
if torch_device == "mps":
# TODO: MPS errors with larger batch sizes
lowercase_ : List[str] = [2, 3]
self._test_inference_batch_consistent(
batch_sizes=__UpperCamelCase ,additional_params_copy_to_batched_inputs=__UpperCamelCase ,)
else:
self._test_inference_batch_consistent(
additional_params_copy_to_batched_inputs=__UpperCamelCase )
@skip_mps
    def test_dict_tuple_outputs_equivalent( self ) -> str:
        '''simple docstring'''
        return super().test_dict_tuple_outputs_equivalent()
    @skip_mps
    def test_save_load_local( self ) -> Any:
        '''simple docstring'''
        return super().test_save_load_local()
    @skip_mps
    def test_save_load_optional_components( self ) -> Union[str, Any]:
        '''simple docstring'''
        return super().test_save_load_optional_components()
@slow
@require_torch_gpu
class UnCLIPImageVariationPipelineIntegrationTests( unittest.TestCase ):
    def tearDown( self ) -> List[Any]:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _UpperCAmelCase ( self ) -> int:
'''simple docstring'''
lowercase_ : Union[str, Any] = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/unclip/cat.png' )
lowercase_ : Any = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/unclip/karlo_v1_alpha_cat_variation_fp16.npy' )
lowercase_ : List[str] = UnCLIPImageVariationPipeline.from_pretrained(
'kakaobrain/karlo-v1-alpha-image-variations' ,torch_dtype=torch.floataa )
lowercase_ : Optional[Any] = pipeline.to(__UpperCamelCase )
pipeline.set_progress_bar_config(disable=__UpperCamelCase )
lowercase_ : Dict = torch.Generator(device='cpu' ).manual_seed(0 )
lowercase_ : Union[str, Any] = pipeline(
__UpperCamelCase ,generator=__UpperCamelCase ,output_type='np' ,)
lowercase_ : Any = output.images[0]
assert image.shape == (256, 256, 3)
assert_mean_pixel_difference(__UpperCamelCase ,__UpperCamelCase ,15 )
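# The image-variation inputs above hinge on one conversion: a float tensor in [-1, 1]
# becomes a PIL image before it reaches the feature extractor. A standalone sketch of
# that step, with the test's (1, 3, 32, 32) shape; purely illustrative.
def tensor_to_pil_example():
    tensor = floats_tensor((1, 3, 32, 32) ,rng=random.Random(0 ) )
    tensor = (tensor * 0.5 + 0.5).clamp(0 ,1 )
    array = tensor.cpu().permute(0 ,2 ,3 ,1 ).float().numpy()
    return DiffusionPipeline.numpy_to_pil(array )[0]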
| 213 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_mbart import MBartTokenizer
else:
__SCREAMING_SNAKE_CASE =None
__SCREAMING_SNAKE_CASE =logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE ={"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}
__SCREAMING_SNAKE_CASE ={
"vocab_file": {
"facebook/mbart-large-en-ro": (
"https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model"
),
"facebook/mbart-large-cc25": (
"https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model"
),
},
"tokenizer_file": {
"facebook/mbart-large-en-ro": "https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/tokenizer.json",
"facebook/mbart-large-cc25": "https://huggingface.co/facebook/mbart-large-cc25/resolve/main/tokenizer.json",
},
}
__SCREAMING_SNAKE_CASE ={
"facebook/mbart-large-en-ro": 1024,
"facebook/mbart-large-cc25": 1024,
}
# fmt: off
__SCREAMING_SNAKE_CASE =["ar_AR", "cs_CZ", "de_DE", "en_XX", "es_XX", "et_EE", "fi_FI", "fr_XX", "gu_IN", "hi_IN", "it_IT", "ja_XX", "kk_KZ", "ko_KR", "lt_LT", "lv_LV", "my_MM", "ne_NP", "nl_XX", "ro_RO", "ru_RU", "si_LK", "tr_TR", "vi_VN", "zh_CN"]
class MBartTokenizerFast( PreTrainedTokenizerFast ):
    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ['input_ids', 'attention_mask']
    slow_tokenizer_class = MBartTokenizer
    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []
    def __init__( self ,vocab_file=None ,tokenizer_file=None ,bos_token="<s>" ,eos_token="</s>" ,sep_token="</s>" ,cls_token="<s>" ,unk_token="<unk>" ,pad_token="<pad>" ,mask_token="<mask>" ,src_lang=None ,tgt_lang=None ,additional_special_tokens=None ,**kwargs ,) -> List[str]:
        '''simple docstring'''
        mask_token = AddedToken(mask_token ,lstrip=True ,rstrip=False ) if isinstance(mask_token ,str ) else mask_token
        super().__init__(
            vocab_file=vocab_file ,tokenizer_file=tokenizer_file ,bos_token=bos_token ,eos_token=eos_token ,sep_token=sep_token ,cls_token=cls_token ,unk_token=unk_token ,pad_token=pad_token ,mask_token=mask_token ,src_lang=src_lang ,tgt_lang=tgt_lang ,additional_special_tokens=additional_special_tokens ,**kwargs ,)
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
        _additional_special_tokens = FAIRSEQ_LANGUAGE_CODES.copy()
        if additional_special_tokens is not None:
            # Only add those special tokens if they are not already there.
            _additional_special_tokens.extend(
                [t for t in additional_special_tokens if t not in _additional_special_tokens] )
        self.add_special_tokens({'additional_special_tokens': _additional_special_tokens} )
        self.lang_code_to_id = {
            lang_code: self.convert_tokens_to_ids(lang_code ) for lang_code in FAIRSEQ_LANGUAGE_CODES
        }
        self._src_lang = src_lang if src_lang is not None else 'en_XX'
        self.cur_lang_code = self.convert_tokens_to_ids(self._src_lang )
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang )
@property
    def src_lang( self ) -> str:
        '''simple docstring'''
        return self._src_lang
    @src_lang.setter
    def src_lang( self ,new_src_lang ) -> None:
        '''simple docstring'''
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang )
def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase = None ) -> List[int]:
'''simple docstring'''
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase = None ) -> List[int]:
'''simple docstring'''
lowercase_ : List[Any] = [self.sep_token_id]
lowercase_ : str = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
    def _build_translation_inputs( self ,raw_inputs ,return_tensors ,src_lang ,tgt_lang ,**extra_kwargs ) -> Union[str, Any]:
        '''simple docstring'''
        if src_lang is None or tgt_lang is None:
            raise ValueError('Translation requires a `src_lang` and a `tgt_lang` for this model' )
        self.src_lang = src_lang
        inputs = self(raw_inputs ,add_special_tokens=True ,return_tensors=return_tensors ,**extra_kwargs )
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang )
        inputs['forced_bos_token_id'] = tgt_lang_id
        return inputs
def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase = "en_XX" ,__UpperCamelCase = None ,__UpperCamelCase = "ro_RO" ,**__UpperCamelCase ,) -> BatchEncoding:
'''simple docstring'''
lowercase_ : Union[str, Any] = src_lang
lowercase_ : List[Any] = tgt_lang
return super().prepare_seqaseq_batch(__UpperCamelCase ,__UpperCamelCase ,**__UpperCamelCase )
    def _switch_to_input_mode( self ) -> Union[str, Any]:
        '''simple docstring'''
        return self.set_src_lang_special_tokens(self.src_lang )
    def _switch_to_target_mode( self ) -> int:
        '''simple docstring'''
        return self.set_tgt_lang_special_tokens(self.tgt_lang )
    def set_src_lang_special_tokens( self ,src_lang ) -> None:
        '''simple docstring'''
        self.cur_lang_code = self.convert_tokens_to_ids(src_lang )
        self.prefix_tokens = []
        self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens )
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens )
        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ['$A'] + suffix_tokens_str ,pair=prefix_tokens_str + ['$A', '$B'] + suffix_tokens_str ,special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str ,self.prefix_tokens + self.suffix_tokens ) ) ,)
    def set_tgt_lang_special_tokens( self ,lang ) -> None:
        '''simple docstring'''
        self.cur_lang_code = self.convert_tokens_to_ids(lang )
        self.prefix_tokens = []
        self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens )
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens )
        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ['$A'] + suffix_tokens_str ,pair=prefix_tokens_str + ['$A', '$B'] + suffix_tokens_str ,special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str ,self.prefix_tokens + self.suffix_tokens ) ) ,)
    def save_vocabulary( self ,save_directory ,filename_prefix: Optional[str] = None ) -> Tuple[str]:
        '''simple docstring'''
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
                'tokenizer.' )
        if not os.path.isdir(save_directory ):
            logger.error(f'''Vocabulary path ({save_directory}) should be a directory.''' )
            return
        out_vocab_file = os.path.join(
            save_directory ,(filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ):
            copyfile(self.vocab_file ,out_vocab_file )
return (out_vocab_file,)
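# A hedged round-trip sketch for the tokenizer above. The checkpoint name comes from
# the vocab map at the top of this file, but loading it needs network access, so this
# is illustrative rather than a test.
def mbart_translation_demo():
    tok = MBartTokenizerFast.from_pretrained('facebook/mbart-large-en-ro' , src_lang='en_XX' , tgt_lang='ro_RO' )
    inputs = tok._build_translation_inputs('A test' , return_tensors='pt' , src_lang='en_XX' , tgt_lang='ro_RO' )
    # source ids end with </s> plus the en_XX code; forced_bos_token_id selects ro_RO
    return inputs['input_ids'], inputs['forced_bos_token_id']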
| 213 | 1 |
'''simple docstring'''
lowercase__ = "0.21.0"
from .accelerator import Accelerator
from .big_modeling import (
cpu_offload,
cpu_offload_with_hook,
disk_offload,
dispatch_model,
init_empty_weights,
init_on_device,
load_checkpoint_and_dispatch,
)
from .data_loader import skip_first_batches
from .launchers import debug_launcher, notebook_launcher
from .state import PartialState
from .utils import (
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
FullyShardedDataParallelPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
find_executable_batch_size,
infer_auto_device_map,
is_rich_available,
load_checkpoint_in_model,
synchronize_rng_states,
)
if is_rich_available():
from .utils import rich
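# A minimal sketch of the package's central object re-exported above: wrap the usual
# training objects with Accelerator and route the backward pass through it. The
# model/optimizer/dataloader/loss_fn arguments are placeholders.
def training_loop_sketch(model , optimizer , dataloader , loss_fn):
    accelerator = Accelerator()
    model , optimizer , dataloader = accelerator.prepare(model , optimizer , dataloader)
    for batch , targets in dataloader:
        optimizer.zero_grad()
        loss = loss_fn(model(batch) , targets)
        accelerator.backward(loss)
        optimizer.step()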
| 356 |
'''simple docstring'''
import pytest
from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.text import TextDatasetReader
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def _check_text_dataset( dataset , expected_features ):
assert isinstance(UpperCAmelCase_ , UpperCAmelCase_ )
assert dataset.num_rows == 4
assert dataset.num_columns == 1
assert dataset.column_names == ["text"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('keep_in_memory' , [False, True] )
def UpperCamelCase( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ):
UpperCAmelCase : int = tmp_path / 'cache'
UpperCAmelCase : List[str] = {'text': 'string'}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
UpperCAmelCase : Tuple = TextDatasetReader(UpperCAmelCase_ , cache_dir=UpperCAmelCase_ , keep_in_memory=UpperCAmelCase_ ).read()
_check_text_dataset(UpperCAmelCase_ , UpperCAmelCase_ )
@pytest.mark.parametrize(
'features' , [
None,
{'text': 'string'},
{'text': 'int32'},
{'text': 'float32'},
] , )
def UpperCamelCase( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ):
UpperCAmelCase : Optional[int] = tmp_path / 'cache'
UpperCAmelCase : List[str] = {'text': 'string'}
UpperCAmelCase : Optional[int] = features.copy() if features else default_expected_features
UpperCAmelCase : int = (
Features({feature: Value(UpperCAmelCase_ ) for feature, dtype in features.items()} ) if features is not None else None
)
UpperCAmelCase : Union[str, Any] = TextDatasetReader(UpperCAmelCase_ , features=UpperCAmelCase_ , cache_dir=UpperCAmelCase_ ).read()
_check_text_dataset(UpperCAmelCase_ , UpperCAmelCase_ )
@pytest.mark.parametrize('split' , [None, NamedSplit('train' ), 'train', 'test'] )
def UpperCamelCase( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ):
UpperCAmelCase : Optional[int] = tmp_path / 'cache'
UpperCAmelCase : Tuple = {'text': 'string'}
UpperCAmelCase : List[str] = TextDatasetReader(UpperCAmelCase_ , cache_dir=UpperCAmelCase_ , split=UpperCAmelCase_ ).read()
_check_text_dataset(UpperCAmelCase_ , UpperCAmelCase_ )
assert dataset.split == split if split else "train"
@pytest.mark.parametrize('path_type' , [str, list] )
def UpperCamelCase( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ):
if issubclass(UpperCAmelCase_ , UpperCAmelCase_ ):
UpperCAmelCase : Tuple = text_path
elif issubclass(UpperCAmelCase_ , UpperCAmelCase_ ):
UpperCAmelCase : Optional[Any] = [text_path]
UpperCAmelCase : List[Any] = tmp_path / 'cache'
UpperCAmelCase : Union[str, Any] = {'text': 'string'}
UpperCAmelCase : List[Any] = TextDatasetReader(UpperCAmelCase_ , cache_dir=UpperCAmelCase_ ).read()
_check_text_dataset(UpperCAmelCase_ , UpperCAmelCase_ )
def UpperCamelCase( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_=("train",) ):
assert isinstance(UpperCAmelCase_ , UpperCAmelCase_ )
for split in splits:
UpperCAmelCase : Union[str, Any] = dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 1
assert dataset.column_names == ["text"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('keep_in_memory' , [False, True] )
def UpperCamelCase( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ):
UpperCAmelCase : Any = tmp_path / 'cache'
UpperCAmelCase : List[str] = {'text': 'string'}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
UpperCAmelCase : int = TextDatasetReader({'train': text_path} , cache_dir=UpperCAmelCase_ , keep_in_memory=UpperCAmelCase_ ).read()
_check_text_datasetdict(UpperCAmelCase_ , UpperCAmelCase_ )
@pytest.mark.parametrize(
'features' , [
None,
{'text': 'string'},
{'text': 'int32'},
{'text': 'float32'},
] , )
def UpperCamelCase( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ):
UpperCAmelCase : Union[str, Any] = tmp_path / 'cache'
# CSV file loses col_1 string dtype information: default now is "int64" instead of "string"
UpperCAmelCase : Tuple = {'text': 'string'}
UpperCAmelCase : Union[str, Any] = features.copy() if features else default_expected_features
UpperCAmelCase : int = (
Features({feature: Value(UpperCAmelCase_ ) for feature, dtype in features.items()} ) if features is not None else None
)
UpperCAmelCase : List[Any] = TextDatasetReader({'train': text_path} , features=UpperCAmelCase_ , cache_dir=UpperCAmelCase_ ).read()
_check_text_datasetdict(UpperCAmelCase_ , UpperCAmelCase_ )
@pytest.mark.parametrize('split' , [None, NamedSplit('train' ), 'train', 'test'] )
def UpperCamelCase( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ):
if split:
UpperCAmelCase : int = {split: text_path}
else:
UpperCAmelCase : int = 'train'
UpperCAmelCase : Any = {'train': text_path, 'test': text_path}
UpperCAmelCase : Dict = tmp_path / 'cache'
UpperCAmelCase : Any = {'text': 'string'}
UpperCAmelCase : List[str] = TextDatasetReader(UpperCAmelCase_ , cache_dir=UpperCAmelCase_ ).read()
_check_text_datasetdict(UpperCAmelCase_ , UpperCAmelCase_ , splits=list(path.keys() ) )
assert all(dataset[split].split == split for split in path.keys() )
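# The reader under test, used directly: a hedged one-function sketch (paths are
# illustrative; a text dataset exposes a single "text" column, one row per line).
def read_text_dataset(text_path , cache_dir):
    dataset = TextDatasetReader(text_path , cache_dir=cache_dir ).read()
    assert dataset.column_names == ['text']
    return dataset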
| 280 | 0 |
'''simple docstring'''
# This is the module that test_patching.py uses to test patch_submodule()
import os # noqa: this is just for tests
import os as renamed_os # noqa: this is just for tests
from os import path # noqa: this is just for tests
from os import path as renamed_path # noqa: this is just for tests
from os.path import join # noqa: this is just for tests
from os.path import join as renamed_join # noqa: this is just for tests
a : Optional[Any] = open # noqa: we just need to have a builtin inside this module to test it properly
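# What test_patching.py does with this module, sketched: temporarily swap one of the
# attributes above for a mock and restore it on exit. `patch_submodule` is assumed to
# live in datasets.utils.patching, as in the datasets test-suite this file serves.
def patching_demo():
    import sys
    from datasets.utils.patching import patch_submodule
    module = sys.modules[__name__]
    mock = lambda *args: '/mocked/path'
    with patch_submodule(module , 'os.path.join' , mock ):
        assert module.os.path.join('a' , 'b' ) == '/mocked/path'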
| 56 |
'''simple docstring'''
import unittest
from transformers import MPNetConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MPNetForMaskedLM,
MPNetForMultipleChoice,
MPNetForQuestionAnswering,
MPNetForSequenceClassification,
MPNetForTokenClassification,
MPNetModel,
)
class MPNetModelTester :
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_input_mask=True , use_token_type_ids=False , use_labels=True , vocab_size=99 , hidden_size=64 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=64 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , num_labels=3 , num_choices=4 , scope=None , ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
def A_ ( self : List[str] ):
return MPNetConfig.from_pretrained('''microsoft/mpnet-base''' )
    def prepare_config_and_inputs( self : str ):
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            choice_labels = ids_tensor([self.batch_size] , self.num_choices )
        config = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config( self : Tuple ):
return MPNetConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , )
def A_ ( self : Any , lowercase_ : Dict , lowercase_ : str , lowercase_ : Optional[Any] , lowercase_ : Union[str, Any] , lowercase_ : Dict , lowercase_ : Optional[int] ):
snake_case_ = MPNetModel(config=lowercase_ )
model.to(lowercase_ )
model.eval()
snake_case_ = model(lowercase_ , lowercase_ )
snake_case_ = model(lowercase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def A_ ( self : str , lowercase_ : Optional[Any] , lowercase_ : Any , lowercase_ : str , lowercase_ : Tuple , lowercase_ : Tuple , lowercase_ : Optional[int] ):
snake_case_ = MPNetForQuestionAnswering(config=lowercase_ )
model.to(lowercase_ )
model.eval()
snake_case_ = model(
lowercase_ , attention_mask=lowercase_ , start_positions=lowercase_ , end_positions=lowercase_ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def A_ ( self : Tuple , lowercase_ : Tuple , lowercase_ : List[str] , lowercase_ : str , lowercase_ : Optional[int] , lowercase_ : Optional[Any] , lowercase_ : Any ):
snake_case_ = self.num_labels
snake_case_ = MPNetForSequenceClassification(lowercase_ )
model.to(lowercase_ )
model.eval()
snake_case_ = model(lowercase_ , attention_mask=lowercase_ , labels=lowercase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def A_ ( self : Any , lowercase_ : Any , lowercase_ : str , lowercase_ : Tuple , lowercase_ : List[str] , lowercase_ : Union[str, Any] , lowercase_ : Dict ):
snake_case_ = self.num_choices
snake_case_ = MPNetForMultipleChoice(config=lowercase_ )
model.to(lowercase_ )
model.eval()
snake_case_ = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
snake_case_ = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
snake_case_ = model(
lowercase_ , attention_mask=lowercase_ , labels=lowercase_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def A_ ( self : Union[str, Any] , lowercase_ : List[Any] , lowercase_ : Optional[Any] , lowercase_ : Optional[int] , lowercase_ : str , lowercase_ : int , lowercase_ : List[str] ):
snake_case_ = self.num_labels
snake_case_ = MPNetForTokenClassification(config=lowercase_ )
model.to(lowercase_ )
model.eval()
snake_case_ = model(lowercase_ , attention_mask=lowercase_ , labels=lowercase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
    def prepare_config_and_inputs_for_common( self : Union[str, Any] ):
        config_and_inputs = self.prepare_config_and_inputs()
        (config , input_ids , input_mask , sequence_labels , token_labels , choice_labels) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class MPNetModelTest( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (
(
MPNetForMaskedLM,
MPNetForMultipleChoice,
MPNetForQuestionAnswering,
MPNetForSequenceClassification,
MPNetForTokenClassification,
MPNetModel,
)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{
"feature-extraction": MPNetModel,
"fill-mask": MPNetForMaskedLM,
"question-answering": MPNetForQuestionAnswering,
"text-classification": MPNetForSequenceClassification,
"token-classification": MPNetForTokenClassification,
"zero-shot": MPNetForSequenceClassification,
}
if is_torch_available()
else {}
)
snake_case_ = False
snake_case_ = True
    def setUp( self : Tuple ):
        self.model_tester = MPNetModelTester(self )
        self.config_tester = ConfigTester(self , config_class=MPNetConfig , hidden_size=37 )
def A_ ( self : Union[str, Any] ):
self.config_tester.run_common_tests()
def A_ ( self : Tuple ):
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mpnet_model(*lowercase_ )
def A_ ( self : List[Any] ):
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mpnet_for_sequence_classification(*lowercase_ )
def A_ ( self : List[Any] ):
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mpnet_for_multiple_choice(*lowercase_ )
def A_ ( self : Union[str, Any] ):
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mpnet_for_token_classification(*lowercase_ )
def A_ ( self : Tuple ):
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mpnet_for_question_answering(*lowercase_ )
@require_torch
class MPNetModelIntegrationTest( unittest.TestCase ):
@slow
def A_ ( self : List[Any] ):
snake_case_ = MPNetModel.from_pretrained('''microsoft/mpnet-base''' )
snake_case_ = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] )
snake_case_ = model(lowercase_ )[0]
snake_case_ = torch.Size((1, 11, 768) )
self.assertEqual(output.shape , lowercase_ )
snake_case_ = torch.tensor(
[[[-0.0550, 0.1943, -0.0740], [-0.0562, 0.2211, -0.0579], [-0.0437, 0.3337, -0.0641]]] )
# compare the actual values for a slice.
self.assertTrue(torch.allclose(output[:, :3, :3] , lowercase_ , atol=1e-4 ) )
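# A tiny, hedged forward-pass sketch that mirrors the integration test above but
# uses the tester's small config instead of the released checkpoint, so it runs
# offline. Shapes follow the tester defaults; purely illustrative.
def mpnet_tiny_forward():
    config = MPNetConfig(vocab_size=99 , hidden_size=64 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=64 )
    model = MPNetModel(config )
    model.eval()
    output = model(torch.tensor([[0, 5, 7, 2]] ) )[0]
    return output.shape  # (1, 4, 64)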
| 56 | 1 |
"""simple docstring"""
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase_ = logging.get_logger(__name__)
UpperCamelCase_ = {
'microsoft/git-base': 'https://huggingface.co/microsoft/git-base/resolve/main/config.json',
}
class GitVisionConfig( PretrainedConfig ):
    model_type = "git_vision_model"
    def __init__( self , hidden_size=7_68 , intermediate_size=30_72 , num_hidden_layers=12 , num_attention_heads=12 , num_channels=3 , image_size=2_24 , patch_size=16 , hidden_act="quick_gelu" , layer_norm_eps=1E-5 , attention_dropout=0.0 , initializer_range=0.02 , **kwargs , ) ->int:
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act
@classmethod
def UpperCAmelCase__ ( cls , __UpperCAmelCase , **__UpperCAmelCase) ->"PretrainedConfig":
cls._set_token_in_kwargs(__UpperCAmelCase)
a_ , a_ = cls.get_config_dict(__UpperCAmelCase , **__UpperCAmelCase)
# get the vision config dict if we are loading from GITConfig
if config_dict.get("model_type") == "git":
a_ = config_dict["vision_config"]
if "model_type" in config_dict and hasattr(cls , "model_type") and config_dict["model_type"] != cls.model_type:
logger.warning(
F'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type '''
F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''')
return cls.from_dict(__UpperCAmelCase , **__UpperCAmelCase)
class snake_case ( SCREAMING_SNAKE_CASE_ ):
a_ : int = """git"""
def __init__( self , __UpperCAmelCase=None , __UpperCAmelCase=3_05_22 , __UpperCAmelCase=7_68 , __UpperCAmelCase=6 , __UpperCAmelCase=12 , __UpperCAmelCase=30_72 , __UpperCAmelCase="gelu" , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=10_24 , __UpperCAmelCase=0.02 , __UpperCAmelCase=1E-12 , __UpperCAmelCase=0 , __UpperCAmelCase="absolute" , __UpperCAmelCase=True , __UpperCAmelCase=False , __UpperCAmelCase=1_01 , __UpperCAmelCase=1_02 , __UpperCAmelCase=None , **__UpperCAmelCase , ) ->Optional[int]:
super().__init__(bos_token_id=__UpperCAmelCase , eos_token_id=__UpperCAmelCase , pad_token_id=__UpperCAmelCase , **__UpperCAmelCase)
if vision_config is None:
a_ = {}
logger.info("vision_config is None. initializing the GitVisionConfig with default values.")
a_ = GitVisionConfig(**__UpperCAmelCase)
a_ = vocab_size
a_ = hidden_size
a_ = num_hidden_layers
a_ = num_attention_heads
a_ = hidden_act
a_ = intermediate_size
a_ = hidden_dropout_prob
a_ = attention_probs_dropout_prob
a_ = max_position_embeddings
a_ = initializer_range
a_ = layer_norm_eps
a_ = position_embedding_type
a_ = use_cache
a_ = tie_word_embeddings
a_ = num_image_with_embedding
a_ = bos_token_id
a_ = eos_token_id
def UpperCAmelCase__ ( self) ->Tuple:
a_ = copy.deepcopy(self.__dict__)
a_ = self.vision_config.to_dict()
a_ = self.__class__.model_type
        return output
| 358 |
"""simple docstring"""
import unittest
from transformers import BertGenerationConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import BertGenerationDecoder, BertGenerationEncoder
class snake_case :
def __init__( self , __UpperCAmelCase , __UpperCAmelCase=13 , __UpperCAmelCase=7 , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=99 , __UpperCAmelCase=32 , __UpperCAmelCase=5 , __UpperCAmelCase=4 , __UpperCAmelCase=37 , __UpperCAmelCase="gelu" , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=50 , __UpperCAmelCase=0.02 , __UpperCAmelCase=True , __UpperCAmelCase=None , ) ->Dict:
a_ = parent
a_ = batch_size
a_ = seq_length
a_ = is_training
a_ = use_input_mask
a_ = vocab_size
a_ = hidden_size
a_ = num_hidden_layers
a_ = num_attention_heads
a_ = intermediate_size
a_ = hidden_act
a_ = hidden_dropout_prob
a_ = attention_probs_dropout_prob
a_ = max_position_embeddings
a_ = initializer_range
a_ = use_labels
a_ = scope
def UpperCAmelCase__ ( self) ->Any:
a_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
a_ = None
if self.use_input_mask:
a_ = random_attention_mask([self.batch_size, self.seq_length])
if self.use_labels:
a_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
a_ = self.get_config()
return config, input_ids, input_mask, token_labels
def UpperCAmelCase__ ( self) ->Optional[Any]:
return BertGenerationConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , is_decoder=__UpperCAmelCase , initializer_range=self.initializer_range , )
def UpperCAmelCase__ ( self) ->List[str]:
        a_ , a_ , a_ , a_ = self.prepare_config_and_inputs()
a_ = True
a_ = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
a_ = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2)
return (
config,
input_ids,
input_mask,
token_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , **__UpperCAmelCase , ) ->str:
a_ = BertGenerationEncoder(config=__UpperCAmelCase)
model.to(__UpperCAmelCase)
model.eval()
a_ = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase)
a_ = model(__UpperCAmelCase)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , **__UpperCAmelCase , ) ->Union[str, Any]:
a_ = True
a_ = BertGenerationEncoder(config=__UpperCAmelCase)
model.to(__UpperCAmelCase)
model.eval()
a_ = model(
__UpperCAmelCase , attention_mask=__UpperCAmelCase , encoder_hidden_states=__UpperCAmelCase , encoder_attention_mask=__UpperCAmelCase , )
a_ = model(
__UpperCAmelCase , attention_mask=__UpperCAmelCase , encoder_hidden_states=__UpperCAmelCase , )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , **__UpperCAmelCase , ) ->List[str]:
a_ = True
a_ = True
a_ = BertGenerationDecoder(config=__UpperCAmelCase).to(__UpperCAmelCase).eval()
# first forward pass
a_ = model(
__UpperCAmelCase , attention_mask=__UpperCAmelCase , encoder_hidden_states=__UpperCAmelCase , encoder_attention_mask=__UpperCAmelCase , use_cache=__UpperCAmelCase , )
a_ = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
a_ = ids_tensor((self.batch_size, 3) , config.vocab_size)
a_ = ids_tensor((self.batch_size, 3) , vocab_size=2)
# append to next input_ids and
a_ = torch.cat([input_ids, next_tokens] , dim=-1)
a_ = torch.cat([input_mask, next_mask] , dim=-1)
a_ = model(
__UpperCAmelCase , attention_mask=__UpperCAmelCase , encoder_hidden_states=__UpperCAmelCase , encoder_attention_mask=__UpperCAmelCase , output_hidden_states=__UpperCAmelCase , )["hidden_states"][0]
a_ = model(
__UpperCAmelCase , attention_mask=__UpperCAmelCase , encoder_hidden_states=__UpperCAmelCase , encoder_attention_mask=__UpperCAmelCase , past_key_values=__UpperCAmelCase , output_hidden_states=__UpperCAmelCase , )["hidden_states"][0]
# select random slice
a_ = ids_tensor((1,) , output_from_past.shape[-1]).item()
a_ = output_from_no_past[:, -3:, random_slice_idx].detach()
a_ = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(__UpperCAmelCase , __UpperCAmelCase , atol=1E-3))
def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , *__UpperCAmelCase , ) ->Tuple:
a_ = BertGenerationDecoder(__UpperCAmelCase)
model.to(__UpperCAmelCase)
model.eval()
a_ = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , labels=__UpperCAmelCase)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
def UpperCAmelCase__ ( self) ->str:
a_ , a_ , a_ , a_ = self.prepare_config_and_inputs()
a_ = {"input_ids": input_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class snake_case ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
a_ : List[str] = (BertGenerationEncoder, BertGenerationDecoder) if is_torch_available() else ()
a_ : Optional[int] = (BertGenerationDecoder,) if is_torch_available() else ()
a_ : List[Any] = (
{"""feature-extraction""": BertGenerationEncoder, """text-generation""": BertGenerationDecoder}
if is_torch_available()
else {}
)
def UpperCAmelCase__ ( self) ->List[Any]:
a_ = BertGenerationEncoderTester(self)
a_ = ConfigTester(self , config_class=__UpperCAmelCase , hidden_size=37)
def UpperCAmelCase__ ( self) ->Optional[Any]:
self.config_tester.run_common_tests()
def UpperCAmelCase__ ( self) ->Tuple:
a_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__UpperCAmelCase)
def UpperCAmelCase__ ( self) ->Tuple:
a_ , a_ , a_ , a_ = self.model_tester.prepare_config_and_inputs()
a_ = "bert"
self.model_tester.create_and_check_model(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase)
def UpperCAmelCase__ ( self) ->int:
a_ = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(*__UpperCAmelCase)
def UpperCAmelCase__ ( self) ->List[str]:
a_ = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_decoder_model_past_large_inputs(*__UpperCAmelCase)
def UpperCAmelCase__ ( self) ->Optional[int]:
# This regression test was failing with PyTorch < 1.3
        a_ , a_ , a_ , a_ , a_ , a_ = self.model_tester.prepare_config_and_inputs_for_decoder()
a_ = None
self.model_tester.create_and_check_model_as_decoder(
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , )
def UpperCAmelCase__ ( self) ->List[Any]:
a_ = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_for_causal_lm(*__UpperCAmelCase)
@slow
def UpperCAmelCase__ ( self) ->str:
a_ = BertGenerationEncoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")
self.assertIsNotNone(__UpperCAmelCase)
@require_torch
class snake_case ( unittest.TestCase ):
@slow
def UpperCAmelCase__ ( self) ->int:
a_ = BertGenerationEncoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")
a_ = torch.tensor([[1_01, 75_92, 10_10, 20_26, 38_99, 20_03, 1_01_40, 1_02]])
with torch.no_grad():
a_ = model(__UpperCAmelCase)[0]
a_ = torch.Size([1, 8, 10_24])
self.assertEqual(output.shape , __UpperCAmelCase)
a_ = torch.tensor(
[[[0.1_775, 0.0_083, -0.0_321], [1.6_002, 0.1_287, 0.3_912], [2.1_473, 0.5_791, 0.6_066]]])
self.assertTrue(torch.allclose(output[:, :3, :3] , __UpperCAmelCase , atol=1E-4))
@require_torch
class snake_case ( unittest.TestCase ):
@slow
def UpperCAmelCase__ ( self) ->List[str]:
a_ = BertGenerationDecoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")
a_ = torch.tensor([[1_01, 75_92, 10_10, 20_26, 38_99, 20_03, 1_01_40, 1_02]])
with torch.no_grad():
a_ = model(__UpperCAmelCase)[0]
a_ = torch.Size([1, 8, 5_03_58])
self.assertEqual(output.shape , __UpperCAmelCase)
a_ = torch.tensor(
[[[-0.5_788, -2.5_994, -3.7_054], [0.0_438, 4.7_997, 1.8_795], [1.5_862, 6.6_409, 4.4_638]]])
        self.assertTrue(torch.allclose(output[:, :3, :3] , __UpperCAmelCase , atol=1E-4))
| 303 | 0 |
from math import ceil
from typing import List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import BatchFeature, SequenceFeatureExtractor
from ...utils import TensorType, logging
a =logging.get_logger(__name__)
class A_ ( SCREAMING_SNAKE_CASE ):
_UpperCAmelCase : int = ['''audio_values''', '''audio_mask''']
def __init__( self : Any ,SCREAMING_SNAKE_CASE__ : str=2_0_4_8 ,SCREAMING_SNAKE_CASE__ : Dict=1 ,SCREAMING_SNAKE_CASE__ : Union[str, Any]=[1_6, 1_6] ,SCREAMING_SNAKE_CASE__ : Dict=1_2_8 ,SCREAMING_SNAKE_CASE__ : str=4_4_1_0_0 ,SCREAMING_SNAKE_CASE__ : Optional[Any]=8_6 ,SCREAMING_SNAKE_CASE__ : Tuple=2_0_4_8 ,SCREAMING_SNAKE_CASE__ : Optional[Any]=0.0 ,**SCREAMING_SNAKE_CASE__ : Any ,):
super().__init__(
feature_size=SCREAMING_SNAKE_CASE__ ,sampling_rate=SCREAMING_SNAKE_CASE__ ,padding_value=SCREAMING_SNAKE_CASE__ ,**SCREAMING_SNAKE_CASE__ ,)
__lowerCamelCase : int = spectrogram_length
__lowerCamelCase : Tuple = num_channels
__lowerCamelCase : int = patch_size
__lowerCamelCase : Any = feature_size // self.patch_size[1]
__lowerCamelCase : Tuple = n_fft
__lowerCamelCase : List[str] = sampling_rate // hop_length_to_sampling_rate
__lowerCamelCase : Tuple = sampling_rate
__lowerCamelCase : str = padding_value
__lowerCamelCase : Tuple = mel_filter_bank(
num_frequency_bins=1 + n_fft // 2 ,num_mel_filters=SCREAMING_SNAKE_CASE__ ,min_frequency=0.0 ,max_frequency=22050.0 ,sampling_rate=SCREAMING_SNAKE_CASE__ ,norm='slaney' ,mel_scale='slaney' ,).T
def lowerCAmelCase ( self : List[Any] ,SCREAMING_SNAKE_CASE__ : np.array):
__lowerCamelCase : Dict = spectrogram(
SCREAMING_SNAKE_CASE__ ,window_function(self.n_fft ,'hann') ,frame_length=self.n_fft ,hop_length=self.hop_length ,power=2.0 ,mel_filters=self.mel_filters.T ,log_mel='dB' ,db_range=80.0 ,)
__lowerCamelCase : Optional[Any] = log_spec[:, :-1]
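        # The remaining lines shift the log-mel values by -20 dB, scale by 1/40, clip to [-2, 0] and
        # add 1: arithmetically, -60 dB maps to -1.0 and +20 dB to 1.0, with values outside clipped.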
__lowerCamelCase : List[Any] = log_spec - 20.0
__lowerCamelCase : Tuple = np.clip(log_spec / 40.0 ,-2.0 ,0.0) + 1.0
return log_spec
def __call__( self : List[str] ,SCREAMING_SNAKE_CASE__ : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] ,SCREAMING_SNAKE_CASE__ : Optional[Union[str, TensorType]] = None ,SCREAMING_SNAKE_CASE__ : Optional[bool] = True ,SCREAMING_SNAKE_CASE__ : Optional[int] = None ,SCREAMING_SNAKE_CASE__ : bool = False ,SCREAMING_SNAKE_CASE__ : bool = False ,**SCREAMING_SNAKE_CASE__ : int ,):
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
'This feature extractor is set to support sampling rate'
F" of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled"
F" with {self.sampling_rate} and not {sampling_rate}.")
else:
logger.warning(
'It is strongly recommended to pass the `sampling_rate` argument to this function. '
'Failing to do so can result in silent errors that might be hard to debug.')
__lowerCamelCase : int = isinstance(SCREAMING_SNAKE_CASE__ ,np.ndarray) and len(raw_speech.shape) > 1
if is_batched_numpy and len(raw_speech.shape) > 2:
raise ValueError(F"Only mono-channel audio is supported for input to {self}")
__lowerCamelCase : Tuple = is_batched_numpy or (
isinstance(SCREAMING_SNAKE_CASE__ ,(list, tuple)) and (isinstance(raw_speech[0] ,(np.ndarray, tuple, list)))
)
if is_batched:
__lowerCamelCase : int = [np.asarray([speech] ,dtype=np.floataa).T for speech in raw_speech]
elif not is_batched and not isinstance(SCREAMING_SNAKE_CASE__ ,np.ndarray):
__lowerCamelCase : Optional[Any] = np.asarray(SCREAMING_SNAKE_CASE__ ,dtype=np.floataa)
elif isinstance(SCREAMING_SNAKE_CASE__ ,np.ndarray) and raw_speech.dtype is np.dtype(np.floataa):
__lowerCamelCase : Optional[int] = raw_speech.astype(np.floataa)
# always return batch
if not is_batched:
__lowerCamelCase : int = [np.asarray([raw_speech]).T]
# Convert audio signals to log mel spectrograms, truncate by time axis
__lowerCamelCase : int = [
self._np_extract_fbank_features(waveform.squeeze()).T[: self.spectrogram_length] for waveform in raw_speech
]
if isinstance(audio_features[0] ,SCREAMING_SNAKE_CASE__):
__lowerCamelCase : Any = [np.asarray(SCREAMING_SNAKE_CASE__ ,dtype=np.floataa) for feature in audio_features]
# Create audio attention mask
__lowerCamelCase : List[Any] = max(
[ceil(feature.shape[0] / self.patch_size[0]) * self.freq_len for feature in audio_features]) # The maximum number of audio patches in a batch
if return_attention_mask:
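            # Build one mask row per clip: 1 marks a real audio patch, 0 marks padding up to max_patch_len.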
__lowerCamelCase : Dict = [
(ceil(feature.shape[0] / self.patch_size[0]) * self.freq_len) * [1]
+ (max_patch_len - ceil(feature.shape[0] / self.patch_size[0]) * self.freq_len) * [0]
for feature in audio_features
]
__lowerCamelCase : int = np.array(SCREAMING_SNAKE_CASE__).astype(np.floataa)
# convert into correct format for padding
__lowerCamelCase : Any = max_patch_len // self.freq_len * self.patch_size[0] # The maximum audio size in a batch
__lowerCamelCase : List[str] = np.ones([len(SCREAMING_SNAKE_CASE__), 1, max_time_len, self.feature_size]).astype(np.floataa)
__lowerCamelCase : Any = padded_audio_features * self.padding_value
for i in range(len(SCREAMING_SNAKE_CASE__)):
__lowerCamelCase : int = audio_features[i]
__lowerCamelCase : Union[str, Any] = feature
# return as BatchFeature
if return_attention_mask:
__lowerCamelCase : int = {'audio_values': padded_audio_features, 'audio_mask': audio_mask}
else:
__lowerCamelCase : List[str] = {'audio_values': padded_audio_features}
__lowerCamelCase : List[str] = BatchFeature(data=SCREAMING_SNAKE_CASE__ ,tensor_type=SCREAMING_SNAKE_CASE__)
return encoded_inputs
| 73 |
from unittest.mock import patch
import pyspark
from datasets.packaged_modules.spark.spark import (
Spark,
SparkExamplesIterable,
_generate_iterable_examples,
)
from ..utils import (
require_dill_gt_0_3_2,
require_not_windows,
)
def _lowerCAmelCase (_lowerCAmelCase , _lowerCAmelCase):
UpperCamelCase_ = []
for part_id in partition_order:
UpperCamelCase_ = df.where(f"""SPARK_PARTITION_ID() = {part_id}""").collect()
for row_idx, row in enumerate(_lowerCAmelCase):
expected_row_ids_and_row_dicts.append((f"""{part_id}_{row_idx}""", row.asDict()))
return expected_row_ids_and_row_dicts
@require_not_windows
@require_dill_gt_0_3_2
def _lowerCAmelCase ():
UpperCamelCase_ = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
UpperCamelCase_ = spark.range(1_00).repartition(1)
UpperCamelCase_ = Spark(_lowerCAmelCase)
# The id ints will be converted to Pyarrow int64s, so each row will be 8 bytes. Setting a max_shard_size of 16 means
# that each partition can hold 2 rows.
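    # Worked arithmetic behind the assert below: 100 rows * 8 bytes = 800 bytes in total,
    # and 800 bytes / 16 bytes per shard = 50 partitions of 2 rows each.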
spark_builder._repartition_df_if_needed(max_shard_size=16)
# Given that the dataframe has 100 rows and each partition has 2 rows, we expect 50 partitions.
assert spark_builder.df.rdd.getNumPartitions() == 50
@require_not_windows
@require_dill_gt_0_3_2
def _lowerCAmelCase ():
UpperCamelCase_ = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
UpperCamelCase_ = spark.range(10).repartition(2)
UpperCamelCase_ = [1, 0]
UpperCamelCase_ = _generate_iterable_examples(_lowerCAmelCase , _lowerCAmelCase) # Reverse the partitions.
UpperCamelCase_ = _get_expected_row_ids_and_row_dicts_for_partition_order(_lowerCAmelCase , _lowerCAmelCase)
for i, (row_id, row_dict) in enumerate(generate_fn()):
UpperCamelCase_ , UpperCamelCase_ = expected_row_ids_and_row_dicts[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def _lowerCAmelCase ():
UpperCamelCase_ = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
UpperCamelCase_ = spark.range(10).repartition(1)
UpperCamelCase_ = SparkExamplesIterable(_lowerCAmelCase)
assert it.n_shards == 1
for i, (row_id, row_dict) in enumerate(_lowerCAmelCase):
assert row_id == f"""0_{i}"""
assert row_dict == {"id": i}
@require_not_windows
@require_dill_gt_0_3_2
def _lowerCAmelCase ():
UpperCamelCase_ = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
UpperCamelCase_ = spark.range(30).repartition(3)
# Mock the generator so that shuffle reverses the partition indices.
with patch("numpy.random.Generator") as generator_mock:
        UpperCamelCase_ = lambda _lowerCAmelCase: _lowerCAmelCase.reverse()
UpperCamelCase_ = _get_expected_row_ids_and_row_dicts_for_partition_order(_lowerCAmelCase , [2, 1, 0])
UpperCamelCase_ = SparkExamplesIterable(_lowerCAmelCase).shuffle_data_sources(_lowerCAmelCase)
assert shuffled_it.n_shards == 3
for i, (row_id, row_dict) in enumerate(_lowerCAmelCase):
UpperCamelCase_ , UpperCamelCase_ = expected_row_ids_and_row_dicts[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def _lowerCAmelCase ():
UpperCamelCase_ = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
UpperCamelCase_ = spark.range(20).repartition(4)
# Partitions 0 and 2
UpperCamelCase_ = SparkExamplesIterable(_lowerCAmelCase).shard_data_sources(worker_id=0 , num_workers=2)
assert shard_it_a.n_shards == 2
UpperCamelCase_ = _get_expected_row_ids_and_row_dicts_for_partition_order(_lowerCAmelCase , [0, 2])
for i, (row_id, row_dict) in enumerate(_lowerCAmelCase):
UpperCamelCase_ , UpperCamelCase_ = expected_row_ids_and_row_dicts_a[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
# Partitions 1 and 3
UpperCamelCase_ = SparkExamplesIterable(_lowerCAmelCase).shard_data_sources(worker_id=1 , num_workers=2)
assert shard_it_a.n_shards == 2
UpperCamelCase_ = _get_expected_row_ids_and_row_dicts_for_partition_order(_lowerCAmelCase , [1, 3])
for i, (row_id, row_dict) in enumerate(_lowerCAmelCase):
UpperCamelCase_ , UpperCamelCase_ = expected_row_ids_and_row_dicts_a[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def _lowerCAmelCase ():
UpperCamelCase_ = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
UpperCamelCase_ = spark.range(1_00).repartition(1)
UpperCamelCase_ = Spark(_lowerCAmelCase)
# Choose a small max_shard_size for maximum partitioning.
spark_builder._repartition_df_if_needed(max_shard_size=1)
# The new number of partitions should not be greater than the number of rows.
assert spark_builder.df.rdd.getNumPartitions() == 1_00
| 128 | 0 |
from __future__ import annotations
from collections.abc import Iterator
class _SCREAMING_SNAKE_CASE :
def __init__( self , _SCREAMING_SNAKE_CASE )-> List[str]:
lowerCamelCase_ =value
lowerCamelCase_ =None
lowerCamelCase_ =None
class _SCREAMING_SNAKE_CASE :
def __init__( self , _SCREAMING_SNAKE_CASE )-> List[Any]:
lowerCamelCase_ =tree
def _snake_case ( self , _SCREAMING_SNAKE_CASE )-> str:
if node is None:
return 0
return node.value + (
self.depth_first_search(node.left ) + self.depth_first_search(node.right )
)
def __iter__( self )-> str:
yield self.depth_first_search(self.tree )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 354 |
from __future__ import annotations
class _SCREAMING_SNAKE_CASE :
def __init__( self , _SCREAMING_SNAKE_CASE )-> None:
lowerCamelCase_ =data
lowerCamelCase_ =None
lowerCamelCase_ =None
def __UpperCamelCase ( _A : Node | None ) ->None: # In Order traversal of the tree
"""simple docstring"""
if tree:
display(tree.left )
print(tree.data )
display(tree.right )
def __UpperCamelCase ( _A : Node | None ) ->int:
"""simple docstring"""
return 1 + max(depth_of_tree(tree.left ) , depth_of_tree(tree.right ) ) if tree else 0
def __UpperCamelCase ( _A : Node ) ->bool:
"""simple docstring"""
if not tree:
return True
if tree.left and tree.right:
return is_full_binary_tree(tree.left ) and is_full_binary_tree(tree.right )
else:
return not tree.left and not tree.right
def __UpperCamelCase ( ) ->None: # Main function for testing.
"""simple docstring"""
lowerCamelCase_ =Node(1 )
lowerCamelCase_ =Node(2 )
lowerCamelCase_ =Node(3 )
lowerCamelCase_ =Node(4 )
lowerCamelCase_ =Node(5 )
lowerCamelCase_ =Node(6 )
lowerCamelCase_ =Node(7 )
lowerCamelCase_ =Node(8 )
lowerCamelCase_ =Node(9 )
print(is_full_binary_tree(_A ) )
print(depth_of_tree(_A ) )
print("""Tree is: """ )
display(_A )
if __name__ == "__main__":
main()
| 49 | 0 |
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
WavaVecaConfig,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaForCTC,
WavaVecaForPreTraining,
WavaVecaProcessor,
logging,
)
from transformers.models.wavaveca.modeling_wavaveca import WavaVecaForSequenceClassification
logging.set_verbosity_info()
__UpperCAmelCase = logging.get_logger(__name__)
__UpperCAmelCase = {
"post_extract_proj": "feature_projection.projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
"fc2": "encoder.layers.*.feed_forward.output_dense",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "encoder.layer_norm",
"adapter_layer": "encoder.layers.*.adapter_layer",
"w2v_model.layer_norm": "feature_projection.layer_norm",
"quantizer.weight_proj": "quantizer.weight_proj",
"quantizer.vars": "quantizer.codevectors",
"project_q": "project_q",
"final_proj": "project_hid",
"w2v_encoder.proj": "lm_head",
"mask_emb": "masked_spec_embed",
"pooling_layer.linear": "projector",
"pooling_layer.projection": "classifier",
}
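# The keys above are fairseq parameter-name fragments mapped to their Hugging Face
# counterparts; a "*" placeholder is replaced with the matching layer index during renaming.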
__UpperCAmelCase = [
"lm_head",
"quantizer.weight_proj",
"quantizer.codevectors",
"project_q",
"project_hid",
"projector",
"classifier",
]
def A__ ( __lowerCamelCase ):
SCREAMING_SNAKE_CASE_ = {}
with open(snake_case__, '''r''' ) as file:
for line_number, line in enumerate(snake_case__ ):
SCREAMING_SNAKE_CASE_ = line.strip()
if line:
SCREAMING_SNAKE_CASE_ = line.split()
SCREAMING_SNAKE_CASE_ = line_number
SCREAMING_SNAKE_CASE_ = words[0]
SCREAMING_SNAKE_CASE_ = value
return result
def A__ ( __lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase ):
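    # Walks the dotted `key` through the HF model's attributes, then copies `value` into the
    # selected slot (weight / weight_g / weight_v / bias, or a named sub-parameter for "param" keys).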
for attribute in key.split('''.''' ):
SCREAMING_SNAKE_CASE_ = getattr(snake_case__, snake_case__ )
SCREAMING_SNAKE_CASE_ = None
for param_key in PARAM_MAPPING.keys():
if full_name.endswith(snake_case__ ):
SCREAMING_SNAKE_CASE_ = PARAM_MAPPING[full_name.split('''.''' )[-1]]
SCREAMING_SNAKE_CASE_ = 'param'
if weight_type is not None and weight_type != "param":
SCREAMING_SNAKE_CASE_ = getattr(snake_case__, snake_case__ ).shape
elif weight_type is not None and weight_type == "param":
SCREAMING_SNAKE_CASE_ = hf_pointer
for attribute in hf_param_name.split('''.''' ):
SCREAMING_SNAKE_CASE_ = getattr(snake_case__, snake_case__ )
SCREAMING_SNAKE_CASE_ = shape_pointer.shape
# let's reduce dimension
SCREAMING_SNAKE_CASE_ = value[0]
else:
SCREAMING_SNAKE_CASE_ = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
F'''Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'''
F''' {value.shape} for {full_name}''' )
if weight_type == "weight":
SCREAMING_SNAKE_CASE_ = value
elif weight_type == "weight_g":
SCREAMING_SNAKE_CASE_ = value
elif weight_type == "weight_v":
SCREAMING_SNAKE_CASE_ = value
elif weight_type == "bias":
SCREAMING_SNAKE_CASE_ = value
elif weight_type == "param":
for attribute in hf_param_name.split('''.''' ):
SCREAMING_SNAKE_CASE_ = getattr(snake_case__, snake_case__ )
SCREAMING_SNAKE_CASE_ = value
else:
SCREAMING_SNAKE_CASE_ = value
logger.info(F'''{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.''' )
def A__ ( __lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase ):
SCREAMING_SNAKE_CASE_ = None
for param_key in PARAM_MAPPING.keys():
if full_name.endswith(snake_case__ ):
SCREAMING_SNAKE_CASE_ = PARAM_MAPPING[full_name.split('''.''' )[-1]]
SCREAMING_SNAKE_CASE_ = 'param'
if weight_type is not None and weight_type != "param":
SCREAMING_SNAKE_CASE_ = '.'.join([key, weight_type] )
elif weight_type is not None and weight_type == "param":
SCREAMING_SNAKE_CASE_ = '.'.join([key, hf_param_name] )
else:
SCREAMING_SNAKE_CASE_ = key
SCREAMING_SNAKE_CASE_ = value if 'lm_head' in full_key else value[0]
__UpperCAmelCase = {
"W_a": "linear_1.weight",
"W_b": "linear_2.weight",
"b_a": "linear_1.bias",
"b_b": "linear_2.bias",
"ln_W": "norm.weight",
"ln_b": "norm.bias",
}
def A__ ( __lowerCamelCase, __lowerCamelCase, __lowerCamelCase=None, __lowerCamelCase=None ):
SCREAMING_SNAKE_CASE_ = False
for key, mapped_key in MAPPING.items():
SCREAMING_SNAKE_CASE_ = 'wav2vec2.' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0]:
SCREAMING_SNAKE_CASE_ = True
if "*" in mapped_key:
SCREAMING_SNAKE_CASE_ = name.split(snake_case__ )[0].split('''.''' )[-2]
SCREAMING_SNAKE_CASE_ = mapped_key.replace('''*''', snake_case__ )
if "weight_g" in name:
SCREAMING_SNAKE_CASE_ = 'weight_g'
elif "weight_v" in name:
SCREAMING_SNAKE_CASE_ = 'weight_v'
elif "bias" in name:
SCREAMING_SNAKE_CASE_ = 'bias'
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
SCREAMING_SNAKE_CASE_ = 'weight'
else:
SCREAMING_SNAKE_CASE_ = None
if hf_dict is not None:
rename_dict(snake_case__, snake_case__, snake_case__, snake_case__, snake_case__ )
else:
set_recursively(snake_case__, snake_case__, snake_case__, snake_case__, snake_case__ )
return is_used
return is_used
def A__ ( __lowerCamelCase, __lowerCamelCase, __lowerCamelCase ):
SCREAMING_SNAKE_CASE_ = []
SCREAMING_SNAKE_CASE_ = fairseq_model.state_dict()
SCREAMING_SNAKE_CASE_ = hf_model.wavaveca.feature_extractor
for name, value in fairseq_dict.items():
SCREAMING_SNAKE_CASE_ = False
if "conv_layers" in name:
load_conv_layer(
snake_case__, snake_case__, snake_case__, snake_case__, hf_model.config.feat_extract_norm == '''group''', )
SCREAMING_SNAKE_CASE_ = True
else:
SCREAMING_SNAKE_CASE_ = load_wavaveca_layer(snake_case__, snake_case__, snake_case__ )
if not is_used:
unused_weights.append(snake_case__ )
logger.warning(F'''Unused weights: {unused_weights}''' )
def A__ ( __lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase ):
SCREAMING_SNAKE_CASE_ = full_name.split('''conv_layers.''' )[-1]
SCREAMING_SNAKE_CASE_ = name.split('''.''' )
SCREAMING_SNAKE_CASE_ = int(items[0] )
SCREAMING_SNAKE_CASE_ = int(items[1] )
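    # items[0] is the conv layer index and items[1] the sub-module type:
    # type 0 addresses the conv itself, type 2 its layer norm (or the group norm on layer 0).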
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.''' )
SCREAMING_SNAKE_CASE_ = value
logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.''' )
SCREAMING_SNAKE_CASE_ = value
logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.''' )
SCREAMING_SNAKE_CASE_ = value
            logger.info(F'''Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.''' )
SCREAMING_SNAKE_CASE_ = value
logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
else:
unused_weights.append(snake_case__ )
@torch.no_grad()
def A__ ( __lowerCamelCase, __lowerCamelCase, __lowerCamelCase=None, __lowerCamelCase=None, __lowerCamelCase=True, __lowerCamelCase=False ):
if config_path is not None:
SCREAMING_SNAKE_CASE_ = WavaVecaConfig.from_pretrained(snake_case__ )
else:
SCREAMING_SNAKE_CASE_ = WavaVecaConfig()
if is_seq_class:
SCREAMING_SNAKE_CASE_ = read_txt_into_dict(snake_case__ )
SCREAMING_SNAKE_CASE_ = idalabel
SCREAMING_SNAKE_CASE_ = WavaVecaForSequenceClassification(snake_case__ )
SCREAMING_SNAKE_CASE_ = WavaVecaFeatureExtractor(
feature_size=1, sampling_rate=1_60_00, padding_value=0, do_normalize=snake_case__, return_attention_mask=snake_case__, )
feature_extractor.save_pretrained(snake_case__ )
elif is_finetuned:
if dict_path:
SCREAMING_SNAKE_CASE_ = Dictionary.load(snake_case__ )
# important change bos & pad token id since CTC symbol is <pad> and
# not <s> as in fairseq
SCREAMING_SNAKE_CASE_ = target_dict.pad_index
SCREAMING_SNAKE_CASE_ = target_dict.bos_index
SCREAMING_SNAKE_CASE_ = target_dict.eos_index
SCREAMING_SNAKE_CASE_ = len(target_dict.symbols )
SCREAMING_SNAKE_CASE_ = os.path.join(snake_case__, '''vocab.json''' )
if not os.path.isdir(snake_case__ ):
logger.error('''--pytorch_dump_folder_path ({}) should be a directory'''.format(snake_case__ ) )
return
os.makedirs(snake_case__, exist_ok=snake_case__ )
SCREAMING_SNAKE_CASE_ = target_dict.indices
# fairseq has the <pad> and <s> switched
SCREAMING_SNAKE_CASE_ = 0
SCREAMING_SNAKE_CASE_ = 1
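            # i.e. <pad> is pinned to index 0 and <s> to index 1 in the exported vocab.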
with open(snake_case__, '''w''', encoding='''utf-8''' ) as vocab_handle:
json.dump(snake_case__, snake_case__ )
SCREAMING_SNAKE_CASE_ = WavaVecaCTCTokenizer(
snake_case__, unk_token=target_dict.unk_word, pad_token=target_dict.pad_word, bos_token=target_dict.bos_word, eos_token=target_dict.eos_word, word_delimiter_token='''|''', do_lower_case=snake_case__, )
SCREAMING_SNAKE_CASE_ = True if config.feat_extract_norm == 'layer' else False
SCREAMING_SNAKE_CASE_ = WavaVecaFeatureExtractor(
feature_size=1, sampling_rate=1_60_00, padding_value=0, do_normalize=snake_case__, return_attention_mask=snake_case__, )
SCREAMING_SNAKE_CASE_ = WavaVecaProcessor(feature_extractor=snake_case__, tokenizer=snake_case__ )
processor.save_pretrained(snake_case__ )
SCREAMING_SNAKE_CASE_ = WavaVecaForCTC(snake_case__ )
else:
SCREAMING_SNAKE_CASE_ = WavaVecaForPreTraining(snake_case__ )
if is_finetuned or is_seq_class:
SCREAMING_SNAKE_CASE_ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path], arg_overrides={'''data''': '''/'''.join(dict_path.split('''/''' )[:-1] )} )
else:
SCREAMING_SNAKE_CASE_ = argparse.Namespace(task='''audio_pretraining''' )
SCREAMING_SNAKE_CASE_ = fairseq.tasks.setup_task(snake_case__ )
SCREAMING_SNAKE_CASE_ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path], task=snake_case__ )
SCREAMING_SNAKE_CASE_ = model[0].eval()
recursively_load_weights(snake_case__, snake_case__, not is_finetuned )
hf_wavavec.save_pretrained(snake_case__ )
if __name__ == "__main__":
__UpperCAmelCase = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
)
parser.add_argument(
"--is_seq_class",
action="store_true",
help="Whether the model to convert is a fine-tuned sequence classification model or not",
)
__UpperCAmelCase = parser.parse_args()
__UpperCAmelCase = not args.not_finetuned and not args.is_seq_class
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.dict_path,
is_finetuned,
args.is_seq_class,
)
| 299 |
import functools
from typing import Any
def UpperCamelCase ( snake_case__ : str , snake_case__ : list[str] ) -> bool:
# Validation
if not isinstance(snake_case__ , snake_case__ ) or len(snake_case__ ) == 0:
raise ValueError('the string should be not empty string' )
if not isinstance(snake_case__ , snake_case__ ) or not all(
isinstance(snake_case__ , snake_case__ ) and len(snake_case__ ) > 0 for item in words ):
raise ValueError('the words should be a list of non-empty strings' )
# Build trie
UpperCamelCase : dict[str, Any] = {}
UpperCamelCase : List[str] = 'WORD_KEEPER'
for word in words:
UpperCamelCase : List[str] = trie
for c in word:
if c not in trie_node:
UpperCamelCase : int = {}
UpperCamelCase : str = trie_node[c]
UpperCamelCase : Tuple = True
UpperCamelCase : List[Any] = len(snake_case__ )
# Dynamic programming method
@functools.cache
def is_breakable(snake_case__ : int ) -> bool:
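        # True iff string[index:] can be segmented into trie words; functools.cache memoizes
        # the result per index, so each suffix position is solved at most once.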
if index == len_string:
return True
UpperCamelCase : Dict = trie
for i in range(snake_case__ , snake_case__ ):
UpperCamelCase : List[Any] = trie_node.get(string[i] , snake_case__ )
if trie_node is None:
return False
if trie_node.get(snake_case__ , snake_case__ ) and is_breakable(i + 1 ):
return True
return False
return is_breakable(0 )
if __name__ == "__main__":
import doctest
doctest.testmod()
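    # Illustrative checks (hypothetical calls, standard word-break semantics):
    #   UpperCamelCase('applepenapple', ['apple', 'pen']) -> True
    #   UpperCamelCase('catsandog', ['cats', 'dog', 'sand', 'and', 'cat']) -> False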
| 119 | 0 |
from ..utils import DummyObject, requires_backends
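# Each placeholder class below only calls requires_backends, which raises an informative
# error stating that torch, transformers and onnx must be installed before the real class is usable.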
class a__ ( metaclass=__snake_case ):
A__ : List[Any] = ['torch', 'transformers', 'onnx']
def __init__( self , *UpperCAmelCase , **UpperCAmelCase ) -> Any:
requires_backends(self , ['torch', 'transformers', 'onnx'] )
@classmethod
def __SCREAMING_SNAKE_CASE ( cls , *UpperCAmelCase , **UpperCAmelCase ) -> Union[str, Any]:
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
@classmethod
def __SCREAMING_SNAKE_CASE ( cls , *UpperCAmelCase , **UpperCAmelCase ) -> List[str]:
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
class a__ ( metaclass=__snake_case ):
A__ : Union[str, Any] = ['torch', 'transformers', 'onnx']
def __init__( self , *UpperCAmelCase , **UpperCAmelCase ) -> int:
requires_backends(self , ['torch', 'transformers', 'onnx'] )
@classmethod
def __SCREAMING_SNAKE_CASE ( cls , *UpperCAmelCase , **UpperCAmelCase ) -> int:
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
@classmethod
def __SCREAMING_SNAKE_CASE ( cls , *UpperCAmelCase , **UpperCAmelCase ) -> Dict:
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
class a__ ( metaclass=__snake_case ):
A__ : Dict = ['torch', 'transformers', 'onnx']
def __init__( self , *UpperCAmelCase , **UpperCAmelCase ) -> Union[str, Any]:
requires_backends(self , ['torch', 'transformers', 'onnx'] )
@classmethod
def __SCREAMING_SNAKE_CASE ( cls , *UpperCAmelCase , **UpperCAmelCase ) -> Any:
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
@classmethod
def __SCREAMING_SNAKE_CASE ( cls , *UpperCAmelCase , **UpperCAmelCase ) -> int:
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
class a__ ( metaclass=__snake_case ):
A__ : Union[str, Any] = ['torch', 'transformers', 'onnx']
def __init__( self , *UpperCAmelCase , **UpperCAmelCase ) -> Optional[Any]:
requires_backends(self , ['torch', 'transformers', 'onnx'] )
@classmethod
def __SCREAMING_SNAKE_CASE ( cls , *UpperCAmelCase , **UpperCAmelCase ) -> int:
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
@classmethod
def __SCREAMING_SNAKE_CASE ( cls , *UpperCAmelCase , **UpperCAmelCase ) -> Union[str, Any]:
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
class a__ ( metaclass=__snake_case ):
A__ : Dict = ['torch', 'transformers', 'onnx']
def __init__( self , *UpperCAmelCase , **UpperCAmelCase ) -> Optional[int]:
requires_backends(self , ['torch', 'transformers', 'onnx'] )
@classmethod
def __SCREAMING_SNAKE_CASE ( cls , *UpperCAmelCase , **UpperCAmelCase ) -> Any:
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
@classmethod
def __SCREAMING_SNAKE_CASE ( cls , *UpperCAmelCase , **UpperCAmelCase ) -> str:
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
class a__ ( metaclass=__snake_case ):
A__ : Tuple = ['torch', 'transformers', 'onnx']
def __init__( self , *UpperCAmelCase , **UpperCAmelCase ) -> int:
requires_backends(self , ['torch', 'transformers', 'onnx'] )
@classmethod
def __SCREAMING_SNAKE_CASE ( cls , *UpperCAmelCase , **UpperCAmelCase ) -> List[str]:
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
@classmethod
def __SCREAMING_SNAKE_CASE ( cls , *UpperCAmelCase , **UpperCAmelCase ) -> Any:
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
| 197 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase_ : Tuple = logging.get_logger(__name__)
lowerCamelCase_ : List[Any] = {
"""google/fnet-base""": """https://huggingface.co/google/fnet-base/resolve/main/config.json""",
"""google/fnet-large""": """https://huggingface.co/google/fnet-large/resolve/main/config.json"""
# See all FNet models at https://huggingface.co/models?filter=fnet
}
class a__ ( __snake_case ):
A__ : Dict = 'fnet'
def __init__( self , UpperCAmelCase=3_2_0_0_0 , UpperCAmelCase=7_6_8 , UpperCAmelCase=1_2 , UpperCAmelCase=3_0_7_2 , UpperCAmelCase="gelu_new" , UpperCAmelCase=0.1 , UpperCAmelCase=5_1_2 , UpperCAmelCase=4 , UpperCAmelCase=0.02 , UpperCAmelCase=1e-12 , UpperCAmelCase=False , UpperCAmelCase=5_1_2 , UpperCAmelCase=3 , UpperCAmelCase=1 , UpperCAmelCase=2 , **UpperCAmelCase , ) -> Dict:
super().__init__(pad_token_id=UpperCAmelCase , bos_token_id=UpperCAmelCase , eos_token_id=UpperCAmelCase , **UpperCAmelCase )
__a = vocab_size
__a = max_position_embeddings
__a = hidden_size
__a = num_hidden_layers
__a = intermediate_size
__a = hidden_act
__a = hidden_dropout_prob
__a = initializer_range
__a = type_vocab_size
__a = layer_norm_eps
__a = use_tpu_fourier_optimizations
__a = tpu_short_seq_length
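# Illustrative note: instantiating the class above with no arguments yields the defaults
# declared in __init__ (hidden_size=768, 12 hidden layers), which correspond to the
# google/fnet-base checkpoint listed in the map above.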
| 197 | 1 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.whisper import WhisperForConditionalGeneration, WhisperProcessor
from .base import PipelineTool
class _lowerCamelCase( _a ):
lowercase_ : int = """openai/whisper-base"""
lowercase_ : Union[str, Any] = (
"""This is a tool that transcribes an audio into text. It takes an input named `audio` and returns the """
"""transcribed text."""
)
lowercase_ : Any = """transcriber"""
lowercase_ : List[Any] = WhisperProcessor
lowercase_ : List[str] = WhisperForConditionalGeneration
lowercase_ : Any = ["""audio"""]
lowercase_ : Union[str, Any] = ["""text"""]
def UpperCamelCase ( self, lowerCamelCase) -> Tuple:
"""simple docstring"""
return self.pre_processor(lowerCamelCase, return_tensors='pt').input_features
def UpperCamelCase ( self, lowerCamelCase) -> List[str]:
"""simple docstring"""
return self.model.generate(inputs=lowerCamelCase)
def UpperCamelCase ( self, lowerCamelCase) -> Optional[Any]:
"""simple docstring"""
return self.pre_processor.batch_decode(lowerCamelCase, skip_special_tokens=lowerCamelCase)[0]
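# Illustrative usage sketch (hypothetical audio file name; assumes the agents PipelineTool API,
# where calling the tool chains the encode -> forward -> decode methods defined above):
#   transcriber = _lowerCamelCase()
#   text = transcriber('sample.flac')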
| 21 |
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import YolosImageProcessor
class _A( unittest.TestCase ):
"""simple docstring"""
def __init__( self , _A , _A=7 , _A=3 , _A=30 , _A=400 , _A=True , _A=None , _A=True , _A=[0.5, 0.5, 0.5] , _A=[0.5, 0.5, 0.5] , _A=True , _A=1 / 255 , _A=True , ):
# by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
__A : List[Any] = size if size is not None else {'shortest_edge': 18, 'longest_edge': 1333}
__A : Union[str, Any] = parent
__A : Optional[int] = batch_size
__A : int = num_channels
__A : int = min_resolution
__A : Any = max_resolution
__A : List[Any] = do_resize
__A : List[Any] = size
__A : Union[str, Any] = do_normalize
__A : Optional[int] = image_mean
__A : Optional[int] = image_std
__A : int = do_rescale
__A : str = rescale_factor
__A : Tuple = do_pad
def UpperCAmelCase_ ( self ):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
def UpperCAmelCase_ ( self , _A , _A=False ):
if not batched:
__A : List[str] = image_inputs[0]
if isinstance(_A , Image.Image ):
__A , __A : int = image.size
else:
__A , __A : Any = image.shape[1], image.shape[2]
if w < h:
__A : List[Any] = int(self.size['shortest_edge'] * h / w )
__A : List[Any] = self.size['shortest_edge']
elif w > h:
__A : Union[str, Any] = self.size['shortest_edge']
__A : str = int(self.size['shortest_edge'] * w / h )
else:
__A : Dict = self.size['shortest_edge']
__A : str = self.size['shortest_edge']
else:
__A : int = []
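            # batched: compute each image's resized size, then take the max height/width over the batch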
for image in image_inputs:
__A , __A : Optional[Any] = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
__A : List[str] = max(_A , key=lambda _A : item[0] )[0]
__A : str = max(_A , key=lambda _A : item[1] )[1]
return expected_height, expected_width
@require_torch
@require_vision
class _A( snake_case__ , unittest.TestCase ):
"""simple docstring"""
UpperCamelCase : List[str] = YolosImageProcessor if is_vision_available() else None
def UpperCAmelCase_ ( self ):
__A : Dict = YolosImageProcessingTester(self )
@property
def UpperCAmelCase_ ( self ):
return self.image_processor_tester.prepare_image_processor_dict()
def UpperCAmelCase_ ( self ):
__A : str = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_A , 'image_mean' ) )
self.assertTrue(hasattr(_A , 'image_std' ) )
self.assertTrue(hasattr(_A , 'do_normalize' ) )
self.assertTrue(hasattr(_A , 'do_resize' ) )
self.assertTrue(hasattr(_A , 'size' ) )
def UpperCAmelCase_ ( self ):
__A : Tuple = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'shortest_edge': 18, 'longest_edge': 1333} )
self.assertEqual(image_processor.do_pad , _A )
__A : Dict = self.image_processing_class.from_dict(
self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=_A )
self.assertEqual(image_processor.size , {'shortest_edge': 42, 'longest_edge': 84} )
self.assertEqual(image_processor.do_pad , _A )
def UpperCAmelCase_ ( self ):
pass
def UpperCAmelCase_ ( self ):
# Initialize image_processing
__A : List[str] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__A : List[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_A )
for image in image_inputs:
self.assertIsInstance(_A , Image.Image )
# Test not batched input
__A : Any = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
__A , __A : Optional[int] = self.image_processor_tester.get_expected_values(_A )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
__A , __A : Optional[Any] = self.image_processor_tester.get_expected_values(_A , batched=_A )
__A : str = image_processing(_A , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def UpperCAmelCase_ ( self ):
# Initialize image_processing
__A : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__A : List[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_A , numpify=_A )
for image in image_inputs:
self.assertIsInstance(_A , np.ndarray )
# Test not batched input
__A : str = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
__A , __A : List[Any] = self.image_processor_tester.get_expected_values(_A )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
__A : Tuple = image_processing(_A , return_tensors='pt' ).pixel_values
__A , __A : Optional[int] = self.image_processor_tester.get_expected_values(_A , batched=_A )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def UpperCAmelCase_ ( self ):
# Initialize image_processing
__A : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__A : Dict = prepare_image_inputs(self.image_processor_tester , equal_resolution=_A , torchify=_A )
for image in image_inputs:
self.assertIsInstance(_A , torch.Tensor )
# Test not batched input
__A : Union[str, Any] = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
__A , __A : Union[str, Any] = self.image_processor_tester.get_expected_values(_A )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
__A : Optional[int] = image_processing(_A , return_tensors='pt' ).pixel_values
__A , __A : Optional[int] = self.image_processor_tester.get_expected_values(_A , batched=_A )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def UpperCAmelCase_ ( self ):
# Initialize image_processings
__A : Tuple = self.image_processing_class(**self.image_processor_dict )
__A : Any = self.image_processing_class(do_resize=_A , do_normalize=_A , do_rescale=_A )
# create random PyTorch tensors
__A : Union[str, Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_A , torchify=_A )
for image in image_inputs:
self.assertIsInstance(_A , torch.Tensor )
# Test whether the method "pad" and calling the image processor return the same tensors
__A : Optional[int] = image_processing_a.pad(_A , return_tensors='pt' )
__A : Optional[int] = image_processing_a(_A , return_tensors='pt' )
self.assertTrue(
torch.allclose(encoded_images_with_method['pixel_values'] , encoded_images['pixel_values'] , atol=1e-4 ) )
@slow
def UpperCAmelCase_ ( self ):
# prepare image and target
__A : List[Any] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
with open('./tests/fixtures/tests_samples/COCO/coco_annotations.txt' , 'r' ) as f:
__A : Optional[Any] = json.loads(f.read() )
__A : Optional[Any] = {'image_id': 39769, 'annotations': target}
# encode them
__A : str = YolosImageProcessor.from_pretrained('hustvl/yolos-small' )
__A : List[Any] = image_processing(images=_A , annotations=_A , return_tensors='pt' )
# verify pixel values
__A : List[Any] = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding['pixel_values'].shape , _A )
__A : Union[str, Any] = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] )
self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] , _A , atol=1e-4 ) )
# verify area
__A : List[Any] = torch.tensor([5_8_8_7.9_6_0_0, 1_1_2_5_0.2_0_6_1, 4_8_9_3_5_3.8_4_3_8, 8_3_7_1_2_2.7_5_0_0, 1_4_7_9_6_7.5_1_5_6, 1_6_5_7_3_2.3_4_3_8] )
self.assertTrue(torch.allclose(encoding['labels'][0]['area'] , _A ) )
# verify boxes
__A : Any = torch.Size([6, 4] )
self.assertEqual(encoding['labels'][0]['boxes'].shape , _A )
__A : Optional[Any] = torch.tensor([0.5_5_0_3, 0.2_7_6_5, 0.0_6_0_4, 0.2_2_1_5] )
self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] , _A , atol=1e-3 ) )
# verify image_id
__A : Optional[int] = torch.tensor([39769] )
self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] , _A ) )
# verify is_crowd
__A : str = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] , _A ) )
# verify class_labels
__A : Any = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] , _A ) )
# verify orig_size
__A : int = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] , _A ) )
# verify size
__A : str = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding['labels'][0]['size'] , _A ) )
@slow
def UpperCAmelCase_ ( self ):
# prepare image, target and masks_path
__A : Dict = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
with open('./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt' , 'r' ) as f:
__A : Tuple = json.loads(f.read() )
__A : Any = {'file_name': '000000039769.png', 'image_id': 39769, 'segments_info': target}
__A : List[Any] = pathlib.Path('./tests/fixtures/tests_samples/COCO/coco_panoptic' )
# encode them
__A : Any = YolosImageProcessor(format='coco_panoptic' )
__A : List[Any] = image_processing(images=_A , annotations=_A , masks_path=_A , return_tensors='pt' )
# verify pixel values
__A : Any = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding['pixel_values'].shape , _A )
__A : Union[str, Any] = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] )
self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] , _A , atol=1e-4 ) )
# verify area
__A : int = torch.tensor([1_4_7_9_7_9.6_8_7_5, 1_6_5_5_2_7.0_4_6_9, 4_8_4_6_3_8.5_9_3_8, 1_1_2_9_2.9_3_7_5, 5_8_7_9.6_5_6_2, 7_6_3_4.1_1_4_7] )
self.assertTrue(torch.allclose(encoding['labels'][0]['area'] , _A ) )
# verify boxes
__A : Optional[int] = torch.Size([6, 4] )
self.assertEqual(encoding['labels'][0]['boxes'].shape , _A )
__A : Optional[Any] = torch.tensor([0.2_6_2_5, 0.5_4_3_7, 0.4_6_8_8, 0.8_6_2_5] )
self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] , _A , atol=1e-3 ) )
# verify image_id
__A : Union[str, Any] = torch.tensor([39769] )
self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] , _A ) )
# verify is_crowd
__A : Tuple = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] , _A ) )
# verify class_labels
__A : List[str] = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] , _A ) )
# verify masks
__A : Tuple = 822873
self.assertEqual(encoding['labels'][0]['masks'].sum().item() , _A )
# verify orig_size
__A : str = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] , _A ) )
# verify size
__A : int = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding['labels'][0]['size'] , _A ) )
| 280 | 0 |
"""simple docstring"""
import unittest
from transformers import AutoTokenizer, FalconConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
FalconForCausalLM,
FalconForQuestionAnswering,
FalconForSequenceClassification,
FalconForTokenClassification,
FalconModel,
)
class UpperCamelCase :
def __init__(self : Optional[Any] , _A : Dict , _A : int=3 , _A : Optional[int]=7 , _A : str=True , _A : Dict=True , _A : Optional[int]=False , _A : Tuple=True , _A : Any=99 , _A : List[str]=32 , _A : List[Any]=5 , _A : Optional[Any]=4 , _A : Any=37 , _A : Tuple="gelu" , _A : Dict=0.1 , _A : Optional[Any]=0.1 , _A : Optional[int]=5_12 , _A : Union[str, Any]=16 , _A : Optional[Any]=2 , _A : Union[str, Any]=0.02 , _A : str=3 , _A : Optional[Any]=4 , _A : Dict=None , ) -> List[Any]:
__snake_case : Dict = parent
__snake_case : Dict = batch_size
__snake_case : Optional[int] = seq_length
__snake_case : Tuple = is_training
__snake_case : List[str] = use_input_mask
__snake_case : Union[str, Any] = use_token_type_ids
__snake_case : Optional[Any] = use_labels
__snake_case : str = vocab_size
__snake_case : Dict = hidden_size
__snake_case : Optional[int] = num_hidden_layers
__snake_case : Tuple = num_attention_heads
__snake_case : Optional[int] = intermediate_size
__snake_case : List[str] = hidden_act
__snake_case : Optional[Any] = hidden_dropout_prob
__snake_case : Any = attention_probs_dropout_prob
__snake_case : Tuple = max_position_embeddings
__snake_case : int = type_vocab_size
__snake_case : List[Any] = type_sequence_label_size
__snake_case : Tuple = initializer_range
__snake_case : Tuple = num_labels
__snake_case : Dict = num_choices
__snake_case : List[Any] = scope
def _lowercase (self : Optional[Any]) -> int:
__snake_case : str = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
__snake_case : Union[str, Any] = None
if self.use_input_mask:
__snake_case : Union[str, Any] = random_attention_mask([self.batch_size, self.seq_length])
__snake_case : List[str] = None
__snake_case : List[Any] = None
__snake_case : List[Any] = None
__snake_case : Dict = None
if self.use_labels:
__snake_case : Union[str, Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size)
__snake_case : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
__snake_case : int = ids_tensor([self.batch_size] , self.num_choices)
__snake_case : Any = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def _lowercase (self : Optional[Any]) -> Any:
return FalconConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_A , initializer_range=self.initializer_range , pad_token_id=1 , new_decoder_architecture=_A , )
def _lowercase (self : int , _A : Optional[Any] , _A : List[str] , _A : Union[str, Any] , _A : List[Any] , _A : Dict , _A : List[Any] , _A : Union[str, Any]) -> str:
__snake_case : Union[str, Any] = FalconModel(config=_A)
model.to(_A)
model.eval()
__snake_case : int = model(_A , attention_mask=_A)
__snake_case : Optional[int] = model(_A)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def _lowercase (self : Optional[Any] , _A : List[str] , _A : Tuple , _A : Dict , _A : Any , _A : List[str] , _A : List[Any] , _A : List[Any] , _A : Any , _A : List[Any] , ) -> Union[str, Any]:
__snake_case : str = True
__snake_case : Optional[int] = FalconModel(_A)
model.to(_A)
model.eval()
__snake_case : Tuple = model(
_A , attention_mask=_A , encoder_hidden_states=_A , encoder_attention_mask=_A , )
__snake_case : int = model(
_A , attention_mask=_A , encoder_hidden_states=_A , )
__snake_case : Union[str, Any] = model(_A , attention_mask=_A)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def _lowercase (self : Optional[int] , _A : Optional[int] , _A : Optional[Any] , _A : Dict , _A : Union[str, Any] , _A : int , _A : List[str] , _A : str , _A : int , _A : Optional[int] , ) -> Optional[Any]:
__snake_case : str = FalconForCausalLM(config=_A)
model.to(_A)
model.eval()
__snake_case : List[Any] = model(_A , attention_mask=_A , labels=_A)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
def _lowercase (self : Dict , _A : int , _A : List[Any] , _A : str , _A : Union[str, Any] , _A : Optional[int] , _A : Any , _A : Tuple , _A : List[Any] , _A : Dict , ) -> Optional[int]:
__snake_case : str = True
__snake_case : Any = True
__snake_case : int = FalconForCausalLM(config=_A)
model.to(_A)
model.eval()
# first forward pass
__snake_case : int = model(
_A , attention_mask=_A , encoder_hidden_states=_A , encoder_attention_mask=_A , use_cache=_A , )
__snake_case : List[Any] = outputs.past_key_values
        # create hypothetical multiple next tokens and extend next_input_ids with them
__snake_case : Dict = ids_tensor((self.batch_size, 3) , config.vocab_size)
__snake_case : Union[str, Any] = ids_tensor((self.batch_size, 3) , vocab_size=2)
        # append the new tokens to input_ids and the attention mask
__snake_case : List[str] = torch.cat([input_ids, next_tokens] , dim=-1)
__snake_case : str = torch.cat([input_mask, next_mask] , dim=-1)
__snake_case : str = model(
_A , attention_mask=_A , encoder_hidden_states=_A , encoder_attention_mask=_A , output_hidden_states=_A , )['hidden_states'][0]
__snake_case : Optional[Any] = model(
_A , attention_mask=_A , encoder_hidden_states=_A , encoder_attention_mask=_A , past_key_values=_A , output_hidden_states=_A , )['hidden_states'][0]
# select random slice
__snake_case : Optional[Any] = ids_tensor((1,) , output_from_past.shape[-1]).item()
__snake_case : List[Any] = output_from_no_past[:, -3:, random_slice_idx].detach()
__snake_case : Optional[Any] = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(_A , _A , atol=1E-3))
def _lowercase (self : Optional[int]) -> Optional[int]:
__snake_case : Tuple = self.prepare_config_and_inputs()
        __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case = config_and_inputs
__snake_case : Any = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class UpperCamelCase ( lowercase , lowercase , lowercase , unittest.TestCase ):
UpperCAmelCase : List[str] = (
(
FalconModel,
FalconForCausalLM,
FalconForSequenceClassification,
FalconForTokenClassification,
FalconForQuestionAnswering,
)
if is_torch_available()
else ()
)
UpperCAmelCase : Union[str, Any] = (FalconForCausalLM,) if is_torch_available() else ()
UpperCAmelCase : Optional[int] = (
{
"""feature-extraction""": FalconModel,
"""text-classification""": FalconForSequenceClassification,
"""text-generation""": FalconForCausalLM,
"""question-answering""": FalconForQuestionAnswering,
"""token-classification""": FalconForTokenClassification,
"""zero-shot""": FalconForSequenceClassification,
}
if is_torch_available()
else {}
)
UpperCAmelCase : str = False
UpperCAmelCase : List[Any] = False
def _lowercase (self : int) -> Union[str, Any]:
__snake_case : Optional[int] = FalconModelTester(self)
__snake_case : Union[str, Any] = ConfigTester(self , config_class=_A , hidden_size=37)
def _lowercase (self : List[str]) -> Dict:
self.config_tester.run_common_tests()
def _lowercase (self : List[str]) -> Any:
__snake_case : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_A)
def _lowercase (self : Union[str, Any]) -> Any:
__snake_case , *__snake_case : Tuple = self.model_tester.prepare_config_and_inputs()
for alibi in [True, False]:
__snake_case : int = alibi
self.model_tester.create_and_check_model(_A , *_A)
def _lowercase (self : Dict) -> List[str]:
__snake_case , __snake_case : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
__snake_case : List[str] = 3
__snake_case : Union[str, Any] = input_dict['input_ids']
__snake_case : Optional[int] = input_ids.ne(1).to(_A)
__snake_case : List[Any] = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size)
__snake_case : Optional[Any] = FalconForSequenceClassification(_A)
model.to(_A)
model.eval()
__snake_case : Any = model(_A , attention_mask=_A , labels=_A)
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels))
def _lowercase (self : Any) -> Optional[int]:
__snake_case , __snake_case : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
__snake_case : Optional[Any] = 3
__snake_case : Optional[Any] = 'single_label_classification'
__snake_case : Union[str, Any] = input_dict['input_ids']
__snake_case : Optional[int] = input_ids.ne(1).to(_A)
__snake_case : List[Any] = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size)
__snake_case : Any = FalconForSequenceClassification(_A)
model.to(_A)
model.eval()
__snake_case : Any = model(_A , attention_mask=_A , labels=_A)
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels))
def _lowercase (self : Optional[Any]) -> List[Any]:
__snake_case , __snake_case : Dict = self.model_tester.prepare_config_and_inputs_for_common()
__snake_case : Tuple = input_dict['input_ids']
__snake_case : Optional[Any] = FalconForCausalLM(_A)
model.to(_A)
model.eval()
__snake_case : Dict = model(_A , use_cache=_A)
__snake_case : Optional[Any] = input_ids.shape[0]
__snake_case : List[str] = model._convert_to_rw_cache(result.past_key_values)
__snake_case : List[str] = model._convert_cache_to_standard_format(_A , _A)
for layer in range(len(_A)):
for tensor_idx in range(2):
self.assertTrue(rw_cache[layer][tensor_idx].ndim == 3)
self.assertTrue(result.past_key_values[layer][tensor_idx].ndim == 4)
self.assertTrue(
torch.all(result.past_key_values[layer][tensor_idx] == standard_cache[layer][tensor_idx]))
def _lowercase (self : str) -> Optional[Any]:
__snake_case , __snake_case : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
__snake_case : Union[str, Any] = 3
__snake_case : Optional[int] = 'multi_label_classification'
__snake_case : List[str] = input_dict['input_ids']
__snake_case : List[Any] = input_ids.ne(1).to(_A)
__snake_case : int = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size).to(torch.float)
__snake_case : List[Any] = FalconForSequenceClassification(_A)
model.to(_A)
model.eval()
__snake_case : Union[str, Any] = model(_A , attention_mask=_A , labels=_A)
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels))
def _lowercase (self : int) -> str:
# Falcon can have different numbers of KV-heads than the number of query heads, so we need
# to override this test to use the right head counts.
for model_class in self.all_generative_model_classes:
__snake_case , __snake_case : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
# If it doesn't support cache, pass the test
if not hasattr(_A , 'use_cache'):
return
__snake_case : Optional[int] = model_class(_A).to(_A)
if "use_cache" not in inputs:
__snake_case : Union[str, Any] = True
__snake_case : Any = model(**_A)
# If "past_key_values" is not returned, pass the test (e.g. RWKV uses a different cache name and format)
if "past_key_values" not in outputs:
return
__snake_case : Union[str, Any] = (
getattr(_A , 'decoder_layers' , _A)
or getattr(_A , 'num_decoder_layers' , _A)
or config.num_hidden_layers
)
__snake_case : Optional[int] = getattr(_A , 'num_kv_heads' , config.num_attention_heads)
__snake_case : Optional[int] = getattr(_A , 'd_model' , config.hidden_size)
__snake_case : List[str] = embed_dim // num_attention_heads
__snake_case : Any = outputs['past_key_values']
self.assertEqual(len(_A) , _A)
__snake_case , __snake_case : str = inputs['input_ids'].shape
for i in range(_A):
if config.new_decoder_architecture:
__snake_case : int = config.num_attention_heads
elif config.multi_query:
__snake_case : Tuple = 1
self.assertEqual(len(past_kv[0]) , 2) # K V for the decoder = 2
self.assertEqual(
past_kv[i][0].shape , (batch_size, num_attention_heads, seq_length, per_head_embed_dim))
self.assertEqual(
past_kv[i][1].shape , (batch_size, num_attention_heads, seq_length, per_head_embed_dim))
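# A minimal helper sketch summarizing the shape rule the loop above asserts:
# multi-query attention caches a single K/V head, while the "new decoder
# architecture" (like vanilla attention here) keeps the full head count.
# Argument names are generic and not part of the test suite.
def _expected_falcon_kv_shape(batch_size, seq_length, head_dim, num_heads, multi_query=False, new_decoder_architecture=False):
    if new_decoder_architecture:
        kv_heads = num_heads
    elif multi_query:
        kv_heads = 1
    else:
        kv_heads = num_heads
    return (batch_size, kv_heads, seq_length, head_dim)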
@require_torch
class UpperCamelCase ( unittest.TestCase ):
@slow
def _lowercase (self : Any) -> Union[str, Any]:
__snake_case : Tuple = AutoTokenizer.from_pretrained('Rocketknight1/falcon-rw-1b')
__snake_case : Any = FalconForCausalLM.from_pretrained('Rocketknight1/falcon-rw-1b')
model.eval()
model.to(_A)
__snake_case : Optional[int] = tokenizer('My favorite food is' , return_tensors='pt').to(_A)
__snake_case : Dict = (
'My favorite food is pizza. I love it so much that I have a pizza party every year for my birthday.'
)
__snake_case : Tuple = model.generate(**_A , do_sample=_A , max_new_tokens=19)
__snake_case : Optional[int] = tokenizer.batch_decode(_A)[0]
self.assertEqual(_A , _A)
@slow
def _lowercase (self : Optional[Any]) -> Any:
# The big models are way too big for the CI, so we use tiny random models that resemble their
# architectures but with much smaller and fewer layers
for repo in ["Rocketknight1/tiny-random-falcon-7b", "Rocketknight1/tiny-random-falcon-40b"]:
__snake_case : Any = AutoTokenizer.from_pretrained(_A)
__snake_case : Optional[int] = FalconForCausalLM.from_pretrained(_A)
model.eval()
model.to(_A)
__snake_case : Union[str, Any] = tokenizer('My favorite food is' , return_tensors='pt').to(_A)
# We just test that these run without errors - the models are randomly initialized
# and so the actual text outputs will be garbage
model.generate(**_A , do_sample=_A , max_new_tokens=4)
model.generate(**_A , do_sample=_A , max_new_tokens=4)
model.generate(**_A , num_beams=2 , max_new_tokens=4)
@slow
def _lowercase (self : List[str]) -> Optional[Any]:
# The big models are way too big for the CI, so we use tiny random models that resemble their
# architectures but with much smaller and fewer layers
with torch.no_grad():
for repo in [
"Rocketknight1/falcon-rw-1b",
"Rocketknight1/tiny-random-falcon-7b",
"Rocketknight1/tiny-random-falcon-40b",
]:
__snake_case : List[str] = AutoTokenizer.from_pretrained(_A)
__snake_case : int = FalconForCausalLM.from_pretrained(_A)
model.eval()
model.to(device=_A)
__snake_case : List[str] = tokenizer('My favorite food is' , return_tensors='pt').to(_A)
# Test results are the same with and without cache
__snake_case : str = model.generate(**_A , do_sample=_A , max_new_tokens=20 , use_cache=_A)
__snake_case : List[str] = model.generate(**_A , do_sample=_A , max_new_tokens=20 , use_cache=_A)
self.assertTrue((outputs_cache - outputs_no_cache).sum().item() == 0)
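# A generic version of the cache-equivalence check above, usable with any
# causal LM on the Hub; "gpt2" is only an illustrative checkpoint.
def check_cache_equivalence(model_name="gpt2", prompt="My favorite food is"):
    from transformers import AutoModelForCausalLM, AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained(model_name)
    model = AutoModelForCausalLM.from_pretrained(model_name).eval()
    inputs = tokenizer(prompt, return_tensors="pt")
    with torch.no_grad():
        with_cache = model.generate(**inputs, do_sample=False, max_new_tokens=20, use_cache=True)
        without_cache = model.generate(**inputs, do_sample=False, max_new_tokens=20, use_cache=False)
    # greedy decoding must produce identical tokens with and without the KV cache
    assert torch.equal(with_cache, without_cache)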
| 95 | """simple docstring"""
from argparse import ArgumentParser, Namespace
from ..utils import logging
from . import BaseTransformersCLICommand
def __UpperCAmelCase ( UpperCAmelCase_ : Namespace ) -> Union[str, Any]:
'''simple docstring'''
return ConvertCommand(
args.model_type , args.tf_checkpoint , args.pytorch_dump_output , args.config , args.finetuning_task_name )
_a : str= "\ntransformers can only be used from the command line to convert TensorFlow models to PyTorch. In that case, it requires\nTensorFlow to be installed. Please see https://www.tensorflow.org/install/ for installation instructions.\n"
class UpperCamelCase ( lowercase ):
@staticmethod
def _lowercase (_A : ArgumentParser) -> Tuple:
__snake_case : Optional[Any] = parser.add_parser(
'convert' , help='CLI tool to run convert model from original author checkpoints to Transformers PyTorch checkpoints.' , )
train_parser.add_argument('--model_type' , type=_A , required=_A , help='Model\'s type.')
train_parser.add_argument(
'--tf_checkpoint' , type=_A , required=_A , help='TensorFlow checkpoint path or folder.')
train_parser.add_argument(
'--pytorch_dump_output' , type=_A , required=_A , help='Path to the PyTorch saved model output.')
train_parser.add_argument('--config' , type=_A , default='' , help='Configuration file path or folder.')
train_parser.add_argument(
'--finetuning_task_name' , type=_A , default=_A , help='Optional fine-tuning task name if the TF model was a finetuned model.' , )
train_parser.set_defaults(func=_A)
def __init__(self : List[str] , _A : str , _A : str , _A : str , _A : str , _A : str , *_A : Any , ) -> Optional[Any]:
__snake_case : List[Any] = logging.get_logger('transformers-cli/converting')
self._logger.info(f"Loading model {model_type}")
__snake_case : List[str] = model_type
__snake_case : int = tf_checkpoint
__snake_case : Optional[int] = pytorch_dump_output
__snake_case : Optional[Any] = config
__snake_case : Optional[Any] = finetuning_task_name
def _lowercase (self : List[str]) -> str:
if self._model_type == "albert":
try:
from ..models.albert.convert_albert_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(_A)
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output)
elif self._model_type == "bert":
try:
from ..models.bert.convert_bert_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(_A)
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output)
elif self._model_type == "funnel":
try:
from ..models.funnel.convert_funnel_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(_A)
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output)
elif self._model_type == "t5":
try:
from ..models.ta.convert_ta_original_tf_checkpoint_to_pytorch import convert_tf_checkpoint_to_pytorch
except ImportError:
raise ImportError(_A)
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output)
elif self._model_type == "gpt":
from ..models.openai.convert_openai_original_tf_checkpoint_to_pytorch import (
convert_openai_checkpoint_to_pytorch,
)
convert_openai_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output)
elif self._model_type == "transfo_xl":
try:
from ..models.transfo_xl.convert_transfo_xl_original_tf_checkpoint_to_pytorch import (
convert_transfo_xl_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(_A)
if "ckpt" in self._tf_checkpoint.lower():
__snake_case : Union[str, Any] = self._tf_checkpoint
__snake_case : List[Any] = ''
else:
__snake_case : Optional[Any] = self._tf_checkpoint
__snake_case : List[Any] = ''
convert_transfo_xl_checkpoint_to_pytorch(
_A , self._config , self._pytorch_dump_output , _A)
elif self._model_type == "gpt2":
try:
from ..models.gpta.convert_gpta_original_tf_checkpoint_to_pytorch import (
convert_gpta_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(_A)
convert_gpta_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output)
elif self._model_type == "xlnet":
try:
from ..models.xlnet.convert_xlnet_original_tf_checkpoint_to_pytorch import (
convert_xlnet_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(_A)
convert_xlnet_checkpoint_to_pytorch(
self._tf_checkpoint , self._config , self._pytorch_dump_output , self._finetuning_task_name)
elif self._model_type == "xlm":
from ..models.xlm.convert_xlm_original_pytorch_checkpoint_to_pytorch import (
convert_xlm_checkpoint_to_pytorch,
)
convert_xlm_checkpoint_to_pytorch(self._tf_checkpoint , self._pytorch_dump_output)
elif self._model_type == "lxmert":
from ..models.lxmert.convert_lxmert_original_tf_checkpoint_to_pytorch import (
convert_lxmert_checkpoint_to_pytorch,
)
convert_lxmert_checkpoint_to_pytorch(self._tf_checkpoint , self._pytorch_dump_output)
elif self._model_type == "rembert":
from ..models.rembert.convert_rembert_tf_checkpoint_to_pytorch import (
convert_rembert_tf_checkpoint_to_pytorch,
)
convert_rembert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output)
else:
raise ValueError(
                '--model_type should be selected in the list [albert, bert, funnel, gpt, gpt2, lxmert, rembert, t5, transfo_xl, xlm, xlnet]')
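# Hedged usage sketch. From the shell, the subcommand above is invoked as
#
#   transformers-cli convert --model_type bert \
#       --tf_checkpoint /path/to/bert_model.ckpt \
#       --config /path/to/bert_config.json \
#       --pytorch_dump_output /path/to/pytorch_dump
#
# (all paths are placeholders). Programmatically, the same conversion can be
# driven by instantiating the command class directly; its run step is the
# method named `_lowercase` in this listing.
def _run_convert_sketch():
    command = UpperCamelCase(
        'bert', '/path/to/bert_model.ckpt', '/path/to/pytorch_dump', '/path/to/bert_config.json', None)
    command._lowercase()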
| 95 | 1 |
from numpy import exp, pi, sqrt
def UpperCamelCase ( __lowercase : Tuple ,__lowercase : Any = 0.0 ,__lowercase : List[str] = 1.0 ):
'''simple docstring'''
return 1 / sqrt(2 * pi * sigma**2 ) * exp(-((x - mu) ** 2) / (2 * sigma**2) )
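# A quick self-contained sanity check of the density formula above: a
# standard normal peaks at its mean with value 1/sqrt(2*pi) ≈ 0.39894.
def _check_standard_normal_peak():
    x, mu, sigma = 0.0, 0.0, 1.0
    density = 1 / sqrt(2 * pi * sigma**2) * exp(-((x - mu) ** 2) / (2 * sigma**2))
    assert abs(density - 1 / sqrt(2 * pi)) < 1e-12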
if __name__ == "__main__":
import doctest
doctest.testmod()
| 140 |
from .data_collator import (
DataCollatorForLanguageModeling,
DataCollatorForPermutationLanguageModeling,
DataCollatorForSeqaSeq,
DataCollatorForSOP,
DataCollatorForTokenClassification,
DataCollatorForWholeWordMask,
DataCollatorWithPadding,
DefaultDataCollator,
default_data_collator,
)
from .metrics import glue_compute_metrics, xnli_compute_metrics
from .processors import (
DataProcessor,
InputExample,
InputFeatures,
SingleSentenceClassificationProcessor,
SquadExample,
SquadFeatures,
    SquadVaProcessor,
glue_convert_examples_to_features,
glue_output_modes,
glue_processors,
glue_tasks_num_labels,
squad_convert_examples_to_features,
xnli_output_modes,
xnli_processors,
xnli_tasks_num_labels,
)
| 303 | 0 |
import gc
import unittest
from diffusers import FlaxControlNetModel, FlaxStableDiffusionControlNetPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class lowerCAmelCase__( unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase_ ( self ) -> int:
super().tearDown()
gc.collect()
def UpperCamelCase_ ( self ) -> Optional[int]:
_SCREAMING_SNAKE_CASE : List[str] = FlaxControlNetModel.from_pretrained(
"lllyasviel/sd-controlnet-canny" , from_pt=lowerCamelCase__ , dtype=jnp.bfloataa )
_SCREAMING_SNAKE_CASE : Optional[int] = FlaxStableDiffusionControlNetPipeline.from_pretrained(
"runwayml/stable-diffusion-v1-5" , controlnet=lowerCamelCase__ , from_pt=lowerCamelCase__ , dtype=jnp.bfloataa )
_SCREAMING_SNAKE_CASE : Any = controlnet_params
_SCREAMING_SNAKE_CASE : List[str] = '''bird'''
_SCREAMING_SNAKE_CASE : Dict = jax.device_count()
_SCREAMING_SNAKE_CASE : Optional[int] = pipe.prepare_text_inputs([prompts] * num_samples )
_SCREAMING_SNAKE_CASE : List[Any] = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png" )
_SCREAMING_SNAKE_CASE : Optional[int] = pipe.prepare_image_inputs([canny_image] * num_samples )
_SCREAMING_SNAKE_CASE : str = jax.random.PRNGKey(0 )
_SCREAMING_SNAKE_CASE : Dict = jax.random.split(lowerCamelCase__ , jax.device_count() )
_SCREAMING_SNAKE_CASE : Union[str, Any] = replicate(lowerCamelCase__ )
_SCREAMING_SNAKE_CASE : Tuple = shard(lowerCamelCase__ )
_SCREAMING_SNAKE_CASE : Optional[Any] = shard(lowerCamelCase__ )
_SCREAMING_SNAKE_CASE : List[str] = pipe(
prompt_ids=lowerCamelCase__ , image=lowerCamelCase__ , params=lowerCamelCase__ , prng_seed=lowerCamelCase__ , num_inference_steps=5_0 , jit=lowerCamelCase__ , ).images
assert images.shape == (jax.device_count(), 1, 7_6_8, 5_1_2, 3)
_SCREAMING_SNAKE_CASE : Dict = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
_SCREAMING_SNAKE_CASE : Any = images[0, 2_5_3:2_5_6, 2_5_3:2_5_6, -1]
_SCREAMING_SNAKE_CASE : Optional[int] = jnp.asarray(jax.device_get(image_slice.flatten() ) )
_SCREAMING_SNAKE_CASE : Optional[Any] = jnp.array(
[0.16_7969, 0.11_6699, 0.08_1543, 0.15_4297, 0.13_2812, 0.10_8887, 0.16_9922, 0.16_9922, 0.20_5078] )
print(F"""output_slice: {output_slice}""" )
assert jnp.abs(output_slice - expected_slice ).max() < 1E-2
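    # A minimal sketch of the replicate/shard data-parallel pattern exercised
    # above: parameters are copied to every local device, while batched inputs
    # are split along a new leading device axis. The argument names are
    # generic pytrees/arrays, not part of the diffusers API.
    @staticmethod
    def _replicate_and_shard_sketch(params_tree, batch_array):
        replicated_params = replicate(params_tree)  # one copy per local device
        sharded_batch = shard(batch_array)  # leading axis of size jax.device_count()
        return replicated_params, sharded_batch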
def UpperCamelCase_ ( self ) -> str:
_SCREAMING_SNAKE_CASE : str = FlaxControlNetModel.from_pretrained(
"lllyasviel/sd-controlnet-openpose" , from_pt=lowerCamelCase__ , dtype=jnp.bfloataa )
_SCREAMING_SNAKE_CASE : str = FlaxStableDiffusionControlNetPipeline.from_pretrained(
"runwayml/stable-diffusion-v1-5" , controlnet=lowerCamelCase__ , from_pt=lowerCamelCase__ , dtype=jnp.bfloataa )
_SCREAMING_SNAKE_CASE : List[str] = controlnet_params
_SCREAMING_SNAKE_CASE : Optional[Any] = '''Chef in the kitchen'''
_SCREAMING_SNAKE_CASE : Tuple = jax.device_count()
_SCREAMING_SNAKE_CASE : Optional[Any] = pipe.prepare_text_inputs([prompts] * num_samples )
_SCREAMING_SNAKE_CASE : List[str] = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/pose.png" )
_SCREAMING_SNAKE_CASE : List[str] = pipe.prepare_image_inputs([pose_image] * num_samples )
_SCREAMING_SNAKE_CASE : Union[str, Any] = jax.random.PRNGKey(0 )
_SCREAMING_SNAKE_CASE : Dict = jax.random.split(lowerCamelCase__ , jax.device_count() )
_SCREAMING_SNAKE_CASE : Any = replicate(lowerCamelCase__ )
_SCREAMING_SNAKE_CASE : Optional[Any] = shard(lowerCamelCase__ )
_SCREAMING_SNAKE_CASE : str = shard(lowerCamelCase__ )
_SCREAMING_SNAKE_CASE : Dict = pipe(
prompt_ids=lowerCamelCase__ , image=lowerCamelCase__ , params=lowerCamelCase__ , prng_seed=lowerCamelCase__ , num_inference_steps=5_0 , jit=lowerCamelCase__ , ).images
assert images.shape == (jax.device_count(), 1, 7_6_8, 5_1_2, 3)
_SCREAMING_SNAKE_CASE : Optional[int] = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
_SCREAMING_SNAKE_CASE : List[str] = images[0, 2_5_3:2_5_6, 2_5_3:2_5_6, -1]
_SCREAMING_SNAKE_CASE : Dict = jnp.asarray(jax.device_get(image_slice.flatten() ) )
_SCREAMING_SNAKE_CASE : str = jnp.array(
[[0.27_1484, 0.26_1719, 0.27_5391, 0.27_7344, 0.27_9297, 0.29_1016, 0.29_4922, 0.30_2734, 0.30_2734]] )
print(F"""output_slice: {output_slice}""" )
assert jnp.abs(output_slice - expected_slice ).max() < 1E-2 | 357 |
import argparse
import glob
import importlib.util
import os
import re
import black
from doc_builder.style_doc import style_docstrings_in_code
# All paths are set with the intent that you run this script from the root of the repo with the command
# python utils/check_copies.py
UpperCamelCase__ ='src/diffusers'
UpperCamelCase__ ='.'
# This is to make sure the diffusers module imported is the one in the repo.
UpperCamelCase__ =importlib.util.spec_from_file_location(
'diffusers',
os.path.join(DIFFUSERS_PATH, '__init__.py'),
submodule_search_locations=[DIFFUSERS_PATH],
)
UpperCamelCase__ =spec.loader.load_module()
def lowerCamelCase__ (__lowerCamelCase, __lowerCamelCase ):
return line.startswith(__lowerCamelCase ) or len(__lowerCamelCase ) <= 1 or re.search(R"^\s*\)(\s*->.*:|:)\s*$", __lowerCamelCase ) is not None
def lowerCamelCase__ (__lowerCamelCase ):
_SCREAMING_SNAKE_CASE : Any = object_name.split("." )
_SCREAMING_SNAKE_CASE : List[Any] = 0
# First let's find the module where our object lives.
_SCREAMING_SNAKE_CASE : Any = parts[i]
while i < len(__lowerCamelCase ) and not os.path.isfile(os.path.join(__lowerCamelCase, f"""{module}.py""" ) ):
i += 1
if i < len(__lowerCamelCase ):
_SCREAMING_SNAKE_CASE : Any = os.path.join(__lowerCamelCase, parts[i] )
if i >= len(__lowerCamelCase ):
raise ValueError(f"""`object_name` should begin with the name of a module of diffusers but got {object_name}.""" )
with open(os.path.join(__lowerCamelCase, f"""{module}.py""" ), "r", encoding="utf-8", newline="\n" ) as f:
_SCREAMING_SNAKE_CASE : int = f.readlines()
# Now let's find the class / func in the code!
_SCREAMING_SNAKE_CASE : Union[str, Any] = ""
_SCREAMING_SNAKE_CASE : Union[str, Any] = 0
for name in parts[i + 1 :]:
while (
line_index < len(__lowerCamelCase ) and re.search(Rf"""^{indent}(class|def)\s+{name}(\(|\:)""", lines[line_index] ) is None
):
line_index += 1
indent += " "
line_index += 1
if line_index >= len(__lowerCamelCase ):
raise ValueError(f""" {object_name} does not match any function or class in {module}.""" )
# We found the beginning of the class / func, now let's find the end (when the indent diminishes).
_SCREAMING_SNAKE_CASE : Optional[int] = line_index
while line_index < len(__lowerCamelCase ) and _should_continue(lines[line_index], __lowerCamelCase ):
line_index += 1
# Clean up empty lines at the end (if any).
while len(lines[line_index - 1] ) <= 1:
line_index -= 1
_SCREAMING_SNAKE_CASE : Optional[int] = lines[start_index:line_index]
return "".join(__lowerCamelCase )
UpperCamelCase__ =re.compile(R'^(\s*)#\s*Copied from\s+diffusers\.(\S+\.\S+)\s*($|\S.*$)')
UpperCamelCase__ =re.compile(R'^\s*(\S+)->(\S+)(\s+.*|$)')
UpperCamelCase__ =re.compile(R'<FILL\s+[^>]*>')
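# Quick self-contained illustration of the copy-marker pattern matched above;
# the object path in the sample line is hypothetical.
def _demo_copy_marker():
    copy_warning = re.compile(R"^(\s*)#\s*Copied from\s+diffusers\.(\S+\.\S+)\s*($|\S.*$)")
    replace_pattern = re.compile(R"^\s*(\S+)->(\S+)(\s+.*|$)")
    line = "    # Copied from diffusers.models.unet_2d.UNet2DModel.forward with UNet2D->MyNet"
    match = copy_warning.search(line)
    assert match is not None
    _indent, object_name, options = match.groups()
    # the leading "diffusers." prefix is consumed by the pattern itself
    assert object_name == "models.unet_2d.UNet2DModel.forward"
    assert replace_pattern.search("UNet2D->MyNet") is not None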
def lowerCamelCase__ (__lowerCamelCase ):
_SCREAMING_SNAKE_CASE : Union[str, Any] = code.split("\n" )
_SCREAMING_SNAKE_CASE : Union[str, Any] = 0
while idx < len(__lowerCamelCase ) and len(lines[idx] ) == 0:
idx += 1
if idx < len(__lowerCamelCase ):
return re.search(R"^(\s*)\S", lines[idx] ).groups()[0]
return ""
def lowerCamelCase__ (__lowerCamelCase ):
_SCREAMING_SNAKE_CASE : Optional[int] = len(get_indent(__lowerCamelCase ) ) > 0
if has_indent:
_SCREAMING_SNAKE_CASE : Union[str, Any] = f"""class Bla:\n{code}"""
_SCREAMING_SNAKE_CASE : Any = black.Mode(target_versions={black.TargetVersion.PYaa}, line_length=119, preview=__lowerCamelCase )
_SCREAMING_SNAKE_CASE : List[Any] = black.format_str(__lowerCamelCase, mode=__lowerCamelCase )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : List[Any] = style_docstrings_in_code(__lowerCamelCase )
return result[len("class Bla:\n" ) :] if has_indent else result
def lowerCamelCase__ (__lowerCamelCase, __lowerCamelCase=False ):
with open(__lowerCamelCase, "r", encoding="utf-8", newline="\n" ) as f:
_SCREAMING_SNAKE_CASE : int = f.readlines()
_SCREAMING_SNAKE_CASE : Dict = []
_SCREAMING_SNAKE_CASE : Tuple = 0
# Not a for loop cause `lines` is going to change (if `overwrite=True`).
while line_index < len(__lowerCamelCase ):
_SCREAMING_SNAKE_CASE : Union[str, Any] = _re_copy_warning.search(lines[line_index] )
if search is None:
line_index += 1
continue
# There is some copied code here, let's retrieve the original.
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : str = search.groups()
_SCREAMING_SNAKE_CASE : Any = find_code_in_diffusers(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Union[str, Any] = get_indent(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Union[str, Any] = line_index + 1 if indent == theoretical_indent else line_index + 2
_SCREAMING_SNAKE_CASE : int = theoretical_indent
_SCREAMING_SNAKE_CASE : str = start_index
        # Loop to check the observed code; stop when the indentation diminishes or when we see an "# End copy" comment.
_SCREAMING_SNAKE_CASE : Any = True
while line_index < len(__lowerCamelCase ) and should_continue:
line_index += 1
if line_index >= len(__lowerCamelCase ):
break
_SCREAMING_SNAKE_CASE : Union[str, Any] = lines[line_index]
_SCREAMING_SNAKE_CASE : str = _should_continue(__lowerCamelCase, __lowerCamelCase ) and re.search(f"""^{indent}# End copy""", __lowerCamelCase ) is None
# Clean up empty lines at the end (if any).
while len(lines[line_index - 1] ) <= 1:
line_index -= 1
_SCREAMING_SNAKE_CASE : List[Any] = lines[start_index:line_index]
_SCREAMING_SNAKE_CASE : Optional[Any] = "".join(__lowerCamelCase )
# Remove any nested `Copied from` comments to avoid circular copies
_SCREAMING_SNAKE_CASE : Dict = [line for line in theoretical_code.split("\n" ) if _re_copy_warning.search(__lowerCamelCase ) is None]
_SCREAMING_SNAKE_CASE : str = "\n".join(__lowerCamelCase )
# Before comparing, use the `replace_pattern` on the original code.
if len(__lowerCamelCase ) > 0:
_SCREAMING_SNAKE_CASE : str = replace_pattern.replace("with", "" ).split("," )
_SCREAMING_SNAKE_CASE : Union[str, Any] = [_re_replace_pattern.search(__lowerCamelCase ) for p in patterns]
for pattern in patterns:
if pattern is None:
continue
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : List[str] = pattern.groups()
_SCREAMING_SNAKE_CASE : Tuple = re.sub(__lowerCamelCase, __lowerCamelCase, __lowerCamelCase )
if option.strip() == "all-casing":
_SCREAMING_SNAKE_CASE : List[Any] = re.sub(obja.lower(), obja.lower(), __lowerCamelCase )
_SCREAMING_SNAKE_CASE : Any = re.sub(obja.upper(), obja.upper(), __lowerCamelCase )
# Blackify after replacement. To be able to do that, we need the header (class or function definition)
# from the previous line
_SCREAMING_SNAKE_CASE : int = blackify(lines[start_index - 1] + theoretical_code )
_SCREAMING_SNAKE_CASE : List[str] = theoretical_code[len(lines[start_index - 1] ) :]
# Test for a diff and act accordingly.
if observed_code != theoretical_code:
diffs.append([object_name, start_index] )
if overwrite:
_SCREAMING_SNAKE_CASE : Optional[int] = lines[:start_index] + [theoretical_code] + lines[line_index:]
_SCREAMING_SNAKE_CASE : int = start_index + 1
if overwrite and len(__lowerCamelCase ) > 0:
# Warn the user a file has been modified.
print(f"""Detected changes, rewriting {filename}.""" )
with open(__lowerCamelCase, "w", encoding="utf-8", newline="\n" ) as f:
f.writelines(__lowerCamelCase )
return diffs
def lowerCamelCase__ (__lowerCamelCase = False ):
_SCREAMING_SNAKE_CASE : int = glob.glob(os.path.join(__lowerCamelCase, "**/*.py" ), recursive=__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Any = []
for filename in all_files:
_SCREAMING_SNAKE_CASE : int = is_copy_consistent(__lowerCamelCase, __lowerCamelCase )
diffs += [f"""- {filename}: copy does not match {d[0]} at line {d[1]}""" for d in new_diffs]
if not overwrite and len(__lowerCamelCase ) > 0:
_SCREAMING_SNAKE_CASE : Dict = "\n".join(__lowerCamelCase )
raise Exception(
"Found the following copy inconsistencies:\n"
+ diff
+ "\nRun `make fix-copies` or `python utils/check_copies.py --fix_and_overwrite` to fix them." )
if __name__ == "__main__":
UpperCamelCase__ =argparse.ArgumentParser()
parser.add_argument('--fix_and_overwrite', action='store_true', help='Whether to fix inconsistencies.')
UpperCamelCase__ =parser.parse_args()
check_copies(args.fix_and_overwrite) | 325 | 0 |
"""simple docstring"""
import argparse
import os
from . import (
ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
BART_PRETRAINED_MODEL_ARCHIVE_LIST,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP,
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP,
FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP,
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
T5_PRETRAINED_CONFIG_ARCHIVE_MAP,
TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLM_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
AlbertConfig,
BartConfig,
BertConfig,
CamembertConfig,
CTRLConfig,
DistilBertConfig,
DPRConfig,
ElectraConfig,
FlaubertConfig,
GPTaConfig,
LayoutLMConfig,
LxmertConfig,
OpenAIGPTConfig,
RobertaConfig,
TaConfig,
TFAlbertForPreTraining,
TFBartForConditionalGeneration,
TFBartForSequenceClassification,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFCamembertForMaskedLM,
TFCTRLLMHeadModel,
TFDistilBertForMaskedLM,
TFDistilBertForQuestionAnswering,
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
TFElectraForPreTraining,
TFFlaubertWithLMHeadModel,
TFGPTaLMHeadModel,
TFLayoutLMForMaskedLM,
TFLxmertForPreTraining,
TFLxmertVisualFeatureEncoder,
TFOpenAIGPTLMHeadModel,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
TFRobertaForSequenceClassification,
TFTaForConditionalGeneration,
TFTransfoXLLMHeadModel,
TFWavaVecaModel,
TFXLMRobertaForMaskedLM,
TFXLMWithLMHeadModel,
TFXLNetLMHeadModel,
TransfoXLConfig,
WavaVecaConfig,
WavaVecaModel,
XLMConfig,
XLMRobertaConfig,
XLNetConfig,
is_torch_available,
load_pytorch_checkpoint_in_tfa_model,
)
from .utils import CONFIG_NAME, WEIGHTS_NAME, cached_file, logging
if is_torch_available():
import numpy as np
import torch
from . import (
AlbertForPreTraining,
BartForConditionalGeneration,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
CamembertForMaskedLM,
CTRLLMHeadModel,
DistilBertForMaskedLM,
DistilBertForQuestionAnswering,
DPRContextEncoder,
DPRQuestionEncoder,
DPRReader,
ElectraForPreTraining,
FlaubertWithLMHeadModel,
GPTaLMHeadModel,
LayoutLMForMaskedLM,
LxmertForPreTraining,
LxmertVisualFeatureEncoder,
OpenAIGPTLMHeadModel,
RobertaForMaskedLM,
RobertaForSequenceClassification,
TaForConditionalGeneration,
TransfoXLLMHeadModel,
XLMRobertaForMaskedLM,
XLMWithLMHeadModel,
XLNetLMHeadModel,
)
logging.set_verbosity_info()
__UpperCamelCase = {
'''bart''': (
BartConfig,
TFBartForConditionalGeneration,
TFBartForSequenceClassification,
BartForConditionalGeneration,
BART_PRETRAINED_MODEL_ARCHIVE_LIST,
),
'''bert''': (
BertConfig,
TFBertForPreTraining,
BertForPreTraining,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''bert-large-uncased-whole-word-masking-finetuned-squad''': (
BertConfig,
TFBertForQuestionAnswering,
BertForQuestionAnswering,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''bert-large-cased-whole-word-masking-finetuned-squad''': (
BertConfig,
TFBertForQuestionAnswering,
BertForQuestionAnswering,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''bert-base-cased-finetuned-mrpc''': (
BertConfig,
TFBertForSequenceClassification,
BertForSequenceClassification,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''dpr''': (
DPRConfig,
TFDPRQuestionEncoder,
TFDPRContextEncoder,
TFDPRReader,
DPRQuestionEncoder,
DPRContextEncoder,
DPRReader,
DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
),
'''gpt2''': (
GPTaConfig,
TFGPTaLMHeadModel,
GPTaLMHeadModel,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''xlnet''': (
XLNetConfig,
TFXLNetLMHeadModel,
XLNetLMHeadModel,
XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''xlm''': (
XLMConfig,
TFXLMWithLMHeadModel,
XLMWithLMHeadModel,
XLM_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''xlm-roberta''': (
XLMRobertaConfig,
TFXLMRobertaForMaskedLM,
XLMRobertaForMaskedLM,
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''transfo-xl''': (
TransfoXLConfig,
TFTransfoXLLMHeadModel,
TransfoXLLMHeadModel,
TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''openai-gpt''': (
OpenAIGPTConfig,
TFOpenAIGPTLMHeadModel,
OpenAIGPTLMHeadModel,
OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''roberta''': (
RobertaConfig,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
RobertaForMaskedLM,
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''layoutlm''': (
LayoutLMConfig,
TFLayoutLMForMaskedLM,
LayoutLMForMaskedLM,
LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
),
'''roberta-large-mnli''': (
RobertaConfig,
TFRobertaForSequenceClassification,
RobertaForSequenceClassification,
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''camembert''': (
CamembertConfig,
TFCamembertForMaskedLM,
CamembertForMaskedLM,
CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''flaubert''': (
FlaubertConfig,
TFFlaubertWithLMHeadModel,
FlaubertWithLMHeadModel,
FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''distilbert''': (
DistilBertConfig,
TFDistilBertForMaskedLM,
DistilBertForMaskedLM,
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''distilbert-base-distilled-squad''': (
DistilBertConfig,
TFDistilBertForQuestionAnswering,
DistilBertForQuestionAnswering,
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''lxmert''': (
LxmertConfig,
TFLxmertForPreTraining,
LxmertForPreTraining,
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''lxmert-visual-feature-encoder''': (
LxmertConfig,
TFLxmertVisualFeatureEncoder,
LxmertVisualFeatureEncoder,
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''ctrl''': (
CTRLConfig,
TFCTRLLMHeadModel,
CTRLLMHeadModel,
CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''albert''': (
AlbertConfig,
TFAlbertForPreTraining,
AlbertForPreTraining,
ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''t5''': (
TaConfig,
TFTaForConditionalGeneration,
TaForConditionalGeneration,
T5_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''electra''': (
ElectraConfig,
TFElectraForPreTraining,
ElectraForPreTraining,
ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''wav2vec2''': (
WavaVecaConfig,
TFWavaVecaModel,
WavaVecaModel,
WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
}
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase=False , UpperCAmelCase=True ) -> int:
if model_type not in MODEL_CLASSES:
raise ValueError(f'Unrecognized model type, should be one of {list(MODEL_CLASSES.keys() )}.' )
snake_case_ , snake_case_ , snake_case_ , snake_case_ = MODEL_CLASSES[model_type]
# Initialise TF model
if config_file in aws_config_map:
snake_case_ = cached_file(UpperCAmelCase , UpperCAmelCase , force_download=not use_cached_models )
snake_case_ = config_class.from_json_file(UpperCAmelCase )
snake_case_ = True
snake_case_ = True
print(f'Building TensorFlow model from configuration: {config}' )
snake_case_ = model_class(UpperCAmelCase )
# Load weights from tf checkpoint
if pytorch_checkpoint_path in aws_config_map.keys():
snake_case_ = cached_file(
UpperCAmelCase , UpperCAmelCase , force_download=not use_cached_models )
    # Load the PyTorch checkpoint into the TF2 model:
snake_case_ = load_pytorch_checkpoint_in_tfa_model(UpperCAmelCase , UpperCAmelCase )
if compare_with_pt_model:
snake_case_ = tf_model(tf_model.dummy_inputs , training=UpperCAmelCase ) # build the network
snake_case_ = torch.load(UpperCAmelCase , map_location='cpu' )
snake_case_ = pt_model_class.from_pretrained(
pretrained_model_name_or_path=UpperCAmelCase , config=UpperCAmelCase , state_dict=UpperCAmelCase )
with torch.no_grad():
snake_case_ = pt_model(**pt_model.dummy_inputs )
snake_case_ = pto[0].numpy()
snake_case_ = tfo[0].numpy()
snake_case_ = np.amax(np.abs(np_pt - np_tf ) )
print(f'Max absolute difference between models outputs {diff}' )
assert diff <= 2e-2, f'Error, model absolute difference is >2e-2: {diff}'
# Save pytorch-model
print(f'Save TensorFlow model to {tf_dump_path}' )
tf_model.save_weights(UpperCAmelCase , save_format='h5' )
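# The numerical parity check above, factored into a standalone helper:
# compare two arrays elementwise and fail if the max absolute difference
# exceeds the 2e-2 tolerance used for PT/TF parity.
def _assert_outputs_close(np_pt, np_tf, tolerance=2e-2):
    diff = np.amax(np.abs(np.asarray(np_pt) - np.asarray(np_tf)))
    assert diff <= tolerance, f"model absolute difference is >{tolerance}: {diff}"
    return diff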
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase=None , UpperCAmelCase=None , UpperCAmelCase=False , UpperCAmelCase=False , UpperCAmelCase=False , UpperCAmelCase=False , ) -> Union[str, Any]:
if args_model_type is None:
snake_case_ = list(MODEL_CLASSES.keys() )
else:
snake_case_ = [args_model_type]
for j, model_type in enumerate(UpperCAmelCase , start=1 ):
print('=' * 100 )
print(f' Converting model type {j}/{len(UpperCAmelCase )}: {model_type}' )
print('=' * 100 )
if model_type not in MODEL_CLASSES:
raise ValueError(f'Unrecognized model type {model_type}, should be one of {list(MODEL_CLASSES.keys() )}.' )
snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ = MODEL_CLASSES[model_type]
if model_shortcut_names_or_path is None:
snake_case_ = list(aws_model_maps.keys() )
if config_shortcut_names_or_path is None:
snake_case_ = model_shortcut_names_or_path
for i, (model_shortcut_name, config_shortcut_name) in enumerate(
zip(UpperCAmelCase , UpperCAmelCase ) , start=1 ):
print('-' * 100 )
if "-squad" in model_shortcut_name or "-mrpc" in model_shortcut_name or "-mnli" in model_shortcut_name:
if not only_convert_finetuned_models:
print(f' Skipping finetuned checkpoint {model_shortcut_name}' )
continue
snake_case_ = model_shortcut_name
elif only_convert_finetuned_models:
print(f' Skipping not finetuned checkpoint {model_shortcut_name}' )
continue
print(
f' Converting checkpoint {i}/{len(UpperCAmelCase )}: {model_shortcut_name} - model_type {model_type}' )
print('-' * 100 )
if config_shortcut_name in aws_config_map:
snake_case_ = cached_file(UpperCAmelCase , UpperCAmelCase , force_download=not use_cached_models )
else:
snake_case_ = config_shortcut_name
if model_shortcut_name in aws_model_maps:
snake_case_ = cached_file(UpperCAmelCase , UpperCAmelCase , force_download=not use_cached_models )
else:
snake_case_ = model_shortcut_name
if os.path.isfile(UpperCAmelCase ):
snake_case_ = 'converted_model'
convert_pt_checkpoint_to_tf(
model_type=UpperCAmelCase , pytorch_checkpoint_path=UpperCAmelCase , config_file=UpperCAmelCase , tf_dump_path=os.path.join(UpperCAmelCase , model_shortcut_name + '-tf_model.h5' ) , compare_with_pt_model=UpperCAmelCase , )
if remove_cached_files:
os.remove(UpperCAmelCase )
os.remove(UpperCAmelCase )
if __name__ == "__main__":
__UpperCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_dump_path''', default=None, type=str, required=True, help='''Path to the output Tensorflow dump file.'''
)
parser.add_argument(
'''--model_type''',
default=None,
type=str,
help=(
F"""Model type selected in the list of {list(MODEL_CLASSES.keys())}. If not given, will download and """
'''convert all the models from AWS.'''
),
)
parser.add_argument(
'''--pytorch_checkpoint_path''',
default=None,
type=str,
help=(
'''Path to the PyTorch checkpoint path or shortcut name to download from AWS. '''
'''If not given, will download and convert all the checkpoints from AWS.'''
),
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
help=(
'''The config json file corresponding to the pre-trained model. \n'''
'''This specifies the model architecture. If not given and '''
        '''--pytorch_checkpoint_path is not given or is a shortcut name, '''
        '''use the configuration associated with the shortcut name on AWS.'''
),
)
parser.add_argument(
'''--compare_with_pt_model''', action='''store_true''', help='''Compare Tensorflow and PyTorch model predictions.'''
)
parser.add_argument(
'''--use_cached_models''',
action='''store_true''',
help='''Use cached models if possible instead of updating to latest checkpoint versions.''',
)
parser.add_argument(
'''--remove_cached_files''',
action='''store_true''',
help='''Remove pytorch models after conversion (save memory when converting in batches).''',
)
parser.add_argument('''--only_convert_finetuned_models''', action='''store_true''', help='''Only convert finetuned models.''')
__UpperCamelCase = parser.parse_args()
# if args.pytorch_checkpoint_path is not None:
# convert_pt_checkpoint_to_tf(args.model_type.lower(),
# args.pytorch_checkpoint_path,
# args.config_file if args.config_file is not None else args.pytorch_checkpoint_path,
# args.tf_dump_path,
# compare_with_pt_model=args.compare_with_pt_model,
# use_cached_models=args.use_cached_models)
# else:
convert_all_pt_checkpoints_to_tf(
args.model_type.lower() if args.model_type is not None else None,
args.tf_dump_path,
model_shortcut_names_or_path=[args.pytorch_checkpoint_path]
if args.pytorch_checkpoint_path is not None
else None,
config_shortcut_names_or_path=[args.config_file] if args.config_file is not None else None,
compare_with_pt_model=args.compare_with_pt_model,
use_cached_models=args.use_cached_models,
remove_cached_files=args.remove_cached_files,
only_convert_finetuned_models=args.only_convert_finetuned_models,
)
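# Typical invocations of the script above (the file name and paths are
# illustrative placeholders):
#
#   # convert a single shortcut checkpoint and compare PT/TF outputs
#   python convert_pytorch_checkpoint_to_tf2.py --model_type bert \
#       --pytorch_checkpoint_path bert-base-cased \
#       --tf_dump_path ./tf_dumps --compare_with_pt_model
#
#   # convert every known checkpoint for every model type
#   python convert_pytorch_checkpoint_to_tf2.py --tf_dump_path ./tf_dumps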
| 69 |
import unicodedata
from dataclasses import dataclass
from typing import Optional, Union
import numpy as np
from transformers.data.data_collator import DataCollatorMixin
from transformers.file_utils import PaddingStrategy
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
def __snake_case ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
__a = np.full((len(_UpperCAmelCase ), sequence_length, 2) , _UpperCAmelCase )
else:
__a = np.full((len(_UpperCAmelCase ), sequence_length) , _UpperCAmelCase )
for i, tensor in enumerate(_UpperCAmelCase ):
if padding_side == "right":
if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
__a = tensor[:sequence_length]
else:
__a = tensor[:sequence_length]
else:
if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
__a = tensor[:sequence_length]
else:
__a = tensor[:sequence_length]
return out_tensor.tolist()
def __snake_case ( _UpperCAmelCase ):
__a = ord(_UpperCAmelCase )
if (cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or (cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126):
return True
__a = unicodedata.category(_UpperCAmelCase )
if cat.startswith('''P''' ):
return True
return False
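# Self-contained illustration of the punctuation test above; note that
# non-ASCII punctuation such as "“" is caught by the unicodedata branch
# rather than the ASCII code-point ranges.
def _demo_is_punctuation():
    for ch, expected in (("!", True), ("“", True), ("a", False), (" ", False)):
        cp = ord(ch)
        is_ascii_punct = (
            (33 <= cp <= 47) or (58 <= cp <= 64) or (91 <= cp <= 96) or (123 <= cp <= 126)
        )
        assert (is_ascii_punct or unicodedata.category(ch).startswith("P")) == expected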
@dataclass
class _A ( __UpperCAmelCase ):
UpperCamelCase__ : PreTrainedTokenizerBase
UpperCamelCase__ : Union[bool, str, PaddingStrategy] = True
UpperCamelCase__ : Optional[int] = None
UpperCamelCase__ : Optional[int] = None
UpperCamelCase__ : int = -100
UpperCamelCase__ : str = "pt"
def _lowerCamelCase ( self : Optional[Any] , __SCREAMING_SNAKE_CASE : Optional[Any]):
'''simple docstring'''
import torch
__a = '''label''' if '''label''' in features[0].keys() else '''labels'''
__a = [feature[label_name] for feature in features] if label_name in features[0].keys() else None
__a = self.tokenizer.pad(
__SCREAMING_SNAKE_CASE , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors='''pt''' if labels is None else None , )
if labels is None:
return batch
__a = torch.tensor(batch['''entity_ids''']).shape[1]
__a = self.tokenizer.padding_side
if padding_side == "right":
__a = [
list(__SCREAMING_SNAKE_CASE) + [self.label_pad_token_id] * (sequence_length - len(__SCREAMING_SNAKE_CASE)) for label in labels
]
else:
__a = [
[self.label_pad_token_id] * (sequence_length - len(__SCREAMING_SNAKE_CASE)) + list(__SCREAMING_SNAKE_CASE) for label in labels
]
__a = [feature['''ner_tags'''] for feature in features]
__a = padding_tensor(__SCREAMING_SNAKE_CASE , -1 , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE)
__a = [feature['''original_entity_spans'''] for feature in features]
__a = padding_tensor(__SCREAMING_SNAKE_CASE , (-1, -1) , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE)
__a = {k: torch.tensor(__SCREAMING_SNAKE_CASE , dtype=torch.intaa) for k, v in batch.items()}
return batch
| 49 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
UpperCamelCase = {
"""configuration_ctrl""": ["""CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP""", """CTRLConfig"""],
"""tokenization_ctrl""": ["""CTRLTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = [
"""CTRL_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""CTRLForSequenceClassification""",
"""CTRLLMHeadModel""",
"""CTRLModel""",
"""CTRLPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = [
"""TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFCTRLForSequenceClassification""",
"""TFCTRLLMHeadModel""",
"""TFCTRLModel""",
"""TFCTRLPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_ctrl import CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP, CTRLConfig
from .tokenization_ctrl import CTRLTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ctrl import (
CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
CTRLForSequenceClassification,
CTRLLMHeadModel,
CTRLModel,
CTRLPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_ctrl import (
TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCTRLForSequenceClassification,
TFCTRLLMHeadModel,
TFCTRLModel,
TFCTRLPreTrainedModel,
)
else:
import sys
UpperCamelCase = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
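# A minimal sketch of the lazy-import idea _LazyModule implements above:
# the real import is deferred until the attribute is actually requested.
# `module_name` and `attr` are generic placeholders.
def _lazy_attr(module_name, attr):
    import importlib

    return getattr(importlib.import_module(module_name), attr)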
| 353 |
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
WavaVecaConfig,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaForCTC,
WavaVecaForPreTraining,
WavaVecaProcessor,
logging,
)
from transformers.models.wavaveca.modeling_wavaveca import WavaVecaForSequenceClassification
logging.set_verbosity_info()
UpperCamelCase = logging.get_logger(__name__)
UpperCamelCase = {
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
'fc2': 'encoder.layers.*.feed_forward.output_dense',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'adapter_layer': 'encoder.layers.*.adapter_layer',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'quantizer.weight_proj': 'quantizer.weight_proj',
'quantizer.vars': 'quantizer.codevectors',
'project_q': 'project_q',
'final_proj': 'project_hid',
'w2v_encoder.proj': 'lm_head',
'mask_emb': 'masked_spec_embed',
'pooling_layer.linear': 'projector',
'pooling_layer.projection': 'classifier',
}
UpperCamelCase = [
'lm_head',
'quantizer.weight_proj',
'quantizer.codevectors',
'project_q',
'project_hid',
'projector',
'classifier',
]
def _A ( lowerCAmelCase_ : Optional[int] ):
"""simple docstring"""
lowerCAmelCase__ = {}
with open(lowerCAmelCase_ , "r" ) as file:
for line_number, line in enumerate(lowerCAmelCase_ ):
lowerCAmelCase__ = line.strip()
if line:
lowerCAmelCase__ = line.split()
lowerCAmelCase__ = line_number
lowerCAmelCase__ = words[0]
lowerCAmelCase__ = value
return result
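# Sketch of the intended mapping built above: each non-empty line's first
# token, keyed by its line number (e.g. an id2label file with one label per
# line). The sample lines here are hypothetical.
def _demo_read_txt(lines=("speech 0", "noise 1", "", "music 2")):
    result = {}
    for line_number, line in enumerate(lines):
        line = line.strip()
        if line:
            result[line_number] = line.split()[0]
    return result  # {0: "speech", 1: "noise", 3: "music"}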
def _A ( lowerCAmelCase_ : Any , lowerCAmelCase_ : int , lowerCAmelCase_ : int , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : int ):
"""simple docstring"""
for attribute in key.split("." ):
lowerCAmelCase__ = getattr(lowerCAmelCase_ , lowerCAmelCase_ )
lowerCAmelCase__ = None
for param_key in PARAM_MAPPING.keys():
if full_name.endswith(lowerCAmelCase_ ):
lowerCAmelCase__ = PARAM_MAPPING[full_name.split("." )[-1]]
lowerCAmelCase__ = "param"
if weight_type is not None and weight_type != "param":
lowerCAmelCase__ = getattr(lowerCAmelCase_ , lowerCAmelCase_ ).shape
elif weight_type is not None and weight_type == "param":
lowerCAmelCase__ = hf_pointer
for attribute in hf_param_name.split("." ):
lowerCAmelCase__ = getattr(lowerCAmelCase_ , lowerCAmelCase_ )
lowerCAmelCase__ = shape_pointer.shape
        # let's reduce the dimension by taking the first element
lowerCAmelCase__ = value[0]
else:
lowerCAmelCase__ = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
F'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
F' {value.shape} for {full_name}' )
if weight_type == "weight":
lowerCAmelCase__ = value
elif weight_type == "weight_g":
lowerCAmelCase__ = value
elif weight_type == "weight_v":
lowerCAmelCase__ = value
elif weight_type == "bias":
lowerCAmelCase__ = value
elif weight_type == "param":
for attribute in hf_param_name.split("." ):
lowerCAmelCase__ = getattr(lowerCAmelCase_ , lowerCAmelCase_ )
lowerCAmelCase__ = value
else:
lowerCAmelCase__ = value
logger.info(F'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.' )
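# Small self-contained illustration of the dotted-attribute traversal used by
# the recursive setter above; the namespace objects are stand-ins for modules.
def _demo_dotted_getattr():
    import types

    leaf = types.SimpleNamespace(weight="W")
    root = types.SimpleNamespace(encoder=types.SimpleNamespace(layer=leaf))
    pointer = root
    for attribute in "encoder.layer.weight".split("."):
        pointer = getattr(pointer, attribute)
    assert pointer == "W"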
def _A ( lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : int , lowerCAmelCase_ : Optional[Any] ):
"""simple docstring"""
lowerCAmelCase__ = None
for param_key in PARAM_MAPPING.keys():
if full_name.endswith(lowerCAmelCase_ ):
lowerCAmelCase__ = PARAM_MAPPING[full_name.split("." )[-1]]
lowerCAmelCase__ = "param"
if weight_type is not None and weight_type != "param":
lowerCAmelCase__ = ".".join([key, weight_type] )
elif weight_type is not None and weight_type == "param":
lowerCAmelCase__ = ".".join([key, hf_param_name] )
else:
lowerCAmelCase__ = key
lowerCAmelCase__ = value if "lm_head" in full_key else value[0]
UpperCamelCase = {
'W_a': 'linear_1.weight',
'W_b': 'linear_2.weight',
'b_a': 'linear_1.bias',
'b_b': 'linear_2.bias',
'ln_W': 'norm.weight',
'ln_b': 'norm.bias',
}
def _A ( lowerCAmelCase_ : int , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : str=None , lowerCAmelCase_ : Any=None ):
"""simple docstring"""
lowerCAmelCase__ = False
for key, mapped_key in MAPPING.items():
lowerCAmelCase__ = "wav2vec2." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split("w2v_model." )[-1] == name.split("." )[0]:
lowerCAmelCase__ = True
if "*" in mapped_key:
lowerCAmelCase__ = name.split(lowerCAmelCase_ )[0].split("." )[-2]
lowerCAmelCase__ = mapped_key.replace("*" , lowerCAmelCase_ )
if "weight_g" in name:
lowerCAmelCase__ = "weight_g"
elif "weight_v" in name:
lowerCAmelCase__ = "weight_v"
elif "bias" in name:
lowerCAmelCase__ = "bias"
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
lowerCAmelCase__ = "weight"
else:
lowerCAmelCase__ = None
if hf_dict is not None:
rename_dict(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
else:
set_recursively(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
return is_used
return is_used
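# Illustration of the wildcard substitution above: the layer index parsed out
# of a fairseq parameter name is spliced into the HF key template. The sample
# names mirror the MAPPING entries but are otherwise hypothetical.
def _demo_wildcard_rename():
    name = "encoder.layers.3.self_attn.k_proj.weight"
    key = "self_attn.k_proj"
    mapped_key = "wav2vec2.encoder.layers.*.attention.k_proj"
    layer_index = name.split(key)[0].split(".")[-2]
    assert mapped_key.replace("*", layer_index) == "wav2vec2.encoder.layers.3.attention.k_proj"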
def _A ( lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : Dict ):
"""simple docstring"""
lowerCAmelCase__ = []
lowerCAmelCase__ = fairseq_model.state_dict()
lowerCAmelCase__ = hf_model.wavaveca.feature_extractor
for name, value in fairseq_dict.items():
lowerCAmelCase__ = False
if "conv_layers" in name:
load_conv_layer(
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , hf_model.config.feat_extract_norm == "group" , )
lowerCAmelCase__ = True
else:
lowerCAmelCase__ = load_wavaveca_layer(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
if not is_used:
unused_weights.append(lowerCAmelCase_ )
logger.warning(F'Unused weights: {unused_weights}' )
def load_conv_layer( full_name , value , feature_extractor , unused_weights , use_group_norm ):
    """simple docstring"""
    name = full_name.split("conv_layers." )[-1]
    items = name.split("." )
    layer_id = int(items[0] )
    type_id = int(items[1] )
    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    F'{full_name} has size {value.shape}, but'
                    F' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.' )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(F'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    F'{full_name} has size {value.shape}, but'
                    F' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.' )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(F'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    F'{full_name} has size {value.shape}, but'
                    F' {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.' )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(F'Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.' )
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    F'{full_name} has size {value.shape}, but'
                    F' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.' )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(F'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' )
    else:
        unused_weights.append(full_name )
@torch.no_grad()
def convert_wavaveca_checkpoint(
    checkpoint_path , pytorch_dump_folder_path , config_path=None , dict_path=None , is_finetuned=True , is_seq_class=False ):
    """simple docstring"""
    if config_path is not None:
        config = WavaVecaConfig.from_pretrained(config_path )
    else:
        config = WavaVecaConfig()
    if is_seq_class:
        idalabel = read_txt_into_dict(dict_path )
        config.idalabel = idalabel
        hf_wavavec = WavaVecaForSequenceClassification(config )
        feature_extractor = WavaVecaFeatureExtractor(
            feature_size=1 , sampling_rate=16_000 , padding_value=0 , do_normalize=True , return_attention_mask=True , )
        feature_extractor.save_pretrained(pytorch_dump_folder_path )
    elif is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path )
            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols )
            vocab_path = os.path.join(pytorch_dump_folder_path , "vocab.json" )
            if not os.path.isdir(pytorch_dump_folder_path ):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path ) )
                return
            os.makedirs(pytorch_dump_folder_path , exist_ok=True )
            vocab_dict = target_dict.indices
            # fairseq has the <pad> and <s> switched
            vocab_dict["<pad>"] = 0
            vocab_dict["<s>"] = 1
            with open(vocab_path , "w" , encoding="utf-8" ) as vocab_handle:
                json.dump(vocab_dict , vocab_handle )
            tokenizer = WavaVecaCTCTokenizer(
                vocab_path , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token="|" , do_lower_case=False , )
            return_attention_mask = True if config.feat_extract_norm == "layer" else False
            feature_extractor = WavaVecaFeatureExtractor(
                feature_size=1 , sampling_rate=16_000 , padding_value=0 , do_normalize=True , return_attention_mask=return_attention_mask , )
            processor = WavaVecaProcessor(feature_extractor=feature_extractor , tokenizer=tokenizer )
            processor.save_pretrained(pytorch_dump_folder_path )
        hf_wavavec = WavaVecaForCTC(config )
    else:
        hf_wavavec = WavaVecaForPreTraining(config )
    if is_finetuned or is_seq_class:
        model , _ , _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path] , arg_overrides={"data": "/".join(dict_path.split("/" )[:-1] )} )
    else:
        task_arg = argparse.Namespace(task="audio_pretraining" )
        task = fairseq.tasks.setup_task(task_arg )
        model , _ , _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] , task=task )
    model = model[0].eval()
    recursively_load_weights(model , hf_wavavec , not is_finetuned )
    hf_wavavec.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--not_finetuned', action='store_true', help='Whether the model to convert is a fine-tuned model or not'
)
parser.add_argument(
'--is_seq_class',
action='store_true',
help='Whether the model to convert is a fine-tuned sequence classification model or not',
)
    args = parser.parse_args()
    is_finetuned = not args.not_finetuned and not args.is_seq_class
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.dict_path,
is_finetuned,
args.is_seq_class,
)
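# Example invocation (paths are illustrative placeholders, not real files):
#   python convert_wav2vec2_checkpoint.py \
#       --checkpoint_path ./wav2vec_small.pt \
#       --pytorch_dump_folder_path ./wav2vec2-base \
#       --not_finetuned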
| 221 | 0 |
"""simple docstring"""
def or_gate( input_a: int , input_b: int ) -> int:
    '''simple docstring'''
    return int((input_a, input_b).count(1 ) != 0 )
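# (input_a, input_b).count(1) tallies how many inputs are high; OR is 1 exactly
# when that tally is non-zero.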
def test_or_gate( ) -> None:
'''simple docstring'''
assert or_gate(0 , 0 ) == 0
assert or_gate(0 , 1 ) == 1
assert or_gate(1 , 0 ) == 1
assert or_gate(1 , 1 ) == 1
if __name__ == "__main__":
print(or_gate(0, 1))
print(or_gate(1, 0))
print(or_gate(0, 0))
print(or_gate(1, 1))
| 197 | """simple docstring"""
from __future__ import annotations
def UpperCAmelCase__ ( nums: list[int] ) -> int:
    '''
    Maximum sum of non-adjacent elements (classic "house robber" dynamic program).

    >>> UpperCAmelCase__([1, 2, 4, 5])
    7
    >>> UpperCAmelCase__([])
    0
    '''
    if not nums:
        return 0
    max_including = nums[0]
    max_excluding = 0
    for num in nums[1:]:
        max_including , max_excluding = (
            max_excluding + num,
            max(max_including , max_excluding ),
        )
    return max(max_including , max_excluding )
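# Recurrence: taking `num` forces the previous element to have been excluded
# (max_excluding + num); skipping `num` carries forward the better of both states.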
if __name__ == "__main__":
import doctest
doctest.testmod()
| 197 | 1 |
"""simple docstring"""
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, PLBartTokenizer, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
A_ = get_tests_dir('''fixtures/test_sentencepiece.model''')
if is_torch_available():
from transformers.models.plbart.modeling_plbart import shift_tokens_right
EN_CODE = 5_00_03
PYTHON_CODE = 5_00_02
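# In the "base" PLBart vocabulary the language codes sit at the very end:
# __java__ = 50001, __python__ = 50002, __en_XX__ = 50003 (asserted in the tests below).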
@require_sentencepiece
@require_tokenizers
class lowercase( TokenizerTesterMixin , unittest.TestCase ):
'''simple docstring'''
    tokenizer_class = PLBartTokenizer
    rust_tokenizer_class = None
    test_rust_tokenizer = False
def UpperCamelCase_ ( self: Union[str, Any] ):
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
_snake_case : Any = PLBartTokenizer(a_, language_codes="""base""", keep_accents=a_ )
tokenizer.save_pretrained(self.tmpdirname )
def UpperCamelCase_ ( self: Optional[int] ):
'''simple docstring'''
_snake_case : List[Any] = PLBartTokenizer(a_, language_codes="""base""", keep_accents=a_ )
_snake_case : int = tokenizer.tokenize("""This is a test""" )
self.assertListEqual(a_, ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(a_ ), [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]], )
_snake_case : Union[str, Any] = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
a_, [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
], )
_snake_case : List[str] = tokenizer.convert_tokens_to_ids(a_ )
self.assertListEqual(
a_, [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
], )
_snake_case : int = tokenizer.convert_ids_to_tokens(a_ )
self.assertListEqual(
a_, [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""<unk>""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""<unk>""",
""".""",
], )
_snake_case : Tuple = tokenizer.vocab_size
_snake_case : Dict = [tokenizer.convert_ids_to_tokens(a_ ) for x in range(end - 4, a_ )]
self.assertListEqual(a_, ["""__java__""", """__python__""", """__en_XX__""", """<mask>"""] )
_snake_case : List[Any] = """java.lang.Exception, python.lang.Exception, javascript, php, ruby, go"""
_snake_case : Optional[int] = tokenizer(a_ ).input_ids
self.assertEqual(
tokenizer.decode(a_, skip_special_tokens=a_, clean_up_tokenization_spaces=a_ ), a_, )
def UpperCamelCase_ ( self: Optional[int] ):
'''simple docstring'''
_snake_case : int = PLBartTokenizer(a_, language_codes="""multi""", keep_accents=a_ )
_snake_case : int = tokenizer.tokenize("""This is a test""" )
self.assertListEqual(a_, ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(a_ ), [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]], )
_snake_case : str = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
a_, [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
], )
_snake_case : Dict = tokenizer.convert_tokens_to_ids(a_ )
self.assertListEqual(
a_, [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
], )
_snake_case : int = tokenizer.convert_ids_to_tokens(a_ )
self.assertListEqual(
a_, [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""<unk>""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""<unk>""",
""".""",
], )
_snake_case : Optional[int] = tokenizer.vocab_size
_snake_case : Tuple = [tokenizer.convert_ids_to_tokens(a_ ) for x in range(end - 7, a_ )]
self.assertListEqual(
a_, ["""__java__""", """__python__""", """__en_XX__""", """__javascript__""", """__php__""", """__ruby__""", """__go__"""] )
_snake_case : Any = """java.lang.Exception, python.lang.Exception, javascript, php, ruby, go"""
_snake_case : int = tokenizer(a_ ).input_ids
self.assertEqual(
tokenizer.decode(a_, skip_special_tokens=a_, clean_up_tokenization_spaces=a_ ), a_, )
@require_torch
@require_sentencepiece
@require_tokenizers
class lowercase( unittest.TestCase ):
'''simple docstring'''
lowercase__ = "uclanlp/plbart-python-en_XX"
    src_text = [
"def maximum(a,b,c):NEW_LINE_INDENTreturn max([a,b,c])",
"def sum(a,b,c):NEW_LINE_INDENTreturn sum([a,b,c])",
]
    tgt_text = [
"Returns the maximum value of a b c.",
"Sums the values of a b c.",
]
    expected_src_tokens = [
1_34,
54_52,
3_34_60,
3_34_41,
3_34_63,
3_34_65,
3_34_63,
3_34_49,
9_88,
20,
3_34_56,
19,
3_34_56,
7_71,
39,
42_58,
8_89,
33_18,
3_34_41,
3_34_63,
3_34_65,
3_34_63,
3_34_49,
24_71,
2,
PYTHON_CODE,
]
@classmethod
def UpperCamelCase_ ( cls: List[str] ):
'''simple docstring'''
_snake_case : PLBartTokenizer = PLBartTokenizer.from_pretrained(
cls.checkpoint_name, language_codes="""base""", src_lang="""python""", tgt_lang="""en_XX""" )
_snake_case : Tuple = 1
return cls
def UpperCamelCase_ ( self: List[Any] ):
'''simple docstring'''
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""__java__"""], 50_001 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""__python__"""], 50_002 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""__en_XX__"""], 50_003 )
def UpperCamelCase_ ( self: Optional[Any] ):
'''simple docstring'''
_snake_case : List[str] = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens, a_ )
def UpperCamelCase_ ( self: Tuple ):
'''simple docstring'''
self.assertIn(a_, self.tokenizer.all_special_ids )
_snake_case : Optional[int] = [EN_CODE, 9_037, 33_442, 57, 752, 153, 14, 56, 18, 9, 2]
_snake_case : Optional[Any] = self.tokenizer.decode(a_, skip_special_tokens=a_ )
_snake_case : Dict = self.tokenizer.decode(generated_ids[1:], skip_special_tokens=a_ )
self.assertEqual(a_, a_ )
self.assertNotIn(self.tokenizer.eos_token, a_ )
def UpperCamelCase_ ( self: List[str] ):
'''simple docstring'''
_snake_case : Optional[Any] = ["""def sum(a,b,c):NEW_LINE_INDENTreturn sum([a,b,c])""" * 20]
self.assertIsInstance(src_text[0], a_ )
_snake_case : str = 10
_snake_case : Optional[Any] = self.tokenizer(a_, max_length=a_, truncation=a_ ).input_ids[0]
self.assertEqual(ids[-2], 2 )
self.assertEqual(ids[-1], a_ )
self.assertEqual(len(a_ ), a_ )
def UpperCamelCase_ ( self: Tuple ):
'''simple docstring'''
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["""<mask>""", """__java__"""] ), [50_004, 50_001] )
def UpperCamelCase_ ( self: Tuple ):
'''simple docstring'''
_snake_case : Union[str, Any] = tempfile.mkdtemp()
_snake_case : Union[str, Any] = self.tokenizer.fairseq_tokens_to_ids
self.tokenizer.save_pretrained(a_ )
_snake_case : Dict = PLBartTokenizer.from_pretrained(a_ )
self.assertDictEqual(new_tok.fairseq_tokens_to_ids, a_ )
@require_torch
def UpperCamelCase_ ( self: Dict ):
'''simple docstring'''
_snake_case : List[Any] = self.tokenizer(self.src_text, text_target=self.tgt_text, padding=a_, return_tensors="""pt""" )
_snake_case : Any = shift_tokens_right(batch["""labels"""], self.tokenizer.pad_token_id )
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
self.assertEqual(batch.input_ids[1][-2:].tolist(), [2, PYTHON_CODE] )
self.assertEqual(batch.decoder_input_ids[1][0], a_ )
self.assertEqual(batch.decoder_input_ids[1][-1], 2 )
self.assertEqual(batch.labels[1][-2:].tolist(), [2, EN_CODE] )
@require_torch
def UpperCamelCase_ ( self: Optional[Any] ):
'''simple docstring'''
_snake_case : List[Any] = self.tokenizer(
self.src_text, text_target=self.tgt_text, padding=a_, truncation=a_, max_length=len(self.expected_src_tokens ), return_tensors="""pt""", )
_snake_case : int = shift_tokens_right(batch["""labels"""], self.tokenizer.pad_token_id )
self.assertIsInstance(a_, a_ )
self.assertEqual((2, 26), batch.input_ids.shape )
self.assertEqual((2, 26), batch.attention_mask.shape )
_snake_case : Union[str, Any] = batch.input_ids.tolist()[0]
self.assertListEqual(self.expected_src_tokens, a_ )
self.assertEqual(2, batch.decoder_input_ids[0, -1] ) # EOS
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens, [] )
self.assertEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id, PYTHON_CODE] )
def UpperCamelCase_ ( self: Any ):
'''simple docstring'''
_snake_case : Optional[Any] = self.tokenizer(self.src_text, padding=a_, truncation=a_, max_length=3, return_tensors="""pt""" )
_snake_case : Optional[int] = self.tokenizer(
text_target=self.tgt_text, padding=a_, truncation=a_, max_length=10, return_tensors="""pt""" )
_snake_case : List[Any] = targets["""input_ids"""]
_snake_case : str = shift_tokens_right(a_, self.tokenizer.pad_token_id )
self.assertEqual(batch.input_ids.shape[1], 3 )
self.assertEqual(batch.decoder_input_ids.shape[1], 10 )
@require_torch
def UpperCamelCase_ ( self: Optional[Any] ):
'''simple docstring'''
_snake_case : List[Any] = self.tokenizer._build_translation_inputs(
"""A test""", return_tensors="""pt""", src_lang="""en_XX""", tgt_lang="""java""" )
self.assertEqual(
nested_simplify(a_ ), {
# A, test, EOS, en_XX
"""input_ids""": [[150, 242, 2, 50_003]],
"""attention_mask""": [[1, 1, 1, 1]],
# java
"""forced_bos_token_id""": 50_001,
}, )
| 132 |
"""simple docstring"""
def solution (limit : int = 1_00_00_00 ):
    """simple docstring"""
    phi = [i - 1 for i in range(limit + 1 )]
    for i in range(2 , limit + 1 ):
        if phi[i] == i - 1:
            for j in range(2 * i , limit + 1 , i ):
                phi[j] -= phi[j] // i
    return sum(phi[2 : limit + 1] )
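# Summing Euler's totient over 2..limit counts the reduced proper fractions with
# denominator <= limit -- the quantity this solution appears to target
# (cf. Project Euler problem 72).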
if __name__ == "__main__":
print(solution())
| 132 | 1 |
import enum
import warnings
from .. import MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING
from ..utils import add_end_docstrings, is_tf_available
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
class ReturnType ( enum.Enum):
    TENSORS = 0
    NEW_TEXT = 1
    FULL_TEXT = 2
@add_end_docstrings(PIPELINE_INIT_ARGS)
class __lowerCAmelCase ( Pipeline):
_lowercase : Dict = """
In 1991, the remains of Russian Tsar Nicholas II and his family (except for Alexei and Maria) are discovered. The
voice of Nicholas's young son, Tsarevich Alexei Nikolaevich, narrates the remainder of the story. 1883 Western
Siberia, a young Grigori Rasputin is asked by his father and a group of men to perform magic. Rasputin has a vision
and denounces one of the men as a horse thief. Although his father initially slaps him for making such an
accusation, Rasputin watches as the man is chased outside and beaten. Twenty years later, Rasputin sees a vision of
the Virgin Mary, prompting him to become a priest. Rasputin quickly becomes famous, with people, even a bishop,
begging for his blessing. <eod> </s> <eos>
"""
def __init__( self , *lowerCAmelCase__ , **lowerCAmelCase__ ) -> Optional[Any]:
'''simple docstring'''
super().__init__(*lowerCAmelCase__ , **lowerCAmelCase__ )
self.check_model_type(
TF_MODEL_FOR_CAUSAL_LM_MAPPING if self.framework == "tf" else MODEL_FOR_CAUSAL_LM_MAPPING )
if "prefix" not in self._preprocess_params:
# This is very specific. The logic is quite complex and needs to be done
# as a "default".
# It also defines both some preprocess_kwargs and generate_kwargs
# which is why we cannot put them in their respective methods.
a__ : Optional[Any] =None
if self.model.config.prefix is not None:
a__ : str =self.model.config.prefix
if prefix is None and self.model.__class__.__name__ in [
"XLNetLMHeadModel",
"TransfoXLLMHeadModel",
"TFXLNetLMHeadModel",
"TFTransfoXLLMHeadModel",
]:
# For XLNet and TransformerXL we add an article to the prompt to give more state to the model.
a__ : Any =self.XL_PREFIX
if prefix is not None:
# Recalculate some generate_kwargs linked to prefix.
a__ , a__ , a__ : Tuple =self._sanitize_parameters(prefix=lowerCAmelCase__ , **self._forward_params )
a__ : List[str] ={**self._preprocess_params, **preprocess_params}
a__ : Tuple ={**self._forward_params, **forward_params}
def _lowercase ( self , lowerCAmelCase__=None , lowerCAmelCase__=None , lowerCAmelCase__=None , lowerCAmelCase__=None , lowerCAmelCase__=None , lowerCAmelCase__=None , lowerCAmelCase__=None , lowerCAmelCase__=None , **lowerCAmelCase__ , ) -> Tuple:
'''simple docstring'''
a__ : Tuple ={}
if prefix is not None:
a__ : Optional[Any] =prefix
if prefix:
a__ : Optional[Any] =self.tokenizer(
lowerCAmelCase__ , padding=lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ , return_tensors=self.framework )
a__ : str =prefix_inputs["input_ids"].shape[-1]
if handle_long_generation is not None:
if handle_long_generation not in {"hole"}:
raise ValueError(
F'''{handle_long_generation} is not a valid value for `handle_long_generation` parameter expected'''
" [None, 'hole']" )
a__ : List[Any] =handle_long_generation
preprocess_params.update(lowerCAmelCase__ )
a__ : Tuple =generate_kwargs
a__ : Union[str, Any] ={}
if return_full_text is not None and return_type is None:
if return_text is not None:
raise ValueError("`return_text` is mutually exclusive with `return_full_text`" )
if return_tensors is not None:
raise ValueError("`return_full_text` is mutually exclusive with `return_tensors`" )
a__ : str =ReturnType.FULL_TEXT if return_full_text else ReturnType.NEW_TEXT
if return_tensors is not None and return_type is None:
if return_text is not None:
raise ValueError("`return_text` is mutually exclusive with `return_tensors`" )
a__ : Any =ReturnType.TENSORS
if return_type is not None:
a__ : Dict =return_type
if clean_up_tokenization_spaces is not None:
a__ : int =clean_up_tokenization_spaces
if stop_sequence is not None:
a__ : Optional[int] =self.tokenizer.encode(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ )
if len(lowerCAmelCase__ ) > 1:
warnings.warn(
"Stopping on a multiple token sequence is not yet supported on transformers. The first token of"
" the stop sequence will be used as the stop sequence string in the interim." )
a__ : Optional[Any] =stop_sequence_ids[0]
return preprocess_params, forward_params, postprocess_params
def _lowercase ( self , *lowerCAmelCase__ , **lowerCAmelCase__ ) -> int:
'''simple docstring'''
if self.model.__class__.__name__ in ["TransfoXLLMHeadModel"]:
kwargs.update({"add_space_before_punct_symbol": True} )
return super()._parse_and_tokenize(*lowerCAmelCase__ , **lowerCAmelCase__ )
def __call__( self , lowerCAmelCase__ , **lowerCAmelCase__ ) -> Any:
'''simple docstring'''
return super().__call__(lowerCAmelCase__ , **lowerCAmelCase__ )
def _lowercase ( self , lowerCAmelCase__ , lowerCAmelCase__="" , lowerCAmelCase__=None , **lowerCAmelCase__ ) -> Optional[Any]:
'''simple docstring'''
a__ : Optional[Any] =self.tokenizer(
prefix + prompt_text , padding=lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ , return_tensors=self.framework )
a__ : Optional[Any] =prompt_text
if handle_long_generation == "hole":
a__ : Tuple =inputs["input_ids"].shape[-1]
if "max_new_tokens" in generate_kwargs:
a__ : List[str] =generate_kwargs["max_new_tokens"]
else:
a__ : Dict =generate_kwargs.get("max_length" , self.model.config.max_length ) - cur_len
if new_tokens < 0:
raise ValueError("We cannot infer how many new tokens are expected" )
if cur_len + new_tokens > self.tokenizer.model_max_length:
a__ : Optional[Any] =self.tokenizer.model_max_length - new_tokens
if keep_length <= 0:
raise ValueError(
"We cannot use `hole` to handle this generation the number of desired tokens exceeds the"
" models max length" )
a__ : Tuple =inputs["input_ids"][:, -keep_length:]
if "attention_mask" in inputs:
a__ : List[Any] =inputs["attention_mask"][:, -keep_length:]
return inputs
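        # The "hole" strategy above drops tokens from the left of the prompt so that
        # prompt length + requested new tokens still fit within tokenizer.model_max_length.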
def _lowercase ( self , lowerCAmelCase__ , **lowerCAmelCase__ ) -> List[str]:
'''simple docstring'''
a__ : Optional[int] =model_inputs["input_ids"]
a__ : Optional[Any] =model_inputs.get("attention_mask" , lowerCAmelCase__ )
# Allow empty prompts
if input_ids.shape[1] == 0:
a__ : Tuple =None
a__ : Tuple =None
a__ : Dict =1
else:
a__ : List[str] =input_ids.shape[0]
a__ : Any =model_inputs.pop("prompt_text" )
# If there is a prefix, we may need to adjust the generation length. Do so without permanently modifying
# generate_kwargs, as some of the parameterization may come from the initialization of the pipeline.
a__ : List[Any] =generate_kwargs.pop("prefix_length" , 0 )
if prefix_length > 0:
a__ : Any ="max_new_tokens" in generate_kwargs or (
"generation_config" in generate_kwargs
and generate_kwargs["generation_config"].max_new_tokens is not None
)
if not has_max_new_tokens:
a__ : Any =generate_kwargs.get("max_length" ) or self.model.config.max_length
generate_kwargs["max_length"] += prefix_length
a__ : List[Any] ="min_new_tokens" in generate_kwargs or (
"generation_config" in generate_kwargs
and generate_kwargs["generation_config"].min_new_tokens is not None
)
if not has_min_new_tokens and "min_length" in generate_kwargs:
generate_kwargs["min_length"] += prefix_length
# BS x SL
a__ : Union[str, Any] =self.model.generate(input_ids=lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , **lowerCAmelCase__ )
a__ : str =generated_sequence.shape[0]
if self.framework == "pt":
a__ : Any =generated_sequence.reshape(lowerCAmelCase__ , out_b // in_b , *generated_sequence.shape[1:] )
elif self.framework == "tf":
a__ : Tuple =tf.reshape(lowerCAmelCase__ , (in_b, out_b // in_b, *generated_sequence.shape[1:]) )
return {"generated_sequence": generated_sequence, "input_ids": input_ids, "prompt_text": prompt_text}
def _lowercase ( self , lowerCAmelCase__ , lowerCAmelCase__=ReturnType.FULL_TEXT , lowerCAmelCase__=True ) -> Union[str, Any]:
'''simple docstring'''
a__ : str =model_outputs["generated_sequence"][0]
a__ : Dict =model_outputs["input_ids"]
a__ : Optional[int] =model_outputs["prompt_text"]
a__ : str =generated_sequence.numpy().tolist()
a__ : Any =[]
for sequence in generated_sequence:
if return_type == ReturnType.TENSORS:
a__ : str ={"generated_token_ids": sequence}
elif return_type in {ReturnType.NEW_TEXT, ReturnType.FULL_TEXT}:
# Decode text
a__ : List[Any] =self.tokenizer.decode(
lowerCAmelCase__ , skip_special_tokens=lowerCAmelCase__ , clean_up_tokenization_spaces=lowerCAmelCase__ , )
# Remove PADDING prompt of the sequence if XLNet or Transfo-XL model is used
if input_ids is None:
a__ : int =0
else:
a__ : Any =len(
self.tokenizer.decode(
input_ids[0] , skip_special_tokens=lowerCAmelCase__ , clean_up_tokenization_spaces=lowerCAmelCase__ , ) )
if return_type == ReturnType.FULL_TEXT:
a__ : str =prompt_text + text[prompt_length:]
else:
a__ : Union[str, Any] =text[prompt_length:]
a__ : str ={"generated_text": all_text}
records.append(lowerCAmelCase__ )
return records
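# Typical use goes through the pipeline factory (model name is illustrative):
#   from transformers import pipeline
#   generator = pipeline("text-generation", model="gpt2")
#   generator("Once upon a time", max_new_tokens=20)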
| 95 |
import argparse
import os
import torch
from transformers import (
XLNetConfig,
XLNetForQuestionAnswering,
XLNetForSequenceClassification,
XLNetLMHeadModel,
load_tf_weights_in_xlnet,
)
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
GLUE_TASKS_NUM_LABELS = {
"""cola""": 2,
"""mnli""": 3,
"""mrpc""": 2,
"""sst-2""": 2,
"""sts-b""": 1,
"""qqp""": 2,
"""qnli""": 2,
"""rte""": 2,
"""wnli""": 2,
}
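# sts-b is a regression task, hence a single output; every other GLUE task listed
# here is classification with 2 or 3 classes.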
logging.set_verbosity_info()
def convert_xlnet_checkpoint_to_pytorch( tf_checkpoint_path , xlnet_config_file , pytorch_dump_folder_path , finetuning_task=None ):
    """simple docstring"""
    config = XLNetConfig.from_json_file(xlnet_config_file )
    finetuning_task = finetuning_task.lower() if finetuning_task is not None else ""
    if finetuning_task in GLUE_TASKS_NUM_LABELS:
        print(f'''Building PyTorch XLNetForSequenceClassification model from configuration: {config}''' )
        config.finetuning_task = finetuning_task
        config.num_labels = GLUE_TASKS_NUM_LABELS[finetuning_task]
        model = XLNetForSequenceClassification(config )
    elif "squad" in finetuning_task:
        config.finetuning_task = finetuning_task
        model = XLNetForQuestionAnswering(config )
    else:
        model = XLNetLMHeadModel(config )
    # Load weights from tf checkpoint
    load_tf_weights_in_xlnet(model , config , tf_checkpoint_path )
    # Save pytorch-model
    pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path , WEIGHTS_NAME )
    pytorch_config_dump_path = os.path.join(pytorch_dump_folder_path , CONFIG_NAME )
    print(f'''Save PyTorch model to {os.path.abspath(pytorch_weights_dump_path )}''' )
    torch.save(model.state_dict() , pytorch_weights_dump_path )
    print(f'''Save configuration file to {os.path.abspath(pytorch_config_dump_path )}''' )
    with open(pytorch_config_dump_path , "w" , encoding="utf-8" ) as f:
        f.write(config.to_json_string() )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--xlnet_config_file""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained XLNet model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default=None,
type=str,
required=True,
help="""Path to the folder to store the PyTorch model or dataset/vocab.""",
)
parser.add_argument(
"""--finetuning_task""",
default=None,
type=str,
help="""Name of a task on which the XLNet TensorFlow model was fine-tuned""",
)
    args = parser.parse_args()
print(args)
convert_xlnet_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.xlnet_config_file, args.pytorch_dump_folder_path, args.finetuning_task
)
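# Example invocation (paths are illustrative placeholders):
#   python convert_xlnet_checkpoint.py --tf_checkpoint_path ./xlnet_model.ckpt \
#       --xlnet_config_file ./config.json --pytorch_dump_folder_path ./out \
#       --finetuning_task sts-b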
| 95 | 1 |
import warnings
from ...utils import logging
from .image_processing_flava import FlavaImageProcessor
_lowerCamelCase : Union[str, Any] = logging.get_logger(__name__)
class lowercase ( a ):
def __init__( self : Union[str, Any] , *_UpperCamelCase : List[Any] , **_UpperCamelCase : Optional[int] ) -> None:
'''simple docstring'''
warnings.warn(
"The class FlavaFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
" use FlavaImageProcessor instead." , _UpperCamelCase , )
super().__init__(*_UpperCamelCase , **_UpperCamelCase )
| 206 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_funnel import FunnelTokenizer
_lowerCamelCase : Union[str, Any] = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''}
_model_names = [
'''small''',
'''small-base''',
'''medium''',
'''medium-base''',
'''intermediate''',
'''intermediate-base''',
'''large''',
'''large-base''',
'''xlarge''',
'''xlarge-base''',
]
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''funnel-transformer/small''': '''https://huggingface.co/funnel-transformer/small/resolve/main/vocab.txt''',
'''funnel-transformer/small-base''': '''https://huggingface.co/funnel-transformer/small-base/resolve/main/vocab.txt''',
'''funnel-transformer/medium''': '''https://huggingface.co/funnel-transformer/medium/resolve/main/vocab.txt''',
'''funnel-transformer/medium-base''': (
'''https://huggingface.co/funnel-transformer/medium-base/resolve/main/vocab.txt'''
),
'''funnel-transformer/intermediate''': (
'''https://huggingface.co/funnel-transformer/intermediate/resolve/main/vocab.txt'''
),
'''funnel-transformer/intermediate-base''': (
'''https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/vocab.txt'''
),
'''funnel-transformer/large''': '''https://huggingface.co/funnel-transformer/large/resolve/main/vocab.txt''',
'''funnel-transformer/large-base''': '''https://huggingface.co/funnel-transformer/large-base/resolve/main/vocab.txt''',
'''funnel-transformer/xlarge''': '''https://huggingface.co/funnel-transformer/xlarge/resolve/main/vocab.txt''',
'''funnel-transformer/xlarge-base''': (
'''https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''funnel-transformer/small''': '''https://huggingface.co/funnel-transformer/small/resolve/main/tokenizer.json''',
'''funnel-transformer/small-base''': (
'''https://huggingface.co/funnel-transformer/small-base/resolve/main/tokenizer.json'''
),
'''funnel-transformer/medium''': '''https://huggingface.co/funnel-transformer/medium/resolve/main/tokenizer.json''',
'''funnel-transformer/medium-base''': (
'''https://huggingface.co/funnel-transformer/medium-base/resolve/main/tokenizer.json'''
),
'''funnel-transformer/intermediate''': (
'''https://huggingface.co/funnel-transformer/intermediate/resolve/main/tokenizer.json'''
),
'''funnel-transformer/intermediate-base''': (
'''https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/tokenizer.json'''
),
'''funnel-transformer/large''': '''https://huggingface.co/funnel-transformer/large/resolve/main/tokenizer.json''',
'''funnel-transformer/large-base''': (
'''https://huggingface.co/funnel-transformer/large-base/resolve/main/tokenizer.json'''
),
'''funnel-transformer/xlarge''': '''https://huggingface.co/funnel-transformer/xlarge/resolve/main/tokenizer.json''',
'''funnel-transformer/xlarge-base''': (
'''https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/tokenizer.json'''
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {f"""funnel-transformer/{name}""": 5_12 for name in _model_names}
PRETRAINED_INIT_CONFIGURATION = {f"""funnel-transformer/{name}""": {'''do_lower_case''': True} for name in _model_names}
class lowercase ( a ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = FunnelTokenizer
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    cls_token_type_id = 2
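    # Funnel assigns token_type_id 2 (cls_token_type_id) to the [CLS] token, unlike
    # BERT's 0; see create_token_type_ids_from_sequences below.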
def __init__( self : str , _UpperCamelCase : str=None , _UpperCamelCase : str=None , _UpperCamelCase : Union[str, Any]=True , _UpperCamelCase : str="<unk>" , _UpperCamelCase : Optional[Any]="<sep>" , _UpperCamelCase : Optional[int]="<pad>" , _UpperCamelCase : int="<cls>" , _UpperCamelCase : Dict="<mask>" , _UpperCamelCase : Union[str, Any]="<s>" , _UpperCamelCase : Optional[int]="</s>" , _UpperCamelCase : Dict=True , _UpperCamelCase : List[Any]=True , _UpperCamelCase : Any=None , _UpperCamelCase : Dict="##" , **_UpperCamelCase : Dict , ) -> Optional[int]:
'''simple docstring'''
super().__init__(
_UpperCamelCase , tokenizer_file=_UpperCamelCase , do_lower_case=_UpperCamelCase , unk_token=_UpperCamelCase , sep_token=_UpperCamelCase , pad_token=_UpperCamelCase , cls_token=_UpperCamelCase , mask_token=_UpperCamelCase , bos_token=_UpperCamelCase , eos_token=_UpperCamelCase , clean_text=_UpperCamelCase , tokenize_chinese_chars=_UpperCamelCase , strip_accents=_UpperCamelCase , wordpieces_prefix=_UpperCamelCase , **_UpperCamelCase , )
SCREAMING_SNAKE_CASE = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get("lowercase" , _UpperCamelCase ) != do_lower_case
or normalizer_state.get("strip_accents" , _UpperCamelCase ) != strip_accents
or normalizer_state.get("handle_chinese_chars" , _UpperCamelCase ) != tokenize_chinese_chars
):
SCREAMING_SNAKE_CASE = getattr(_UpperCamelCase , normalizer_state.pop("type" ) )
SCREAMING_SNAKE_CASE = do_lower_case
SCREAMING_SNAKE_CASE = strip_accents
SCREAMING_SNAKE_CASE = tokenize_chinese_chars
SCREAMING_SNAKE_CASE = normalizer_class(**_UpperCamelCase )
SCREAMING_SNAKE_CASE = do_lower_case
    def build_inputs_with_special_tokens( self , token_ids_a , token_ids_b=None ) -> List[int]:
        '''simple docstring'''
        output = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        if token_ids_b:
            output += token_ids_b + [self.sep_token_id]
        return output
    def create_token_type_ids_from_sequences( self , token_ids_a: List[int] , token_ids_b: Optional[List[int]] = None ) -> List[int]:
        '''simple docstring'''
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_b is None:
            return len(cls ) * [self.cls_token_type_id] + len(token_ids_a + sep ) * [0]
        return len(cls ) * [self.cls_token_type_id] + len(token_ids_a + sep ) * [0] + len(token_ids_b + sep ) * [1]
    def save_vocabulary( self , save_directory: str , filename_prefix: Optional[str] = None ) -> Tuple[str]:
        '''simple docstring'''
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )
| 206 | 1 |
import os
import tempfile
import unittest
from transformers import FlaubertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
FlaubertForMultipleChoice,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertModel,
FlaubertWithLMHeadModel,
)
from transformers.models.flaubert.modeling_flaubert import FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class FlaubertModelTester:
'''simple docstring'''
def __init__( self , __lowercase , __lowercase=13 , __lowercase=7 , __lowercase=True , __lowercase=True , __lowercase=True , __lowercase=True , __lowercase=True , __lowercase=False , __lowercase=False , __lowercase=False , __lowercase=2 , __lowercase=99 , __lowercase=0 , __lowercase=32 , __lowercase=5 , __lowercase=4 , __lowercase=0.1 , __lowercase=0.1 , __lowercase=512 , __lowercase=12 , __lowercase=2 , __lowercase=0.02 , __lowercase=3 , __lowercase=4 , __lowercase="last" , __lowercase=None , __lowercase=None , ) -> Optional[Any]:
__UpperCamelCase :Union[str, Any] = parent
__UpperCamelCase :Dict = batch_size
__UpperCamelCase :Any = seq_length
__UpperCamelCase :List[str] = is_training
__UpperCamelCase :Dict = use_input_lengths
__UpperCamelCase :Tuple = use_token_type_ids
__UpperCamelCase :Union[str, Any] = use_labels
__UpperCamelCase :Dict = gelu_activation
__UpperCamelCase :int = sinusoidal_embeddings
__UpperCamelCase :Optional[int] = causal
__UpperCamelCase :int = asm
__UpperCamelCase :Tuple = n_langs
__UpperCamelCase :Optional[int] = vocab_size
__UpperCamelCase :Tuple = n_special
__UpperCamelCase :str = hidden_size
__UpperCamelCase :int = num_hidden_layers
__UpperCamelCase :int = num_attention_heads
__UpperCamelCase :Optional[Any] = hidden_dropout_prob
__UpperCamelCase :Union[str, Any] = attention_probs_dropout_prob
__UpperCamelCase :Tuple = max_position_embeddings
__UpperCamelCase :Dict = type_vocab_size
__UpperCamelCase :Tuple = type_sequence_label_size
__UpperCamelCase :List[Any] = initializer_range
__UpperCamelCase :int = num_labels
__UpperCamelCase :Optional[Any] = num_choices
__UpperCamelCase :List[Any] = summary_type
__UpperCamelCase :str = use_proj
__UpperCamelCase :List[Any] = scope
def UpperCamelCase__ ( self) -> Optional[int]:
__UpperCamelCase :Dict = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
__UpperCamelCase :Any = random_attention_mask([self.batch_size, self.seq_length])
__UpperCamelCase :Optional[Any] = None
if self.use_input_lengths:
__UpperCamelCase :str = (
ids_tensor([self.batch_size] , vocab_size=2) + self.seq_length - 2
) # small variation of seq_length
__UpperCamelCase :List[str] = None
if self.use_token_type_ids:
__UpperCamelCase :Any = ids_tensor([self.batch_size, self.seq_length] , self.n_langs)
__UpperCamelCase :Optional[int] = None
__UpperCamelCase :List[str] = None
__UpperCamelCase :str = None
if self.use_labels:
__UpperCamelCase :int = ids_tensor([self.batch_size] , self.type_sequence_label_size)
__UpperCamelCase :Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
__UpperCamelCase :List[str] = ids_tensor([self.batch_size] , 2).float()
__UpperCamelCase :Optional[Any] = ids_tensor([self.batch_size] , self.num_choices)
__UpperCamelCase :Optional[Any] = self.get_config()
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def UpperCamelCase__ ( self) -> Optional[Any]:
return FlaubertConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , )
def UpperCamelCase__ ( self , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , ) -> Union[str, Any]:
__UpperCamelCase :Dict = FlaubertModel(config=__lowercase)
model.to(__lowercase)
model.eval()
__UpperCamelCase :str = model(__lowercase , lengths=__lowercase , langs=__lowercase)
__UpperCamelCase :Tuple = model(__lowercase , langs=__lowercase)
__UpperCamelCase :Optional[Any] = model(__lowercase)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def UpperCamelCase__ ( self , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , ) -> List[str]:
__UpperCamelCase :Optional[int] = FlaubertWithLMHeadModel(__lowercase)
model.to(__lowercase)
model.eval()
__UpperCamelCase :Optional[int] = model(__lowercase , token_type_ids=__lowercase , labels=__lowercase)
self.parent.assertEqual(result.loss.shape , ())
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
def UpperCamelCase__ ( self , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , ) -> int:
__UpperCamelCase :int = FlaubertForQuestionAnsweringSimple(__lowercase)
model.to(__lowercase)
model.eval()
__UpperCamelCase :Union[str, Any] = model(__lowercase)
__UpperCamelCase :Optional[int] = model(__lowercase , start_positions=__lowercase , end_positions=__lowercase)
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length))
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length))
def UpperCamelCase__ ( self , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , ) -> Tuple:
__UpperCamelCase :Tuple = FlaubertForQuestionAnswering(__lowercase)
model.to(__lowercase)
model.eval()
__UpperCamelCase :Union[str, Any] = model(__lowercase)
__UpperCamelCase :Dict = model(
__lowercase , start_positions=__lowercase , end_positions=__lowercase , cls_index=__lowercase , is_impossible=__lowercase , p_mask=__lowercase , )
__UpperCamelCase :int = model(
__lowercase , start_positions=__lowercase , end_positions=__lowercase , cls_index=__lowercase , is_impossible=__lowercase , )
((__UpperCamelCase) , ) :Dict = result_with_labels.to_tuple()
__UpperCamelCase :Union[str, Any] = model(__lowercase , start_positions=__lowercase , end_positions=__lowercase)
((__UpperCamelCase) , ) :Any = result_with_labels.to_tuple()
self.parent.assertEqual(result_with_labels.loss.shape , ())
self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top))
self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top))
self.parent.assertEqual(
result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top))
self.parent.assertEqual(
result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top))
self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,))
def UpperCamelCase__ ( self , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , ) -> Any:
__UpperCamelCase :int = FlaubertForSequenceClassification(__lowercase)
model.to(__lowercase)
model.eval()
__UpperCamelCase :Union[str, Any] = model(__lowercase)
__UpperCamelCase :List[Any] = model(__lowercase , labels=__lowercase)
self.parent.assertEqual(result.loss.shape , ())
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size))
def UpperCamelCase__ ( self , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , ) -> Optional[int]:
__UpperCamelCase :Union[str, Any] = self.num_labels
__UpperCamelCase :List[str] = FlaubertForTokenClassification(__lowercase)
model.to(__lowercase)
model.eval()
__UpperCamelCase :Any = model(__lowercase , attention_mask=__lowercase , labels=__lowercase)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels))
def UpperCamelCase__ ( self , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , ) -> Dict:
__UpperCamelCase :List[str] = self.num_choices
__UpperCamelCase :int = FlaubertForMultipleChoice(config=__lowercase)
model.to(__lowercase)
model.eval()
__UpperCamelCase :Any = input_ids.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
__UpperCamelCase :Tuple = token_type_ids.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
__UpperCamelCase :Dict = input_mask.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
__UpperCamelCase :int = model(
__lowercase , attention_mask=__lowercase , token_type_ids=__lowercase , labels=__lowercase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices))
def UpperCamelCase__ ( self) -> List[Any]:
__UpperCamelCase :Union[str, Any] = self.prepare_config_and_inputs()
(
(
__UpperCamelCase
) , (
__UpperCamelCase
) , (
__UpperCamelCase
) , (
__UpperCamelCase
) , (
__UpperCamelCase
) , (
__UpperCamelCase
) , (
__UpperCamelCase
) , (
__UpperCamelCase
) , (
__UpperCamelCase
) ,
) :int = config_and_inputs
__UpperCamelCase :List[Any] = {
'''input_ids''': input_ids,
'''token_type_ids''': token_type_ids,
'''lengths''': input_lengths,
'''attention_mask''': input_mask,
}
return config, inputs_dict
@require_torch
class lowerCamelCase_ ( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
'''simple docstring'''
    all_model_classes = (
(
FlaubertModel,
FlaubertWithLMHeadModel,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertForMultipleChoice,
)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{
"""feature-extraction""": FlaubertModel,
"""fill-mask""": FlaubertWithLMHeadModel,
"""question-answering""": FlaubertForQuestionAnsweringSimple,
"""text-classification""": FlaubertForSequenceClassification,
"""token-classification""": FlaubertForTokenClassification,
"""zero-shot""": FlaubertForSequenceClassification,
}
if is_torch_available()
else {}
)
def UpperCamelCase__ ( self , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase) -> Tuple:
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith('''Fast''')
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
def UpperCamelCase__ ( self , __lowercase , __lowercase , __lowercase=False) -> int:
__UpperCamelCase :Union[str, Any] = super()._prepare_for_class(__lowercase , __lowercase , return_labels=__lowercase)
if return_labels:
if model_class.__name__ == "FlaubertForQuestionAnswering":
__UpperCamelCase :Tuple = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=__lowercase)
__UpperCamelCase :Optional[int] = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=__lowercase)
return inputs_dict
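        # The zero tensors injected above stand in for the start/end position labels
        # that FlaubertForQuestionAnswering expects when return_labels=True.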
def UpperCamelCase__ ( self) -> Any:
__UpperCamelCase :Optional[Any] = FlaubertModelTester(self)
__UpperCamelCase :Any = ConfigTester(self , config_class=__lowercase , emb_dim=37)
def UpperCamelCase__ ( self) -> Optional[Any]:
self.config_tester.run_common_tests()
def UpperCamelCase__ ( self) -> int:
__UpperCamelCase :List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_model(*__lowercase)
def UpperCamelCase__ ( self) -> int:
__UpperCamelCase :Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_lm_head(*__lowercase)
def UpperCamelCase__ ( self) -> List[str]:
__UpperCamelCase :Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_simple_qa(*__lowercase)
def UpperCamelCase__ ( self) -> Dict:
__UpperCamelCase :List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_qa(*__lowercase)
def UpperCamelCase__ ( self) -> List[str]:
__UpperCamelCase :Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_sequence_classif(*__lowercase)
def UpperCamelCase__ ( self) -> str:
__UpperCamelCase :List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_token_classif(*__lowercase)
def UpperCamelCase__ ( self) -> List[str]:
__UpperCamelCase :Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_multiple_choice(*__lowercase)
@slow
def UpperCamelCase__ ( self) -> Any:
for model_name in FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__UpperCamelCase :Tuple = FlaubertModel.from_pretrained(__lowercase)
self.assertIsNotNone(__lowercase)
@slow
@require_torch_gpu
def UpperCamelCase__ ( self) -> Optional[int]:
__UpperCamelCase , __UpperCamelCase :str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# FlauBertForMultipleChoice behaves incorrectly in JIT environments.
if model_class == FlaubertForMultipleChoice:
return
__UpperCamelCase :int = True
__UpperCamelCase :List[str] = model_class(config=__lowercase)
__UpperCamelCase :Dict = self._prepare_for_class(__lowercase , __lowercase)
__UpperCamelCase :List[str] = torch.jit.trace(
__lowercase , (inputs_dict['''input_ids'''].to('''cpu'''), inputs_dict['''attention_mask'''].to('''cpu''')))
with tempfile.TemporaryDirectory() as tmp:
torch.jit.save(__lowercase , os.path.join(__lowercase , '''traced_model.pt'''))
__UpperCamelCase :Optional[Any] = torch.jit.load(os.path.join(__lowercase , '''traced_model.pt''') , map_location=__lowercase)
loaded(inputs_dict['''input_ids'''].to(__lowercase) , inputs_dict['''attention_mask'''].to(__lowercase))
@require_torch
class lowerCamelCase_ ( unittest.TestCase ):
'''simple docstring'''
@slow
def UpperCamelCase__ ( self) -> Any:
__UpperCamelCase :List[Any] = FlaubertModel.from_pretrained('''flaubert/flaubert_base_cased''')
__UpperCamelCase :Tuple = torch.tensor([[0, 345, 232, 328, 740, 140, 1_695, 69, 6_078, 1_588, 2]])
with torch.no_grad():
__UpperCamelCase :str = model(__lowercase)[0]
__UpperCamelCase :Optional[Any] = torch.Size((1, 11, 768))
self.assertEqual(output.shape , __lowercase)
__UpperCamelCase :Dict = torch.tensor(
[[[-2.62_51, -1.42_98, -0.02_27], [-2.85_10, -1.63_87, 0.22_58], [-2.81_14, -1.18_32, -0.30_66]]])
self.assertTrue(torch.allclose(output[:, :3, :3] , __lowercase , atol=1E-4))
| 43 |
import copy
from typing import Dict, List, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
SCREAMING_SNAKE_CASE__ = {
"""facebook/mask2former-swin-small-coco-instance""": (
"""https://huggingface.co/facebook/mask2former-swin-small-coco-instance/blob/main/config.json"""
)
# See all Mask2Former models at https://huggingface.co/models?filter=mask2former
}
logger = logging.get_logger(__name__)
class A__ ( lowerCAmelCase__ ):
    model_type = "mask2former"
    backbones_supported = ["swin"]
    attribute_map = {"hidden_size": "hidden_dim"}
def __init__( self : Optional[int] , _UpperCAmelCase : Optional[Dict] = None , _UpperCAmelCase : int = 2_56 , _UpperCAmelCase : int = 2_56 , _UpperCAmelCase : int = 2_56 , _UpperCAmelCase : int = 10_24 , _UpperCAmelCase : str = "relu" , _UpperCAmelCase : int = 6 , _UpperCAmelCase : int = 10 , _UpperCAmelCase : int = 8 , _UpperCAmelCase : float = 0.0 , _UpperCAmelCase : int = 20_48 , _UpperCAmelCase : bool = False , _UpperCAmelCase : bool = False , _UpperCAmelCase : int = 4 , _UpperCAmelCase : int = 2_55 , _UpperCAmelCase : int = 1_00 , _UpperCAmelCase : float = 0.1 , _UpperCAmelCase : float = 2.0 , _UpperCAmelCase : float = 5.0 , _UpperCAmelCase : float = 5.0 , _UpperCAmelCase : int = 1_25_44 , _UpperCAmelCase : float = 3.0 , _UpperCAmelCase : float = 0.75 , _UpperCAmelCase : float = 0.02 , _UpperCAmelCase : float = 1.0 , _UpperCAmelCase : bool = True , _UpperCAmelCase : List[int] = [4, 8, 16, 32] , _UpperCAmelCase : bool = None , **_UpperCAmelCase : List[str] , ) -> int:
"""simple docstring"""
if backbone_config is None:
logger.info('`backbone_config` is `None`. Initializing the config with the default `Swin` backbone.' )
__lowercase = CONFIG_MAPPING['swin'](
image_size=2_24 , in_channels=3 , patch_size=4 , embed_dim=96 , depths=[2, 2, 18, 2] , num_heads=[3, 6, 12, 24] , window_size=7 , drop_path_rate=0.3 , use_absolute_embeddings=_UpperCAmelCase , out_features=['stage1', 'stage2', 'stage3', 'stage4'] , )
if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
__lowercase = backbone_config.pop('model_type' )
__lowercase = CONFIG_MAPPING[backbone_model_type]
__lowercase = config_class.from_dict(_UpperCAmelCase )
# verify that the backbone is supported
if backbone_config.model_type not in self.backbones_supported:
logger.warning_once(
f"""Backbone {backbone_config.model_type} is not a supported model and may not be compatible with Mask2Former. """
f"""Supported model types: {",".join(self.backbones_supported )}""" )
__lowercase = backbone_config
__lowercase = feature_size
__lowercase = mask_feature_size
__lowercase = hidden_dim
__lowercase = encoder_feedforward_dim
__lowercase = activation_function
__lowercase = encoder_layers
__lowercase = decoder_layers
__lowercase = num_attention_heads
__lowercase = dropout
__lowercase = dim_feedforward
__lowercase = pre_norm
__lowercase = enforce_input_projection
__lowercase = common_stride
__lowercase = ignore_value
__lowercase = num_queries
__lowercase = no_object_weight
__lowercase = class_weight
__lowercase = mask_weight
__lowercase = dice_weight
__lowercase = train_num_points
__lowercase = oversample_ratio
__lowercase = importance_sample_ratio
__lowercase = init_std
__lowercase = init_xavier_std
__lowercase = use_auxiliary_loss
__lowercase = feature_strides
__lowercase = output_auxiliary_logits
__lowercase = decoder_layers
super().__init__(**_UpperCAmelCase )
@classmethod
    def from_backbone_config( cls , backbone_config: PretrainedConfig , **kwargs ):
        """simple docstring"""
        return cls(
            backbone_config=backbone_config , **kwargs , )
    def to_dict( self ) -> Dict[str, any]:
        """simple docstring"""
        output = copy.deepcopy(self.__dict__ )
        output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
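# Usage sketch for the config class above (named A__ in this snippet; Mask2FormerConfig
# in transformers proper):
#   config = A__()      # defaults give a Swin-small backbone
#   config.to_dict()    # serializes the nested backbone config as well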
| 325 | 0 |
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import GLPNImageProcessor
class GLPNImageProcessingTester ( unittest.TestCase ):
def __init__( self : Optional[Any] , snake_case__ : Optional[Any] , snake_case__ : int=7 , snake_case__ : List[str]=3 , snake_case__ : List[str]=1_8 , snake_case__ : int=3_0 , snake_case__ : List[str]=4_0_0 , snake_case__ : List[str]=True , snake_case__ : Optional[Any]=3_2 , snake_case__ : Dict=True , ):
'''simple docstring'''
lowercase :Any = parent
lowercase :str = batch_size
lowercase :List[Any] = num_channels
lowercase :Tuple = image_size
lowercase :List[str] = min_resolution
lowercase :Union[str, Any] = max_resolution
lowercase :Any = do_resize
lowercase :int = size_divisor
lowercase :Union[str, Any] = do_rescale
def __snake_case ( self : str ):
'''simple docstring'''
return {
"do_resize": self.do_resize,
"size_divisor": self.size_divisor,
"do_rescale": self.do_rescale,
}
@require_torch
@require_vision
class __magic_name__ ( __UpperCAmelCase , unittest.TestCase ):
__A : List[Any] = GLPNImageProcessor if is_vision_available() else None
def __snake_case ( self : Optional[int] ):
'''simple docstring'''
lowercase :List[Any] = GLPNImageProcessingTester(self )
@property
def __snake_case ( self : List[str] ):
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def __snake_case ( self : Optional[int] ):
'''simple docstring'''
lowercase :Dict = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(snake_case__ , '''do_resize''' ) )
self.assertTrue(hasattr(snake_case__ , '''size_divisor''' ) )
self.assertTrue(hasattr(snake_case__ , '''resample''' ) )
self.assertTrue(hasattr(snake_case__ , '''do_rescale''' ) )
def __snake_case ( self : str ):
'''simple docstring'''
pass
def __snake_case ( self : int ):
'''simple docstring'''
lowercase :Optional[int] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
lowercase :str = prepare_image_inputs(self.image_processor_tester , equal_resolution=snake_case__ )
for image in image_inputs:
self.assertIsInstance(snake_case__ , Image.Image )
# Test not batched input (GLPNImageProcessor doesn't support batching)
lowercase :List[str] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0 )
self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0 )
def __snake_case ( self : int ):
'''simple docstring'''
lowercase :str = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
lowercase :Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=snake_case__ , numpify=snake_case__ )
for image in image_inputs:
self.assertIsInstance(snake_case__ , np.ndarray )
# Test not batched input (GLPNImageProcessor doesn't support batching)
lowercase :Any = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0 )
self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0 )
def __snake_case ( self : List[str] ):
'''simple docstring'''
lowercase :List[str] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
lowercase :int = prepare_image_inputs(self.image_processor_tester , equal_resolution=snake_case__ , torchify=snake_case__ )
for image in image_inputs:
self.assertIsInstance(snake_case__ , torch.Tensor )
# Test not batched input (GLPNImageProcessor doesn't support batching)
lowercase :Union[str, Any] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0 )
self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0 )
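    # Editor's note (sketch, not from the original test file): the shape assertions above
    # hold because GLPNImageProcessor rounds each spatial dimension *down* to a multiple
    # of `size_divisor` before resizing, roughly:
    #
    #     new_h = h - (h % size_divisor)
    #     new_w = w - (w % size_divisor)
    #
    # e.g. a 500x333 input with size_divisor=32 comes out as 480x320.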
| 370 |
"""simple docstring"""
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
from ... import FeatureExtractionMixin, TensorType
logger = logging.get_logger(__name__)

IMAGEGPT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''openai/imagegpt-small''': '''''',
'''openai/imagegpt-medium''': '''''',
'''openai/imagegpt-large''': '''''',
}
class ImageGPTConfig(PretrainedConfig):
    model_type = "imagegpt"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "n_embd",
        "max_position_embeddings": "n_positions",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=512 + 1,  # add one for start of sentence (sos) token
        n_positions=32 * 32,  # 32x32 pixels
        n_embd=512,
        n_layer=24,
        n_head=8,
        n_inner=None,
        activation_function="quick_gelu",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        scale_attn_weights=True,
        use_cache=True,
        tie_word_embeddings=False,
        scale_attn_by_inverse_layer_idx=False,
        reorder_and_upcast_attn=False,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.scale_attn_by_inverse_layer_idx = scale_attn_by_inverse_layer_idx
        self.reorder_and_upcast_attn = reorder_and_upcast_attn
        self.tie_word_embeddings = tie_word_embeddings

        super().__init__(tie_word_embeddings=tie_word_embeddings, **kwargs)


class ImageGPTOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> "Mapping[str, Mapping[int, str]]":
        return OrderedDict(
            [
                ("input_ids", {0: "batch", 1: "sequence"}),
            ]
        )

    def generate_dummy_inputs(
        self,
        preprocessor: "FeatureExtractionMixin",
        batch_size: int = 1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
        num_channels: int = 3,
        image_width: int = 32,
        image_height: int = 32,
    ) -> Mapping[str, Any]:
        input_image = self._generate_dummy_images(batch_size, num_channels, image_height, image_width)
        inputs = dict(preprocessor(images=input_image, return_tensors=framework))
        return inputs
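    # Usage sketch (editor's addition; `ImageGPTImageProcessor` comes from the transformers
    # API and is not defined in this file):
    #
    #     from transformers import ImageGPTImageProcessor
    #     onnx_config = ImageGPTOnnxConfig(ImageGPTConfig())
    #     dummy = onnx_config.generate_dummy_inputs(ImageGPTImageProcessor(), framework="pt")
    #     # dummy["input_ids"] holds a batch of clustered pixel ids ready for export tracing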
| 172 | 0 |
import unittest
from transformers import AlbertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForPreTraining,
AlbertForQuestionAnswering,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertModel,
)
from transformers.models.albert.modeling_albert import ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class __snake_case :
def __init__( self ,snake_case ,snake_case=13 ,snake_case=7 ,snake_case=True ,snake_case=True ,snake_case=True ,snake_case=True ,snake_case=99 ,snake_case=16 ,snake_case=36 ,snake_case=6 ,snake_case=6 ,snake_case=6 ,snake_case=37 ,snake_case="gelu" ,snake_case=0.1 ,snake_case=0.1 ,snake_case=512 ,snake_case=16 ,snake_case=2 ,snake_case=0.02 ,snake_case=3 ,snake_case=4 ,snake_case=None ,):
'''simple docstring'''
lowercase : Union[str, Any] = parent
lowercase : Dict = batch_size
lowercase : Optional[int] = seq_length
lowercase : Union[str, Any] = is_training
lowercase : Dict = use_input_mask
lowercase : Dict = use_token_type_ids
lowercase : str = use_labels
lowercase : Union[str, Any] = vocab_size
lowercase : int = embedding_size
lowercase : List[str] = hidden_size
lowercase : Dict = num_hidden_layers
lowercase : Optional[Any] = num_hidden_groups
lowercase : List[Any] = num_attention_heads
lowercase : Union[str, Any] = intermediate_size
lowercase : Any = hidden_act
lowercase : Tuple = hidden_dropout_prob
lowercase : Dict = attention_probs_dropout_prob
lowercase : Any = max_position_embeddings
lowercase : List[str] = type_vocab_size
lowercase : int = type_sequence_label_size
lowercase : Optional[Any] = initializer_range
lowercase : int = num_labels
lowercase : Optional[Any] = num_choices
lowercase : Union[str, Any] = scope
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowercase : str = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
lowercase : Union[str, Any] = None
if self.use_input_mask:
lowercase : Optional[Any] = random_attention_mask([self.batch_size, self.seq_length] )
lowercase : Optional[int] = None
if self.use_token_type_ids:
lowercase : Any = ids_tensor([self.batch_size, self.seq_length] ,self.type_vocab_size )
lowercase : Tuple = None
lowercase : Any = None
lowercase : Any = None
if self.use_labels:
lowercase : List[Any] = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
lowercase : Dict = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels )
lowercase : List[str] = ids_tensor([self.batch_size] ,self.num_choices )
lowercase : Tuple = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
return AlbertConfig(
vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,type_vocab_size=self.type_vocab_size ,initializer_range=self.initializer_range ,num_hidden_groups=self.num_hidden_groups ,)
def _SCREAMING_SNAKE_CASE ( self ,snake_case ,snake_case ,snake_case ,snake_case ,snake_case ,snake_case ,snake_case ):
'''simple docstring'''
lowercase : int = AlbertModel(config=snake_case )
model.to(snake_case )
model.eval()
lowercase : List[str] = model(snake_case ,attention_mask=snake_case ,token_type_ids=snake_case )
lowercase : str = model(snake_case ,token_type_ids=snake_case )
lowercase : Optional[Any] = model(snake_case )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape ,(self.batch_size, self.hidden_size) )
def _SCREAMING_SNAKE_CASE ( self ,snake_case ,snake_case ,snake_case ,snake_case ,snake_case ,snake_case ,snake_case ):
'''simple docstring'''
lowercase : Tuple = AlbertForPreTraining(config=snake_case )
model.to(snake_case )
model.eval()
lowercase : Union[str, Any] = model(
snake_case ,attention_mask=snake_case ,token_type_ids=snake_case ,labels=snake_case ,sentence_order_label=snake_case ,)
self.parent.assertEqual(result.prediction_logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.sop_logits.shape ,(self.batch_size, config.num_labels) )
def _SCREAMING_SNAKE_CASE ( self ,snake_case ,snake_case ,snake_case ,snake_case ,snake_case ,snake_case ,snake_case ):
'''simple docstring'''
lowercase : int = AlbertForMaskedLM(config=snake_case )
model.to(snake_case )
model.eval()
lowercase : int = model(snake_case ,attention_mask=snake_case ,token_type_ids=snake_case ,labels=snake_case )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
def _SCREAMING_SNAKE_CASE ( self ,snake_case ,snake_case ,snake_case ,snake_case ,snake_case ,snake_case ,snake_case ):
'''simple docstring'''
lowercase : str = AlbertForQuestionAnswering(config=snake_case )
model.to(snake_case )
model.eval()
lowercase : List[str] = model(
snake_case ,attention_mask=snake_case ,token_type_ids=snake_case ,start_positions=snake_case ,end_positions=snake_case ,)
self.parent.assertEqual(result.start_logits.shape ,(self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape ,(self.batch_size, self.seq_length) )
def _SCREAMING_SNAKE_CASE ( self ,snake_case ,snake_case ,snake_case ,snake_case ,snake_case ,snake_case ,snake_case ):
'''simple docstring'''
lowercase : str = self.num_labels
lowercase : Tuple = AlbertForSequenceClassification(snake_case )
model.to(snake_case )
model.eval()
lowercase : Tuple = model(snake_case ,attention_mask=snake_case ,token_type_ids=snake_case ,labels=snake_case )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) )
def _SCREAMING_SNAKE_CASE ( self ,snake_case ,snake_case ,snake_case ,snake_case ,snake_case ,snake_case ,snake_case ):
'''simple docstring'''
lowercase : Tuple = self.num_labels
lowercase : Optional[int] = AlbertForTokenClassification(config=snake_case )
model.to(snake_case )
model.eval()
lowercase : int = model(snake_case ,attention_mask=snake_case ,token_type_ids=snake_case ,labels=snake_case )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.num_labels) )
def _SCREAMING_SNAKE_CASE ( self ,snake_case ,snake_case ,snake_case ,snake_case ,snake_case ,snake_case ,snake_case ):
'''simple docstring'''
lowercase : Any = self.num_choices
lowercase : Any = AlbertForMultipleChoice(config=snake_case )
model.to(snake_case )
model.eval()
lowercase : List[Any] = input_ids.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous()
lowercase : Tuple = token_type_ids.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous()
lowercase : Tuple = input_mask.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous()
lowercase : Dict = model(
snake_case ,attention_mask=snake_case ,token_type_ids=snake_case ,labels=snake_case ,)
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_choices) )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class __snake_case ( lowerCAmelCase , lowerCAmelCase , unittest.TestCase ):
_a : Tuple= (
(
AlbertModel,
AlbertForPreTraining,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertForQuestionAnswering,
)
if is_torch_available()
else ()
)
_a : List[str]= (
{
"feature-extraction": AlbertModel,
"fill-mask": AlbertForMaskedLM,
"question-answering": AlbertForQuestionAnswering,
"text-classification": AlbertForSequenceClassification,
"token-classification": AlbertForTokenClassification,
"zero-shot": AlbertForSequenceClassification,
}
if is_torch_available()
else {}
)
_a : Dict= True
def _SCREAMING_SNAKE_CASE ( self ,snake_case ,snake_case ,snake_case=False ):
'''simple docstring'''
lowercase : List[str] = super()._prepare_for_class(snake_case ,snake_case ,return_labels=snake_case )
if return_labels:
if model_class in get_values(snake_case ):
lowercase : List[Any] = torch.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length) ,dtype=torch.long ,device=snake_case )
lowercase : str = torch.zeros(
self.model_tester.batch_size ,dtype=torch.long ,device=snake_case )
return inputs_dict
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowercase : str = AlbertModelTester(self )
lowercase : Optional[int] = ConfigTester(self ,config_class=snake_case ,hidden_size=37 )
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
self.config_tester.run_common_tests()
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowercase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*snake_case )
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowercase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*snake_case )
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowercase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*snake_case )
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowercase : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*snake_case )
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowercase : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*snake_case )
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowercase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*snake_case )
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowercase : str = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
lowercase : Optional[Any] = type
self.model_tester.create_and_check_model(*snake_case )
@slow
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
for model_name in ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase : List[Any] = AlbertModel.from_pretrained(snake_case )
self.assertIsNotNone(snake_case )
@require_torch
class __snake_case ( unittest.TestCase ):
@slow
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowercase : Optional[int] = AlbertModel.from_pretrained("""albert-base-v2""" )
lowercase : int = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] )
lowercase : Optional[int] = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
lowercase : List[str] = model(snake_case ,attention_mask=snake_case )[0]
lowercase : Any = torch.Size((1, 11, 768) )
self.assertEqual(output.shape ,snake_case )
lowercase : int = torch.tensor(
[[[-0.6_513, 1.5_035, -0.2_766], [-0.6_515, 1.5_046, -0.2_780], [-0.6_512, 1.5_049, -0.2_784]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] ,snake_case ,atol=1e-4 ) )
| 20 |
"""simple docstring"""
from __future__ import annotations
from typing import Generic, TypeVar
T = TypeVar("T")


class DisjointSetTreeNode(Generic[T]):
    def __init__(self, data: T) -> None:
        self.data = data
        self.parent = self
        self.rank = 0


class DisjointSetTree(Generic[T]):
    def __init__(self) -> None:
        # map from node name to the node object
        self.map: dict[T, DisjointSetTreeNode[T]] = {}

    def make_set(self, data: T) -> None:
        # create a new set with x as its member
        self.map[data] = DisjointSetTreeNode(data)

    def find_set(self, data: T) -> DisjointSetTreeNode[T]:
        # find the set x belongs to (with path-compression)
        elem_ref = self.map[data]
        if elem_ref != elem_ref.parent:
            elem_ref.parent = self.find_set(elem_ref.parent.data)
        return elem_ref.parent

    def link(self, node1: DisjointSetTreeNode[T], node2: DisjointSetTreeNode[T]) -> None:
        # helper function for union operation (union by rank)
        if node1.rank > node2.rank:
            node2.parent = node1
        else:
            node1.parent = node2
            if node1.rank == node2.rank:
                node2.rank += 1

    def union(self, data1: T, data2: T) -> None:
        # merge 2 disjoint sets
        self.link(self.find_set(data1), self.find_set(data2))


class GraphUndirectedWeighted(Generic[T]):
    def __init__(self) -> None:
        # connections: map from the node to the neighbouring nodes (with weights)
        self.connections: dict[T, dict[T, int]] = {}

    def add_node(self, node: T) -> None:
        # add a node ONLY if its not present in the graph
        if node not in self.connections:
            self.connections[node] = {}

    def add_edge(self, node1: T, node2: T, weight: int) -> None:
        # add an edge with the given weight
        self.add_node(node1)
        self.add_node(node2)
        self.connections[node1][node2] = weight
        self.connections[node2][node1] = weight

    def kruskal(self) -> GraphUndirectedWeighted[T]:
        # getting the edges in ascending order of weights
        edges = []
        seen = set()
        for start in self.connections:
            for end in self.connections[start]:
                if (start, end) not in seen:
                    seen.add((end, start))
                    edges.append((start, end, self.connections[start][end]))
        edges.sort(key=lambda x: x[2])

        # creating the disjoint set
        disjoint_set = DisjointSetTree[T]()
        for node in self.connections:
            disjoint_set.make_set(node)

        # MST generation
        num_edges = 0
        index = 0
        graph = GraphUndirectedWeighted[T]()
        while num_edges < len(self.connections) - 1:
            u, v, w = edges[index]
            index += 1
            parent_u = disjoint_set.find_set(u)
            parent_v = disjoint_set.find_set(v)
            if parent_u != parent_v:
                num_edges += 1
                graph.add_edge(u, v, w)
                disjoint_set.union(u, v)
        return graph
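
if __name__ == "__main__":
    # Example usage (editor's addition, not in the original module): build a small
    # weighted triangle graph; Kruskal keeps the two cheapest edges (1-2) and (2-3).
    g = GraphUndirectedWeighted[int]()
    g.add_edge(1, 2, 1)
    g.add_edge(2, 3, 2)
    g.add_edge(1, 3, 3)
    mst = g.kruskal()
    assert mst.connections[1] == {2: 1}
    assert mst.connections[2] == {1: 1, 3: 2}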
| 221 | 0 |
'''simple docstring'''
from arguments import InitializationArguments
from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer, HfArgumentParser
# Configuration
parser = HfArgumentParser(InitializationArguments)
args = parser.parse_args()

# Load codeparrot tokenizer trained for Python code tokenization
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name)

# Config: "scale_attn_by_layer_idx" and "reorder_and_upcast_attn" are Mistral stability tweaks
config_kwargs = {
    "vocab_size": len(tokenizer),
    "scale_attn_by_inverse_layer_idx": True,
    "reorder_and_upcast_attn": True,
}

# Load model config (GPT-2 large in this case)
config = AutoConfig.from_pretrained(args.config_name, **config_kwargs)

# Initialize new model with config
model = AutoModelForCausalLM.from_config(config)

# Save model to the hub
model.save_pretrained(args.model_name, push_to_hub=args.push_to_hub)
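
# Example invocation (editor's sketch; the flag names come from InitializationArguments
# in the accompanying arguments.py, and the script filename is an assumption):
#
#     python initialize_model.py \
#         --config_name gpt2-large \
#         --tokenizer_name codeparrot/codeparrot \
#         --model_name codeparrot-model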
| 3 |
'''simple docstring'''
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
IMAGE_PROCESSOR_MAPPING,
AutoConfig,
AutoImageProcessor,
CLIPConfig,
CLIPImageProcessor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER
sys.path.append(str(Path(__file__).parent.parent.parent.parent / "utils"))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
    def setUp(self):
        transformers.dynamic_module_utils.TIME_OUT_REMOTE_CODE = 0
def lowerCAmelCase ( self : str )-> Any:
snake_case = AutoImageProcessor.from_pretrained("""openai/clip-vit-base-patch32""" )
self.assertIsInstance(__snake_case , __snake_case )
def lowerCAmelCase ( self : List[Any] )-> str:
with tempfile.TemporaryDirectory() as tmpdirname:
snake_case = Path(__snake_case ) / """preprocessor_config.json"""
snake_case = Path(__snake_case ) / """config.json"""
json.dump(
{"""image_processor_type""": """CLIPImageProcessor""", """processor_class""": """CLIPProcessor"""} , open(__snake_case , """w""" ) , )
json.dump({"""model_type""": """clip"""} , open(__snake_case , """w""" ) )
snake_case = AutoImageProcessor.from_pretrained(__snake_case )
self.assertIsInstance(__snake_case , __snake_case )
def lowerCAmelCase ( self : List[str] )-> Optional[Any]:
# Ensure we can load the image processor from the feature extractor config
with tempfile.TemporaryDirectory() as tmpdirname:
snake_case = Path(__snake_case ) / """preprocessor_config.json"""
snake_case = Path(__snake_case ) / """config.json"""
json.dump(
{"""feature_extractor_type""": """CLIPFeatureExtractor""", """processor_class""": """CLIPProcessor"""} , open(__snake_case , """w""" ) , )
json.dump({"""model_type""": """clip"""} , open(__snake_case , """w""" ) )
snake_case = AutoImageProcessor.from_pretrained(__snake_case )
self.assertIsInstance(__snake_case , __snake_case )
def lowerCAmelCase ( self : Tuple )-> Optional[int]:
with tempfile.TemporaryDirectory() as tmpdirname:
snake_case = CLIPConfig()
# Create a dummy config file with image_proceesor_type
snake_case = Path(__snake_case ) / """preprocessor_config.json"""
snake_case = Path(__snake_case ) / """config.json"""
json.dump(
{"""image_processor_type""": """CLIPImageProcessor""", """processor_class""": """CLIPProcessor"""} , open(__snake_case , """w""" ) , )
json.dump({"""model_type""": """clip"""} , open(__snake_case , """w""" ) )
# remove image_processor_type to make sure config.json alone is enough to load image processor locally
snake_case = AutoImageProcessor.from_pretrained(__snake_case ).to_dict()
config_dict.pop("""image_processor_type""" )
snake_case = CLIPImageProcessor(**__snake_case )
# save in new folder
model_config.save_pretrained(__snake_case )
config.save_pretrained(__snake_case )
snake_case = AutoImageProcessor.from_pretrained(__snake_case )
# make sure private variable is not incorrectly saved
snake_case = json.loads(config.to_json_string() )
self.assertTrue("""_processor_class""" not in dict_as_saved )
self.assertIsInstance(__snake_case , __snake_case )
def lowerCAmelCase ( self : List[Any] )-> Optional[Any]:
with tempfile.TemporaryDirectory() as tmpdirname:
snake_case = Path(__snake_case ) / """preprocessor_config.json"""
json.dump(
{"""image_processor_type""": """CLIPImageProcessor""", """processor_class""": """CLIPProcessor"""} , open(__snake_case , """w""" ) , )
snake_case = AutoImageProcessor.from_pretrained(__snake_case )
self.assertIsInstance(__snake_case , __snake_case )
def lowerCAmelCase ( self : int )-> Dict:
with self.assertRaisesRegex(
__snake_case , """clip-base is not a local folder and is not a valid model identifier""" ):
snake_case = AutoImageProcessor.from_pretrained("""clip-base""" )
def lowerCAmelCase ( self : Tuple )-> int:
with self.assertRaisesRegex(
__snake_case , r"""aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)""" ):
snake_case = AutoImageProcessor.from_pretrained(__snake_case , revision="""aaaaaa""" )
def lowerCAmelCase ( self : str )-> Union[str, Any]:
with self.assertRaisesRegex(
__snake_case , """hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.""" , ):
snake_case = AutoImageProcessor.from_pretrained("""hf-internal-testing/config-no-model""" )
def lowerCAmelCase ( self : List[str] )-> List[str]:
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(__snake_case ):
snake_case = AutoImageProcessor.from_pretrained("""hf-internal-testing/test_dynamic_image_processor""" )
# If remote code is disabled, we can't load this config.
with self.assertRaises(__snake_case ):
snake_case = AutoImageProcessor.from_pretrained(
"""hf-internal-testing/test_dynamic_image_processor""" , trust_remote_code=__snake_case )
snake_case = AutoImageProcessor.from_pretrained(
"""hf-internal-testing/test_dynamic_image_processor""" , trust_remote_code=__snake_case )
self.assertEqual(image_processor.__class__.__name__ , """NewImageProcessor""" )
# Test image processor can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(__snake_case )
snake_case = AutoImageProcessor.from_pretrained(__snake_case , trust_remote_code=__snake_case )
self.assertEqual(reloaded_image_processor.__class__.__name__ , """NewImageProcessor""" )
def lowerCAmelCase ( self : List[str] )-> Dict:
try:
AutoConfig.register("""custom""" , __snake_case )
AutoImageProcessor.register(__snake_case , __snake_case )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(__snake_case ):
AutoImageProcessor.register(__snake_case , __snake_case )
with tempfile.TemporaryDirectory() as tmpdirname:
snake_case = Path(__snake_case ) / """preprocessor_config.json"""
snake_case = Path(__snake_case ) / """config.json"""
json.dump(
{"""feature_extractor_type""": """CLIPFeatureExtractor""", """processor_class""": """CLIPProcessor"""} , open(__snake_case , """w""" ) , )
json.dump({"""model_type""": """clip"""} , open(__snake_case , """w""" ) )
snake_case = CustomImageProcessor.from_pretrained(__snake_case )
# Now that the config is registered, it can be used as any other config with the auto-API
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(__snake_case )
snake_case = AutoImageProcessor.from_pretrained(__snake_case )
self.assertIsInstance(__snake_case , __snake_case )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
def lowerCAmelCase ( self : Dict )-> Optional[int]:
class _lowerCAmelCase ( A__ ):
"""simple docstring"""
snake_case_ = True
try:
AutoConfig.register("""custom""" , __snake_case )
AutoImageProcessor.register(__snake_case , __snake_case )
# If remote code is not set, the default is to use local
snake_case = AutoImageProcessor.from_pretrained("""hf-internal-testing/test_dynamic_image_processor""" )
self.assertEqual(image_processor.__class__.__name__ , """NewImageProcessor""" )
self.assertTrue(image_processor.is_local )
# If remote code is disabled, we load the local one.
snake_case = AutoImageProcessor.from_pretrained(
"""hf-internal-testing/test_dynamic_image_processor""" , trust_remote_code=__snake_case )
self.assertEqual(image_processor.__class__.__name__ , """NewImageProcessor""" )
self.assertTrue(image_processor.is_local )
# If remote is enabled, we load from the Hub
snake_case = AutoImageProcessor.from_pretrained(
"""hf-internal-testing/test_dynamic_image_processor""" , trust_remote_code=__snake_case )
self.assertEqual(image_processor.__class__.__name__ , """NewImageProcessor""" )
self.assertTrue(not hasattr(__snake_case , """is_local""" ) )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
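    # Editor's note (sketch, not part of the test suite): the happy path these tests
    # exercise boils down to
    #
    #     processor = AutoImageProcessor.from_pretrained("openai/clip-vit-base-patch32")
    #
    # which resolves the checkpoint's preprocessor_config.json to a CLIPImageProcessor.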
| 3 | 1 |
"""simple docstring"""
import math
from enum import Enum
from typing import Optional, Union
from torch.optim import Optimizer
from torch.optim.lr_scheduler import LambdaLR
from .utils import logging
logger = logging.get_logger(__name__)


class SchedulerType(Enum):
    LINEAR = "linear"
    COSINE = "cosine"
    COSINE_WITH_RESTARTS = "cosine_with_restarts"
    POLYNOMIAL = "polynomial"
    CONSTANT = "constant"
    CONSTANT_WITH_WARMUP = "constant_with_warmup"
    PIECEWISE_CONSTANT = "piecewise_constant"


def get_constant_schedule(optimizer: Optimizer, last_epoch: int = -1) -> LambdaLR:
    # constant learning rate, no warmup
    return LambdaLR(optimizer, lambda _: 1, last_epoch=last_epoch)


def get_constant_schedule_with_warmup(optimizer: Optimizer, num_warmup_steps: int, last_epoch: int = -1) -> LambdaLR:
    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1.0, num_warmup_steps))
        return 1.0

    return LambdaLR(optimizer, lr_lambda, last_epoch=last_epoch)


def get_piecewise_constant_schedule(optimizer: Optimizer, step_rules: str, last_epoch: int = -1) -> LambdaLR:
    rules_dict = {}
    rule_list = step_rules.split(",")
    for rule_str in rule_list[:-1]:
        value_str, lr_str = rule_str.split(":")
        steps = int(value_str)
        lr_multiple = float(lr_str)
        rules_dict[steps] = lr_multiple
    last_lr_multiple = float(rule_list[-1])

    def create_rules_function(rules_dict, last_lr_multiple):
        def rule_func(steps: int) -> float:
            sorted_steps = sorted(rules_dict.keys())
            for i, sorted_step in enumerate(sorted_steps):
                if steps < sorted_step:
                    return rules_dict[sorted_steps[i]]
            return last_lr_multiple

        return rule_func

    rules_func = create_rules_function(rules_dict, last_lr_multiple)
    return LambdaLR(optimizer, rules_func, last_epoch=last_epoch)


def get_linear_schedule_with_warmup(optimizer, num_warmup_steps, num_training_steps, last_epoch=-1) -> LambdaLR:
    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        return max(
            0.0, float(num_training_steps - current_step) / float(max(1, num_training_steps - num_warmup_steps))
        )

    return LambdaLR(optimizer, lr_lambda, last_epoch)


def get_cosine_schedule_with_warmup(
    optimizer: Optimizer, num_warmup_steps: int, num_training_steps: int, num_cycles: float = 0.5, last_epoch: int = -1
) -> LambdaLR:
    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps))
        return max(0.0, 0.5 * (1.0 + math.cos(math.pi * float(num_cycles) * 2.0 * progress)))

    return LambdaLR(optimizer, lr_lambda, last_epoch)


def get_cosine_with_hard_restarts_schedule_with_warmup(
    optimizer: Optimizer, num_warmup_steps: int, num_training_steps: int, num_cycles: int = 1, last_epoch: int = -1
) -> LambdaLR:
    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps))
        if progress >= 1.0:
            return 0.0
        return max(0.0, 0.5 * (1.0 + math.cos(math.pi * ((float(num_cycles) * progress) % 1.0))))

    return LambdaLR(optimizer, lr_lambda, last_epoch)


def get_polynomial_decay_schedule_with_warmup(
    optimizer, num_warmup_steps, num_training_steps, lr_end=1e-7, power=1.0, last_epoch=-1
) -> LambdaLR:
    lr_init = optimizer.defaults["lr"]
    if not (lr_init > lr_end):
        raise ValueError(f"lr_end ({lr_end}) must be be smaller than initial lr ({lr_init})")

    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        elif current_step > num_training_steps:
            return lr_end / lr_init  # as LambdaLR multiplies by lr_init
        else:
            lr_range = lr_init - lr_end
            decay_steps = num_training_steps - num_warmup_steps
            pct_remaining = 1 - (current_step - num_warmup_steps) / decay_steps
            decay = lr_range * pct_remaining**power + lr_end
            return decay / lr_init  # as LambdaLR multiplies by lr_init

    return LambdaLR(optimizer, lr_lambda, last_epoch)


TYPE_TO_SCHEDULER_FUNCTION = {
    SchedulerType.LINEAR: get_linear_schedule_with_warmup,
    SchedulerType.COSINE: get_cosine_schedule_with_warmup,
    SchedulerType.COSINE_WITH_RESTARTS: get_cosine_with_hard_restarts_schedule_with_warmup,
    SchedulerType.POLYNOMIAL: get_polynomial_decay_schedule_with_warmup,
    SchedulerType.CONSTANT: get_constant_schedule,
    SchedulerType.CONSTANT_WITH_WARMUP: get_constant_schedule_with_warmup,
    SchedulerType.PIECEWISE_CONSTANT: get_piecewise_constant_schedule,
}


def get_scheduler(
    name: Union[str, SchedulerType],
    optimizer: Optimizer,
    step_rules: Optional[str] = None,
    num_warmup_steps: Optional[int] = None,
    num_training_steps: Optional[int] = None,
    num_cycles: int = 1,
    power: float = 1.0,
    last_epoch: int = -1,
) -> LambdaLR:
    name = SchedulerType(name)
    schedule_func = TYPE_TO_SCHEDULER_FUNCTION[name]
    if name == SchedulerType.CONSTANT:
        return schedule_func(optimizer, last_epoch=last_epoch)

    if name == SchedulerType.PIECEWISE_CONSTANT:
        return schedule_func(optimizer, step_rules=step_rules, last_epoch=last_epoch)

    # All other schedulers require `num_warmup_steps`
    if num_warmup_steps is None:
        raise ValueError(f"{name} requires `num_warmup_steps`, please provide that argument.")

    if name == SchedulerType.CONSTANT_WITH_WARMUP:
        return schedule_func(optimizer, num_warmup_steps=num_warmup_steps, last_epoch=last_epoch)

    # All other schedulers require `num_training_steps`
    if num_training_steps is None:
        raise ValueError(f"{name} requires `num_training_steps`, please provide that argument.")

    if name == SchedulerType.COSINE_WITH_RESTARTS:
        return schedule_func(
            optimizer,
            num_warmup_steps=num_warmup_steps,
            num_training_steps=num_training_steps,
            num_cycles=num_cycles,
            last_epoch=last_epoch,
        )

    if name == SchedulerType.POLYNOMIAL:
        return schedule_func(
            optimizer,
            num_warmup_steps=num_warmup_steps,
            num_training_steps=num_training_steps,
            power=power,
            last_epoch=last_epoch,
        )

    return schedule_func(
        optimizer, num_warmup_steps=num_warmup_steps, num_training_steps=num_training_steps, last_epoch=last_epoch
    )
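
# Usage sketch (editor's addition): drive a vanilla SGD optimizer with a linear
# warmup-then-decay schedule; `get_scheduler` dispatches to the factories above.
#
#     import torch
#     opt = torch.optim.SGD([torch.nn.Parameter(torch.zeros(1))], lr=0.1)
#     sched = get_scheduler("linear", opt, num_warmup_steps=10, num_training_steps=100)
#     for _ in range(100):
#         opt.step()
#         sched.step()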
| 132 |
"""simple docstring"""
from dataclasses import dataclass
from enum import Enum
from typing import List, Optional, Union
import numpy as np
import PIL
from PIL import Image
from ...utils import BaseOutput, is_torch_available, is_transformers_available
@dataclass
class SemanticStableDiffusionPipelineOutput(BaseOutput):
    images: Union[List[PIL.Image.Image], np.ndarray]
    nsfw_content_detected: Optional[List[bool]]
if is_transformers_available() and is_torch_available():
from .pipeline_semantic_stable_diffusion import SemanticStableDiffusionPipeline
| 132 | 1 |
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_torch_available
from transformers.testing_utils import require_torch, torch_device
if is_torch_available():
from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments
@require_torch
class __snake_case ( unittest.TestCase ):
'''simple docstring'''
    def check_results_dict_not_empty(self, results):
        for model_result in results.values():
            for batch_size, sequence_length in zip(model_result["bs"], model_result["ss"]):
                result = model_result["result"][batch_size][sequence_length]
                self.assertIsNotNone(result)
def UpperCAmelCase__ ( self : List[str] ):
__snake_case: List[str] = """sshleifer/tiny-gpt2"""
__snake_case: Optional[int] = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=A , inference=A , sequence_lengths=[8] , batch_sizes=[1] , multi_process=A , )
__snake_case: str = PyTorchBenchmark(A )
__snake_case: List[Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def UpperCAmelCase__ ( self : Any ):
__snake_case: str = """sgugger/tiny-distilbert-classification"""
__snake_case: List[str] = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=A , inference=A , sequence_lengths=[8] , batch_sizes=[1] , multi_process=A , only_pretrain_model=A , )
__snake_case: Optional[Any] = PyTorchBenchmark(A )
__snake_case: Tuple = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def UpperCAmelCase__ ( self : Tuple ):
__snake_case: Dict = """sshleifer/tiny-gpt2"""
__snake_case: List[str] = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=A , inference=A , torchscript=A , sequence_lengths=[8] , batch_sizes=[1] , multi_process=A , )
__snake_case: Tuple = PyTorchBenchmark(A )
__snake_case: Optional[Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
@unittest.skipIf(torch_device == """cpu""" , """Cant do half precision""" )
def UpperCAmelCase__ ( self : Tuple ):
__snake_case: str = """sshleifer/tiny-gpt2"""
__snake_case: Dict = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=A , inference=A , fpaa=A , sequence_lengths=[8] , batch_sizes=[1] , multi_process=A , )
__snake_case: Union[str, Any] = PyTorchBenchmark(A )
__snake_case: Dict = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def UpperCAmelCase__ ( self : List[Any] ):
__snake_case: List[str] = """sshleifer/tiny-gpt2"""
__snake_case: List[Any] = AutoConfig.from_pretrained(A )
# set architectures equal to `None`
__snake_case: Union[str, Any] = None
__snake_case: Any = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=A , inference=A , sequence_lengths=[8] , batch_sizes=[1] , multi_process=A , )
__snake_case: List[Any] = PyTorchBenchmark(A , configs=[config] )
__snake_case: Tuple = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def UpperCAmelCase__ ( self : List[str] ):
__snake_case: str = """sshleifer/tiny-gpt2"""
__snake_case: int = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=A , inference=A , sequence_lengths=[8] , batch_sizes=[1] , multi_process=A , )
__snake_case: str = PyTorchBenchmark(A )
__snake_case: Tuple = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
@unittest.skipIf(torch_device == """cpu""" , """Can't do half precision""" )
def UpperCAmelCase__ ( self : Dict ):
__snake_case: List[str] = """sshleifer/tiny-gpt2"""
__snake_case: List[str] = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=A , inference=A , sequence_lengths=[8] , batch_sizes=[1] , fpaa=A , multi_process=A , )
__snake_case: int = PyTorchBenchmark(A )
__snake_case: List[str] = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def UpperCAmelCase__ ( self : int ):
__snake_case: Optional[int] = """sshleifer/tiny-gpt2"""
__snake_case: Tuple = AutoConfig.from_pretrained(A )
__snake_case: Union[str, Any] = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=A , inference=A , sequence_lengths=[8] , batch_sizes=[1] , multi_process=A , )
__snake_case: List[Any] = PyTorchBenchmark(A , configs=[config] )
__snake_case: Tuple = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def UpperCAmelCase__ ( self : Optional[int] ):
__snake_case: Tuple = """sshleifer/tinier_bart"""
__snake_case: Optional[int] = AutoConfig.from_pretrained(A )
__snake_case: List[str] = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=A , inference=A , sequence_lengths=[8] , batch_sizes=[1] , multi_process=A , )
__snake_case: Union[str, Any] = PyTorchBenchmark(A , configs=[config] )
__snake_case: Tuple = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def UpperCAmelCase__ ( self : Any ):
__snake_case: Tuple = """sshleifer/tiny-gpt2"""
__snake_case: Union[str, Any] = AutoConfig.from_pretrained(A )
__snake_case: List[str] = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=A , inference=A , sequence_lengths=[8] , batch_sizes=[1] , multi_process=A , )
__snake_case: int = PyTorchBenchmark(A , configs=[config] )
__snake_case: List[Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def UpperCAmelCase__ ( self : Dict ):
__snake_case: Tuple = """sshleifer/tinier_bart"""
__snake_case: List[Any] = AutoConfig.from_pretrained(A )
__snake_case: Tuple = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=A , inference=A , sequence_lengths=[8] , batch_sizes=[1] , multi_process=A , )
__snake_case: Optional[Any] = PyTorchBenchmark(A , configs=[config] )
__snake_case: int = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def UpperCAmelCase__ ( self : Tuple ):
__snake_case: Optional[int] = """sshleifer/tiny-gpt2"""
with tempfile.TemporaryDirectory() as tmp_dir:
__snake_case: Optional[Any] = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=A , inference=A , save_to_csv=A , sequence_lengths=[8] , batch_sizes=[1] , inference_time_csv_file=os.path.join(A , """inf_time.csv""" ) , train_memory_csv_file=os.path.join(A , """train_mem.csv""" ) , inference_memory_csv_file=os.path.join(A , """inf_mem.csv""" ) , train_time_csv_file=os.path.join(A , """train_time.csv""" ) , env_info_csv_file=os.path.join(A , """env.csv""" ) , multi_process=A , )
__snake_case: Dict = PyTorchBenchmark(A )
benchmark.run()
self.assertTrue(Path(os.path.join(A , """inf_time.csv""" ) ).exists() )
self.assertTrue(Path(os.path.join(A , """train_time.csv""" ) ).exists() )
self.assertTrue(Path(os.path.join(A , """inf_mem.csv""" ) ).exists() )
self.assertTrue(Path(os.path.join(A , """train_mem.csv""" ) ).exists() )
self.assertTrue(Path(os.path.join(A , """env.csv""" ) ).exists() )
def UpperCAmelCase__ ( self : List[str] ):
__snake_case: int = """sshleifer/tiny-gpt2"""
def _check_summary_is_not_empty(A : List[str] ):
self.assertTrue(hasattr(A , """sequential""" ) )
self.assertTrue(hasattr(A , """cumulative""" ) )
self.assertTrue(hasattr(A , """current""" ) )
self.assertTrue(hasattr(A , """total""" ) )
with tempfile.TemporaryDirectory() as tmp_dir:
__snake_case: Optional[int] = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=A , inference=A , sequence_lengths=[8] , batch_sizes=[1] , log_filename=os.path.join(A , """log.txt""" ) , log_print=A , trace_memory_line_by_line=A , multi_process=A , )
__snake_case: List[Any] = PyTorchBenchmark(A )
__snake_case: List[Any] = benchmark.run()
_check_summary_is_not_empty(result.inference_summary )
_check_summary_is_not_empty(result.train_summary )
self.assertTrue(Path(os.path.join(A , """log.txt""" ) ).exists() )
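    # Minimal standalone run (editor's sketch mirroring the tests above):
    #
    #     args = PyTorchBenchmarkArguments(
    #         models=["sshleifer/tiny-gpt2"], training=False, inference=True,
    #         sequence_lengths=[8], batch_sizes=[1], multi_process=False,
    #     )
    #     results = PyTorchBenchmark(args).run()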
| 293 |
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import (
ImageTextPipelineOutput,
UniDiffuserPipeline,
)
else:
from .modeling_text_decoder import UniDiffuserTextDecoder
    from .modeling_uvit import UniDiffuserModel, UTransformer2DModel
from .pipeline_unidiffuser import ImageTextPipelineOutput, UniDiffuserPipeline
| 293 | 1 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''hustvl/yolos-small''': '''https://huggingface.co/hustvl/yolos-small/resolve/main/config.json''',
# See all YOLOS models at https://huggingface.co/models?filter=yolos
}
class YolosConfig(PretrainedConfig):
    model_type = "yolos"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=[512, 864],
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        num_detection_tokens=100,
        use_mid_position_embeddings=True,
        auxiliary_loss=False,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.num_detection_tokens = num_detection_tokens
        self.use_mid_position_embeddings = use_mid_position_embeddings
        self.auxiliary_loss = auxiliary_loss
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
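
# Usage sketch (editor's addition): instantiate the default config and tweak the
# number of detection tokens, as YolosForObjectDetection would consume it.
#
#     config = YolosConfig(num_detection_tokens=50)
#     assert config.image_size == [512, 864]
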
class YolosOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    @property
    def default_onnx_opset(self) -> int:
        return 12
| 206 |
'''simple docstring'''
import argparse
import struct
import unittest
class SHA256:
    def __init__(self, data: bytes) -> None:
        self.data = data
        # Initialize hash values
        self.hashes = [
0X6A09_E667,
0XBB67_AE85,
0X3C6E_F372,
0XA54F_F53A,
0X510E_527F,
0X9B05_688C,
0X1F83_D9AB,
0X5BE0_CD19,
]
        # Initialize round constants
        self.round_constants = [
0X428A_2F98,
0X7137_4491,
0XB5C0_FBCF,
0XE9B5_DBA5,
0X3956_C25B,
0X59F1_11F1,
0X923F_82A4,
0XAB1C_5ED5,
0XD807_AA98,
0X1283_5B01,
0X2431_85BE,
0X550C_7DC3,
0X72BE_5D74,
0X80DE_B1FE,
0X9BDC_06A7,
0XC19B_F174,
0XE49B_69C1,
0XEFBE_4786,
0X0FC1_9DC6,
0X240C_A1CC,
0X2DE9_2C6F,
0X4A74_84AA,
0X5CB0_A9DC,
0X76F9_88DA,
0X983E_5152,
0XA831_C66D,
0XB003_27C8,
0XBF59_7FC7,
0XC6E0_0BF3,
0XD5A7_9147,
0X06CA_6351,
0X1429_2967,
0X27B7_0A85,
0X2E1B_2138,
0X4D2C_6DFC,
0X5338_0D13,
0X650A_7354,
0X766A_0ABB,
0X81C2_C92E,
0X9272_2C85,
0XA2BF_E8A1,
0XA81A_664B,
0XC24B_8B70,
0XC76C_51A3,
0XD192_E819,
0XD699_0624,
0XF40E_3585,
0X106A_A070,
0X19A4_C116,
0X1E37_6C08,
0X2748_774C,
0X34B0_BCB5,
0X391C_0CB3,
0X4ED8_AA4A,
0X5B9C_CA4F,
0X682E_6FF3,
0X748F_82EE,
0X78A5_636F,
0X84C8_7814,
0X8CC7_0208,
0X90BE_FFFA,
0XA450_6CEB,
0XBEF9_A3F7,
0XC671_78F2,
]
        self.preprocessed_data = self.preprocessing(self.data)
        self.final_hash()

    @staticmethod
    def preprocessing(data: bytes) -> bytes:
        padding = b"\x80" + (b"\x00" * (63 - (len(data) + 8) % 64))
        big_endian_integer = struct.pack(">Q", (len(data) * 8))
        return data + padding + big_endian_integer

    def final_hash(self) -> None:
        # Convert into blocks of 64 bytes
        self.blocks = [
            self.preprocessed_data[x : x + 64]
            for x in range(0, len(self.preprocessed_data), 64)
        ]
        for block in self.blocks:
            # Convert the given block into a list of 4 byte integers
            words = list(struct.unpack(">16L", block))
            # add 48 0-ed integers
            words += [0] * 48
            a, b, c, d, e, f, g, h = self.hashes

            for index in range(0, 64):
                if index > 15:
                    # modify the zero-ed indexes at the end of the array
                    s0 = (
                        self.ror(words[index - 15], 7)
                        ^ self.ror(words[index - 15], 18)
                        ^ (words[index - 15] >> 3)
                    )
                    s1 = (
                        self.ror(words[index - 2], 17)
                        ^ self.ror(words[index - 2], 19)
                        ^ (words[index - 2] >> 10)
                    )
                    words[index] = (
                        words[index - 16] + s0 + words[index - 7] + s1
                    ) % 0X1_0000_0000

                # Compression
                s1 = self.ror(e, 6) ^ self.ror(e, 11) ^ self.ror(e, 25)
                ch = (e & f) ^ ((~e & 0XFFFF_FFFF) & g)
                temp1 = (
                    h + s1 + ch + self.round_constants[index] + words[index]
                ) % 0X1_0000_0000
                s0 = self.ror(a, 2) ^ self.ror(a, 13) ^ self.ror(a, 22)
                maj = (a & b) ^ (a & c) ^ (b & c)
                temp2 = (s0 + maj) % 0X1_0000_0000

                a, b, c, d, e, f, g, h = (
                    (temp1 + temp2) % 0X1_0000_0000,
                    a,
                    b,
                    c,
                    (d + temp1) % 0X1_0000_0000,
                    e,
                    f,
                    g,
                )

            mutated_hash_values = [a, b, c, d, e, f, g, h]
            # Modify final values
            self.hashes = [
                ((element + mutated_hash_values[index]) % 0X1_0000_0000)
                for index, element in enumerate(self.hashes)
            ]

        self.hash = "".join([hex(value)[2:].zfill(8) for value in self.hashes])

    def ror(self, value: int, rotations: int) -> int:
        return 0XFFFF_FFFF & (value << (32 - rotations)) | (value >> rotations)
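
# Quick sanity check (editor's sketch, not part of the original algorithm file):
# the pure-Python digest should match hashlib for arbitrary input.
#
#     >>> import hashlib
#     >>> SHA256(b"abc").hash == hashlib.sha256(b"abc").hexdigest()
#     True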
class SHA256HashTest(unittest.TestCase):
    def test_match_hashes(self) -> None:
        import hashlib

        msg = bytes("Test String", "utf-8")
        self.assertEqual(SHA256(msg).hash, hashlib.sha256(msg).hexdigest())


def main() -> None:
    import doctest

    doctest.testmod()

    parser = argparse.ArgumentParser()
    parser.add_argument(
        "-s",
        "--string",
        dest="input_string",
        default="Hello World!! Welcome to Cryptography",
        help="Hash the string",
    )
    parser.add_argument(
        "-f", "--file", dest="input_file", help="Hash contents of a file"
    )
    args = parser.parse_args()

    input_string = args.input_string

    # hash input should be a bytestring
    if args.input_file:
        with open(args.input_file, "rb") as f:
            hash_input = f.read()
    else:
        hash_input = bytes(input_string, "utf-8")

    print(SHA256(hash_input).hash)


if __name__ == "__main__":
    main()
| 206 | 1 |
'''simple docstring'''
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, Encoder, VectorQuantizer
@dataclass
class VQEncoderOutput(BaseOutput):
    """Output of the VQModel encoding method; `latents` is the encoded sample."""

    latents: torch.FloatTensor


class VQModel(ModelMixin, ConfigMixin):
    @register_to_config
    def __init__(
        self,
        in_channels: int = 3,
        out_channels: int = 3,
        down_block_types: Tuple[str] = ("DownEncoderBlock2D",),
        up_block_types: Tuple[str] = ("UpDecoderBlock2D",),
        block_out_channels: Tuple[int] = (64,),
        layers_per_block: int = 1,
        act_fn: str = "silu",
        latent_channels: int = 3,
        sample_size: int = 32,
        num_vq_embeddings: int = 256,
        norm_num_groups: int = 32,
        vq_embed_dim: Optional[int] = None,
        scaling_factor: float = 0.18215,
        norm_type: str = "group",
    ) -> None:
        super().__init__()

        # pass init params to Encoder
        self.encoder = Encoder(
            in_channels=in_channels, out_channels=latent_channels, down_block_types=down_block_types,
            block_out_channels=block_out_channels, layers_per_block=layers_per_block, act_fn=act_fn,
            norm_num_groups=norm_num_groups, double_z=False,
        )

        vq_embed_dim = vq_embed_dim if vq_embed_dim is not None else latent_channels

        self.quant_conv = nn.Conv2d(latent_channels, vq_embed_dim, 1)
        self.quantize = VectorQuantizer(num_vq_embeddings, vq_embed_dim, beta=0.25, remap=None, sane_index_shape=False)
        self.post_quant_conv = nn.Conv2d(vq_embed_dim, latent_channels, 1)

        # pass init params to Decoder
        self.decoder = Decoder(
            in_channels=latent_channels, out_channels=out_channels, up_block_types=up_block_types,
            block_out_channels=block_out_channels, layers_per_block=layers_per_block, act_fn=act_fn,
            norm_num_groups=norm_num_groups, norm_type=norm_type,
        )

    @apply_forward_hook
    def encode(self, x: torch.FloatTensor, return_dict: bool = True) -> VQEncoderOutput:
        h = self.encoder(x)
        h = self.quant_conv(h)

        if not return_dict:
            return (h,)

        return VQEncoderOutput(latents=h)

    @apply_forward_hook
    def decode(
        self, h: torch.FloatTensor, force_not_quantize: bool = False, return_dict: bool = True
    ) -> Union[DecoderOutput, torch.FloatTensor]:
        # also go through quantization layer
        if not force_not_quantize:
            quant, commit_loss, info = self.quantize(h)
        else:
            quant = h
        quant2 = self.post_quant_conv(quant)
        dec = self.decoder(quant2, quant if self.config.norm_type == "spatial" else None)

        if not return_dict:
            return (dec,)

        return DecoderOutput(sample=dec)

    def forward(self, sample: torch.FloatTensor, return_dict: bool = True) -> Union[DecoderOutput, torch.FloatTensor]:
        x = sample
        h = self.encode(x).latents
        dec = self.decode(h).sample

        if not return_dict:
            return (dec,)

        return DecoderOutput(sample=dec)
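
# Roundtrip sketch (editor's addition): encode a random image batch to latents and
# decode it back; with the default single-block config the spatial size is preserved.
#
#     model = VQModel()
#     x = torch.randn(1, 3, 32, 32)
#     latents = model.encode(x).latents
#     recon = model.decode(latents).sample   # same shape as x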
| 367 |
'''simple docstring'''
import argparse
import re
from flax.traverse_util import flatten_dict, unflatten_dict
from t5x import checkpoints
from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
from transformers.utils import logging
logging.set_verbosity_info()
# should not include what is already done by the `from_pt` argument
MOE_LAYER_NAME_MAPPING = {
"/attention/": "/0/SelfAttention/",
"/self_attention/": "/0/SelfAttention/",
"/encoder_decoder_attention/": "/1/EncDecAttention/",
"value": "v",
"query": "q",
"key": "k",
"out": "o",
"pre_self_attention_layer_norm": "0/layer_norm",
"pre_cross_attention_layer_norm": "1/layer_norm",
"pre_attention_layer_norm": "0/layer_norm", # previously 1, but seems wrong
"token_embedder": "shared",
"encoder_norm": "final_layer_norm",
"decoder_norm": "final_layer_norm",
"relpos_bias/rel_embedding": "block/0/layer/0/SelfAttention/relative_attention_bias/weight",
"router/router_weights/w/": "router/classifier/",
"roer/roer_weights/w/": "router/classifier/",
"logits_dense": "lm_head",
}
def rename_keys( s_dict ):
    # 1. in HF T5, we have block.{x}.layer.{y}. which corresponds to layer.{x} in
    # the original model
    keys = list(s_dict.keys() )
    for key in keys:
        layer_to_block_of_layer = R'.*/layers_(\d+)'
        new_key = key
        if re.match(layer_to_block_of_layer , key ):
            new_key = re.sub(R'layers_(\d+)' , R'block/\1/layer' , new_key )
        layer_to_block_of_layer = R'(encoder|decoder)\/'
        if re.match(layer_to_block_of_layer , key ):
            groups = re.match(layer_to_block_of_layer , new_key ).groups()
            if groups[0] == "encoder":
                new_key = re.sub(R'/mlp/' , R'/1/mlp/' , new_key )
                new_key = re.sub(R'/pre_mlp_layer_norm/' , R'/1/layer_norm/' , new_key )
            elif groups[0] == "decoder":
                new_key = re.sub(R'/mlp/' , R'/2/mlp/' , new_key )
                new_key = re.sub(R'/pre_mlp_layer_norm/' , R'/2/layer_norm/' , new_key )
        # 2. Convert other classic mappings
        for old_key, temp_key in MOE_LAYER_NAME_MAPPING.items():
            if old_key in new_key:
                new_key = new_key.replace(old_key , temp_key )
        print(F"""{key} -> {new_key}""" )
        s_dict[new_key] = s_dict.pop(key )
    if "encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
        s_dict["encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"] = s_dict[
            'encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight'
        ].T
    if "decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
        s_dict["decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"] = s_dict[
            'decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight'
        ].T
    # 3. Take extra care of the EXPERTS layer
    for key in list(s_dict.keys() ):
        if "expert" in key:
            num_experts = s_dict[key].shape[0]
            expert_weights = s_dict[key]
            for idx in range(num_experts ):
                s_dict[key.replace("expert/" , F"""experts/expert_{idx}/""" )] = expert_weights[idx]
                print(F"""{key} -> {key.replace('expert/' , F'experts/expert_{idx}/' )}""" )
            s_dict.pop(key )
    return s_dict
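# Illustrative renaming (hand-traced through the regexes and MOE_LAYER_NAME_MAPPING
# above; this is an assumed example, not output captured from a real checkpoint):
#   "encoder/layers_0/attention/key/kernel"
#       -> "encoder/block/0/layer/0/SelfAttention/k/kernel"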
GIN_TO_CONFIG_MAPPING = {
"NUM_ENCODER_LAYERS": "num_layers",
"NUM_DECODER_LAYERS": "num_decoder_layers",
"NUM_HEADS": "num_heads",
"HEAD_DIM": "d_kv",
"EMBED_DIM": "d_model",
"MLP_DIM": "d_ff",
"NUM_SELECTED_EXPERTS": "num_selected_experts",
"NUM_ENCODER_SPARSE_LAYERS": "num_sparse_encoder_layers",
"NUM_DECODER_SPARSE_LAYERS": "num_sparse_decoder_layers",
"dense.MlpBlock.activations": "feed_forward_proj",
}
def convert_gin_to_config( gin_file , num_experts ):
    # Convert a Google-style gin config to the Hugging Face format
    import regex as re
    with open(gin_file , 'r' ) as f:
        raw_gin = f.read()
    regex_match = re.findall(R'(.*) = ([0-9.]*)' , raw_gin )
    args = {}
    for param, value in regex_match:
        if param in GIN_TO_CONFIG_MAPPING and value != "":
            args[GIN_TO_CONFIG_MAPPING[param]] = float(value ) if '.' in value else int(value )
    activation = re.findall(R'(.*activations) = \(\'(.*)\',\)' , raw_gin )[0]
    args[GIN_TO_CONFIG_MAPPING['dense.MlpBlock.activations']] = str(activation[1] )
    args['num_experts'] = num_experts
    config = SwitchTransformersConfig(**args )
    return config
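# Illustrative behaviour (assumed gin contents, hand-traced through the code above):
# a line "NUM_HEADS = 12" becomes args["num_heads"] = 12, and "MLP_DIM = 3072"
# becomes args["d_ff"] = 3072, via the GIN_TO_CONFIG_MAPPING table.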
def convert_flax_checkpoint_to_pytorch( flax_checkpoint_path , config_file , gin_file=None , pytorch_dump_path="./" , num_experts=8 ):
    # Initialise PyTorch model
    print(F"""Loading flax weights from : {flax_checkpoint_path}""" )
    flax_params = checkpoints.load_t5x_checkpoint(flax_checkpoint_path )
    if gin_file is not None:
        config = convert_gin_to_config(gin_file , num_experts )
    else:
        config = SwitchTransformersConfig.from_pretrained(config_file )
    pt_model = SwitchTransformersForConditionalGeneration(config )
    flax_params = flax_params['target']
    flax_params = flatten_dict(flax_params , sep='/' )
    flax_params = rename_keys(flax_params )
    flax_params = unflatten_dict(flax_params , sep='/' )
    # Load the flax params in the PT model
    load_flax_weights_in_pytorch_model(pt_model , flax_params )
    print(F"""Save PyTorch model to {pytorch_dump_path}""" )
    pt_model.save_pretrained(pytorch_dump_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--switch_t5x_checkpoint_path",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained SwitchTransformers model. \nThis specifies the"
" model architecture. If not provided, a `gin_file` has to be provided."
),
)
parser.add_argument(
"--gin_file",
default=None,
type=str,
required=False,
help="Path to the gin config file. If not provided, a `config_file` has to be passed ",
)
parser.add_argument(
"--config_name", default=None, type=str, required=False, help="Config name of SwitchTransformers model."
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output pytorch model."
)
parser.add_argument("--num_experts", default=8, type=int, required=False, help="Number of experts")
    args = parser.parse_args()
convert_flax_checkpoint_to_pytorch(
        args.switch_t5x_checkpoint_path,
args.config_name,
args.gin_file,
args.pytorch_dump_folder_path,
args.num_experts,
)
| 280 | 0 |
"""simple docstring"""
def binomial_coefficient( n , k ) -> int:
    result = 1 # To keep the calculated value
    # Since C(n, k) = C(n, n-k)
    if k > (n - k):
        k = n - k
    # Calculate C(n,k)
    for i in range(k ):
        result *= n - i
        result //= i + 1
    return result
def catalan_number( node_count ) -> int:
    return binomial_coefficient(2 * node_count , node_count ) // (node_count + 1)
def factorial( n ) -> int:
    if n < 0:
        raise ValueError("""factorial() not defined for negative values""" )
    result = 1
    for i in range(1 , n + 1 ):
        result *= i
    return result
def binary_tree_count( node_count ) -> int:
    return catalan_number(node_count ) * factorial(node_count )
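# Hand-checked example (added as a sanity check): with 3 nodes, catalan_number(3) == 5
# distinct binary search trees, and binary_tree_count(3) == 5 * 3! == 30 binary trees.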
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE : Union[str, Any] = int(input('Enter the number of nodes: ').strip() or 0)
if node_count <= 0:
raise ValueError('We need some nodes to work with.')
print(
f"""Given {node_count} nodes, there are {binary_tree_count(node_count)} """
f"""binary trees and {catalan_number(node_count)} binary search trees."""
)
| 347 | """simple docstring"""
def gcd( a : int , b : int ) -> int:
    '''simple docstring'''
    while a != 0:
        a , b = b % a, a
    return b
def find_mod_inverse( a : int , m : int ) -> int:
    '''simple docstring'''
    if gcd(a , m ) != 1:
        msg = F"mod inverse of {a!r} and {m!r} does not exist"
        raise ValueError(msg )
    u1 , u2 , u3 = 1, 0, a
    v1 , v2 , v3 = 0, 1, m
    while v3 != 0:
        q = u3 // v3
        v1 , v2 , v3 , u1 , u2 , u3 = (u1 - q * v1), (u2 - q * v2), (u3 - q * v3), v1, v2, v3
    return u1 % m
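# Worked examples (added, hand-checked): gcd(24, 36) == 12, and
# find_mod_inverse(3, 7) == 5, since (3 * 5) % 7 == 1.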
| 172 | 0 |
import collections
import os
import re
from pathlib import Path
PATH_TO_TRANSFORMERS = "src/transformers"
# Matches is_xxx_available()
_re_backend = re.compile(R'is\_([a-z_]*)_available()')
# Catches a one-line _import_struct = {xxx}
_re_one_line_import_struct = re.compile(R'^_import_structure\s+=\s+\{([^\}]+)\}')
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
_re_import_struct_key_value = re.compile(R'\s+\"\S*\":\s+\[([^\]]*)\]')
# Catches a line if not is_foo_available
_re_test_backend = re.compile(R'^\s*if\s+not\s+is\_[a-z_]*\_available\(\)')
# Catches a line _import_struct["bla"].append("foo")
_re_import_struct_add_one = re.compile(R'^\s*_import_structure\[\"\S*\"\]\.append\(\"(\S*)\"\)')
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
_re_import_struct_add_many = re.compile(R'^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]')
# Catches a line with an object between quotes and a comma: "MyModel",
_re_quote_object = re.compile(R'^\s+\"([^\"]+)\",')
# Catches a line with objects between brackets only: ["foo", "bar"],
_re_between_brackets = re.compile(R'^\s+\[([^\]]+)\]')
# Catches a line with from foo import bar, bla, boo
_re_import = re.compile(R'\s+from\s+\S*\s+import\s+([^\(\s].*)\n')
# Catches a line with try:
_re_try = re.compile(R'^\s*try:')
# Catches a line with else:
_re_else = re.compile(R'^\s*else:')
def find_backend(line ):
    '''Find one (or multiple) backend in a code line of the init.'''
    if _re_test_backend.search(line ) is None:
        return None
    backends = [b[0] for b in _re_backend.findall(line )]
    backends.sort()
    return "_and_".join(backends )
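# Hand-traced examples (added; assumed inputs, checked against the regexes above):
#   find_backend("    if not is_torch_available():")  # -> "torch"
#   find_backend("import os")                         # -> None (not a backend guard)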
def parse_init(init_file ):
    '''Parse the _import_structure and TYPE_CHECKING objects defined in an __init__.py.'''
    with open(init_file , "r" , encoding="utf-8" , newline="\n" ) as f:
        lines = f.readlines()
    line_index = 0
    while line_index < len(lines ) and not lines[line_index].startswith("_import_structure = {" ):
        line_index += 1
    # If this is a traditional init, just return.
    if line_index >= len(lines ):
        return None
    # First grab the objects without a specific backend in _import_structure
    objects = []
    while not lines[line_index].startswith("if TYPE_CHECKING" ) and find_backend(lines[line_index] ) is None:
        line = lines[line_index]
        # If we have everything on a single line, let's deal with it.
        if _re_one_line_import_struct.search(line ):
            content = _re_one_line_import_struct.search(line ).groups()[0]
            imports = re.findall(r"\[([^\]]+)\]" , content )
            for imp in imports:
                objects.extend([obj[1:-1] for obj in imp.split(", " )] )
            line_index += 1
            continue
        single_line_import_search = _re_import_struct_key_value.search(line )
        if single_line_import_search is not None:
            imports = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(", " ) if len(obj ) > 0]
            objects.extend(imports )
        elif line.startswith(" " * 8 + "\"" ):
            objects.append(line[9:-3] )
        line_index += 1
    import_dict_objects = {"""none""": objects}
    # Let's continue with backend-specific objects in _import_structure
    while not lines[line_index].startswith("if TYPE_CHECKING" ):
        # If the line is an if not is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index] )
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1] ) is None:
            backend = None
        if backend is not None:
            line_index += 1
            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index] ) is None:
                line_index += 1
            line_index += 1
            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index] ) <= 1 or lines[line_index].startswith(" " * 4 ):
                line = lines[line_index]
                if _re_import_struct_add_one.search(line ) is not None:
                    objects.append(_re_import_struct_add_one.search(line ).groups()[0] )
                elif _re_import_struct_add_many.search(line ) is not None:
                    imports = _re_import_struct_add_many.search(line ).groups()[0].split(", " )
                    imports = [obj[1:-1] for obj in imports if len(obj ) > 0]
                    objects.extend(imports )
                elif _re_between_brackets.search(line ) is not None:
                    imports = _re_between_brackets.search(line ).groups()[0].split(", " )
                    imports = [obj[1:-1] for obj in imports if len(obj ) > 0]
                    objects.extend(imports )
                elif _re_quote_object.search(line ) is not None:
                    objects.append(_re_quote_object.search(line ).groups()[0] )
                elif line.startswith(" " * 8 + "\"" ):
                    objects.append(line[9:-3] )
                elif line.startswith(" " * 12 + "\"" ):
                    objects.append(line[13:-3] )
                line_index += 1
            import_dict_objects[backend] = objects
        else:
            line_index += 1
    # At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
    objects = []
    while (
        line_index < len(lines )
        and find_backend(lines[line_index] ) is None
        and not lines[line_index].startswith("else" )
    ):
        line = lines[line_index]
        single_line_import_search = _re_import.search(line )
        if single_line_import_search is not None:
            objects.extend(single_line_import_search.groups()[0].split(", " ) )
        elif line.startswith(" " * 8 ):
            objects.append(line[8:-2] )
        line_index += 1
    type_hint_objects = {"""none""": objects}
    # Let's continue with backend-specific objects
    while line_index < len(lines ):
        # If the line is an if is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index] )
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1] ) is None:
            backend = None
        if backend is not None:
            line_index += 1
            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index] ) is None:
                line_index += 1
            line_index += 1
            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index] ) <= 1 or lines[line_index].startswith(" " * 8 ):
                line = lines[line_index]
                single_line_import_search = _re_import.search(line )
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(", " ) )
                elif line.startswith(" " * 12 ):
                    objects.append(line[12:-2] )
                line_index += 1
            type_hint_objects[backend] = objects
        else:
            line_index += 1
    return import_dict_objects, type_hint_objects
def analyze_results(import_dict_objects , type_hint_objects ):
    '''Compare the objects gathered from _import_structure and TYPE_CHECKING, listing differences.'''
    def find_duplicates(seq ):
        return [k for k, v in collections.Counter(seq ).items() if v > 1]
    if list(import_dict_objects.keys() ) != list(type_hint_objects.keys() ):
        return ["Both sides of the init do not have the same backends!"]
    errors = []
    for key in import_dict_objects.keys():
        duplicate_imports = find_duplicates(import_dict_objects[key] )
        if duplicate_imports:
            errors.append(F"""Duplicate _import_structure definitions for: {duplicate_imports}""" )
        duplicate_type_hints = find_duplicates(type_hint_objects[key] )
        if duplicate_type_hints:
            errors.append(F"""Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}""" )
        if sorted(set(import_dict_objects[key] ) ) != sorted(set(type_hint_objects[key] ) ):
            name = """base imports""" if key == """none""" else F"""{key} backend"""
            errors.append(F"""Differences for {name}:""" )
            for a in type_hint_objects[key]:
                if a not in import_dict_objects[key]:
                    errors.append(F"""  {a} in TYPE_HINT but not in _import_structure.""" )
            for a in import_dict_objects[key]:
                if a not in type_hint_objects[key]:
                    errors.append(F"""  {a} in _import_structure but not in TYPE_HINT.""" )
    return errors
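# Illustrative call (added; assumed inputs, hand-checked against the logic above):
#   analyze_results({"none": ["A", "A"]}, {"none": ["A"]})
#   # -> ["Duplicate _import_structure definitions for: ['A']"]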
def check_all_inits():
    '''Check every __init__.py in the repo and raise if at least one is inconsistent.'''
    failures = []
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS ):
        if "__init__.py" in files:
            fname = os.path.join(root , "__init__.py" )
            objects = parse_init(fname )
            if objects is not None:
                errors = analyze_results(*objects )
                if len(errors ) > 0:
                    errors[0] = F"""Problem in {fname}, both halves do not define the same objects.\n{errors[0]}"""
                    failures.append("\n".join(errors ) )
    if len(failures ) > 0:
        raise ValueError("\n\n".join(failures ) )
def get_transformers_submodules():
    '''List all submodules of the transformers package, ignoring private ones.'''
    submodules = []
    for path, directories, files in os.walk(PATH_TO_TRANSFORMERS ):
        for folder in directories:
            # Ignore private modules
            if folder.startswith("_" ):
                directories.remove(folder )
                continue
            # Ignore leftovers from branches (empty folders apart from pycache)
            if len(list((Path(path ) / folder).glob("*.py" ) ) ) == 0:
                continue
            short_path = str((Path(path ) / folder).relative_to(PATH_TO_TRANSFORMERS ) )
            submodule = short_path.replace(os.path.sep , "." )
            submodules.append(submodule )
        for fname in files:
            if fname == "__init__.py":
                continue
            short_path = str((Path(path ) / fname).relative_to(PATH_TO_TRANSFORMERS ) )
            submodule = short_path.replace(".py" , "" ).replace(os.path.sep , "." )
            if len(submodule.split("." ) ) == 1:
                submodules.append(submodule )
    return submodules
IGNORE_SUBMODULES = [
"convert_pytorch_checkpoint_to_tf2",
"modeling_flax_pytorch_utils",
"models.esm.openfold_utils",
]
def check_submodules():
    '''Verify every submodule is registered in the main init of Transformers.'''
    # This is to make sure the transformers module imported is the one in the repo.
    from transformers.utils import direct_transformers_import
    transformers = direct_transformers_import(PATH_TO_TRANSFORMERS )
    import_structure_keys = set(transformers._import_structure.keys() )
    # This contains all the base keys of the _import_structure object defined in the init, but if the user is missing
    # some optional dependencies, they may not have all of them. Thus we read the init to read all additions and
    # (potentially re-) add them.
    with open(os.path.join(PATH_TO_TRANSFORMERS , "__init__.py" ) , "r" ) as f:
        init_content = f.read()
    import_structure_keys.update(set(re.findall(r"import_structure\[\"([^\"]*)\"\]" , init_content ) ) )
    module_not_registered = [
        module
        for module in get_transformers_submodules()
        if module not in IGNORE_SUBMODULES and module not in import_structure_keys
    ]
    if len(module_not_registered ) > 0:
        list_of_modules = """\n""".join(F"""- {module}""" for module in module_not_registered )
        raise ValueError(
            "The following submodules are not properly registered in the main init of Transformers:\n"
            F"""{list_of_modules}\n"""
            "Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value." )
if __name__ == "__main__":
check_all_inits()
    check_submodules() | 362 | def solution() -> str:
    '''simple docstring'''
    total = 0
    for i in range(1 , 1_001 ):
        total += i**i
    return str(total )[-10:]
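# Known result (Project Euler 48, added as a sanity check): solution() returns
# "9110846700", the last ten digits of 1**1 + 2**2 + ... + 1000**1000.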
if __name__ == "__main__":
print(solution()) | 284 | 0 |
'''simple docstring'''
from arguments import InitializationArguments
from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer, HfArgumentParser
# Configuration
parser = HfArgumentParser(InitializationArguments)
args = parser.parse_args()
# Load codeparrot tokenizer trained for Python code tokenization
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name)
# Config: "scale_attn_by_inverse_layer_idx" and "reorder_and_upcast_attn" are Mistral stability tweaks
config_kwargs = {
'vocab_size': len(tokenizer),
'scale_attn_by_inverse_layer_idx': True,
'reorder_and_upcast_attn': True,
}
# Load model config (GPT-2 large in this case)
config = AutoConfig.from_pretrained(args.config_name, **config_kwargs)
# Initialize new model with config
model = AutoModelForCausalLM.from_config(config)
# Save model to the hub
model.save_pretrained(args.model_name, push_to_hub=args.push_to_hub)
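# Example invocation (added; the script filename is hypothetical, and the flags mirror
# the InitializationArguments attributes used above, assuming HfArgumentParser exposes
# them as CLI options):
#   python initialize_model.py --config_name gpt2-large \
#       --tokenizer_name codeparrot/codeparrot --model_name my-codeparrot-model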
| 3 |
'''simple docstring'''
import requests
from bs4 import BeautifulSoup
def get_citation( base_url , params ):
    '''simple docstring'''
    soup = BeautifulSoup(requests.get(base_url , params=params ).content , '''html.parser''' )
    div = soup.find('''div''' , attrs={'''class''': '''gs_ri'''} )
    anchors = div.find('''div''' , attrs={'''class''': '''gs_fl'''} ).find_all('''a''' )
    return anchors[2].get_text()
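# Note (added): anchors[2] is assumed to be the "Cited by N" link inside the gs_fl
# footer of the first search result; Google Scholar's markup changes often, so treat
# this selector as fragile.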
if __name__ == "__main__":
    params = {
'title': (
'Precisely geometry controlled microsupercapacitors for ultrahigh areal '
'capacitance, volumetric capacitance, and energy density'
),
'journal': 'Chem. Mater.',
'volume': 30,
'pages': '3979-3990',
'year': 20_18,
'hl': 'en',
}
print(get_citation('https://scholar.google.com/scholar_lookup', params=params))
| 3 | 1 |
from ..utils import DummyObject, requires_backends
class __snake_case ( metaclass=DummyObject ):
    _backends = ["flax"]
def __init__( self : Tuple , *_lowercase : List[str] , **_lowercase : int ):
"""simple docstring"""
requires_backends(self , ["""flax"""] )
@classmethod
def __a ( cls : List[Any] , *_lowercase : int , **_lowercase : Any ):
"""simple docstring"""
requires_backends(cls , ["""flax"""] )
@classmethod
def __a ( cls : Tuple , *_lowercase : Any , **_lowercase : int ):
"""simple docstring"""
requires_backends(cls , ["""flax"""] )
class __snake_case ( metaclass=DummyObject ):
    _backends = ["flax"]
def __init__( self : Tuple , *_lowercase : int , **_lowercase : List[Any] ):
"""simple docstring"""
requires_backends(self , ["""flax"""] )
@classmethod
def __a ( cls : Union[str, Any] , *_lowercase : int , **_lowercase : List[str] ):
"""simple docstring"""
requires_backends(cls , ["""flax"""] )
@classmethod
def __a ( cls : List[str] , *_lowercase : int , **_lowercase : int ):
"""simple docstring"""
requires_backends(cls , ["""flax"""] )
class __snake_case ( metaclass=DummyObject ):
    _backends = ["flax"]
def __init__( self : Tuple , *_lowercase : Union[str, Any] , **_lowercase : str ):
"""simple docstring"""
requires_backends(self , ["""flax"""] )
@classmethod
def __a ( cls : Optional[int] , *_lowercase : List[str] , **_lowercase : str ):
"""simple docstring"""
requires_backends(cls , ["""flax"""] )
@classmethod
def __a ( cls : Optional[int] , *_lowercase : Optional[Any] , **_lowercase : List[str] ):
"""simple docstring"""
requires_backends(cls , ["""flax"""] )
class __snake_case ( metaclass=DummyObject ):
    _backends = ["flax"]
def __init__( self : Tuple , *_lowercase : List[Any] , **_lowercase : str ):
"""simple docstring"""
requires_backends(self , ["""flax"""] )
@classmethod
def __a ( cls : int , *_lowercase : Tuple , **_lowercase : int ):
"""simple docstring"""
requires_backends(cls , ["""flax"""] )
@classmethod
def __a ( cls : Union[str, Any] , *_lowercase : int , **_lowercase : Union[str, Any] ):
"""simple docstring"""
requires_backends(cls , ["""flax"""] )
class __snake_case ( metaclass=DummyObject ):
    _backends = ["flax"]
def __init__( self : Dict , *_lowercase : Union[str, Any] , **_lowercase : str ):
"""simple docstring"""
requires_backends(self , ["""flax"""] )
@classmethod
def __a ( cls : List[Any] , *_lowercase : Optional[int] , **_lowercase : Optional[Any] ):
"""simple docstring"""
requires_backends(cls , ["""flax"""] )
@classmethod
def __a ( cls : List[str] , *_lowercase : List[Any] , **_lowercase : Optional[Any] ):
"""simple docstring"""
requires_backends(cls , ["""flax"""] )
class __snake_case ( metaclass=DummyObject ):
    _backends = ["flax"]
def __init__( self : Dict , *_lowercase : Optional[Any] , **_lowercase : Union[str, Any] ):
"""simple docstring"""
requires_backends(self , ["""flax"""] )
@classmethod
def __a ( cls : Optional[int] , *_lowercase : Union[str, Any] , **_lowercase : List[str] ):
"""simple docstring"""
requires_backends(cls , ["""flax"""] )
@classmethod
def __a ( cls : Any , *_lowercase : Any , **_lowercase : Dict ):
"""simple docstring"""
requires_backends(cls , ["""flax"""] )
class __snake_case ( metaclass=DummyObject ):
    _backends = ["flax"]
def __init__( self : Any , *_lowercase : Dict , **_lowercase : Any ):
"""simple docstring"""
requires_backends(self , ["""flax"""] )
@classmethod
def __a ( cls : List[Any] , *_lowercase : Any , **_lowercase : Optional[Any] ):
"""simple docstring"""
requires_backends(cls , ["""flax"""] )
@classmethod
def __a ( cls : Dict , *_lowercase : List[str] , **_lowercase : List[Any] ):
"""simple docstring"""
requires_backends(cls , ["""flax"""] )
class __snake_case ( metaclass=DummyObject ):
    _backends = ["flax"]
def __init__( self : List[str] , *_lowercase : Dict , **_lowercase : Optional[Any] ):
"""simple docstring"""
requires_backends(self , ["""flax"""] )
@classmethod
def __a ( cls : Any , *_lowercase : Tuple , **_lowercase : str ):
"""simple docstring"""
requires_backends(cls , ["""flax"""] )
@classmethod
def __a ( cls : Tuple , *_lowercase : Tuple , **_lowercase : Dict ):
"""simple docstring"""
requires_backends(cls , ["""flax"""] )
class __snake_case ( metaclass=DummyObject ):
    _backends = ["flax"]
def __init__( self : Dict , *_lowercase : int , **_lowercase : Optional[int] ):
"""simple docstring"""
requires_backends(self , ["""flax"""] )
@classmethod
def __a ( cls : Optional[int] , *_lowercase : Tuple , **_lowercase : Tuple ):
"""simple docstring"""
requires_backends(cls , ["""flax"""] )
@classmethod
def __a ( cls : str , *_lowercase : Union[str, Any] , **_lowercase : List[Any] ):
"""simple docstring"""
requires_backends(cls , ["""flax"""] )
class __snake_case ( metaclass=DummyObject ):
    _backends = ["flax"]
def __init__( self : Tuple , *_lowercase : str , **_lowercase : Union[str, Any] ):
"""simple docstring"""
requires_backends(self , ["""flax"""] )
@classmethod
def __a ( cls : List[Any] , *_lowercase : Tuple , **_lowercase : List[str] ):
"""simple docstring"""
requires_backends(cls , ["""flax"""] )
@classmethod
def __a ( cls : int , *_lowercase : Dict , **_lowercase : int ):
"""simple docstring"""
requires_backends(cls , ["""flax"""] )
class __snake_case ( metaclass=DummyObject ):
    _backends = ["flax"]
def __init__( self : Union[str, Any] , *_lowercase : List[Any] , **_lowercase : Union[str, Any] ):
"""simple docstring"""
requires_backends(self , ["""flax"""] )
@classmethod
def __a ( cls : Dict , *_lowercase : Dict , **_lowercase : Union[str, Any] ):
"""simple docstring"""
requires_backends(cls , ["""flax"""] )
@classmethod
def __a ( cls : List[str] , *_lowercase : Optional[int] , **_lowercase : Optional[int] ):
"""simple docstring"""
requires_backends(cls , ["""flax"""] )
class __snake_case ( metaclass=DummyObject ):
    _backends = ["flax"]
def __init__( self : Any , *_lowercase : Tuple , **_lowercase : Optional[Any] ):
"""simple docstring"""
requires_backends(self , ["""flax"""] )
@classmethod
def __a ( cls : List[str] , *_lowercase : Any , **_lowercase : Any ):
"""simple docstring"""
requires_backends(cls , ["""flax"""] )
@classmethod
def __a ( cls : List[Any] , *_lowercase : Dict , **_lowercase : int ):
"""simple docstring"""
requires_backends(cls , ["""flax"""] )
class __snake_case ( metaclass=DummyObject ):
    _backends = ["flax"]
def __init__( self : List[Any] , *_lowercase : List[str] , **_lowercase : Union[str, Any] ):
"""simple docstring"""
requires_backends(self , ["""flax"""] )
@classmethod
def __a ( cls : List[str] , *_lowercase : List[Any] , **_lowercase : Union[str, Any] ):
"""simple docstring"""
requires_backends(cls , ["""flax"""] )
@classmethod
def __a ( cls : Optional[int] , *_lowercase : Tuple , **_lowercase : Dict ):
"""simple docstring"""
requires_backends(cls , ["""flax"""] )
| 204 | from typing import List, Optional, Union
import numpy as np
from ....audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function
from ....feature_extraction_sequence_utils import SequenceFeatureExtractor
from ....feature_extraction_utils import BatchFeature
from ....file_utils import PaddingStrategy, TensorType
from ....utils import logging
logger = logging.get_logger(__name__)
class __snake_case ( SequenceFeatureExtractor ):
    model_input_names = ["input_features", "attention_mask"]
    def __init__( self , feature_size=80 , sampling_rate=1_60_00 , padding_value=0.0 , hop_length=10 , win_length=25 , win_function="hamming_window" , frame_signal_scale=3_27_68.0 , preemphasis_coeff=0.97 , mel_floor=1.0 , normalize_means=True , normalize_vars=True , return_attention_mask=False , **kwargs , ):
        """simple docstring"""
        super().__init__(feature_size=feature_size , sampling_rate=sampling_rate , padding_value=padding_value , **kwargs )
        self.feature_size = feature_size
        self.sampling_rate = sampling_rate
        self.padding_value = padding_value
        self.hop_length = hop_length
        self.win_length = win_length
        self.frame_signal_scale = frame_signal_scale
        self.preemphasis_coeff = preemphasis_coeff
        self.mel_floor = mel_floor
        self.normalize_means = normalize_means
        self.normalize_vars = normalize_vars
        self.win_function = win_function
        self.return_attention_mask = return_attention_mask
        self.sample_size = win_length * sampling_rate // 10_00
        self.sample_stride = hop_length * sampling_rate // 10_00
        self.n_fft = optimal_fft_length(self.sample_size )
        self.n_freqs = (self.n_fft // 2) + 1
    def _extract_mfsc_features( self , one_waveform: np.array ):
        """simple docstring"""
        if self.win_function == "hamming_window":
            window = window_function(window_length=self.sample_size , name=self.win_function , periodic=False )
        else:
            window = window_function(window_length=self.sample_size , name=self.win_function )
        fbanks = mel_filter_bank(
            num_frequency_bins=self.n_freqs , num_mel_filters=self.feature_size , min_frequency=0.0 , max_frequency=self.sampling_rate / 2.0 , sampling_rate=self.sampling_rate , )
        msfc_features = spectrogram(
            one_waveform * self.frame_signal_scale , window=window , frame_length=self.sample_size , hop_length=self.sample_stride , fft_length=self.n_fft , center=False , preemphasis=self.preemphasis_coeff , mel_filters=fbanks , mel_floor=self.mel_floor , log_mel="""log""" , )
        return msfc_features.T
    def _normalize_one( self , x , input_length , padding_value ):
        """simple docstring"""
        if self.normalize_means:
            mean = x[:input_length].mean(axis=0 )
            x = np.subtract(x , mean )
        if self.normalize_vars:
            std = x[:input_length].std(axis=0 )
            x = np.divide(x , std )
        if input_length < x.shape[0]:
            x[input_length:] = padding_value
        # make sure array is in float32
        x = x.astype(np.float32 )
        return x
    def normalize( self , input_features: List[np.ndarray] , attention_mask: Optional[np.ndarray] = None ):
        """simple docstring"""
        lengths = attention_mask.sum(-1 ) if attention_mask is not None else [x.shape[0] for x in input_features]
        return [self._normalize_one(x , n , self.padding_value ) for x, n in zip(input_features , lengths )]
    def __call__( self , raw_speech: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , padding: Union[bool, str, PaddingStrategy] = False , max_length: Optional[int] = None , truncation: bool = False , pad_to_multiple_of: Optional[int] = None , return_attention_mask: Optional[bool] = None , return_tensors: Optional[Union[str, TensorType]] = None , sampling_rate: Optional[int] = None , **kwargs , ):
        """simple docstring"""
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"""The model corresponding to this feature extractor: {self} was trained using a sampling rate of"""
                    f""" {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with"""
                    f""" {self.sampling_rate} and not {sampling_rate}.""" )
        else:
            logger.warning(
                """It is strongly recommended to pass the ``sampling_rate`` argument to this function. """
                """Failing to do so can result in silent errors that might be hard to debug.""" )
        is_batched_numpy = isinstance(raw_speech , np.ndarray ) and len(raw_speech.shape ) > 1
        if is_batched_numpy and len(raw_speech.shape ) > 2:
            raise ValueError(f"""Only mono-channel audio is supported for input to {self}""" )
        is_batched = is_batched_numpy or (
            isinstance(raw_speech , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
        )
        if is_batched:
            raw_speech = [np.asarray(speech , dtype=np.float32 ) for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech , np.ndarray ):
            raw_speech = np.asarray(raw_speech , dtype=np.float32 )
        elif isinstance(raw_speech , np.ndarray ) and raw_speech.dtype is np.dtype(np.float64 ):
            raw_speech = raw_speech.astype(np.float32 )
        # always return batch
        if not is_batched:
            raw_speech = [raw_speech]
        # extract fbank features
        features = [self._extract_mfsc_features(one_waveform ) for one_waveform in raw_speech]
        # convert into correct format for padding
        encoded_inputs = BatchFeature({"""input_features""": features} )
        padded_inputs = self.pad(
            encoded_inputs , padding=padding , max_length=max_length , truncation=truncation , pad_to_multiple_of=pad_to_multiple_of , return_attention_mask=True , **kwargs , )
        # make sure list is in array format
        input_features = padded_inputs.get("""input_features""" )
        if isinstance(input_features[0] , list ):
            padded_inputs["""input_features"""] = [np.asarray(feature , dtype=np.float32 ) for feature in input_features]
        attention_mask = padded_inputs.get("""attention_mask""" )
        if attention_mask is not None:
            padded_inputs["""attention_mask"""] = [np.asarray(array , dtype=np.int32 ) for array in attention_mask]
        if self.normalize_means or self.normalize_vars:
            attention_mask = (
                np.array(attention_mask , dtype=np.int32 )
                if self._get_padding_strategies(padding , max_length=max_length ) is not PaddingStrategy.DO_NOT_PAD
                and padding
                else None
            )
            padded_inputs["""input_features"""] = self.normalize(
                padded_inputs["""input_features"""] , attention_mask=attention_mask )
        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors )
        return padded_inputs
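# Hypothetical usage sketch (added; the class name above is anonymised, so
# "FeatureExtractor" is a placeholder for it):
#   fe = FeatureExtractor()  # defaults: 80 mel bins, 16 kHz
#   batch = fe([np.zeros(16_000), np.zeros(8_000)], sampling_rate=16_000,
#              padding=True, return_tensors="np")
#   batch["input_features"]  # shape (2, num_frames, 80)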
| 204 | 1 |
"""simple docstring"""
import random
import unittest
import torch
from diffusers import IFInpaintingSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class IFInpaintingSuperResolutionPipelineFastTests( PipelineTesterMixin , IFPipelineTesterMixin , unittest.TestCase ):
    """simple docstring"""
    pipeline_class = IFInpaintingSuperResolutionPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"""width""", """height"""}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS.union({"""original_image"""} )
    required_optional_params = PipelineTesterMixin.required_optional_params - {"""latents"""}
    def get_dummy_components( self ):
'''simple docstring'''
return self._get_superresolution_dummy_components()
    def get_dummy_inputs( self , device , seed=0 ):
        '''simple docstring'''
        if str(device ).startswith('mps' ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        image = floats_tensor((1, 3, 1_6, 1_6) , rng=random.Random(seed ) ).to(device )
        original_image = floats_tensor((1, 3, 3_2, 3_2) , rng=random.Random(seed ) ).to(device )
        mask_image = floats_tensor((1, 3, 3_2, 3_2) , rng=random.Random(seed ) ).to(device )
        inputs = {
            'prompt': 'A painting of a squirrel eating a burger',
            'image': image,
            'original_image': original_image,
            'mask_image': mask_image,
            'generator': generator,
            'num_inference_steps': 2,
            'output_type': 'numpy',
        }
        return inputs
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , )
    def test_xformers_attention_forwardGenerator_pass( self ):
'''simple docstring'''
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )
    def test_save_load_optional_components( self ):
'''simple docstring'''
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != 'cuda' , reason='float16 requires CUDA' )
    def test_save_load_float16( self ):
        '''simple docstring'''
        super().test_save_load_float16(expected_max_diff=1E-1 )
    def test_attention_slicing_forward_pass( self ):
'''simple docstring'''
self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 )
    def test_save_load_local( self ):
'''simple docstring'''
self._test_save_load_local()
    def test_inference_batch_single_identical( self ):
'''simple docstring'''
self._test_inference_batch_single_identical(
expected_max_diff=1E-2 , )
| 293 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DiffusionPipeline,
EulerDiscreteScheduler,
    StableDiffusionXLImg2ImgPipeline,
    UNet2DConditionModel,
)
from diffusers.utils import floats_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class StableDiffusionXLImg2ImgPipelineFastTests( PipelineLatentTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    """simple docstring"""
    pipeline_class = StableDiffusionXLImg2ImgPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"""height""", """width"""}
    required_optional_params = PipelineTesterMixin.required_optional_params - {"""latents"""}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    def get_dummy_components( self ):
        '''simple docstring'''
        torch.manual_seed(0 )
        unet = UNet2DConditionModel(
            block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , attention_head_dim=(2, 4) , use_linear_projection=True , addition_embed_type='text_time' , addition_time_embed_dim=8 , transformer_layers_per_block=(1, 2) , projection_class_embeddings_input_dim=8_0 , cross_attention_dim=6_4 , )
        scheduler = EulerDiscreteScheduler(
            beta_start=0.0_00_85 , beta_end=0.0_12 , steps_offset=1 , beta_schedule='scaled_linear' , timestep_spacing='leading' , )
        torch.manual_seed(0 )
        vae = AutoencoderKL(
            block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , sample_size=1_2_8 , )
        torch.manual_seed(0 )
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , hidden_act='gelu' , projection_dim=3_2 , )
        text_encoder = CLIPTextModel(text_encoder_config )
        tokenizer = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' , local_files_only=False )
        text_encoder_2 = CLIPTextModelWithProjection(text_encoder_config )
        tokenizer_2 = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' , local_files_only=False )
        components = {
            'unet': unet,
            'scheduler': scheduler,
            'vae': vae,
            'text_encoder': text_encoder,
            'tokenizer': tokenizer,
            'text_encoder_2': text_encoder_2,
            'tokenizer_2': tokenizer_2,
            # "safety_checker": None,
            # "feature_extractor": None,
        }
        return components
    def get_dummy_inputs( self , device , seed=0 ):
        '''simple docstring'''
        image = floats_tensor((1, 3, 3_2, 3_2) , rng=random.Random(seed ) ).to(device )
        image = image / 2 + 0.5
        if str(device ).startswith('mps' ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        inputs = {
            'prompt': 'A painting of a squirrel eating a burger',
            'image': image,
            'generator': generator,
            'num_inference_steps': 2,
            'guidance_scale': 5.0,
            'output_type': 'numpy',
            'strength': 0.75,
        }
        return inputs
    def test_stable_diffusion_xl_img2img_euler( self ):
        '''simple docstring'''
        device = 'cpu'  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionXLImg2ImgPipeline(**components )
        sd_pipe = sd_pipe.to(device )
        sd_pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs(device )
        image = sd_pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 3_2, 3_2, 3)
        expected_slice = np.array([0.46_56, 0.48_40, 0.44_39, 0.66_98, 0.55_74, 0.45_24, 0.57_99, 0.59_43, 0.51_65] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
    def test_attention_slicing_forward_pass( self ):
'''simple docstring'''
super().test_attention_slicing_forward_pass(expected_max_diff=3E-3 )
    def test_inference_batch_single_identical( self ):
'''simple docstring'''
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
    def test_save_load_optional_components( self ):
'''simple docstring'''
pass
    def test_stable_diffusion_xl_img2img_prompt_embeds( self ):
        '''simple docstring'''
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionXLImg2ImgPipeline(**components )
        sd_pipe = sd_pipe.to(torch_device )
        sd_pipe = sd_pipe.to(torch_device )
        sd_pipe.set_progress_bar_config(disable=None )
        # forward without prompt embeds
        inputs = self.get_dummy_inputs(torch_device )
        negative_prompt = 3 * ['this is a negative prompt']
        inputs['negative_prompt'] = negative_prompt
        inputs['prompt'] = 3 * [inputs['prompt']]
        output = sd_pipe(**inputs )
        image_slice_1 = output.images[0, -3:, -3:, -1]
        # forward with prompt embeds
        inputs = self.get_dummy_inputs(torch_device )
        negative_prompt = 3 * ['this is a negative prompt']
        prompt = 3 * [inputs.pop('prompt' )]
        (
            prompt_embeds,
            negative_prompt_embeds,
            pooled_prompt_embeds,
            negative_pooled_prompt_embeds,
        ) = sd_pipe.encode_prompt(prompt , negative_prompt=negative_prompt )
        output = sd_pipe(
            **inputs , prompt_embeds=prompt_embeds , negative_prompt_embeds=negative_prompt_embeds , pooled_prompt_embeds=pooled_prompt_embeds , negative_pooled_prompt_embeds=negative_pooled_prompt_embeds , )
        image_slice_2 = output.images[0, -3:, -3:, -1]
        # make sure that it's equal
        assert np.abs(image_slice_1.flatten() - image_slice_2.flatten() ).max() < 1E-4
@slow
@require_torch_gpu
class StableDiffusionXLImg2ImgPipelineIntegrationTests( unittest.TestCase ):
"""simple docstring"""
    def tearDown( self ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    def get_inputs( self , device , generator_device="cpu" , dtype=torch.float16 , seed=0 ):
        '''simple docstring'''
        generator = torch.Generator(device=generator_device ).manual_seed(seed )
        latents = np.random.RandomState(seed ).standard_normal((1, 4, 6_4, 6_4) )
        latents = torch.from_numpy(latents ).to(device=device , dtype=dtype )
        inputs = {
            'prompt': 'a photograph of an astronaut riding a horse',
            'latents': latents,
            'generator': generator,
            'num_inference_steps': 3,
            'guidance_scale': 7.5,
            'output_type': 'numpy',
        }
        return inputs
    def test_stable_diffusion( self ):
        '''simple docstring'''
        pipe = DiffusionPipeline.from_pretrained('stabilityai/stable-diffusion-2-base' )
        pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_inputs(torch_device )
        image = pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1].flatten()
        assert image.shape == (1, 5_1_2, 5_1_2, 3)
        expected_slice = np.array([0.4_94_93, 0.4_78_96, 0.4_07_98, 0.5_42_14, 0.5_32_12, 0.4_82_02, 0.4_76_56, 0.4_63_29, 0.4_85_06] )
        assert np.abs(image_slice - expected_slice ).max() < 7E-3
| 293 | 1 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
'''facebook/deit-base-distilled-patch16-224''': (
'''https://huggingface.co/facebook/deit-base-patch16-224/resolve/main/config.json'''
),
# See all DeiT models at https://huggingface.co/models?filter=deit
}
class __snake_case ( _lowercase):
snake_case__ : int = "deit"
def __init__( self : List[str] , __lowerCAmelCase : int=7_6_8 , __lowerCAmelCase : Tuple=1_2 , __lowerCAmelCase : str=1_2 , __lowerCAmelCase : Dict=3_0_7_2 , __lowerCAmelCase : List[str]="gelu" , __lowerCAmelCase : Union[str, Any]=0.0 , __lowerCAmelCase : Dict=0.0 , __lowerCAmelCase : str=0.02 , __lowerCAmelCase : List[str]=1E-12 , __lowerCAmelCase : Dict=2_2_4 , __lowerCAmelCase : str=1_6 , __lowerCAmelCase : Union[str, Any]=3 , __lowerCAmelCase : str=True , __lowerCAmelCase : int=1_6 , **__lowerCAmelCase : int , ):
"""simple docstring"""
super().__init__(**__lowerCAmelCase )
_lowerCamelCase : int = hidden_size
_lowerCamelCase : Optional[Any] = num_hidden_layers
_lowerCamelCase : Tuple = num_attention_heads
_lowerCamelCase : Tuple = intermediate_size
_lowerCamelCase : Tuple = hidden_act
_lowerCamelCase : Any = hidden_dropout_prob
_lowerCamelCase : Dict = attention_probs_dropout_prob
_lowerCamelCase : Union[str, Any] = initializer_range
_lowerCamelCase : str = layer_norm_eps
_lowerCamelCase : Union[str, Any] = image_size
_lowerCamelCase : Optional[int] = patch_size
_lowerCamelCase : List[str] = num_channels
_lowerCamelCase : int = qkv_bias
_lowerCamelCase : Optional[Any] = encoder_stride
class __snake_case ( _lowercase):
snake_case__ : Tuple = version.parse("1.11")
@property
def SCREAMING_SNAKE_CASE ( self : str ):
"""simple docstring"""
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
] )
@property
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
"""simple docstring"""
return 1E-4
| 360 |
"""simple docstring"""
from __future__ import annotations
from itertools import permutations
from random import randint
from timeit import repeat
def snake_case_ ( ):
'''simple docstring'''
_lowerCamelCase : Optional[Any] = [randint(-10_00, 10_00 ) for i in range(10 )]
_lowerCamelCase : Tuple = randint(-50_00, 50_00 )
return (arr, r)
lowerCAmelCase__ = make_dataset()
def snake_case_ ( A_ : list[int], A_ : int ):
'''simple docstring'''
for triplet in permutations(A_, 3 ):
if sum(A_ ) == target:
return tuple(sorted(A_ ) )
return (0, 0, 0)
def snake_case_ ( A_ : list[int], A_ : int ):
'''simple docstring'''
arr.sort()
_lowerCamelCase : Optional[Any] = len(A_ )
for i in range(n - 1 ):
_lowerCamelCase , _lowerCamelCase : int = i + 1, n - 1
while left < right:
if arr[i] + arr[left] + arr[right] == target:
return (arr[i], arr[left], arr[right])
elif arr[i] + arr[left] + arr[right] < target:
left += 1
elif arr[i] + arr[left] + arr[right] > target:
right -= 1
return (0, 0, 0)
def snake_case_ ( ):
'''simple docstring'''
_lowerCamelCase : Tuple = '''
from __main__ import dataset, triplet_sum1, triplet_sum2
'''
_lowerCamelCase : List[Any] = '''
triplet_sum1(*dataset)
'''
_lowerCamelCase : str = '''
triplet_sum2(*dataset)
'''
_lowerCamelCase : Optional[int] = repeat(setup=A_, stmt=A_, repeat=5, number=1_00_00 )
_lowerCamelCase : Union[str, Any] = repeat(setup=A_, stmt=A_, repeat=5, number=1_00_00 )
return (min(A_ ), min(A_ ))
if __name__ == "__main__":
from doctest import testmod
testmod()
lowerCAmelCase__ = solution_times()
print(F"""The time for naive implementation is {times[0]}.""")
print(F"""The time for optimized implementation is {times[1]}.""")
| 175 | 0 |
from __future__ import annotations
import numpy as np
from numpy import floataa
from numpy.typing import NDArray
def lowerCAmelCase_ ( __A, __A, __A, __A, ) -> list[float]:
'''simple docstring'''
UpperCAmelCase__ = coefficient_matrix.shape
UpperCAmelCase__ = constant_matrix.shape
if rowsa != colsa:
UpperCAmelCase__ = f"""Coefficient matrix dimensions must be nxn but received {rowsa}x{colsa}"""
raise ValueError(__A )
if colsa != 1:
UpperCAmelCase__ = f"""Constant matrix must be nx1 but received {rowsa}x{colsa}"""
raise ValueError(__A )
if rowsa != rowsa:
UpperCAmelCase__ = (
'Coefficient and constant matrices dimensions must be nxn and nx1 but '
f"""received {rowsa}x{colsa} and {rowsa}x{colsa}"""
)
raise ValueError(__A )
if len(__A ) != rowsa:
UpperCAmelCase__ = (
'Number of initial values must be equal to number of rows in coefficient '
f"""matrix but received {len(__A )} and {rowsa}"""
)
raise ValueError(__A )
if iterations <= 0:
raise ValueError("Iterations must be at least 1" )
UpperCAmelCase__ = np.concatenate(
(coefficient_matrix, constant_matrix), axis=1 )
UpperCAmelCase__ = table.shape
strictly_diagonally_dominant(__A )
# Iterates the whole matrix for given number of times
for _ in range(__A ):
UpperCAmelCase__ = []
for row in range(__A ):
UpperCAmelCase__ = 0
for col in range(__A ):
if col == row:
UpperCAmelCase__ = table[row][col]
elif col == cols - 1:
UpperCAmelCase__ = table[row][col]
else:
temp += (-1) * table[row][col] * init_val[col]
UpperCAmelCase__ = (temp + val) / denom
new_val.append(__A )
UpperCAmelCase__ = new_val
return [float(__A ) for i in new_val]
def lowerCAmelCase_ ( __A ) -> bool:
'''simple docstring'''
UpperCAmelCase__ = table.shape
UpperCAmelCase__ = True
for i in range(0, __A ):
UpperCAmelCase__ = 0
for j in range(0, cols - 1 ):
if i == j:
continue
else:
total += table[i][j]
if table[i][i] <= total:
raise ValueError("Coefficient matrix is not strictly diagonally dominant" )
return is_diagonally_dominant
# Test Cases
if __name__ == "__main__":
import doctest
doctest.testmod()
| 65 |
import hashlib
import unittest
from typing import Dict
import numpy as np
from transformers import (
MODEL_FOR_MASK_GENERATION_MAPPING,
TF_MODEL_FOR_MASK_GENERATION_MAPPING,
is_vision_available,
pipeline,
)
from transformers.pipelines import MaskGenerationPipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
if is_vision_available():
from PIL import Image
else:
class _A:
"""simple docstring"""
@staticmethod
def UpperCAmelCase_ ( *_A , **_A ):
pass
def _SCREAMING_SNAKE_CASE ( a ) -> str:
__A : str = hashlib.mda(image.tobytes() )
return m.hexdigest()[:10]
def _SCREAMING_SNAKE_CASE ( a ) -> Dict:
__A : Dict = np.array(a )
__A : List[Any] = npimg.shape
return {"hash": hashimage(a ), "shape": shape}
@is_pipeline_test
@require_vision
@require_torch
class _A( unittest.TestCase ):
"""simple docstring"""
UpperCamelCase : str = dict(
(list(MODEL_FOR_MASK_GENERATION_MAPPING.items() ) if MODEL_FOR_MASK_GENERATION_MAPPING else []) )
UpperCamelCase : int = dict(
(list(TF_MODEL_FOR_MASK_GENERATION_MAPPING.items() ) if TF_MODEL_FOR_MASK_GENERATION_MAPPING else []) )
def UpperCAmelCase_ ( self , _A , _A , _A ):
__A : Dict = MaskGenerationPipeline(model=_A , image_processor=_A )
return image_segmenter, [
"./tests/fixtures/tests_samples/COCO/000000039769.png",
"./tests/fixtures/tests_samples/COCO/000000039769.png",
]
def UpperCAmelCase_ ( self , _A , _A ):
pass
@require_tf
@unittest.skip('Image segmentation not implemented in TF' )
def UpperCAmelCase_ ( self ):
pass
@slow
@require_torch
def UpperCAmelCase_ ( self ):
__A : Union[str, Any] = pipeline('mask-generation' , model='facebook/sam-vit-huge' )
__A : List[str] = image_segmenter('http://images.cocodataset.org/val2017/000000039769.jpg' , points_per_batch=256 )
# Shortening by hashing
__A : List[Any] = []
for i, o in enumerate(outputs['masks'] ):
new_outupt += [{"mask": mask_to_test_readable(_A ), "scores": outputs["scores"][i]}]
# fmt: off
self.assertEqual(
nested_simplify(_A , decimals=4 ) , [
{'mask': {'hash': '115ad19f5f', 'shape': (480, 640)}, 'scores': 1.0_4_4_4},
{'mask': {'hash': '6affa964c6', 'shape': (480, 640)}, 'scores': 1.0_2_1},
{'mask': {'hash': 'dfe28a0388', 'shape': (480, 640)}, 'scores': 1.0_1_6_7},
{'mask': {'hash': 'c0a5f4a318', 'shape': (480, 640)}, 'scores': 1.0_1_3_2},
{'mask': {'hash': 'fe8065c197', 'shape': (480, 640)}, 'scores': 1.0_0_5_3},
{'mask': {'hash': 'e2d0b7a0b7', 'shape': (480, 640)}, 'scores': 0.9_9_6_7},
{'mask': {'hash': '453c7844bd', 'shape': (480, 640)}, 'scores': 0.9_9_3},
{'mask': {'hash': '3d44f2926d', 'shape': (480, 640)}, 'scores': 0.9_9_0_9},
{'mask': {'hash': '64033ddc3f', 'shape': (480, 640)}, 'scores': 0.9_8_7_9},
{'mask': {'hash': '801064ff79', 'shape': (480, 640)}, 'scores': 0.9_8_3_4},
{'mask': {'hash': '6172f276ef', 'shape': (480, 640)}, 'scores': 0.9_7_1_6},
{'mask': {'hash': 'b49e60e084', 'shape': (480, 640)}, 'scores': 0.9_6_1_2},
{'mask': {'hash': 'a811e775fd', 'shape': (480, 640)}, 'scores': 0.9_5_9_9},
{'mask': {'hash': 'a6a8ebcf4b', 'shape': (480, 640)}, 'scores': 0.9_5_5_2},
{'mask': {'hash': '9d8257e080', 'shape': (480, 640)}, 'scores': 0.9_5_3_2},
{'mask': {'hash': '32de6454a8', 'shape': (480, 640)}, 'scores': 0.9_5_1_6},
{'mask': {'hash': 'af3d4af2c8', 'shape': (480, 640)}, 'scores': 0.9_4_9_9},
{'mask': {'hash': '3c6db475fb', 'shape': (480, 640)}, 'scores': 0.9_4_8_3},
{'mask': {'hash': 'c290813fb9', 'shape': (480, 640)}, 'scores': 0.9_4_6_4},
{'mask': {'hash': 'b6f0b8f606', 'shape': (480, 640)}, 'scores': 0.9_4_3},
{'mask': {'hash': '92ce16bfdf', 'shape': (480, 640)}, 'scores': 0.9_4_3},
{'mask': {'hash': 'c749b25868', 'shape': (480, 640)}, 'scores': 0.9_4_0_8},
{'mask': {'hash': 'efb6cab859', 'shape': (480, 640)}, 'scores': 0.9_3_3_5},
{'mask': {'hash': '1ff2eafb30', 'shape': (480, 640)}, 'scores': 0.9_3_2_6},
{'mask': {'hash': '788b798e24', 'shape': (480, 640)}, 'scores': 0.9_2_6_2},
{'mask': {'hash': 'abea804f0e', 'shape': (480, 640)}, 'scores': 0.8_9_9_9},
{'mask': {'hash': '7b9e8ddb73', 'shape': (480, 640)}, 'scores': 0.8_9_8_6},
{'mask': {'hash': 'cd24047c8a', 'shape': (480, 640)}, 'scores': 0.8_9_8_4},
{'mask': {'hash': '6943e6bcbd', 'shape': (480, 640)}, 'scores': 0.8_8_7_3},
{'mask': {'hash': 'b5f47c9191', 'shape': (480, 640)}, 'scores': 0.8_8_7_1}
] , )
# fmt: on
@require_torch
@slow
def UpperCAmelCase_ ( self ):
__A : Optional[Any] = 'facebook/sam-vit-huge'
__A : List[str] = pipeline('mask-generation' , model=_A )
__A : Tuple = image_segmenter(
'http://images.cocodataset.org/val2017/000000039769.jpg' , pred_iou_thresh=1 , points_per_batch=256 )
# Shortening by hashing
__A : List[str] = []
for i, o in enumerate(outputs['masks'] ):
new_outupt += [{"mask": mask_to_test_readable(_A ), "scores": outputs["scores"][i]}]
self.assertEqual(
nested_simplify(_A , decimals=4 ) , [
{'mask': {'hash': '115ad19f5f', 'shape': (480, 640)}, 'scores': 1.0_4_4_4},
{'mask': {'hash': '6affa964c6', 'shape': (480, 640)}, 'scores': 1.0_2_1_0},
{'mask': {'hash': 'dfe28a0388', 'shape': (480, 640)}, 'scores': 1.0_1_6_7},
{'mask': {'hash': 'c0a5f4a318', 'shape': (480, 640)}, 'scores': 1.0_1_3_2},
{'mask': {'hash': 'fe8065c197', 'shape': (480, 640)}, 'scores': 1.0_0_5_3},
] , )
| 280 | 0 |
'''simple docstring'''
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import YolosImageProcessor
class A_ ( unittest.TestCase ):
'''simple docstring'''
    def __init__( self , parent , batch_size=7 , num_channels=3 , min_resolution=30 , max_resolution=400 , do_resize=True , size=None , do_normalize=True , image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] , do_rescale=True , rescale_factor=1 / 255 , do_pad=True , ):
        # by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
        size = size if size is not None else {'shortest_edge': 18, 'longest_edge': 1_333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad
    def prepare_image_processor_dict( self ):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
    def get_expected_values( self , image_inputs , batched=False ):
        if not batched:
            image = image_inputs[0]
            if isinstance(image , Image.Image ):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size['shortest_edge'] * h / w )
                expected_width = self.size['shortest_edge']
            elif w > h:
                expected_height = self.size['shortest_edge']
                expected_width = int(self.size['shortest_edge'] * w / h )
            else:
                expected_height = self.size['shortest_edge']
                expected_width = self.size['shortest_edge']
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image] )
                expected_values.append((expected_height, expected_width) )
            expected_height = max(expected_values , key=lambda item : item[0] )[0]
            expected_width = max(expected_values , key=lambda item : item[1] )[1]
        return expected_height, expected_width
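    # Illustration (added, not from the original test): with shortest_edge=18, a PIL
    # image of width 400 and height 30 resizes so the short side becomes 18, giving
    # expected (height, width) = (18, int(18 * 400 / 30)) = (18, 240).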
@require_torch
@require_vision
class YolosImageProcessingTest( ImageProcessingSavingTestMixin , unittest.TestCase ):
    '''simple docstring'''
    image_processing_class = YolosImageProcessor if is_vision_available() else None
    def setUp( self ):
        self.image_processor_tester = YolosImageProcessingTester(self )
    @property
    def image_processor_dict( self ):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties( self ):
        image_processing = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(image_processing , 'image_mean' ) )
        self.assertTrue(hasattr(image_processing , 'image_std' ) )
        self.assertTrue(hasattr(image_processing , 'do_normalize' ) )
        self.assertTrue(hasattr(image_processing , 'do_resize' ) )
        self.assertTrue(hasattr(image_processing , 'size' ) )
    def test_image_processor_from_dict_with_kwargs( self ):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict )
        self.assertEqual(image_processor.size , {'shortest_edge': 18, 'longest_edge': 1_333} )
        self.assertEqual(image_processor.do_pad , True )
        image_processor = self.image_processing_class.from_dict(
            self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=False )
        self.assertEqual(image_processor.size , {'shortest_edge': 42, 'longest_edge': 84} )
        self.assertEqual(image_processor.do_pad , False )
    def test_batch_feature( self ):
        pass
    def test_call_pil( self ):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False )
        for image in image_inputs:
            self.assertIsInstance(image , Image.Image )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs )
        self.assertEqual(
            encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
        # Test batched
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs , batched=True )
        encoded_images = image_processing(image_inputs , return_tensors='pt' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ) , )
    def test_call_numpy( self ):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False , numpify=True )
        for image in image_inputs:
            self.assertIsInstance(image , np.ndarray )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs )
        self.assertEqual(
            encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
        # Test batched
        encoded_images = image_processing(image_inputs , return_tensors='pt' ).pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs , batched=True )
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ) , )
    def test_call_pytorch( self ):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False , torchify=True )
        for image in image_inputs:
            self.assertIsInstance(image , torch.Tensor )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs )
        self.assertEqual(
            encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
        # Test batched
        encoded_images = image_processing(image_inputs , return_tensors='pt' ).pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs , batched=True )
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ) , )
    def test_equivalence_padding( self ):
        # Initialize image_processings
        image_processing_a = self.image_processing_class(**self.image_processor_dict )
        image_processing_b = self.image_processing_class(do_resize=False , do_normalize=False , do_rescale=False )
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False , torchify=True )
        for image in image_inputs:
            self.assertIsInstance(image , torch.Tensor )
        # Test whether the method "pad" and calling the image processor return the same tensors
        encoded_images_with_method = image_processing_a.pad(image_inputs , return_tensors='pt' )
        encoded_images = image_processing_b(image_inputs , return_tensors='pt' )
        self.assertTrue(
            torch.allclose(encoded_images_with_method['pixel_values'] , encoded_images['pixel_values'] , atol=1E-4 ) )
@slow
    def test_call_pytorch_with_coco_detection_annotations( self ):
        # prepare image and target
        image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
        with open('./tests/fixtures/tests_samples/COCO/coco_annotations.txt' , 'r' ) as f:
            target = json.loads(f.read() )
        target = {'image_id': 39_769, 'annotations': target}
        # encode them
        image_processing = YolosImageProcessor.from_pretrained('hustvl/yolos-small' )
        encoding = image_processing(images=image , annotations=target , return_tensors='pt' )
        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1_066] )
        self.assertEqual(encoding['pixel_values'].shape , expected_shape )
        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481] )
        self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] , expected_slice , atol=1E-4 ) )
        # verify area
        expected_area = torch.tensor([5887.9600, 1_1250.2061, 48_9353.8438, 83_7122.7500, 14_7967.5156, 16_5732.3438] )
        self.assertTrue(torch.allclose(encoding['labels'][0]['area'] , expected_area ) )
        # verify boxes
        expected_shape = torch.Size([6, 4] )
        self.assertEqual(encoding['labels'][0]['boxes'].shape , expected_shape )
        expected_slice = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215] )
        self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] , expected_slice , atol=1E-3 ) )
        # verify image_id
        expected_image_id = torch.tensor([39_769] )
        self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] , expected_image_id ) )
        # verify is_crowd
        expected_iscrowd = torch.tensor([0, 0, 0, 0, 0, 0] )
        self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] , expected_iscrowd ) )
        # verify class_labels
        expected_class_labels = torch.tensor([75, 75, 63, 65, 17, 17] )
        self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] , expected_class_labels ) )
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640] )
        self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] , expected_orig_size ) )
        # verify size
        expected_size = torch.tensor([800, 1_066] )
        self.assertTrue(torch.allclose(encoding['labels'][0]['size'] , expected_size ) )
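        # Note (added): the 800x1066 shape follows DETR-style shortest-edge resizing:
        # the 480px side is scaled to 800, so 640 becomes int(640 * 800 / 480) = 1066.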
@slow
    def test_call_pytorch_with_coco_panoptic_annotations( self ):
        # prepare image, target and masks_path
        image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
        with open('./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt' , 'r' ) as f:
            target = json.loads(f.read() )
        target = {'file_name': '000000039769.png', 'image_id': 39_769, 'segments_info': target}
        masks_path = pathlib.Path('./tests/fixtures/tests_samples/COCO/coco_panoptic' )
        # encode them
        image_processing = YolosImageProcessor(format='coco_panoptic' )
        encoding = image_processing(images=image , annotations=target , masks_path=masks_path , return_tensors='pt' )
        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1_066] )
        self.assertEqual(encoding['pixel_values'].shape , expected_shape )
        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481] )
        self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] , expected_slice , atol=1E-4 ) )
        # verify area
        expected_area = torch.tensor([14_7979.6875, 16_5527.0469, 48_4638.5938, 1_1292.9375, 5879.6562, 7634.1147] )
        self.assertTrue(torch.allclose(encoding['labels'][0]['area'] , expected_area ) )
        # verify boxes
        expected_shape = torch.Size([6, 4] )
        self.assertEqual(encoding['labels'][0]['boxes'].shape , expected_shape )
        expected_slice = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625] )
        self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] , expected_slice , atol=1E-3 ) )
        # verify image_id
        expected_image_id = torch.tensor([39_769] )
        self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] , expected_image_id ) )
        # verify is_crowd
        expected_iscrowd = torch.tensor([0, 0, 0, 0, 0, 0] )
        self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] , expected_iscrowd ) )
        # verify class_labels
        expected_class_labels = torch.tensor([17, 17, 63, 75, 75, 93] )
        self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] , expected_class_labels ) )
        # verify masks
        expected_masks_sum = 822_873
        self.assertEqual(encoding['labels'][0]['masks'].sum().item() , expected_masks_sum )
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640] )
        self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] , expected_orig_size ) )
        # verify size
        expected_size = torch.tensor([800, 1_066] )
        self.assertTrue(torch.allclose(encoding['labels'][0]['size'] , expected_size ) )
| 280 |
'''simple docstring'''
import argparse
import numpy as np
import torch
from transformers import SpeechTaHifiGan, SpeechTaHifiGanConfig, logging
logging.set_verbosity_info()
lowercase__ = logging.get_logger("transformers.models.speecht5")
def load_weights( checkpoint , hf_model , config ):
    hf_model.apply_weight_norm()
    hf_model.conv_pre.weight_g.data = checkpoint['input_conv.weight_g']
    hf_model.conv_pre.weight_v.data = checkpoint['input_conv.weight_v']
    hf_model.conv_pre.bias.data = checkpoint['input_conv.bias']
    for i in range(len(config.upsample_rates ) ):
        hf_model.upsampler[i].weight_g.data = checkpoint[F"""upsamples.{i}.1.weight_g"""]
        hf_model.upsampler[i].weight_v.data = checkpoint[F"""upsamples.{i}.1.weight_v"""]
        hf_model.upsampler[i].bias.data = checkpoint[F"""upsamples.{i}.1.bias"""]
    for i in range(len(config.upsample_rates ) * len(config.resblock_kernel_sizes ) ):
        for j in range(len(config.resblock_dilation_sizes ) ):
            hf_model.resblocks[i].convs1[j].weight_g.data = checkpoint[F"""blocks.{i}.convs1.{j}.1.weight_g"""]
            hf_model.resblocks[i].convs1[j].weight_v.data = checkpoint[F"""blocks.{i}.convs1.{j}.1.weight_v"""]
            hf_model.resblocks[i].convs1[j].bias.data = checkpoint[F"""blocks.{i}.convs1.{j}.1.bias"""]
            hf_model.resblocks[i].convs2[j].weight_g.data = checkpoint[F"""blocks.{i}.convs2.{j}.1.weight_g"""]
            hf_model.resblocks[i].convs2[j].weight_v.data = checkpoint[F"""blocks.{i}.convs2.{j}.1.weight_v"""]
            hf_model.resblocks[i].convs2[j].bias.data = checkpoint[F"""blocks.{i}.convs2.{j}.1.bias"""]
    hf_model.conv_post.weight_g.data = checkpoint['output_conv.1.weight_g']
    hf_model.conv_post.weight_v.data = checkpoint['output_conv.1.weight_v']
    hf_model.conv_post.bias.data = checkpoint['output_conv.1.bias']
    hf_model.remove_weight_norm()
@torch.no_grad()
def convert_hifigan_checkpoint( checkpoint_path , stats_path , pytorch_dump_folder_path , config_path=None , repo_id=None , ):
    if config_path is not None:
        config = SpeechTaHifiGanConfig.from_pretrained(config_path )
    else:
        config = SpeechTaHifiGanConfig()
    model = SpeechTaHifiGan(config )
    orig_checkpoint = torch.load(checkpoint_path )
    load_weights(orig_checkpoint['model']['generator'] , model , config )
    stats = np.load(stats_path )
    mean = stats[0].reshape(-1 )
    scale = stats[1].reshape(-1 )
    model.mean = torch.from_numpy(mean ).float()
    model.scale = torch.from_numpy(scale ).float()
    model.save_pretrained(pytorch_dump_folder_path )
    if repo_id:
        print('Pushing to the hub...' )
        model.push_to_hub(repo_id )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("--checkpoint_path", required=True, default=None, type=str, help="Path to original checkpoint")
parser.add_argument("--stats_path", required=True, default=None, type=str, help="Path to stats.npy file")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--pytorch_dump_folder_path", required=True, default=None, type=str, help="Path to the output PyTorch model."
)
parser.add_argument(
"--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
)
    args = parser.parse_args()
convert_hifigan_checkpoint(
args.checkpoint_path,
args.stats_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
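    # Example invocation (illustrative; the script file name and paths are placeholders):
    # python convert_hifigan.py --checkpoint_path generator.ckpt --stats_path stats.npy \
    #     --pytorch_dump_folder_path ./speecht5_hifigan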
| 280 | 1 |
import warnings
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__snake_case :Dict = logging.get_logger(__name__)
__snake_case :Tuple = {
'''RUCAIBox/mvp''': '''https://huggingface.co/RUCAIBox/mvp/resolve/main/config.json''',
}
class MvpConfig( PretrainedConfig ):
    model_type = '''mvp'''
    keys_to_ignore_at_inference = ['''past_key_values''']
    attribute_map = {'''num_attention_heads''': '''encoder_attention_heads''', '''hidden_size''': '''d_model'''}
    def __init__( self , vocab_size=50_267 , max_position_embeddings=1_024 , encoder_layers=12 , encoder_ffn_dim=4_096 , encoder_attention_heads=16 , decoder_layers=12 , decoder_ffn_dim=4_096 , decoder_attention_heads=16 , encoder_layerdrop=0.0 , decoder_layerdrop=0.0 , activation_function="gelu" , d_model=1_024 , dropout=0.1 , attention_dropout=0.0 , activation_dropout=0.0 , init_std=0.02 , classifier_dropout=0.0 , scale_embedding=False , use_cache=True , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , is_encoder_decoder=True , decoder_start_token_id=2 , forced_eos_token_id=2 , use_prompt=False , prompt_length=100 , prompt_mid_dim=800 , **kwargs , ):
        '''simple docstring'''
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.classifier_dropout = classifier_dropout
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.use_prompt = use_prompt
        self.prompt_length = prompt_length
        self.prompt_mid_dim = prompt_mid_dim
        super().__init__(
            pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , is_encoder_decoder=is_encoder_decoder , decoder_start_token_id=decoder_start_token_id , forced_eos_token_id=forced_eos_token_id , **kwargs , )
        if self.forced_bos_token_id is None and kwargs.get('''force_bos_token_to_be_generated''' , False):
            self.forced_bos_token_id = self.bos_token_id
            warnings.warn(
                F'Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. '
                '''The config can simply be saved and uploaded again to be fixed.''')
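    # Minimal usage sketch (added; follows the usual PretrainedConfig workflow):
    # config = MvpConfig(encoder_layers=6, decoder_layers=6)
    # config.save_pretrained("./mvp-small")  # writes config.json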
| 49 |
import re
from filelock import FileLock
try:
import nltk
    NLTK_AVAILABLE = True
except (ImportError, ModuleNotFoundError):
    NLTK_AVAILABLE = False
if NLTK_AVAILABLE:
with FileLock('.lock') as lock:
nltk.download('punkt', quiet=True)
def a_ ( lowerCAmelCase_ : str ):
    lowerCAmelCase_ = re.sub('<n>', '', lowerCAmelCase_ )  # remove pegasus newline char
assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
return "\n".join(nltk.sent_tokenize(lowerCAmelCase_ ) )
| 284 | 0 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
A__ : str = logging.get_logger(__name__)
A__ : Tuple = {
'microsoft/resnet-50': 'https://huggingface.co/microsoft/resnet-50/blob/main/config.json',
}
class ResNetConfig( BackboneConfigMixin , PretrainedConfig ):
    model_type = "resnet"
    layer_types = ["basic", "bottleneck"]
    def __init__( self , num_channels=3 , embedding_size=64 , hidden_sizes=[256, 512, 1024, 2048] , depths=[3, 4, 6, 3] , layer_type="bottleneck" , hidden_act="relu" , downsample_in_first_stage=False , out_features=None , out_indices=None , **kwargs , ):
        super().__init__(**kwargs )
        if layer_type not in self.layer_types:
            raise ValueError(F"""layer_type={layer_type} is not one of {','.join(self.layer_types )}""" )
        self.num_channels = num_channels
        self.embedding_size = embedding_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.layer_type = layer_type
        self.hidden_act = hidden_act
        self.downsample_in_first_stage = downsample_in_first_stage
        self.stage_names = ["stem"] + [F"""stage{idx}""" for idx in range(1 , len(depths ) + 1 )]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features , out_indices=out_indices , stage_names=self.stage_names )
class ResNetOnnxConfig( OnnxConfig ):
    torch_onnx_minimum_version = version.parse("1.11" )
    @property
    def inputs( self ):
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ] )
    @property
    def atol_for_validation( self ):
        return 1E-3
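    # Sketch (added): during ONNX export validation, transformers compares PyTorch and
    # ONNX Runtime outputs against this tolerance, roughly:
    # assert np.allclose(torch_out, onnx_out, atol=onnx_config.atol_for_validation)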
| 209 |
"""simple docstring"""
import copy
import json
import os
import tempfile
from transformers import is_torch_available
from .test_configuration_utils import config_common_kwargs
class ConfigTester:
    def __init__( self , parent , config_class=None , has_text_modality=True , common_properties=None , **kwargs ):
        self.parent = parent
        self.config_class = config_class
        self.has_text_modality = has_text_modality
        self.inputs_dict = kwargs
        self.common_properties = common_properties
    def create_and_test_config_common_properties( self ):
        config = self.config_class(**self.inputs_dict )
        common_properties = (
            ["hidden_size", "num_attention_heads", "num_hidden_layers"]
            if self.common_properties is None
            else self.common_properties
        )
        # Add common fields for text models
        if self.has_text_modality:
            common_properties.extend(["vocab_size"] )
        # Test that config has the common properties as getters
        for prop in common_properties:
            self.parent.assertTrue(hasattr(config , prop ) , msg=F"""`{prop}` does not exist""" )
        # Test that config has the common properties as setter
        for idx, name in enumerate(common_properties ):
            try:
                setattr(config , name , idx )
                self.parent.assertEqual(
                    getattr(config , name ) , idx , msg=F"""`{name} value {idx} expected, but was {getattr(config , name )}""" )
            except NotImplementedError:
                # Some models might not be able to implement setters for common_properties
                # In that case, a NotImplementedError is raised
                pass
        # Test if config class can be called with Config(prop_name=..)
        for idx, name in enumerate(common_properties ):
            try:
                config = self.config_class(**{name: idx} )
                self.parent.assertEqual(
                    getattr(config , name ) , idx , msg=F"""`{name} value {idx} expected, but was {getattr(config , name )}""" )
            except NotImplementedError:
                # Some models might not be able to implement setters for common_properties
                # In that case, a NotImplementedError is raised
                pass
    def create_and_test_config_to_json_string( self ):
        config = self.config_class(**self.inputs_dict )
        obj = json.loads(config.to_json_string() )
        for key, value in self.inputs_dict.items():
            self.parent.assertEqual(obj[key] , value )
    def create_and_test_config_to_json_file( self ):
        config_first = self.config_class(**self.inputs_dict )
        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname , "config.json" )
            config_first.to_json_file(json_file_path )
            config_second = self.config_class.from_json_file(json_file_path )
        self.parent.assertEqual(config_second.to_dict() , config_first.to_dict() )
    def create_and_test_config_from_and_save_pretrained( self ):
        config_first = self.config_class(**self.inputs_dict )
        with tempfile.TemporaryDirectory() as tmpdirname:
            config_first.save_pretrained(tmpdirname )
            config_second = self.config_class.from_pretrained(tmpdirname )
        self.parent.assertEqual(config_second.to_dict() , config_first.to_dict() )
    def create_and_test_config_from_and_save_pretrained_subfolder( self ):
        config_first = self.config_class(**self.inputs_dict )
        subfolder = "test"
        with tempfile.TemporaryDirectory() as tmpdirname:
            sub_dir = os.path.join(tmpdirname , subfolder )
            config_first.save_pretrained(sub_dir )
            config_second = self.config_class.from_pretrained(tmpdirname , subfolder=subfolder )
        self.parent.assertEqual(config_second.to_dict() , config_first.to_dict() )
    def create_and_test_config_with_num_labels( self ):
        config = self.config_class(**self.inputs_dict , num_labels=5 )
        self.parent.assertEqual(len(config.id2label ) , 5 )
        self.parent.assertEqual(len(config.label2id ) , 5 )
        config.num_labels = 3
        self.parent.assertEqual(len(config.id2label ) , 3 )
        self.parent.assertEqual(len(config.label2id ) , 3 )
    def check_config_can_be_init_without_params( self ):
        if self.config_class.is_composition:
            return
        config = self.config_class()
        self.parent.assertIsNotNone(config )
    def check_config_arguments_init( self ):
        kwargs = copy.deepcopy(config_common_kwargs )
        config = self.config_class(**kwargs )
        wrong_values = []
        for key, value in config_common_kwargs.items():
            if key == "torch_dtype":
                if not is_torch_available():
                    continue
                else:
                    import torch
                    if config.torch_dtype != torch.float16:
                        wrong_values.append(("torch_dtype", config.torch_dtype, torch.float16) )
            elif getattr(config , key ) != value:
                wrong_values.append((key, getattr(config , key ), value) )
        if len(wrong_values ) > 0:
            errors = "\n".join([F"""- {v[0]}: got {v[1]} instead of {v[2]}""" for v in wrong_values] )
            raise ValueError(F"""The following keys were not properly set in the config:\n{errors}""" )
    def run_common_tests( self ):
self.create_and_test_config_common_properties()
self.create_and_test_config_to_json_string()
self.create_and_test_config_to_json_file()
self.create_and_test_config_from_and_save_pretrained()
self.create_and_test_config_from_and_save_pretrained_subfolder()
self.create_and_test_config_with_num_labels()
self.check_config_can_be_init_without_params()
self.check_config_arguments_init()
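    # Typical usage sketch (added; mirrors how model test suites drive this class;
    # BertConfig here is only an example and is not imported in this file):
    # ConfigTester(self, config_class=BertConfig, hidden_size=37).run_common_tests()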
| 209 | 1 |
import qiskit
def single_qubit_measure( qubits : int , classical_bits : int ):
    '''simple docstring'''
    simulator = qiskit.Aer.get_backend('aer_simulator' )
    # Create a Quantum Circuit acting on the q register
    circuit = qiskit.QuantumCircuit(qubits , classical_bits )
    # Apply X (NOT) Gate to Qubits 0 & 1
    circuit.x(0 )
    circuit.x(1 )
    # Map the quantum measurement to the classical bits
    circuit.measure([0, 1] , [0, 1] )
    # Execute the circuit on the qasm simulator
    job = qiskit.execute(circuit , simulator , shots=10_00 )
    # Return the histogram data of the results of the experiment.
    return job.result().get_counts(circuit )
if __name__ == "__main__":
    counts = single_qubit_measure(2, 2)
print(F"""Total count for various states are: {counts}""")
| 204 |
from collections.abc import Callable
import numpy as np
def _SCREAMING_SNAKE_CASE ( ode_func : Callable , ya : float , xa : float , step_size : float , x_end : float ):
    '''simple docstring'''
    n = int(np.ceil((x_end - xa) / step_size ) )
    y = np.zeros((n + 1,) )
    y[0] = ya
    x = xa
    for k in range(n ):
        # predictor step (explicit Euler)
        y[k + 1] = y[k] + step_size * ode_func(x , y[k] )
        # corrector step (trapezoidal average of the slopes at both ends)
        y[k + 1] = y[k] + (
            (step_size / 2) * (ode_func(x , y[k] ) + ode_func(x + step_size , y[k + 1] ))
        )
        x += step_size
    return y
if __name__ == "__main__":
import doctest
doctest.testmod()
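    # Worked example (added): integrate dy/dx = y with y(0) = 1 on [0, 1], step 0.1.
    # Heun's method gives y(1) ~= 2.714, close to the exact value e ~= 2.71828:
    # y = _SCREAMING_SNAKE_CASE(lambda x, y: y, 1.0, 0.0, 0.1, 1.0)
    # print(y[-1])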
| 204 | 1 |
from collections import namedtuple
lowercase_ = namedtuple("""from_to""", """from_ to""")
lowercase_ = {
"""cubicmeter""": from_to(1, 1),
"""litre""": from_to(0.001, 1000),
"""kilolitre""": from_to(1, 1),
"""gallon""": from_to(0.00454, 264.172),
"""cubicyard""": from_to(0.76455, 1.30795),
"""cubicfoot""": from_to(0.028, 35.3147),
"""cup""": from_to(0.000236588, 4226.75),
}
def __lowerCAmelCase ( value , from_type , to_type ):
    if from_type not in METRIC_CONVERSION:
        raise ValueError(
            f'''Invalid \'from_type\' value: {from_type!r} Supported values are:\n'''
            + ", ".join(METRIC_CONVERSION ) )
    if to_type not in METRIC_CONVERSION:
        raise ValueError(
            f'''Invalid \'to_type\' value: {to_type!r}. Supported values are:\n'''
            + ", ".join(METRIC_CONVERSION ) )
    return value * METRIC_CONVERSION[from_type].from_ * METRIC_CONVERSION[to_type].to
if __name__ == "__main__":
import doctest
doctest.testmod()
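    # Example (added): converting 4 cubic metres to litres multiplies by the 'from'
    # factor (1) and the 'to' factor (1000):
    # __lowerCAmelCase(4, "cubicmeter", "litre") -> 4000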
| 224 |
def exchange_sort( numbers ):
    numbers_length = len(numbers )
    for i in range(numbers_length ):
        for j in range(i + 1 , numbers_length ):
            if numbers[j] < numbers[i]:
                numbers[i], numbers[j] = numbers[j], numbers[i]
    return numbers
if __name__ == "__main__":
    user_input = input("""Enter numbers separated by a comma:\n""").strip()
    unsorted = [int(item) for item in user_input.split(""",""")]
print(exchange_sort(unsorted))
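    # Note (added): exchange sort compares every pair (i, j) with j > i and swaps when
    # out of order, so it always performs n*(n-1)/2 comparisons -- O(n^2) time, in place.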
| 224 | 1 |
import timeit
import numpy as np
import datasets
from datasets.arrow_writer import ArrowWriter
from datasets.features.features import _ArrayXD
def SCREAMING_SNAKE_CASE_ ( func ):
    """simple docstring"""
    def wrapper(*args , **kwargs ):
        starttime = timeit.default_timer()
        _ = func(*args , **kwargs )
        delta = timeit.default_timer() - starttime
        return delta
    wrapper.__name__ = func.__name__
    return wrapper
def generate_examples( features : dict , num_examples=1_00 , seq_shapes=None ):
    """simple docstring"""
    dummy_data = []
    seq_shapes = seq_shapes or {}
    for i in range(num_examples ):
        example = {}
        for col_id, (k, v) in enumerate(features.items() ):
            if isinstance(v , _ArrayXD ):
                data = np.random.rand(*v.shape ).astype(v.dtype )
            elif isinstance(v , datasets.Value ):
                if v.dtype == "string":
                    data = 'The small grey turtle was surprisingly fast when challenged.'
                else:
                    data = np.random.randint(10 , size=1 ).astype(v.dtype ).item()
            elif isinstance(v , datasets.Sequence ):
                while isinstance(v , datasets.Sequence ):
                    v = v.feature
                shape = seq_shapes[k]
                data = np.random.rand(*shape ).astype(v.dtype )
            example[k] = data
        dummy_data.append((i, example) )
    return dummy_data
def generate_example_dataset( dataset_path , features , num_examples=1_00 , seq_shapes=None ):
    """simple docstring"""
    dummy_data = generate_examples(features , num_examples=num_examples , seq_shapes=seq_shapes )
    with ArrowWriter(features=features , path=dataset_path ) as writer:
        for key, record in dummy_data:
            example = features.encode_example(record )
            writer.write(example )
        num_final_examples, num_bytes = writer.finalize()
    if not num_final_examples == num_examples:
        raise ValueError(
            F"""Error writing the dataset, wrote {num_final_examples} examples but should have written {num_examples}.""" )
    dataset = datasets.Dataset.from_file(filename=dataset_path , info=datasets.DatasetInfo(features=features ) )
    return dataset
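# Usage sketch (added; the path and feature spec are illustrative):
# features = datasets.Features({"text": datasets.Value("string"), "label": datasets.Value("int32")})
# dataset = generate_example_dataset("/tmp/bench.arrow", features, num_examples=10)
# assert len(dataset) == 10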
| 32 |
import argparse
import torch
from transformers import GPTaLMHeadModel, RobertaForMaskedLM
if __name__ == "__main__":
    parser = argparse.ArgumentParser(
description=(
'Extraction some layers of the full RobertaForMaskedLM or GPT2LMHeadModel for Transfer Learned'
' Distillation'
)
)
parser.add_argument('--model_type', default='roberta', choices=['roberta', 'gpt2'])
parser.add_argument('--model_name', default='roberta-large', type=str)
parser.add_argument('--dump_checkpoint', default='serialization_dir/tf_roberta_048131723.pth', type=str)
parser.add_argument('--vocab_transform', action='store_true')
    args = parser.parse_args()
    if args.model_type == "roberta":
        model = RobertaForMaskedLM.from_pretrained(args.model_name)
        prefix = 'roberta'
    elif args.model_type == "gpt2":
        model = GPTaLMHeadModel.from_pretrained(args.model_name)
        prefix = 'transformer'
    state_dict = model.state_dict()
    compressed_sd = {}
    # Embeddings #
    if args.model_type == "gpt2":
        for param_name in ["wte.weight", "wpe.weight"]:
            compressed_sd[F"""{prefix}.{param_name}"""] = state_dict[F"""{prefix}.{param_name}"""]
    else:
        for w in ["word_embeddings", "position_embeddings", "token_type_embeddings"]:
            param_name = F"""{prefix}.embeddings.{w}.weight"""
            compressed_sd[param_name] = state_dict[param_name]
        for w in ["weight", "bias"]:
            param_name = F"""{prefix}.embeddings.LayerNorm.{w}"""
            compressed_sd[param_name] = state_dict[param_name]
    # Transformer Blocks #
    std_idx = 0
    for teacher_idx in [0, 2, 4, 7, 9, 11]:
        if args.model_type == "gpt2":
            for layer in ["ln_1", "attn.c_attn", "attn.c_proj", "ln_2", "mlp.c_fc", "mlp.c_proj"]:
                for w in ["weight", "bias"]:
                    compressed_sd[F"""{prefix}.h.{std_idx}.{layer}.{w}"""] = state_dict[
                        F"""{prefix}.h.{teacher_idx}.{layer}.{w}"""
                    ]
            compressed_sd[F"""{prefix}.h.{std_idx}.attn.bias"""] = state_dict[F"""{prefix}.h.{teacher_idx}.attn.bias"""]
        else:
            for layer in [
                "attention.self.query",
                "attention.self.key",
                "attention.self.value",
                "attention.output.dense",
                "attention.output.LayerNorm",
                "intermediate.dense",
                "output.dense",
                "output.LayerNorm",
            ]:
                for w in ["weight", "bias"]:
                    compressed_sd[F"""{prefix}.encoder.layer.{std_idx}.{layer}.{w}"""] = state_dict[
                        F"""{prefix}.encoder.layer.{teacher_idx}.{layer}.{w}"""
                    ]
        std_idx += 1
    # Language Modeling Head ###s
    if args.model_type == "roberta":
        for layer in ["lm_head.decoder.weight", "lm_head.bias"]:
            compressed_sd[F"""{layer}"""] = state_dict[F"""{layer}"""]
        if args.vocab_transform:
            for w in ["weight", "bias"]:
                compressed_sd[F"""lm_head.dense.{w}"""] = state_dict[F"""lm_head.dense.{w}"""]
                compressed_sd[F"""lm_head.layer_norm.{w}"""] = state_dict[F"""lm_head.layer_norm.{w}"""]
    elif args.model_type == "gpt2":
        for w in ["weight", "bias"]:
            compressed_sd[F"""{prefix}.ln_f.{w}"""] = state_dict[F"""{prefix}.ln_f.{w}"""]
        compressed_sd["lm_head.weight"] = state_dict['lm_head.weight']
    print(F"""N layers selected for distillation: {std_idx}""")
    print(F"""Number of params transferred for distillation: {len(compressed_sd.keys())}""")
    print(F"""Save transferred checkpoint to {args.dump_checkpoint}.""")
    torch.save(compressed_sd, args.dump_checkpoint)
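    # Example invocation (added; the script file name is a placeholder):
    # python extract_for_distillation.py --model_type roberta --model_name roberta-large \
    #     --dump_checkpoint serialization_dir/tf_roberta_048131723.pth --vocab_transform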
| 175 | 0 |
import gc
import math
import unittest
import torch
from diffusers import UNetaDModel
from diffusers.utils import floats_tensor, logging, slow, torch_all_close, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
UpperCAmelCase : Optional[Any] = logging.get_logger(__name__)
enable_full_determinism()
class Unet2DModelTests( ModelTesterMixin , UNetTesterMixin , unittest.TestCase ):
    """simple docstring"""
    model_class = UNetaDModel
    main_input_name = "sample"
    @property
    def dummy_input( self ):
        '''simple docstring'''
        batch_size = 4
        num_channels = 3
        sizes = (32, 32)
        noise = floats_tensor((batch_size, num_channels) + sizes ).to(torch_device )
        time_step = torch.tensor([10] ).to(torch_device )
        return {"sample": noise, "timestep": time_step}
    @property
    def input_shape( self ):
        '''simple docstring'''
        return (3, 32, 32)
    @property
    def output_shape( self ):
        '''simple docstring'''
        return (3, 32, 32)
    def prepare_init_args_and_inputs_for_common( self ):
        '''simple docstring'''
        init_dict = {
            """block_out_channels""": (32, 64),
            """down_block_types""": ("""DownBlock2D""", """AttnDownBlock2D"""),
            """up_block_types""": ("""AttnUpBlock2D""", """UpBlock2D"""),
            """attention_head_dim""": 3,
            """out_channels""": 3,
            """in_channels""": 3,
            """layers_per_block""": 2,
            """sample_size""": 32,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict
class UNetLDMModelTests( ModelTesterMixin , UNetTesterMixin , unittest.TestCase ):
    """simple docstring"""
    model_class = UNetaDModel
    main_input_name = "sample"
    @property
    def dummy_input( self ):
        '''simple docstring'''
        batch_size = 4
        num_channels = 4
        sizes = (32, 32)
        noise = floats_tensor((batch_size, num_channels) + sizes ).to(torch_device )
        time_step = torch.tensor([10] ).to(torch_device )
        return {"sample": noise, "timestep": time_step}
    @property
    def input_shape( self ):
        '''simple docstring'''
        return (4, 32, 32)
    @property
    def output_shape( self ):
        '''simple docstring'''
        return (4, 32, 32)
    def prepare_init_args_and_inputs_for_common( self ):
        '''simple docstring'''
        init_dict = {
            """sample_size""": 32,
            """in_channels""": 4,
            """out_channels""": 4,
            """layers_per_block""": 2,
            """block_out_channels""": (32, 64),
            """attention_head_dim""": 32,
            """down_block_types""": ("""DownBlock2D""", """DownBlock2D"""),
            """up_block_types""": ("""UpBlock2D""", """UpBlock2D"""),
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict
    def test_from_pretrained_hub( self ):
        '''simple docstring'''
        model, loading_info = UNetaDModel.from_pretrained("""fusing/unet-ldm-dummy-update""" , output_loading_info=True )
        self.assertIsNotNone(model )
        self.assertEqual(len(loading_info["""missing_keys"""] ) , 0 )
        model.to(torch_device )
        image = model(**self.dummy_input ).sample
        assert image is not None, "Make sure output is not None"
    @unittest.skipIf(torch_device != """cuda""" , """This test is supposed to run on GPU""" )
    def test_from_pretrained_accelerate( self ):
        '''simple docstring'''
        model, _ = UNetaDModel.from_pretrained("""fusing/unet-ldm-dummy-update""" , output_loading_info=True )
        model.to(torch_device )
        image = model(**self.dummy_input ).sample
        assert image is not None, "Make sure output is not None"
    @unittest.skipIf(torch_device != """cuda""" , """This test is supposed to run on GPU""" )
    def test_from_pretrained_accelerate_wont_change_results( self ):
        '''simple docstring'''
        model_accelerate, _ = UNetaDModel.from_pretrained("""fusing/unet-ldm-dummy-update""" , output_loading_info=True )
        model_accelerate.to(torch_device )
        model_accelerate.eval()
        noise = torch.randn(
            1 , model_accelerate.config.in_channels , model_accelerate.config.sample_size , model_accelerate.config.sample_size , generator=torch.manual_seed(0 ) , )
        noise = noise.to(torch_device )
        time_step = torch.tensor([10] * noise.shape[0] ).to(torch_device )
        arr_accelerate = model_accelerate(noise , time_step )["""sample"""]
        # two models don't need to stay in the device at the same time
        del model_accelerate
        torch.cuda.empty_cache()
        gc.collect()
        model_normal_load, _ = UNetaDModel.from_pretrained(
            """fusing/unet-ldm-dummy-update""" , output_loading_info=True , low_cpu_mem_usage=False )
        model_normal_load.to(torch_device )
        model_normal_load.eval()
        arr_normal_load = model_normal_load(noise , time_step )["""sample"""]
        assert torch_all_close(arr_accelerate , arr_normal_load , rtol=1e-3 )
    def test_output_pretrained( self ):
        '''simple docstring'''
        model = UNetaDModel.from_pretrained("""fusing/unet-ldm-dummy-update""" )
        model.eval()
        model.to(torch_device )
        noise = torch.randn(
            1 , model.config.in_channels , model.config.sample_size , model.config.sample_size , generator=torch.manual_seed(0 ) , )
        noise = noise.to(torch_device )
        time_step = torch.tensor([10] * noise.shape[0] ).to(torch_device )
        with torch.no_grad():
            output = model(noise , time_step ).sample
        output_slice = output[0, -1, -3:, -3:].flatten().cpu()
        # fmt: off
        expected_output_slice = torch.tensor([-13.3258, -20.1100, -15.9873, -17.6617, -23.0596, -17.9419, -13.3675, -16.1889, -12.3800] )
        # fmt: on
        self.assertTrue(torch_all_close(output_slice , expected_output_slice , rtol=1e-3 ) )
class NCSNppModelTests( ModelTesterMixin , UNetTesterMixin , unittest.TestCase ):
    """simple docstring"""
    model_class = UNetaDModel
    main_input_name = "sample"
@property
def __A ( self , A=(32, 32) ) -> List[Any]:
'''simple docstring'''
lowerCamelCase = 4
lowerCamelCase = 3
lowerCamelCase = floats_tensor((batch_size, num_channels) + sizes ).to(A )
lowerCamelCase = torch.tensor(batch_size * [10] ).to(dtype=torch.intaa , device=A )
return {"sample": noise, "timestep": time_step}
@property
def __A ( self ) -> Optional[int]:
'''simple docstring'''
return (3, 32, 32)
@property
def __A ( self ) -> Dict:
'''simple docstring'''
return (3, 32, 32)
def __A ( self ) -> Any:
'''simple docstring'''
lowerCamelCase = {
"""block_out_channels""": [32, 64, 64, 64],
"""in_channels""": 3,
"""layers_per_block""": 1,
"""out_channels""": 3,
"""time_embedding_type""": """fourier""",
"""norm_eps""": 1e-6,
"""mid_block_scale_factor""": math.sqrt(2.0 ),
"""norm_num_groups""": None,
"""down_block_types""": [
"""SkipDownBlock2D""",
"""AttnSkipDownBlock2D""",
"""SkipDownBlock2D""",
"""SkipDownBlock2D""",
],
"""up_block_types""": [
"""SkipUpBlock2D""",
"""SkipUpBlock2D""",
"""AttnSkipUpBlock2D""",
"""SkipUpBlock2D""",
],
}
lowerCamelCase = self.dummy_input
return init_dict, inputs_dict
@slow
def __A ( self ) -> Any:
'''simple docstring'''
lowerCamelCase , lowerCamelCase = UNetaDModel.from_pretrained("""google/ncsnpp-celebahq-256""" , output_loading_info=A )
self.assertIsNotNone(A )
self.assertEqual(len(loading_info["""missing_keys"""] ) , 0 )
model.to(A )
lowerCamelCase = self.dummy_input
lowerCamelCase = floats_tensor((4, 3) + (2_56, 2_56) ).to(A )
lowerCamelCase = noise
lowerCamelCase = model(**A )
assert image is not None, "Make sure output is not None"
@slow
def __A ( self ) -> Union[str, Any]:
'''simple docstring'''
lowerCamelCase = UNetaDModel.from_pretrained("""google/ncsnpp-celebahq-256""" )
model.to(A )
lowerCamelCase = 4
lowerCamelCase = 3
lowerCamelCase = (2_56, 2_56)
lowerCamelCase = torch.ones((batch_size, num_channels) + sizes ).to(A )
lowerCamelCase = torch.tensor(batch_size * [1e-4] ).to(A )
with torch.no_grad():
lowerCamelCase = model(A , A ).sample
lowerCamelCase = output[0, -3:, -3:, -1].flatten().cpu()
# fmt: off
lowerCamelCase = torch.tensor([-4842.8691, -6499.6631, -3800.1953, -7978.2686, -10980.7129, -20028.8535, 8148.2822, 2342.2905, 567.7608] )
# fmt: on
self.assertTrue(torch_all_close(A , A , rtol=1e-2 ) )
def __A ( self ) -> Optional[int]:
'''simple docstring'''
lowerCamelCase = UNetaDModel.from_pretrained("""fusing/ncsnpp-ffhq-ve-dummy-update""" )
model.to(A )
lowerCamelCase = 4
lowerCamelCase = 3
lowerCamelCase = (32, 32)
lowerCamelCase = torch.ones((batch_size, num_channels) + sizes ).to(A )
lowerCamelCase = torch.tensor(batch_size * [1e-4] ).to(A )
with torch.no_grad():
lowerCamelCase = model(A , A ).sample
lowerCamelCase = output[0, -3:, -3:, -1].flatten().cpu()
# fmt: off
lowerCamelCase = torch.tensor([-0.0325, -0.0900, -0.0869, -0.0332, -0.0725, -0.0270, -0.0101, 0.0227, 0.0256] )
# fmt: on
self.assertTrue(torch_all_close(A , A , rtol=1e-2 ) )
def __A ( self ) -> List[str]:
'''simple docstring'''
pass
| 66 |
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
UpperCAmelCase : str = logging.get_logger(__name__)
UpperCAmelCase : Dict = {
"EleutherAI/gpt-j-6B": "https://huggingface.co/EleutherAI/gpt-j-6B/resolve/main/config.json",
# See all GPT-J models at https://huggingface.co/models?filter=gpt_j
}
class GPTJConfig( PretrainedConfig ):
    """simple docstring"""
    model_type = "gptj"
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }
    def __init__( self , vocab_size=5_04_00 , n_positions=20_48 , n_embd=40_96 , n_layer=28 , n_head=16 , rotary_dim=64 , n_inner=None , activation_function="gelu_new" , resid_pdrop=0.0 , embd_pdrop=0.0 , attn_pdrop=0.0 , layer_norm_epsilon=1e-5 , initializer_range=0.02 , use_cache=True , bos_token_id=5_02_56 , eos_token_id=5_02_56 , tie_word_embeddings=False , **kwargs , ):
        '''simple docstring'''
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.rotary_dim = rotary_dim
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        super().__init__(
            bos_token_id=bos_token_id , eos_token_id=eos_token_id , tie_word_embeddings=tie_word_embeddings , **kwargs )
class GPTJOnnxConfig( OnnxConfigWithPast ):
    """simple docstring"""
    def __init__( self , config , task = "default" , patching_specs = None , use_past = False , ):
        '''simple docstring'''
        super().__init__(config , task=task , patching_specs=patching_specs , use_past=use_past )
        if not getattr(self._config , """pad_token_id""" , None ):
            # TODO: how to do that better?
            self._config.pad_token_id = 0
    @property
    def inputs( self ) -> Mapping[str, Mapping[int, str]]:
        '''simple docstring'''
        common_inputs = OrderedDict({"""input_ids""": {0: """batch""", 1: """sequence"""}} )
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs , direction="""inputs""" )
            common_inputs["""attention_mask"""] = {0: """batch""", 1: """past_sequence + sequence"""}
        else:
            common_inputs["""attention_mask"""] = {0: """batch""", 1: """sequence"""}
        return common_inputs
    @property
    def num_layers( self ) -> int:
        '''simple docstring'''
        return self._config.n_layer
    @property
    def num_attention_heads( self ) -> int:
        '''simple docstring'''
        return self._config.n_head
    def generate_dummy_inputs( self , tokenizer , batch_size = -1 , seq_length = -1 , is_pair = False , framework = None , ) -> Mapping[str, Any]:
        '''simple docstring'''
        common_inputs = super(OnnxConfigWithPast , self ).generate_dummy_inputs(
            tokenizer , batch_size=batch_size , seq_length=seq_length , is_pair=is_pair , framework=framework )
        # We need to order the input in the way they appears in the forward()
        ordered_inputs = OrderedDict({"""input_ids""": common_inputs["""input_ids"""]} )
        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("""Cannot generate dummy past_keys inputs without PyTorch installed.""" )
            else:
                import torch
                batch, seqlen = common_inputs["""input_ids"""].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs["""past_key_values"""] = [
                    (torch.zeros(past_shape ), torch.zeros(past_shape )) for _ in range(self.num_layers )
                ]
        ordered_inputs["""attention_mask"""] = common_inputs["""attention_mask"""]
        if self.use_past:
            mask_dtype = ordered_inputs["""attention_mask"""].dtype
            ordered_inputs["""attention_mask"""] = torch.cat(
                [ordered_inputs["""attention_mask"""], torch.ones(batch , past_key_values_length , dtype=mask_dtype )] , dim=1 )
        return ordered_inputs
    @property
    def default_onnx_opset( self ) -> int:
        '''simple docstring'''
        return 13
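    # Usage sketch (added; mirrors the transformers ONNX export flow; `model` and
    # `tokenizer` are assumed to exist):
    # onnx_config = GPTJOnnxConfig(model.config, task="default", use_past=True)
    # dummy = onnx_config.generate_dummy_inputs(tokenizer, framework=TensorType.PYTORCH)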
| 66 | 1 |
from __future__ import annotations
import numpy as np
from numpy import floataa
from numpy.typing import NDArray
def jacobi_iteration_method( coefficient_matrix , constant_matrix , init_val , iterations , ) -> list[float]:
    rowsa, colsa = coefficient_matrix.shape
    rowsb, colsb = constant_matrix.shape
    if rowsa != colsa:
        msg = F"""Coefficient matrix dimensions must be nxn but received {rowsa}x{colsa}"""
        raise ValueError(msg )
    if colsb != 1:
        msg = F"""Constant matrix must be nx1 but received {rowsb}x{colsb}"""
        raise ValueError(msg )
    if rowsa != rowsb:
        msg = (
            'Coefficient and constant matrices dimensions must be nxn and nx1 but '
            F"""received {rowsa}x{colsa} and {rowsb}x{colsb}"""
        )
        raise ValueError(msg )
    if len(init_val ) != rowsa:
        msg = (
            'Number of initial values must be equal to number of rows in coefficient '
            F"""matrix but received {len(init_val )} and {rowsa}"""
        )
        raise ValueError(msg )
    if iterations <= 0:
        raise ValueError('Iterations must be at least 1' )
    table: NDArray[floataa] = np.concatenate(
        (coefficient_matrix, constant_matrix) , axis=1 )
    rows, cols = table.shape
    strictly_diagonally_dominant(table )
    # Iterates the whole matrix for given number of times
    for _ in range(iterations ):
        new_val = []
        for row in range(rows ):
            temp = 0
            for col in range(cols ):
                if col == row:
                    denom = table[row][col]
                elif col == cols - 1:
                    val = table[row][col]
                else:
                    temp += (-1) * table[row][col] * init_val[col]
            new_val.append((temp + val) / denom )
        init_val = new_val
    return [float(i ) for i in new_val]


def strictly_diagonally_dominant( table ) -> bool:
    rows, cols = table.shape
    is_diagonally_dominant = True
    for i in range(0 , rows ):
        total = 0
        for j in range(0 , cols - 1 ):
            if i == j:
                continue
            else:
                total += table[i][j]
        if table[i][i] <= total:
            raise ValueError('Coefficient matrix is not strictly diagonally dominant' )
    return is_diagonally_dominant
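# Worked example (added): for the strictly diagonally dominant system
#   A = [[4, 1, 1], [1, 5, 2], [1, 2, 4]],  b = [2, -6, -4]
# coefficient = np.array([[4.0, 1.0, 1.0], [1.0, 5.0, 2.0], [1.0, 2.0, 4.0]])
# constant = np.array([[2.0], [-6.0], [-4.0]])
# jacobi_iteration_method(coefficient, constant, init_val=[0.5, -0.5, -0.5], iterations=3)
# Each sweep recomputes every component from the previous iterate only (Jacobi),
# unlike Gauss-Seidel, which reuses values updated within the same sweep.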
# Test Cases
if __name__ == "__main__":
import doctest
doctest.testmod()
| 280 |
from __future__ import annotations
def simple_interest( principal : float , daily_interest_rate : float , days_between_payments : float ) -> float:
    if days_between_payments <= 0:
        raise ValueError('days_between_payments must be > 0' )
    if daily_interest_rate < 0:
        raise ValueError('daily_interest_rate must be >= 0' )
    if principal <= 0:
        raise ValueError('principal must be > 0' )
    return principal * daily_interest_rate * days_between_payments


def compound_interest( principal : float , nominal_annual_interest_rate_percentage : float , number_of_compounding_periods : float , ) -> float:
    if number_of_compounding_periods <= 0:
        raise ValueError('number_of_compounding_periods must be > 0' )
    if nominal_annual_interest_rate_percentage < 0:
        raise ValueError('nominal_annual_interest_rate_percentage must be >= 0' )
    if principal <= 0:
        raise ValueError('principal must be > 0' )
    return principal * (
        (1 + nominal_annual_interest_rate_percentage) ** number_of_compounding_periods
        - 1
    )


def apr_interest( principal : float , nominal_annual_percentage_rate : float , number_of_years : float , ) -> float:
    if number_of_years <= 0:
        raise ValueError('number_of_years must be > 0' )
    if nominal_annual_percentage_rate < 0:
        raise ValueError('nominal_annual_percentage_rate must be >= 0' )
    if principal <= 0:
        raise ValueError('principal must be > 0' )
    return compound_interest(
        principal , nominal_annual_percentage_rate / 3_65 , number_of_years * 3_65 )
if __name__ == "__main__":
import doctest
doctest.testmod()
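    # Quick check (added): simple_interest(10_000, 0.0005, 30)
    # = 10_000 * 0.0005 * 30 = 150.0 of interest between payments.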
| 280 | 1 |
from PIL import Image
def change_brightness( img : Image , level : float ):
    def brightness( c : int ) -> float:
        return 128 + level + (c - 128)
    if not -255.0 <= level <= 255.0:
        raise ValueError("""level must be between -255.0 (black) and 255.0 (white)""" )
    return img.point(brightness )
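# Example (added): brightness(c) reduces to c + level, so change_brightness(img, 100)
# maps a pixel value of 50 to 150; PIL's point() keeps results within the 0..255 range.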
if __name__ == "__main__":
# Load image
with Image.open('''image_data/lena.jpg''') as img:
# Change brightness to 100
        bright_img = change_brightness(img, 1_00)
        bright_img.save('''image_data/lena_brightness.png''', format='''png''')
| 358 |
import argparse
from torch import nn
# transformers_old should correspond to branch `save_old_prophetnet_model_structure` here
# original prophetnet_checkpoints are saved under `patrickvonplaten/..._old` respectively
from transformers_old.modeling_prophetnet import (
ProphetNetForConditionalGeneration as ProphetNetForConditionalGenerationOld,
)
from transformers_old.modeling_xlm_prophetnet import (
XLMProphetNetForConditionalGeneration as XLMProphetNetForConditionalGenerationOld,
)
from transformers import ProphetNetForConditionalGeneration, XLMProphetNetForConditionalGeneration, logging
logger = logging.get_logger(__name__)
logging.set_verbosity_info()
def convert_prophetnet_checkpoint_to_pytorch( prophetnet_checkpoint_path : str , pytorch_dump_folder_path : str ):
    if "xprophetnet" in prophetnet_checkpoint_path:
        prophet_old = XLMProphetNetForConditionalGenerationOld.from_pretrained(prophetnet_checkpoint_path )
        prophet, loading_info = XLMProphetNetForConditionalGeneration.from_pretrained(
            prophetnet_checkpoint_path , output_loading_info=True )
    else:
        prophet_old = ProphetNetForConditionalGenerationOld.from_pretrained(prophetnet_checkpoint_path )
        prophet, loading_info = ProphetNetForConditionalGeneration.from_pretrained(
            prophetnet_checkpoint_path , output_loading_info=True )
    special_keys = ["""key_proj""", """value_proj""", """query_proj"""]
    mapping = {
        """self_attn""": """ngram_self_attn""",
        """cross_attn""": """encoder_attn""",
        """cross_attn_layer_norm""": """encoder_attn_layer_norm""",
        """feed_forward_layer_norm""": """final_layer_norm""",
        """feed_forward""": """""",
        """intermediate""": """fc1""",
        """output""": """fc2""",
        """key_proj""": """k_proj""",
        """query_proj""": """q_proj""",
        """value_proj""": """v_proj""",
        """word_embeddings""": """embed_tokens""",
        """embeddings_layer_norm""": """emb_layer_norm""",
        """relative_pos_embeddings""": """relative_linear""",
        """ngram_embeddings""": """ngram_input_embed""",
        """position_embeddings""": """embed_positions""",
    }
    for key in loading_info["missing_keys"]:
        attributes = key.split(""".""" )
        if attributes[0] == "lm_head":
            model = prophet
            old_model = prophet_old
        else:
            model = prophet.prophetnet
            old_model = prophet_old.model
        is_key_init = False
        for attribute in attributes:
            if attribute in mapping:
                old_attribute = mapping[attribute]
                if not hasattr(old_model , old_attribute ) and len(old_attribute ) > 0:
                    old_attribute = attribute
                elif hasattr(old_model , attribute ):
                    old_attribute = attribute
            if attribute == "weight":
                assert old_model.weight.shape == model.weight.shape, "Shapes have to match!"
                model.weight = old_model.weight
                logger.info(F"{attribute} is initialized." )
                is_key_init = True
                break
            elif attribute == "bias":
                assert old_model.bias.shape == model.bias.shape, "Shapes have to match!"
                model.bias = old_model.bias
                logger.info(F"{attribute} is initialized" )
                is_key_init = True
                break
            elif attribute in special_keys and hasattr(old_model , """in_proj_weight""" ):
                embed_dim = old_model.in_proj_weight.shape[0] // 3
                param = getattr(model , attribute )
                assert param.weight.shape == old_model.in_proj_weight[:embed_dim, :].shape, "Shapes have to match"
                assert param.bias.shape == old_model.in_proj_bias[:embed_dim].shape, "Shapes have to match"
                if attribute == "query_proj":
                    model.query_proj.weight = nn.Parameter(old_model.in_proj_weight[:embed_dim, :] )
                    model.query_proj.bias = nn.Parameter(old_model.in_proj_bias[:embed_dim] )
                elif attribute == "key_proj":
                    model.key_proj.weight = nn.Parameter(old_model.in_proj_weight[embed_dim : 2 * embed_dim, :] )
                    model.key_proj.bias = nn.Parameter(old_model.in_proj_bias[embed_dim : 2 * embed_dim] )
                elif attribute == "value_proj":
                    model.value_proj.weight = nn.Parameter(old_model.in_proj_weight[2 * embed_dim :, :] )
                    model.value_proj.bias = nn.Parameter(old_model.in_proj_bias[2 * embed_dim :] )
                is_key_init = True
                break
            elif attribute == "position_embeddings":
                assert (
                    model.position_embeddings.weight.shape[-1] == old_model.embed_positions.weight.shape[-1]
                ), "Hidden size has to match"
                assert model.position_embeddings.weight.shape[0] == 512, "We want 512 position_embeddings."
                model.position_embeddings.weight = nn.Parameter(old_model.embed_positions.weight[:512, :] )
                is_key_init = True
                break
            if attribute.isdigit():
                model = model[int(attribute )]
                old_model = old_model[int(old_attribute )]
            else:
                model = getattr(model , attribute )
                if old_attribute == "":
                    old_model = old_model
                else:
                    if not hasattr(old_model , old_attribute ):
                        raise ValueError(F"{old_model} does not have {old_attribute}" )
                    old_model = getattr(old_model , old_attribute )
        if not is_key_init:
            raise ValueError(F"{key} was not correctly initialized!" )
    print(F"Saving model to {pytorch_dump_folder_path}" )
    prophet.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--prophetnet_checkpoint_path''', default=None, type=str, required=True, help='''Path the official PyTorch dump.'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
lowerCamelCase : Any = parser.parse_args()
    convert_prophetnet_checkpoint_to_pytorch(args.prophetnet_checkpoint_path, args.pytorch_dump_folder_path)
| 306 | 0 |
from __future__ import annotations
import math
def prime_sieve(num ) -> list[int]:
    '''simple docstring'''
    if num <= 0:
        msg = F'{num}: Invalid input, please enter a positive integer.'
        raise ValueError(msg )
    sieve = [True] * (num + 1)
    prime = []
    start = 2
    end = int(math.sqrt(num ) )
    while start <= end:
        # If start is a prime
        if sieve[start] is True:
            prime.append(start )
            # Set multiples of start be False
            for i in range(start * start , num + 1 , start ):
                if sieve[i] is True:
                    sieve[i] = False
        start += 1
    for j in range(end + 1 , num + 1 ):
        if sieve[j] is True:
            prime.append(j )
    return prime
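# Example (added): prime_sieve(10) returns [2, 3, 5, 7].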
if __name__ == "__main__":
print(prime_sieve(int(input("Enter a positive integer: ").strip())))
| 209 |
import json
import os
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union
import sentencepiece
from ...tokenization_utils import BatchEncoding, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
SPIECE_UNDERLINE = "▁"
VOCAB_FILES_NAMES = {
"vocab_file": "vocab.json",
"spm_file": "sentencepiece.bpe.model",
"tokenizer_config_file": "tokenizer_config.json",
}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"facebook/m2m100_418M": "https://huggingface.co/facebook/m2m100_418M/resolve/main/vocab.json",
"facebook/m2m100_1.2B": "https://huggingface.co/facebook/m2m100_1.2B/resolve/main/vocab.json",
},
"spm_file": {
"facebook/m2m100_418M": "https://huggingface.co/facebook/m2m100_418M/resolve/main/sentencepiece.bpe.model",
"facebook/m2m100_1.2B": "https://huggingface.co/facebook/m2m100_1.2B/resolve/main/sentencepiece.bpe.model",
},
"tokenizer_config_file": {
"facebook/m2m100_418M": "https://huggingface.co/facebook/m2m100_418M/resolve/main/tokenizer_config.json",
"facebook/m2m100_1.2B": "https://huggingface.co/facebook/m2m100_1.2B/resolve/main/tokenizer_config.json",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"facebook/m2m100_418M": 1_024,
}
# fmt: off
FAIRSEQ_LANGUAGE_CODES = {
"m2m100": ["af", "am", "ar", "ast", "az", "ba", "be", "bg", "bn", "br", "bs", "ca", "ceb", "cs", "cy", "da", "de", "el", "en", "es", "et", "fa", "ff", "fi", "fr", "fy", "ga", "gd", "gl", "gu", "ha", "he", "hi", "hr", "ht", "hu", "hy", "id", "ig", "ilo", "is", "it", "ja", "jv", "ka", "kk", "km", "kn", "ko", "lb", "lg", "ln", "lo", "lt", "lv", "mg", "mk", "ml", "mn", "mr", "ms", "my", "ne", "nl", "no", "ns", "oc", "or", "pa", "pl", "ps", "pt", "ro", "ru", "sd", "si", "sk", "sl", "so", "sq", "sr", "ss", "su", "sv", "sw", "ta", "th", "tl", "tn", "tr", "uk", "ur", "uz", "vi", "wo", "xh", "yi", "yo", "zh", "zu"],
"wmt21": ["en", "ha", "is", "ja", "cs", "ru", "zh", "de"]
}
class M2M100Tokenizer(PreTrainedTokenizer):
    """Construct an M2M100 tokenizer (SentencePiece based)."""

    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]
    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []
def __init__( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase="<s>" , __lowerCAmelCase="</s>" , __lowerCAmelCase="</s>" , __lowerCAmelCase="<pad>" , __lowerCAmelCase="<unk>" , __lowerCAmelCase="m2m100" , __lowerCAmelCase = None , __lowerCAmelCase=8 , **__lowerCAmelCase , ):
'''simple docstring'''
lowerCamelCase__ = {} if sp_model_kwargs is None else sp_model_kwargs
lowerCamelCase__ = language_codes
lowerCamelCase__ = FAIRSEQ_LANGUAGE_CODES[language_codes]
lowerCamelCase__ = {lang_code: F'__{lang_code}__' for lang_code in fairseq_language_code}
lowerCamelCase__ = kwargs.get('''additional_special_tokens''' , [] )
kwargs["additional_special_tokens"] += [
self.get_lang_token(__lowerCAmelCase )
for lang_code in fairseq_language_code
if self.get_lang_token(__lowerCAmelCase ) not in kwargs["additional_special_tokens"]
]
super().__init__(
src_lang=__lowerCAmelCase , tgt_lang=__lowerCAmelCase , bos_token=__lowerCAmelCase , eos_token=__lowerCAmelCase , sep_token=__lowerCAmelCase , unk_token=__lowerCAmelCase , pad_token=__lowerCAmelCase , language_codes=__lowerCAmelCase , sp_model_kwargs=self.sp_model_kwargs , num_madeup_words=__lowerCAmelCase , **__lowerCAmelCase , )
lowerCamelCase__ = vocab_file
lowerCamelCase__ = load_json(__lowerCAmelCase )
lowerCamelCase__ = {v: k for k, v in self.encoder.items()}
lowerCamelCase__ = spm_file
lowerCamelCase__ = load_spm(__lowerCAmelCase , self.sp_model_kwargs )
lowerCamelCase__ = len(self.encoder )
lowerCamelCase__ = {
self.get_lang_token(__lowerCAmelCase ): self.encoder_size + i for i, lang_code in enumerate(__lowerCAmelCase )
}
lowerCamelCase__ = {lang_code: self.encoder_size + i for i, lang_code in enumerate(__lowerCAmelCase )}
lowerCamelCase__ = {v: k for k, v in self.lang_token_to_id.items()}
lowerCamelCase__ = src_lang if src_lang is not None else '''en'''
lowerCamelCase__ = tgt_lang
lowerCamelCase__ = self.get_lang_id(self._src_lang )
self.set_src_lang_special_tokens(self._src_lang )
lowerCamelCase__ = num_madeup_words
@property
def __lowerCamelCase ( self ):
'''simple docstring'''
return len(self.encoder ) + len(self.lang_token_to_id )
@property
def __lowerCamelCase ( self ):
'''simple docstring'''
return self._src_lang
@src_lang.setter
def __lowerCamelCase ( self , __lowerCAmelCase ):
'''simple docstring'''
lowerCamelCase__ = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def __lowerCamelCase ( self , __lowerCAmelCase ):
'''simple docstring'''
return self.sp_model.encode(__lowerCAmelCase , out_type=__lowerCAmelCase )
def __lowerCamelCase ( self , __lowerCAmelCase ):
'''simple docstring'''
if token in self.lang_token_to_id:
return self.lang_token_to_id[token]
return self.encoder.get(__lowerCAmelCase , self.encoder[self.unk_token] )
def __lowerCamelCase ( self , __lowerCAmelCase ):
'''simple docstring'''
if index in self.id_to_lang_token:
return self.id_to_lang_token[index]
return self.decoder.get(__lowerCAmelCase , self.unk_token )
def __lowerCamelCase ( self , __lowerCAmelCase ):
'''simple docstring'''
lowerCamelCase__ = []
lowerCamelCase__ = ''''''
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
out_string += self.sp_model.decode(__lowerCAmelCase ) + token
lowerCamelCase__ = []
else:
current_sub_tokens.append(__lowerCAmelCase )
out_string += self.sp_model.decode(__lowerCAmelCase )
return out_string.strip()
def __lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase = None , __lowerCAmelCase = False ):
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__lowerCAmelCase , token_ids_a=__lowerCAmelCase , already_has_special_tokens=__lowerCAmelCase )
lowerCamelCase__ = [1] * len(self.prefix_tokens )
lowerCamelCase__ = [1] * len(self.suffix_tokens )
if token_ids_a is None:
return prefix_ones + ([0] * len(__lowerCAmelCase )) + suffix_ones
return prefix_ones + ([0] * len(__lowerCAmelCase )) + ([0] * len(__lowerCAmelCase )) + suffix_ones
def __lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase = None ):
'''simple docstring'''
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def __lowerCamelCase ( self ):
'''simple docstring'''
lowerCamelCase__ = {self.convert_ids_to_tokens(__lowerCAmelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ):
'''simple docstring'''
lowerCamelCase__ = self.__dict__.copy()
lowerCamelCase__ = None
return state
def __setstate__( self , __lowerCAmelCase ):
'''simple docstring'''
lowerCamelCase__ = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
lowerCamelCase__ = {}
lowerCamelCase__ = load_spm(self.spm_file , self.sp_model_kwargs )
def __lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase = None ):
'''simple docstring'''
lowerCamelCase__ = Path(__lowerCAmelCase )
if not save_dir.is_dir():
raise OSError(F'{save_directory} should be a directory' )
lowerCamelCase__ = save_dir / (
(filename_prefix + '''-''' if filename_prefix else '''''') + self.vocab_files_names['''vocab_file''']
)
lowerCamelCase__ = save_dir / (
(filename_prefix + '''-''' if filename_prefix else '''''') + self.vocab_files_names['''spm_file''']
)
save_json(self.encoder , __lowerCAmelCase )
if os.path.abspath(self.spm_file ) != os.path.abspath(__lowerCAmelCase ) and os.path.isfile(self.spm_file ):
copyfile(self.spm_file , __lowerCAmelCase )
elif not os.path.isfile(self.spm_file ):
with open(__lowerCAmelCase , '''wb''' ) as fi:
lowerCamelCase__ = self.sp_model.serialized_model_proto()
fi.write(__lowerCAmelCase )
return (str(__lowerCAmelCase ), str(__lowerCAmelCase ))
def __lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase = "en" , __lowerCAmelCase = None , __lowerCAmelCase = "ro" , **__lowerCAmelCase , ):
'''simple docstring'''
lowerCamelCase__ = src_lang
lowerCamelCase__ = tgt_lang
self.set_src_lang_special_tokens(self.src_lang )
return super().prepare_seqaseq_batch(__lowerCAmelCase , __lowerCAmelCase , **__lowerCAmelCase )
def __lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , **__lowerCAmelCase ):
'''simple docstring'''
if src_lang is None or tgt_lang is None:
raise ValueError('''Translation requires a `src_lang` and a `tgt_lang` for this model''' )
lowerCamelCase__ = src_lang
lowerCamelCase__ = self(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase , **__lowerCAmelCase )
lowerCamelCase__ = self.get_lang_id(__lowerCAmelCase )
lowerCamelCase__ = tgt_lang_id
return inputs
def __lowerCamelCase ( self ):
'''simple docstring'''
self.set_src_lang_special_tokens(self.src_lang )
def __lowerCamelCase ( self ):
'''simple docstring'''
self.set_tgt_lang_special_tokens(self.tgt_lang )
def __lowerCamelCase ( self , __lowerCAmelCase ):
'''simple docstring'''
lowerCamelCase__ = self.get_lang_token(__lowerCAmelCase )
lowerCamelCase__ = self.lang_token_to_id[lang_token]
lowerCamelCase__ = [self.cur_lang_id]
lowerCamelCase__ = [self.eos_token_id]
def __lowerCamelCase ( self , __lowerCAmelCase ):
'''simple docstring'''
lowerCamelCase__ = self.get_lang_token(__lowerCAmelCase )
lowerCamelCase__ = self.lang_token_to_id[lang_token]
lowerCamelCase__ = [self.cur_lang_id]
lowerCamelCase__ = [self.eos_token_id]
def __lowerCamelCase ( self , __lowerCAmelCase ):
'''simple docstring'''
return self.lang_code_to_token[lang]
def __lowerCamelCase ( self , __lowerCAmelCase ):
'''simple docstring'''
lowerCamelCase__ = self.get_lang_token(__lowerCAmelCase )
return self.lang_token_to_id[lang_token]
def load_spm(path: str, sp_model_kwargs: Dict[str, Any]) -> sentencepiece.SentencePieceProcessor:
    spm = sentencepiece.SentencePieceProcessor(**sp_model_kwargs)
    spm.Load(str(path))
    return spm


def load_json(path: str) -> Union[Dict, List]:
    with open(path, "r") as f:
        return json.load(f)


def save_json(data, path: str) -> None:
    with open(path, "w") as f:
        json.dump(data, f, indent=2)
| 209 | 1 |
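# --- Editorial aside: end-to-end use of the tokenizer above -------------------
# Illustrative only; downloads the public checkpoint, so network access is
# assumed, and the seq2seq model class comes from the same library.
from transformers import M2M100ForConditionalGeneration, M2M100Tokenizer

tokenizer = M2M100Tokenizer.from_pretrained("facebook/m2m100_418M", src_lang="en", tgt_lang="fr")
model = M2M100ForConditionalGeneration.from_pretrained("facebook/m2m100_418M")
inputs = tokenizer("Life is like a box of chocolates.", return_tensors="pt")
generated = model.generate(**inputs, forced_bos_token_id=tokenizer.get_lang_id("fr"))
print(tokenizer.batch_decode(generated, skip_special_tokens=True))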
import logging
from pathlib import Path
import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only
from utils_rag import save_json
def count_trainable_parameters(model) -> int:
    model_parameters = filter(lambda p: p.requires_grad, model.parameters())
    params = sum(np.prod(p.size()) for p in model_parameters)
    return params
logger = logging.getLogger(__name__)
def get_checkpoint_callback(output_dir, metric) -> ModelCheckpoint:
    """Saves the best model by validation metric."""
    if metric == "rouge2":
        exp = "{val_avg_rouge2:.4f}-{step_count}"
    elif metric == "bleu":
        exp = "{val_avg_bleu:.4f}-{step_count}"
    elif metric == "em":
        exp = "{val_avg_em:.4f}-{step_count}"
    else:
        raise NotImplementedError(
            f"seq2seq callbacks only support rouge2 and bleu, got {metric}, You can make your own by adding to this"
            " function."
        )
    checkpoint_callback = ModelCheckpoint(
        dirpath=output_dir,
        filename=exp,
        monitor=f"val_{metric}",
        mode="max",
        save_top_k=3,
        every_n_epochs=1,
    )
    return checkpoint_callback
def get_early_stopping_callback(metric, patience) -> EarlyStopping:
    return EarlyStopping(
        monitor=f"val_{metric}",
        mode="min" if "loss" in metric else "max",
        patience=patience,
        verbose=True,
    )
class Seq2SeqLoggingCallback(pl.Callback):
    def on_batch_end(self, trainer, pl_module):
        lrs = {f"lr_group_{i}": param["lr"] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups)}
        pl_module.logger.log_metrics(lrs)

    @rank_zero_only
    def _write_logs(self, trainer, pl_module, type_path, save_generations=True):
        logger.info(f"***** {type_path} results at step {trainer.global_step:05d} *****")
        metrics = trainer.callback_metrics
        trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ["log", "progress_bar", "preds"]})
        # Log results
        od = Path(pl_module.hparams.output_dir)
        if type_path == "test":
            results_file = od / "test_results.txt"
            generations_file = od / "test_generations.txt"
        else:
            # this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
            # If people want this it will be easy enough to add back.
            results_file = od / f"{type_path}_results/{trainer.global_step:05d}.txt"
            generations_file = od / f"{type_path}_generations/{trainer.global_step:05d}.txt"
        results_file.parent.mkdir(exist_ok=True)
        generations_file.parent.mkdir(exist_ok=True)
        with open(results_file, "a+") as writer:
            for key in sorted(metrics):
                if key in ["log", "progress_bar", "preds"]:
                    continue
                val = metrics[key]
                if isinstance(val, torch.Tensor):
                    val = val.item()
                msg = f"{key}: {val:.6f}\n"
                writer.write(msg)
        if not save_generations:
            return
        if "preds" in metrics:
            content = "\n".join(metrics["preds"])
            generations_file.open("w+").write(content)

    @rank_zero_only
    def on_train_start(self, trainer, pl_module):
        try:
            npars = pl_module.model.model.num_parameters()
        except AttributeError:
            npars = pl_module.model.num_parameters()
        n_trainable_pars = count_trainable_parameters(pl_module)
        # mp stands for million parameters
        trainer.logger.log_metrics({"n_params": npars, "mp": npars / 1e6, "grad_mp": n_trainable_pars / 1e6})

    @rank_zero_only
    def on_test_end(self, trainer, pl_module):
        save_json(pl_module.metrics, pl_module.metrics_save_path)
        return self._write_logs(trainer, pl_module, "test")

    @rank_zero_only
    def on_validation_end(self, trainer, pl_module):
        save_json(pl_module.metrics, pl_module.metrics_save_path)
        # Uncommenting this will save val generations
        # return self._write_logs(trainer, pl_module, "valid") | 360 | def one_letter_uppercase_variants(txt: str) -> list:
    """
    Return every variant of ``txt`` with exactly one alphabetic character
    uppercased. The function name is editorial; the original was not recoverable.

    >>> one_letter_uppercase_variants("ab")
    ['Ab', 'aB']
    """
    return [
        txt[:a] + txt[a].upper() + txt[a + 1 :]
        for a in range(len(txt))
        if txt[a].isalpha()
    ]
if __name__ == "__main__":
__import__('doctest').testmod() | 284 | 0 |
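# --- Editorial aside: quick check of the variants helper above ----------------
# (Function name is the editorial one chosen in the restored snippet.)
print(one_letter_uppercase_variants("a1b"))  # ['A1b', 'a1B']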
"""simple docstring"""
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_multi_gpu
from accelerate.utils import patch_environment
class MultiGPUTester(unittest.TestCase):
"""simple docstring"""
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ):
lowerCAmelCase_ : Union[str, Any] = inspect.getfile(accelerate.test_utils )
lowerCAmelCase_ : Dict = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ['scripts', 'test_script.py'] )
lowerCAmelCase_ : int = os.path.sep.join(
mod_file.split(os.path.sep )[:-1] + ['scripts', 'test_distributed_data_loop.py'] )
lowerCAmelCase_ : Union[str, Any] = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ['scripts', 'test_ops.py'] )
@require_multi_gpu
def SCREAMING_SNAKE_CASE__ ( self : Any ):
print(F"Found {torch.cuda.device_count()} devices." )
lowerCAmelCase_ : Optional[int] = ['torchrun', F"--nproc_per_node={torch.cuda.device_count()}", self.test_file_path]
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(SCREAMING_SNAKE_CASE_ , env=os.environ.copy() )
@require_multi_gpu
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ):
print(F"Found {torch.cuda.device_count()} devices." )
lowerCAmelCase_ : int = ['torchrun', F"--nproc_per_node={torch.cuda.device_count()}", self.operation_file_path]
print(F"Command: {cmd}" )
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(SCREAMING_SNAKE_CASE_ , env=os.environ.copy() )
@require_multi_gpu
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ):
lowerCAmelCase_ : List[Any] = ['torchrun', F"--nproc_per_node={torch.cuda.device_count()}", inspect.getfile(self.__class__ )]
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(SCREAMING_SNAKE_CASE_ , env=os.environ.copy() )
@require_multi_gpu
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ):
print(F"Found {torch.cuda.device_count()} devices, using 2 devices only" )
lowerCAmelCase_ : Any = ['torchrun', F"--nproc_per_node={torch.cuda.device_count()}", self.data_loop_file_path]
with patch_environment(omp_num_threads=1 , cuda_visible_devices='0,1' ):
execute_subprocess_async(SCREAMING_SNAKE_CASE_ , env=os.environ.copy() )
if __name__ == "__main__":
    accelerator = Accelerator()
    shape = (accelerator.state.process_index + 2, 10)
    tensor = torch.randint(0, 10, shape).to(accelerator.device)

    error_msg = ""

    tensor1 = accelerator.pad_across_processes(tensor)
    if tensor1.shape[0] != accelerator.state.num_processes + 1:
        error_msg += f"Found shape {tensor1.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
    if not torch.equal(tensor1[: accelerator.state.process_index + 2], tensor):
        error_msg += "Tensors have different values."
    if not torch.all(tensor1[accelerator.state.process_index + 2 :] == 0):
        error_msg += "Padding was not done with the right value (0)."

    tensor2 = accelerator.pad_across_processes(tensor, pad_first=True)
    if tensor2.shape[0] != accelerator.state.num_processes + 1:
        error_msg += f"Found shape {tensor2.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
    index = accelerator.state.num_processes - accelerator.state.process_index - 1
    if not torch.equal(tensor2[index:], tensor):
        error_msg += "Tensors have different values."
    if not torch.all(tensor2[:index] == 0):
        error_msg += "Padding was not done with the right value (0)."
# Raise error at the end to make sure we don't stop at the first failure.
if len(error_msg) > 0:
raise ValueError(error_msg)
| 224 |
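# --- Editorial aside -----------------------------------------------------------
# Single-process illustration of the padding semantics the multi-GPU test above
# verifies for accelerator.pad_across_processes: shorter tensors are padded with
# zeros, either at the end (default) or at the front (pad_first=True). This
# helper is a local sketch, not the accelerate implementation.
import torch

def pad_to(t: torch.Tensor, length: int, pad_first: bool = False) -> torch.Tensor:
    pad = torch.zeros(length - t.shape[0], *t.shape[1:], dtype=t.dtype)
    return torch.cat([pad, t]) if pad_first else torch.cat([t, pad])

t = torch.ones(3, 2)
print(pad_to(t, 5))        # zeros appended after the data
print(pad_to(t, 5, True))  # zeros prepended before the data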
"""simple docstring"""
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""huggingface/informer-tourism-monthly""": (
"""https://huggingface.co/huggingface/informer-tourism-monthly/resolve/main/config.json"""
),
# See all Informer models at https://huggingface.co/models?filter=informer
}
class InformerConfig(PretrainedConfig):
    """Configuration class for the Informer time-series transformer."""

    model_type = "informer"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
        "num_hidden_layers": "encoder_layers",
    }
def __init__( self : Any , SCREAMING_SNAKE_CASE_ : Optional[int] = None , SCREAMING_SNAKE_CASE_ : Optional[int] = None , SCREAMING_SNAKE_CASE_ : str = "student_t" , SCREAMING_SNAKE_CASE_ : str = "nll" , SCREAMING_SNAKE_CASE_ : int = 1 , SCREAMING_SNAKE_CASE_ : List[int] = None , SCREAMING_SNAKE_CASE_ : Optional[Union[str, bool]] = "mean" , SCREAMING_SNAKE_CASE_ : int = 0 , SCREAMING_SNAKE_CASE_ : int = 0 , SCREAMING_SNAKE_CASE_ : int = 0 , SCREAMING_SNAKE_CASE_ : int = 0 , SCREAMING_SNAKE_CASE_ : Optional[List[int]] = None , SCREAMING_SNAKE_CASE_ : Optional[List[int]] = None , SCREAMING_SNAKE_CASE_ : int = 6_4 , SCREAMING_SNAKE_CASE_ : int = 3_2 , SCREAMING_SNAKE_CASE_ : int = 3_2 , SCREAMING_SNAKE_CASE_ : int = 2 , SCREAMING_SNAKE_CASE_ : int = 2 , SCREAMING_SNAKE_CASE_ : int = 2 , SCREAMING_SNAKE_CASE_ : int = 2 , SCREAMING_SNAKE_CASE_ : bool = True , SCREAMING_SNAKE_CASE_ : str = "gelu" , SCREAMING_SNAKE_CASE_ : float = 0.05 , SCREAMING_SNAKE_CASE_ : float = 0.1 , SCREAMING_SNAKE_CASE_ : float = 0.1 , SCREAMING_SNAKE_CASE_ : float = 0.1 , SCREAMING_SNAKE_CASE_ : float = 0.1 , SCREAMING_SNAKE_CASE_ : int = 1_0_0 , SCREAMING_SNAKE_CASE_ : float = 0.02 , SCREAMING_SNAKE_CASE_ : str=True , SCREAMING_SNAKE_CASE_ : str = "prob" , SCREAMING_SNAKE_CASE_ : int = 5 , SCREAMING_SNAKE_CASE_ : bool = True , **SCREAMING_SNAKE_CASE_ : int , ):
# time series specific configuration
lowerCAmelCase_ : Dict = prediction_length
lowerCAmelCase_ : List[str] = context_length or prediction_length
lowerCAmelCase_ : List[Any] = distribution_output
lowerCAmelCase_ : int = loss
lowerCAmelCase_ : Optional[int] = input_size
lowerCAmelCase_ : Tuple = num_time_features
lowerCAmelCase_ : List[str] = lags_sequence if lags_sequence is not None else [1, 2, 3, 4, 5, 6, 7]
lowerCAmelCase_ : int = scaling
lowerCAmelCase_ : List[Any] = num_dynamic_real_features
lowerCAmelCase_ : Union[str, Any] = num_static_real_features
lowerCAmelCase_ : Optional[int] = num_static_categorical_features
# set cardinality
if cardinality and num_static_categorical_features > 0:
if len(SCREAMING_SNAKE_CASE_ ) != num_static_categorical_features:
raise ValueError(
'The cardinality should be a list of the same length as `num_static_categorical_features`' )
lowerCAmelCase_ : str = cardinality
else:
lowerCAmelCase_ : Any = [0]
# set embedding_dimension
if embedding_dimension and num_static_categorical_features > 0:
if len(SCREAMING_SNAKE_CASE_ ) != num_static_categorical_features:
raise ValueError(
'The embedding dimension should be a list of the same length as `num_static_categorical_features`' )
lowerCAmelCase_ : Optional[int] = embedding_dimension
else:
lowerCAmelCase_ : Union[str, Any] = [min(5_0 , (cat + 1) // 2 ) for cat in self.cardinality]
lowerCAmelCase_ : Optional[int] = num_parallel_samples
# Transformer architecture configuration
lowerCAmelCase_ : Any = input_size * len(self.lags_sequence ) + self._number_of_features
lowerCAmelCase_ : Any = d_model
lowerCAmelCase_ : Union[str, Any] = encoder_attention_heads
lowerCAmelCase_ : Optional[Any] = decoder_attention_heads
lowerCAmelCase_ : Any = encoder_ffn_dim
lowerCAmelCase_ : List[str] = decoder_ffn_dim
lowerCAmelCase_ : Optional[Any] = encoder_layers
lowerCAmelCase_ : Tuple = decoder_layers
lowerCAmelCase_ : Optional[int] = dropout
lowerCAmelCase_ : Dict = attention_dropout
lowerCAmelCase_ : int = activation_dropout
lowerCAmelCase_ : Dict = encoder_layerdrop
lowerCAmelCase_ : str = decoder_layerdrop
lowerCAmelCase_ : Union[str, Any] = activation_function
lowerCAmelCase_ : Union[str, Any] = init_std
lowerCAmelCase_ : Union[str, Any] = use_cache
# Informer
lowerCAmelCase_ : Optional[int] = attention_type
lowerCAmelCase_ : Any = sampling_factor
lowerCAmelCase_ : int = distil
super().__init__(is_encoder_decoder=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
@property
def SCREAMING_SNAKE_CASE__ ( self : Any ):
return (
sum(self.embedding_dimension )
+ self.num_dynamic_real_features
+ self.num_time_features
+ self.num_static_real_features
+ self.input_size * 2 # the log1p(abs(loc)) and log(scale) features
)
| 224 | 1 |
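# --- Editorial aside: instantiating the config above ---------------------------
# Values are arbitrary; assumes a transformers version that ships Informer.
from transformers import InformerConfig, InformerModel

config = InformerConfig(prediction_length=12, context_length=24, input_size=1)
model = InformerModel(config)
print(model.config.attention_type)  # "prob" by default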
"""simple docstring"""
import unittest
from parameterized import parameterized
from transformers import AutoTokenizer, GPTNeoXConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
GPTNeoXModel,
)
class GPTNeoXModelTester:
def __init__(self , lowerCamelCase , lowerCamelCase=13 , lowerCamelCase=7 , lowerCamelCase=True , lowerCamelCase=True , lowerCamelCase=True , lowerCamelCase=True , lowerCamelCase=99 , lowerCamelCase=64 , lowerCamelCase=5 , lowerCamelCase=4 , lowerCamelCase=37 , lowerCamelCase="gelu" , lowerCamelCase=0.1 , lowerCamelCase=0.1 , lowerCamelCase=512 , lowerCamelCase=16 , lowerCamelCase=2 , lowerCamelCase=0.02 , lowerCamelCase=3 , lowerCamelCase=4 , lowerCamelCase=None , ):
'''simple docstring'''
_lowerCAmelCase = parent
_lowerCAmelCase = batch_size
_lowerCAmelCase = seq_length
_lowerCAmelCase = is_training
_lowerCAmelCase = use_input_mask
_lowerCAmelCase = use_token_type_ids
_lowerCAmelCase = use_labels
_lowerCAmelCase = vocab_size
_lowerCAmelCase = hidden_size
_lowerCAmelCase = num_hidden_layers
_lowerCAmelCase = num_attention_heads
_lowerCAmelCase = intermediate_size
_lowerCAmelCase = hidden_act
_lowerCAmelCase = hidden_dropout_prob
_lowerCAmelCase = attention_probs_dropout_prob
_lowerCAmelCase = max_position_embeddings
_lowerCAmelCase = type_vocab_size
_lowerCAmelCase = type_sequence_label_size
_lowerCAmelCase = initializer_range
_lowerCAmelCase = num_labels
_lowerCAmelCase = num_choices
_lowerCAmelCase = scope
_lowerCAmelCase = vocab_size - 1
def A__ (self ):
'''simple docstring'''
_lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_lowerCAmelCase = None
if self.use_input_mask:
_lowerCAmelCase = random_attention_mask([self.batch_size, self.seq_length] )
_lowerCAmelCase = None
if self.use_labels:
_lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_lowerCAmelCase = self.get_config()
return config, input_ids, input_mask, token_labels
def A__ (self ):
'''simple docstring'''
return GPTNeoXConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_UpperCAmelCase , initializer_range=self.initializer_range , pad_token_id=self.pad_token_id , )
def A__ (self ):
'''simple docstring'''
_lowerCAmelCase = self.prepare_config_and_inputs()
_lowerCAmelCase = True
return config, input_ids, input_mask, token_labels
def A__ (self , lowerCamelCase , lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase = GPTNeoXModel(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
_lowerCAmelCase = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase )
_lowerCAmelCase = model(_UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def A__ (self , lowerCamelCase , lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase = True
_lowerCAmelCase = GPTNeoXModel(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
_lowerCAmelCase = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def A__ (self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase = GPTNeoXForCausalLM(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
_lowerCAmelCase = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , labels=_UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def A__ (self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase = self.num_labels
_lowerCAmelCase = GPTNeoXForQuestionAnswering(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
_lowerCAmelCase = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def A__ (self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase = self.num_labels
_lowerCAmelCase = GPTNeoXForSequenceClassification(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
_lowerCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_lowerCAmelCase = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , labels=_UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def A__ (self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase = self.num_labels
_lowerCAmelCase = GPTNeoXForTokenClassification(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
_lowerCAmelCase = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , labels=_UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def A__ (self , lowerCamelCase , lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase = True
_lowerCAmelCase = GPTNeoXForCausalLM(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
# first forward pass
_lowerCAmelCase = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , use_cache=_UpperCAmelCase )
_lowerCAmelCase = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
_lowerCAmelCase = ids_tensor((self.batch_size, 3) , config.vocab_size )
_lowerCAmelCase = ids_tensor((self.batch_size, 3) , vocab_size=2 )
# append to next input_ids and
_lowerCAmelCase = torch.cat([input_ids, next_tokens] , dim=-1 )
_lowerCAmelCase = torch.cat([input_mask, next_mask] , dim=-1 )
_lowerCAmelCase = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , output_hidden_states=_UpperCAmelCase )
_lowerCAmelCase = output_from_no_past['hidden_states'][0]
_lowerCAmelCase = model(
_UpperCAmelCase , attention_mask=_UpperCAmelCase , past_key_values=_UpperCAmelCase , output_hidden_states=_UpperCAmelCase , )['hidden_states'][0]
# select random slice
_lowerCAmelCase = ids_tensor((1,) , output_from_past.shape[-1] ).item()
_lowerCAmelCase = output_from_no_past[:, -3:, random_slice_idx].detach()
_lowerCAmelCase = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(_UpperCAmelCase , _UpperCAmelCase , atol=1e-3 ) )
def A__ (self ):
'''simple docstring'''
_lowerCAmelCase = self.prepare_config_and_inputs()
_lowerCAmelCase = config_and_inputs
_lowerCAmelCase = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class GPTNeoXModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            GPTNeoXModel,
            GPTNeoXForCausalLM,
            GPTNeoXForQuestionAnswering,
            GPTNeoXForSequenceClassification,
            GPTNeoXForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (GPTNeoXForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": GPTNeoXModel,
            "question-answering": GPTNeoXForQuestionAnswering,
            "text-classification": GPTNeoXForSequenceClassification,
            "text-generation": GPTNeoXForCausalLM,
            "token-classification": GPTNeoXForTokenClassification,
            "zero-shot": GPTNeoXForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_missing_keys = False
    test_model_parallel = False
    test_head_masking = False
def A__ (self ):
'''simple docstring'''
_lowerCAmelCase = GPTNeoXModelTester(self )
_lowerCAmelCase = ConfigTester(self , config_class=_UpperCAmelCase , hidden_size=64 , num_attention_heads=8 )
def A__ (self ):
'''simple docstring'''
self.config_tester.run_common_tests()
def A__ (self ):
'''simple docstring'''
_lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
def A__ (self ):
'''simple docstring'''
_lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
def A__ (self ):
'''simple docstring'''
_lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_decoder()
_lowerCAmelCase = None
self.model_tester.create_and_check_model_as_decoder(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
def A__ (self ):
'''simple docstring'''
_lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_past_large_inputs(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
def A__ (self ):
'''simple docstring'''
_lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_causal_lm(*_UpperCAmelCase )
def A__ (self ):
'''simple docstring'''
_lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*_UpperCAmelCase )
def A__ (self ):
'''simple docstring'''
_lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*_UpperCAmelCase )
def A__ (self ):
'''simple docstring'''
_lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*_UpperCAmelCase )
@unittest.skip(reason="""Feed forward chunking is not implemented""" )
def A__ (self ):
'''simple docstring'''
pass
@parameterized.expand([("""linear""",), ("""dynamic""",)] )
def A__ (self , lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCAmelCase = ids_tensor([1, 10] , config.vocab_size )
_lowerCAmelCase = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size )
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
_lowerCAmelCase = GPTNeoXModel(_UpperCAmelCase )
original_model.to(_UpperCAmelCase )
original_model.eval()
_lowerCAmelCase = original_model(_UpperCAmelCase ).last_hidden_state
_lowerCAmelCase = original_model(_UpperCAmelCase ).last_hidden_state
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
_lowerCAmelCase = {'type': scaling_type, 'factor': 10.0}
_lowerCAmelCase = GPTNeoXModel(_UpperCAmelCase )
scaled_model.to(_UpperCAmelCase )
scaled_model.eval()
_lowerCAmelCase = scaled_model(_UpperCAmelCase ).last_hidden_state
_lowerCAmelCase = scaled_model(_UpperCAmelCase ).last_hidden_state
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
self.assertTrue(torch.allclose(_UpperCAmelCase , _UpperCAmelCase , atol=1e-5 ) )
else:
self.assertFalse(torch.allclose(_UpperCAmelCase , _UpperCAmelCase , atol=1e-5 ) )
# The output should be different for long inputs
self.assertFalse(torch.allclose(_UpperCAmelCase , _UpperCAmelCase , atol=1e-5 ) )
@require_torch
class GPTNeoXLanguageGenerationTest(unittest.TestCase):
@slow
def A__ (self ):
'''simple docstring'''
_lowerCAmelCase = AutoTokenizer.from_pretrained("""EleutherAI/pythia-410m-deduped""" )
for checkpointing in [True, False]:
_lowerCAmelCase = GPTNeoXForCausalLM.from_pretrained("""EleutherAI/pythia-410m-deduped""" )
if checkpointing:
model.gradient_checkpointing_enable()
else:
model.gradient_checkpointing_disable()
model.to(_UpperCAmelCase )
_lowerCAmelCase = tokenizer("""My favorite food is""" , return_tensors="""pt""" ).to(_UpperCAmelCase )
# The hub repo. is updated on 2023-04-04, resulting in poor outputs.
# See: https://github.com/huggingface/transformers/pull/24193
_lowerCAmelCase = 'My favorite food is a good old-fashioned, old-fashioned, old-fashioned.\n\nI\'m not sure'
_lowerCAmelCase = model.generate(**_UpperCAmelCase , do_sample=_UpperCAmelCase , max_new_tokens=20 )
_lowerCAmelCase = tokenizer.batch_decode(_UpperCAmelCase )[0]
self.assertEqual(_UpperCAmelCase , _UpperCAmelCase ) | 369 |
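# --- Editorial aside -------------------------------------------------------------
# Sketch of the "linear" RoPE scaling exercised by the parameterized test above:
# positions are divided by the scaling factor before the rotary angles are
# computed, equivalent to evaluating the unscaled embedding at pos / factor.
import torch

def rotary_angles(positions: torch.Tensor, dim: int, base: float = 10000.0, factor: float = 1.0) -> torch.Tensor:
    inv_freq = 1.0 / (base ** (torch.arange(0, dim, 2).float() / dim))
    return torch.outer(positions.float() / factor, inv_freq)

pos = torch.arange(8)
print(torch.allclose(rotary_angles(pos, 16, factor=2.0), rotary_angles(pos / 2.0, 16)))  # True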
"""simple docstring"""
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionImageVariationPipeline
from diffusers.utils.testing_utils import load_image, require_torch_gpu, slow, torch_device
SCREAMING_SNAKE_CASE : List[str] = False
class __lowerCamelCase ( unittest.TestCase ):
pass
@slow
@require_torch_gpu
class __lowerCamelCase ( unittest.TestCase ):
def A__ (self ):
'''simple docstring'''
_lowerCAmelCase = VersatileDiffusionImageVariationPipeline.from_pretrained("""shi-labs/versatile-diffusion""" )
pipe.to(lowerCamelCase )
pipe.set_progress_bar_config(disable=lowerCamelCase )
_lowerCAmelCase = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg""" )
_lowerCAmelCase = torch.manual_seed(0 )
_lowerCAmelCase = pipe(
image=lowerCamelCase , generator=lowerCamelCase , guidance_scale=7.5 , num_inference_steps=50 , output_type="""numpy""" , ).images
_lowerCAmelCase = image[0, 253:256, 253:256, -1]
assert image.shape == (1, 512, 512, 3)
_lowerCAmelCase = np.array([0.0441, 0.0469, 0.0507, 0.0575, 0.0632, 0.0650, 0.0865, 0.0909, 0.0945] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 | 317 | 0 |
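# --- Editorial aside -------------------------------------------------------------
# The slow test above pins its output slice by seeding the generator; a minimal
# illustration of why a fixed seed makes sampling reproducible:
import torch

torch.manual_seed(0)
a = torch.randn(3)
torch.manual_seed(0)
b = torch.randn(3)
print(torch.equal(a, b))  # True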
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"facebook/nllb-moe-54B": "https://huggingface.co/facebook/nllb-moe-54b/resolve/main/config.json",
}
class NllbMoeConfig(PretrainedConfig):
    """Configuration class for the NLLB-MoE (mixture-of-experts) model."""

    model_type = "nllb-moe"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}
def __init__( self: Union[str, Any] , snake_case: str=128_112 , snake_case: Optional[int]=1_024 , snake_case: str=12 , snake_case: Union[str, Any]=4_096 , snake_case: Dict=16 , snake_case: Tuple=12 , snake_case: Union[str, Any]=4_096 , snake_case: str=16 , snake_case: Dict=0.0_5 , snake_case: Any=0.0_5 , snake_case: str=True , snake_case: Any=True , snake_case: Any="relu" , snake_case: Dict=1_024 , snake_case: List[Any]=0.1 , snake_case: Union[str, Any]=0.1 , snake_case: Optional[Any]=0.0 , snake_case: str=0.0_2 , snake_case: int=2 , snake_case: List[str]=True , snake_case: str=False , snake_case: Optional[Any]="float32" , snake_case: int=False , snake_case: Optional[Any]=128 , snake_case: Any=64 , snake_case: List[Any]=4 , snake_case: str=4 , snake_case: int=0.0_0_1 , snake_case: Optional[Any]=0.0_0_1 , snake_case: List[Any]="all" , snake_case: Dict=False , snake_case: Any=False , snake_case: Dict=1.0 , snake_case: Optional[Any]=0.2 , snake_case: Any=1 , snake_case: Tuple=0 , snake_case: Any=2 , snake_case: str=False , **snake_case: Optional[Any] , ) -> int:
snake_case_ :List[Any] = vocab_size
snake_case_ :List[Any] = max_position_embeddings
snake_case_ :Any = d_model
snake_case_ :Tuple = encoder_ffn_dim
snake_case_ :Tuple = encoder_layers
snake_case_ :List[str] = encoder_attention_heads
snake_case_ :List[Any] = decoder_ffn_dim
snake_case_ :Optional[Any] = decoder_layers
snake_case_ :Optional[Any] = decoder_attention_heads
snake_case_ :Tuple = dropout
snake_case_ :List[str] = attention_dropout
snake_case_ :Union[str, Any] = activation_dropout
snake_case_ :Tuple = activation_function
snake_case_ :Optional[int] = init_std
snake_case_ :Union[str, Any] = encoder_layerdrop
snake_case_ :int = decoder_layerdrop
snake_case_ :Dict = use_cache
snake_case_ :Optional[int] = encoder_layers
snake_case_ :List[Any] = scale_embedding # scale factor will be sqrt(d_model) if True
snake_case_ :int = router_z_loss_coef
snake_case_ :Tuple = router_aux_loss_coef
snake_case_ :Tuple = decoder_sparse_step
snake_case_ :str = encoder_sparse_step
snake_case_ :Optional[Any] = num_experts
snake_case_ :Union[str, Any] = expert_capacity
snake_case_ :Dict = router_bias
if router_dtype not in ["float32", "float16", "bfloat16"]:
raise ValueError(f"""`router_dtype` must be one of 'float32', 'float16' or 'bfloat16', got {router_dtype}""" )
snake_case_ :Any = router_dtype
snake_case_ :List[str] = router_ignore_padding_tokens
snake_case_ :Optional[int] = batch_prioritized_routing
snake_case_ :Optional[int] = second_expert_policy
snake_case_ :str = normalize_router_prob_before_dropping
snake_case_ :Optional[int] = moe_eval_capacity_token_fraction
snake_case_ :Tuple = moe_token_dropout
snake_case_ :Optional[int] = output_router_logits
super().__init__(
pad_token_id=snake_case , bos_token_id=snake_case , eos_token_id=snake_case , is_encoder_decoder=snake_case , decoder_start_token_id=snake_case , **snake_case , )
| 66 |
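# --- Editorial aside: exercising the config above --------------------------------
# Assumes a transformers version that ships NLLB-MoE; values are arbitrary.
from transformers import NllbMoeConfig

config = NllbMoeConfig(num_experts=8, expert_capacity=32)
print(config.router_dtype)  # "float32"
try:
    NllbMoeConfig(router_dtype="int8")
except ValueError as err:
    print(err)  # rejected: must be float32, float16 or bfloat16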
"""simple docstring"""
import numpy as np
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
from tensorflow.keras.layers import LSTM, Dense
from tensorflow.keras.models import Sequential
if __name__ == "__main__":
    df = pd.read_csv("sample_data.csv", header=None)
    len_data = df.shape[:1][0]
    # If you're using some other dataset input the target column
    actual_data = df.iloc[:, 1:2]
    actual_data = actual_data.values.reshape(len_data, 1)
    actual_data = MinMaxScaler().fit_transform(actual_data)
    look_back = 10
    forward_days = 5
    periods = 20
    division = len_data - periods * look_back
    train_data = actual_data[:division]
    test_data = actual_data[division - look_back :]
    train_x, train_y = [], []
    test_x, test_y = [], []
    for i in range(0, len(train_data) - forward_days - look_back + 1):
        train_x.append(train_data[i : i + look_back])
        train_y.append(train_data[i + look_back : i + look_back + forward_days])
    for i in range(0, len(test_data) - forward_days - look_back + 1):
        test_x.append(test_data[i : i + look_back])
        test_y.append(test_data[i + look_back : i + look_back + forward_days])
    x_train = np.array(train_x)
    x_test = np.array(test_x)
    y_train = np.array([list(i.ravel()) for i in train_y])
    y_test = np.array([list(i.ravel()) for i in test_y])
    model = Sequential()
    model.add(LSTM(128, input_shape=(look_back, 1), return_sequences=True))
    model.add(LSTM(64, input_shape=(128, 1)))
    model.add(Dense(forward_days))
    model.compile(loss="mean_squared_error", optimizer="adam")
    history = model.fit(
        x_train, y_train, epochs=150, verbose=1, shuffle=True, batch_size=4
    )
    pred = model.predict(x_test)
| 66 | 1 |
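# --- Editorial aside -------------------------------------------------------------
# The two windowing loops above, factored into one reusable helper (an editorial
# sketch, not part of the original script):
import numpy as np

def make_windows(series: np.ndarray, look_back: int, forward_days: int):
    x, y = [], []
    for i in range(len(series) - forward_days - look_back + 1):
        x.append(series[i : i + look_back])
        y.append(series[i + look_back : i + look_back + forward_days].ravel())
    return np.array(x), np.array(y)

x, y = make_windows(np.arange(20.0).reshape(-1, 1), look_back=10, forward_days=5)
print(x.shape, y.shape)  # (6, 10, 1) (6, 5)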
'''simple docstring'''
def is_balanced(s: str) -> bool:
    stack = []
    open_brackets = set({"(", "[", "{"})
    closed_brackets = set({")", "]", "}"})
    open_to_closed = {"{": "}", "[": "]", "(": ")"}

    for i in range(len(s)):
        if s[i] in open_brackets:
            stack.append(s[i])
        elif s[i] in closed_brackets and (
            len(stack) == 0 or (len(stack) > 0 and open_to_closed[stack.pop()] != s[i])
        ):
            return False

    return len(stack) == 0


def main() -> None:
    s = input("Enter sequence of brackets: ")
    if is_balanced(s):
        print(s, "is balanced")
    else:
        print(s, "is not balanced")
if __name__ == "__main__":
main()
| 0 |
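# --- Editorial aside: quick checks for is_balanced (assumes it is in scope) -------
assert is_balanced("{[()]}") is True
assert is_balanced("{[(])}") is False
assert is_balanced("") is True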
'''simple docstring'''
from __future__ import annotations
__author__ = "Muhammad Umer Farooq"
__license__ = "MIT"
__version__ = "1.0.0"
__maintainer__ = "Muhammad Umer Farooq"
__email__ = "[email protected]"
__status__ = "Alpha"
import re
from html.parser import HTMLParser
from urllib import parse
import requests
class Parser(HTMLParser):
    def __init__(self, domain: str) -> None:
        super().__init__()
        self.urls: list[str] = []
        self.domain = domain

    def handle_starttag(self, tag: str, attrs: list[tuple[str, str | None]]) -> None:
        # Only parse the 'anchor' tag.
        if tag == "a":
            # Check the list of defined attributes.
            for name, value in attrs:
                # If href is defined, and not empty nor # print it.
                if name == "href" and value != "#" and value != "":
                    # If not already in urls.
                    if value not in self.urls:
                        url = parse.urljoin(self.domain, value)
                        self.urls.append(url)


def get_domain_name(url: str) -> str:
    return ".".join(get_sub_domain_name(url).split(".")[-2:])


def get_sub_domain_name(url: str) -> str:
    return parse.urlparse(url).netloc


def emails_from_url(url: str = "https://github.com") -> list[str]:
    domain = get_domain_name(url)

    # Initialize the parser
    parser = Parser(domain)

    try:
        # Open URL
        r = requests.get(url)

        # pass the raw HTML to the parser to get links
        parser.feed(r.text)

        # Get links and loop through
        valid_emails = set()
        for link in parser.urls:
            # open URL.
            # read = requests.get(link)
            try:
                read = requests.get(link)
                # Get the valid email.
                emails = re.findall("[a-zA-Z0-9]+@" + domain, read.text)
                # If not in list then append it.
                for email in emails:
                    valid_emails.add(email)
            except ValueError:
                pass
    except ValueError:
        raise SystemExit(1)

    # Finally return a sorted list of email addresses with no duplicates.
    return sorted(valid_emails)


if __name__ == "__main__":
    emails = emails_from_url("https://github.com")
print(F"""{len(emails)} emails found:""")
print('''\n'''.join(sorted(emails)))
| 0 | 1 |
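# --- Editorial aside ---------------------------------------------------------------
# Offline demonstration of the email pattern used above: only addresses whose
# domain matches the crawled site are collected (strings below are made up).
import re

domain = "github.com"
html = "contact support@" + domain + " or admin@example.org"
print(re.findall("[a-zA-Z0-9]+@" + domain, html))  # ['support@github.com']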
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spm_char.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "microsoft/speecht5_asr": "https://huggingface.co/microsoft/speecht5_asr/resolve/main/spm_char.model",
        "microsoft/speecht5_tts": "https://huggingface.co/microsoft/speecht5_tts/resolve/main/spm_char.model",
        "microsoft/speecht5_vc": "https://huggingface.co/microsoft/speecht5_vc/resolve/main/spm_char.model",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "microsoft/speecht5_asr": 1024,
    "microsoft/speecht5_tts": 1024,
    "microsoft/speecht5_vc": 1024,
}
class SpeechT5Tokenizer(PreTrainedTokenizer):
    """Construct a SpeechT5 tokenizer based on a character-level SentencePiece model."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
def __init__( self: List[Any] , snake_case: Optional[int] , snake_case: List[str]="<s>" , snake_case: Dict="</s>" , snake_case: str="<unk>" , snake_case: Optional[Any]="<pad>" , snake_case: Optional[Dict[str, Any]] = None , **snake_case: Tuple , ) -> Tuple:
snake_case_ :str = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=UpperCAmelCase_ , eos_token=UpperCAmelCase_ , unk_token=UpperCAmelCase_ , pad_token=UpperCAmelCase_ , sp_model_kwargs=self.sp_model_kwargs , **UpperCAmelCase_ , )
snake_case_ :Union[str, Any] = vocab_file
snake_case_ :Dict = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(UpperCAmelCase_ )
@property
def lowerCAmelCase_ ( self: int ) -> List[str]:
return self.sp_model.get_piece_size()
def lowerCAmelCase_ ( self: Dict ) -> Tuple:
snake_case_ :Any = {self.convert_ids_to_tokens(UpperCAmelCase_ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self: Any ) -> int:
snake_case_ :Optional[int] = self.__dict__.copy()
snake_case_ :str = None
return state
def __setstate__( self: Union[str, Any] , snake_case: Tuple ) -> Optional[int]:
snake_case_ :List[Any] = d
# for backward compatibility
if not hasattr(self , """sp_model_kwargs""" ):
snake_case_ :Optional[int] = {}
snake_case_ :Optional[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def lowerCAmelCase_ ( self: Optional[int] , snake_case: str ) -> List[Any]:
return self.sp_model.encode(UpperCAmelCase_ , out_type=UpperCAmelCase_ )
def lowerCAmelCase_ ( self: Optional[int] , snake_case: str ) -> Any:
return self.sp_model.piece_to_id(UpperCAmelCase_ )
def lowerCAmelCase_ ( self: List[Any] , snake_case: Tuple ) -> str:
snake_case_ :Union[str, Any] = self.sp_model.IdToPiece(UpperCAmelCase_ )
return token
def lowerCAmelCase_ ( self: Any , snake_case: Dict ) -> int:
snake_case_ :List[str] = []
snake_case_ :Dict = """"""
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
out_string += self.sp_model.decode(UpperCAmelCase_ ) + token
snake_case_ :str = []
else:
current_sub_tokens.append(UpperCAmelCase_ )
out_string += self.sp_model.decode(UpperCAmelCase_ )
return out_string.strip()
def lowerCAmelCase_ ( self: Any , snake_case: Optional[Any] , snake_case: List[str]=None ) -> str:
if token_ids_a is None:
return token_ids_a + [self.eos_token_id]
# We don't expect to process pairs, but leave the pair logic for API consistency
return token_ids_a + token_ids_a + [self.eos_token_id]
def lowerCAmelCase_ ( self: int , snake_case: List[int] , snake_case: Optional[List[int]] = None , snake_case: bool = False ) -> List[str]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=UpperCAmelCase_ , token_ids_a=UpperCAmelCase_ , already_has_special_tokens=UpperCAmelCase_ )
snake_case_ :Union[str, Any] = [1]
if token_ids_a is None:
return ([0] * len(UpperCAmelCase_ )) + suffix_ones
return ([0] * len(UpperCAmelCase_ )) + ([0] * len(UpperCAmelCase_ )) + suffix_ones
def lowerCAmelCase_ ( self: List[Any] , snake_case: str , snake_case: Optional[str] = None ) -> str:
if not os.path.isdir(UpperCAmelCase_ ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
snake_case_ :Any = os.path.join(
UpperCAmelCase_ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCAmelCase_ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , UpperCAmelCase_ )
elif not os.path.isfile(self.vocab_file ):
with open(UpperCAmelCase_ , """wb""" ) as fi:
snake_case_ :Optional[Any] = self.sp_model.serialized_model_proto()
fi.write(UpperCAmelCase_ )
return (out_vocab_file,)
| 66 |
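# --- Editorial aside: round trip with the tokenizer above ---------------------------
# Illustrative; downloads the public checkpoint, so network access and an
# installed sentencepiece package are assumed.
from transformers import SpeechT5Tokenizer

tok = SpeechT5Tokenizer.from_pretrained("microsoft/speecht5_tts")
ids = tok("Hello world").input_ids
print(tok.decode(ids))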
import unittest
import numpy as np
from transformers import DistilBertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.distilbert.modeling_flax_distilbert import (
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertModel,
)
class FlaxDistilBertModelTester(unittest.TestCase):
def __init__( self: Optional[Any] , UpperCAmelCase_: Optional[int] , UpperCAmelCase_: List[Any]=13 , UpperCAmelCase_: List[str]=7 , UpperCAmelCase_: Tuple=True , UpperCAmelCase_: List[Any]=True , UpperCAmelCase_: List[str]=True , UpperCAmelCase_: Optional[Any]=True , UpperCAmelCase_: str=99 , UpperCAmelCase_: List[Any]=32 , UpperCAmelCase_: Dict=5 , UpperCAmelCase_: Tuple=4 , UpperCAmelCase_: Optional[Any]=37 , UpperCAmelCase_: Optional[int]="gelu" , UpperCAmelCase_: Optional[Any]=0.1 , UpperCAmelCase_: List[Any]=0.1 , UpperCAmelCase_: List[Any]=512 , UpperCAmelCase_: Any=16 , UpperCAmelCase_: Dict=2 , UpperCAmelCase_: Union[str, Any]=0.02 , UpperCAmelCase_: Union[str, Any]=4 , ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = parent
_SCREAMING_SNAKE_CASE = batch_size
_SCREAMING_SNAKE_CASE = seq_length
_SCREAMING_SNAKE_CASE = is_training
_SCREAMING_SNAKE_CASE = use_attention_mask
_SCREAMING_SNAKE_CASE = use_token_type_ids
_SCREAMING_SNAKE_CASE = use_labels
_SCREAMING_SNAKE_CASE = vocab_size
_SCREAMING_SNAKE_CASE = hidden_size
_SCREAMING_SNAKE_CASE = num_hidden_layers
_SCREAMING_SNAKE_CASE = num_attention_heads
_SCREAMING_SNAKE_CASE = intermediate_size
_SCREAMING_SNAKE_CASE = hidden_act
_SCREAMING_SNAKE_CASE = hidden_dropout_prob
_SCREAMING_SNAKE_CASE = attention_probs_dropout_prob
_SCREAMING_SNAKE_CASE = max_position_embeddings
_SCREAMING_SNAKE_CASE = type_vocab_size
_SCREAMING_SNAKE_CASE = type_sequence_label_size
_SCREAMING_SNAKE_CASE = initializer_range
_SCREAMING_SNAKE_CASE = num_choices
def UpperCamelCase ( self: Tuple ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_SCREAMING_SNAKE_CASE = None
if self.use_attention_mask:
_SCREAMING_SNAKE_CASE = random_attention_mask([self.batch_size, self.seq_length] )
_SCREAMING_SNAKE_CASE = DistilBertConfig(
vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , tie_weights_=UpperCAmelCase_ , )
return config, input_ids, attention_mask
def UpperCamelCase ( self: Tuple ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = self.prepare_config_and_inputs()
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = config_and_inputs
_SCREAMING_SNAKE_CASE = {"""input_ids""": input_ids, """attention_mask""": attention_mask}
return config, inputs_dict
@require_flax
class FlaxDistilBertModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (
(
FlaxDistilBertModel,
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertForQuestionAnswering,
)
if is_flax_available()
else ()
)
def UpperCamelCase ( self: Dict ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = FlaxDistilBertModelTester(self )
@slow
def UpperCamelCase ( self: List[str] ):
'''simple docstring'''
for model_class_name in self.all_model_classes:
_SCREAMING_SNAKE_CASE = model_class_name.from_pretrained("""distilbert-base-uncased""" )
_SCREAMING_SNAKE_CASE = model(np.ones((1, 1) ) )
self.assertIsNotNone(UpperCAmelCase_ )
@require_flax
class __UpperCAmelCase (unittest.TestCase ):
@slow
def UpperCamelCase ( self: Union[str, Any] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = FlaxDistilBertModel.from_pretrained("""distilbert-base-uncased""" )
_SCREAMING_SNAKE_CASE = np.array([[0, 345, 232, 328, 740, 140, 1_695, 69, 6_078, 1_588, 2]] )
_SCREAMING_SNAKE_CASE = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
_SCREAMING_SNAKE_CASE = model(UpperCAmelCase_ , attention_mask=UpperCAmelCase_ )[0]
_SCREAMING_SNAKE_CASE = (1, 11, 768)
self.assertEqual(output.shape , UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = np.array([[[-0.16_39, 0.32_99, 0.16_48], [-0.17_46, 0.32_89, 0.17_10], [-0.18_84, 0.33_57, 0.18_10]]] )
self.assertTrue(jnp.allclose(output[:, 1:4, 1:4] , UpperCAmelCase_ , atol=1E-4 ) )
| 306 | 0 |
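# --- Editorial aside ----------------------------------------------------------------
# Standalone version of the slow inference check above (network access assumed):
import numpy as np
from transformers import FlaxDistilBertModel

model = FlaxDistilBertModel.from_pretrained("distilbert-base-uncased")
input_ids = np.array([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
output = model(input_ids, attention_mask=np.ones_like(input_ids))[0]
print(output.shape)  # (1, 11, 768)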
from string import ascii_uppercase
dicta = {char: i for i, char in enumerate(ascii_uppercase)}
dictb = dict(enumerate(ascii_uppercase))


def generate_key(message: str, key: str) -> str:
    """Cycle the key until it is as long as the message."""
    x = len(key)
    i = 0
    while True:
        if x == i:
            i = 0
        if len(key) == len(message):
            break
        key += key[i]
        i += 1
    return key


def cipher_text(message: str, key_new: str) -> str:
    """Encrypt: subtract the key letter from each message letter (mod 26)."""
    cipher = ""
    i = 0
    for letter in message:
        if letter == " ":
            cipher += " "
        else:
            x = (dicta[letter] - dicta[key_new[i]]) % 26
            i += 1
            cipher += dictb[x]
    return cipher


def original_text(cipher: str, key_new: str) -> str:
    """Decrypt: add the key letter back to each cipher letter (mod 26)."""
    or_txt = ""
    i = 0
    for letter in cipher:
        if letter == " ":
            or_txt += " "
        else:
            x = (dicta[letter] + dicta[key_new[i]] + 26) % 26
            i += 1
            or_txt += dictb[x]
    return or_txt


def main() -> None:
    message = "THE GERMAN ATTACK"
    key = "SECRET"
    key_new = generate_key(message, key)
    s = cipher_text(message, key_new)
    print(f"Encrypted Text = {s}")
    print(f"Original Text = {original_text(s, key_new)}")
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 353 |
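# --- Editorial aside: round-trip check for the cipher above --------------------------
# Assumes generate_key, cipher_text and original_text are in scope.
msg, key = "DEFEND THE EAST WALL", "FORTIFICATION"
k = generate_key(msg, key)
assert original_text(cipher_text(msg, k), k) == msg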
import argparse
import os
import torch
from diffusers import (
CMStochasticIterativeScheduler,
ConsistencyModelPipeline,
UNetaDModel,
)
_SCREAMING_SNAKE_CASE : str = {
'''sample_size''': 32,
'''in_channels''': 3,
'''out_channels''': 3,
'''layers_per_block''': 2,
'''num_class_embeds''': 1000,
'''block_out_channels''': [32, 64],
'''attention_head_dim''': 8,
'''down_block_types''': [
'''ResnetDownsampleBlock2D''',
'''AttnDownBlock2D''',
],
'''up_block_types''': [
'''AttnUpBlock2D''',
'''ResnetUpsampleBlock2D''',
],
'''resnet_time_scale_shift''': '''scale_shift''',
'''upsample_type''': '''resnet''',
'''downsample_type''': '''resnet''',
}
IMAGENET_64_UNET_CONFIG = {
'''sample_size''': 64,
'''in_channels''': 3,
'''out_channels''': 3,
'''layers_per_block''': 3,
'''num_class_embeds''': 1000,
'''block_out_channels''': [192, 192 * 2, 192 * 3, 192 * 4],
'''attention_head_dim''': 64,
'''down_block_types''': [
'''ResnetDownsampleBlock2D''',
'''AttnDownBlock2D''',
'''AttnDownBlock2D''',
'''AttnDownBlock2D''',
],
'''up_block_types''': [
'''AttnUpBlock2D''',
'''AttnUpBlock2D''',
'''AttnUpBlock2D''',
'''ResnetUpsampleBlock2D''',
],
'''resnet_time_scale_shift''': '''scale_shift''',
'''upsample_type''': '''resnet''',
'''downsample_type''': '''resnet''',
}
LSUN_256_UNET_CONFIG = {
'''sample_size''': 256,
'''in_channels''': 3,
'''out_channels''': 3,
'''layers_per_block''': 2,
'''num_class_embeds''': None,
'''block_out_channels''': [256, 256, 256 * 2, 256 * 2, 256 * 4, 256 * 4],
'''attention_head_dim''': 64,
'''down_block_types''': [
'''ResnetDownsampleBlock2D''',
'''ResnetDownsampleBlock2D''',
'''ResnetDownsampleBlock2D''',
'''AttnDownBlock2D''',
'''AttnDownBlock2D''',
'''AttnDownBlock2D''',
],
'''up_block_types''': [
'''AttnUpBlock2D''',
'''AttnUpBlock2D''',
'''AttnUpBlock2D''',
'''ResnetUpsampleBlock2D''',
'''ResnetUpsampleBlock2D''',
'''ResnetUpsampleBlock2D''',
],
'''resnet_time_scale_shift''': '''default''',
'''upsample_type''': '''resnet''',
'''downsample_type''': '''resnet''',
}
CD_SCHEDULER_CONFIG = {
'''num_train_timesteps''': 40,
'''sigma_min''': 0.0_0_2,
'''sigma_max''': 8_0.0,
}
CT_IMAGENET_64_SCHEDULER_CONFIG = {
'''num_train_timesteps''': 201,
'''sigma_min''': 0.0_0_2,
'''sigma_max''': 8_0.0,
}
CT_LSUN_256_SCHEDULER_CONFIG = {
'''num_train_timesteps''': 151,
'''sigma_min''': 0.0_0_2,
'''sigma_max''': 8_0.0,
}
def strabool(v):
    """Parse a truthy/falsy CLI string into a bool."""
    if isinstance(v, bool):
        return v
    if v.lower() in ("yes", "true", "t", "y", "1"):
        return True
    elif v.lower() in ("no", "false", "f", "n", "0"):
        return False
    else:
        raise argparse.ArgumentTypeError("boolean value expected")
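
# Illustrative behaviour: strabool("yes") -> True, strabool("0") -> False, and
# strabool("maybe") raises argparse.ArgumentTypeError; this makes it usable as
# an argparse `type=` callable for boolean flags passed as strings.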
def convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=False):
    # map one resnet block from the original layout to the diffusers layout
    new_checkpoint[f"{new_prefix}.norm1.weight"] = checkpoint[f"{old_prefix}.in_layers.0.weight"]
    new_checkpoint[f"{new_prefix}.norm1.bias"] = checkpoint[f"{old_prefix}.in_layers.0.bias"]
    new_checkpoint[f"{new_prefix}.conv1.weight"] = checkpoint[f"{old_prefix}.in_layers.2.weight"]
    new_checkpoint[f"{new_prefix}.conv1.bias"] = checkpoint[f"{old_prefix}.in_layers.2.bias"]
    new_checkpoint[f"{new_prefix}.time_emb_proj.weight"] = checkpoint[f"{old_prefix}.emb_layers.1.weight"]
    new_checkpoint[f"{new_prefix}.time_emb_proj.bias"] = checkpoint[f"{old_prefix}.emb_layers.1.bias"]
    new_checkpoint[f"{new_prefix}.norm2.weight"] = checkpoint[f"{old_prefix}.out_layers.0.weight"]
    new_checkpoint[f"{new_prefix}.norm2.bias"] = checkpoint[f"{old_prefix}.out_layers.0.bias"]
    new_checkpoint[f"{new_prefix}.conv2.weight"] = checkpoint[f"{old_prefix}.out_layers.3.weight"]
    new_checkpoint[f"{new_prefix}.conv2.bias"] = checkpoint[f"{old_prefix}.out_layers.3.bias"]
    if has_skip:
        new_checkpoint[f"{new_prefix}.conv_shortcut.weight"] = checkpoint[f"{old_prefix}.skip_connection.weight"]
        new_checkpoint[f"{new_prefix}.conv_shortcut.bias"] = checkpoint[f"{old_prefix}.skip_connection.bias"]
    return new_checkpoint
def convert_attention(checkpoint, new_checkpoint, old_prefix, new_prefix, attention_dim=None):
    # split the fused qkv projection and map it to the diffusers attention layout
    weight_q, weight_k, weight_v = checkpoint[f"{old_prefix}.qkv.weight"].chunk(3, dim=0)
    bias_q, bias_k, bias_v = checkpoint[f"{old_prefix}.qkv.bias"].chunk(3, dim=0)

    new_checkpoint[f"{new_prefix}.group_norm.weight"] = checkpoint[f"{old_prefix}.norm.weight"]
    new_checkpoint[f"{new_prefix}.group_norm.bias"] = checkpoint[f"{old_prefix}.norm.bias"]

    new_checkpoint[f"{new_prefix}.to_q.weight"] = weight_q.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_q.bias"] = bias_q.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_k.weight"] = weight_k.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_k.bias"] = bias_k.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_v.weight"] = weight_v.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_v.bias"] = bias_v.squeeze(-1).squeeze(-1)

    new_checkpoint[f"{new_prefix}.to_out.0.weight"] = (
        checkpoint[f"{old_prefix}.proj_out.weight"].squeeze(-1).squeeze(-1)
    )
    new_checkpoint[f"{new_prefix}.to_out.0.bias"] = checkpoint[f"{old_prefix}.proj_out.bias"].squeeze(-1).squeeze(-1)
    return new_checkpoint
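
# Note on the reshapes above: the original checkpoint stores q/k/v as one fused
# 1x1-conv weight, so chunk(3, dim=0) splits it into the three projections and
# squeeze(-1).squeeze(-1) drops the trailing spatial dims so the tensors fit
# linear attention projections.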
def con_pt_to_diffuser(checkpoint_path, unet_config):
    checkpoint = torch.load(checkpoint_path, map_location="cpu")
    new_checkpoint = {}

    new_checkpoint["time_embedding.linear_1.weight"] = checkpoint["time_embed.0.weight"]
    new_checkpoint["time_embedding.linear_1.bias"] = checkpoint["time_embed.0.bias"]
    new_checkpoint["time_embedding.linear_2.weight"] = checkpoint["time_embed.2.weight"]
    new_checkpoint["time_embedding.linear_2.bias"] = checkpoint["time_embed.2.bias"]

    if unet_config["num_class_embeds"] is not None:
        new_checkpoint["class_embedding.weight"] = checkpoint["label_emb.weight"]

    new_checkpoint["conv_in.weight"] = checkpoint["input_blocks.0.0.weight"]
    new_checkpoint["conv_in.bias"] = checkpoint["input_blocks.0.0.bias"]

    down_block_types = unet_config["down_block_types"]
    layers_per_block = unet_config["layers_per_block"]
    attention_head_dim = unet_config["attention_head_dim"]
    channels_list = unet_config["block_out_channels"]
    current_layer = 1
    prev_channels = channels_list[0]

    for i, layer_type in enumerate(down_block_types):
        current_channels = channels_list[i]
        downsample_block_has_skip = current_channels != prev_channels
        if layer_type == "ResnetDownsampleBlock2D":
            for j in range(layers_per_block):
                new_prefix = f"down_blocks.{i}.resnets.{j}"
                old_prefix = f"input_blocks.{current_layer}.0"
                has_skip = True if j == 0 and downsample_block_has_skip else False
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=has_skip)
                current_layer += 1
        elif layer_type == "AttnDownBlock2D":
            for j in range(layers_per_block):
                new_prefix = f"down_blocks.{i}.resnets.{j}"
                old_prefix = f"input_blocks.{current_layer}.0"
                has_skip = True if j == 0 and downsample_block_has_skip else False
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=has_skip)
                new_prefix = f"down_blocks.{i}.attentions.{j}"
                old_prefix = f"input_blocks.{current_layer}.1"
                new_checkpoint = convert_attention(
                    checkpoint, new_checkpoint, old_prefix, new_prefix, attention_head_dim
                )
                current_layer += 1

        if i != len(down_block_types) - 1:
            new_prefix = f"down_blocks.{i}.downsamplers.0"
            old_prefix = f"input_blocks.{current_layer}.0"
            new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)
            current_layer += 1

        prev_channels = current_channels

    # hardcoded the mid-block for now
    new_prefix = "mid_block.resnets.0"
    old_prefix = "middle_block.0"
    new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)
    new_prefix = "mid_block.attentions.0"
    old_prefix = "middle_block.1"
    new_checkpoint = convert_attention(checkpoint, new_checkpoint, old_prefix, new_prefix, attention_head_dim)
    new_prefix = "mid_block.resnets.1"
    old_prefix = "middle_block.2"
    new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)

    current_layer = 0
    up_block_types = unet_config["up_block_types"]

    for i, layer_type in enumerate(up_block_types):
        if layer_type == "ResnetUpsampleBlock2D":
            for j in range(layers_per_block + 1):
                new_prefix = f"up_blocks.{i}.resnets.{j}"
                old_prefix = f"output_blocks.{current_layer}.0"
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=True)
                current_layer += 1
            if i != len(up_block_types) - 1:
                new_prefix = f"up_blocks.{i}.upsamplers.0"
                old_prefix = f"output_blocks.{current_layer-1}.1"
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)
        elif layer_type == "AttnUpBlock2D":
            for j in range(layers_per_block + 1):
                new_prefix = f"up_blocks.{i}.resnets.{j}"
                old_prefix = f"output_blocks.{current_layer}.0"
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=True)
                new_prefix = f"up_blocks.{i}.attentions.{j}"
                old_prefix = f"output_blocks.{current_layer}.1"
                new_checkpoint = convert_attention(
                    checkpoint, new_checkpoint, old_prefix, new_prefix, attention_head_dim
                )
                current_layer += 1
            if i != len(up_block_types) - 1:
                new_prefix = f"up_blocks.{i}.upsamplers.0"
                old_prefix = f"output_blocks.{current_layer-1}.2"
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)

    new_checkpoint["conv_norm_out.weight"] = checkpoint["out.0.weight"]
    new_checkpoint["conv_norm_out.bias"] = checkpoint["out.0.bias"]
    new_checkpoint["conv_out.weight"] = checkpoint["out.2.weight"]
    new_checkpoint["conv_out.bias"] = checkpoint["out.2.bias"]
    return new_checkpoint
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE : Optional[Any] = argparse.ArgumentParser()
parser.add_argument('''--unet_path''', default=None, type=str, required=True, help='''Path to the unet.pt to convert.''')
parser.add_argument(
'''--dump_path''', default=None, type=str, required=True, help='''Path to output the converted UNet model.'''
)
parser.add_argument('''--class_cond''', default=True, type=str, help='''Whether the model is class-conditional.''')
_SCREAMING_SNAKE_CASE : Tuple = parser.parse_args()
_SCREAMING_SNAKE_CASE : List[str] = strabool(args.class_cond)
_SCREAMING_SNAKE_CASE : int = os.path.basename(args.unet_path)
print(F"Checkpoint: {ckpt_name}")
# Get U-Net config
if "imagenet64" in ckpt_name:
_SCREAMING_SNAKE_CASE : Optional[Any] = IMAGENET_64_UNET_CONFIG
elif "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
_SCREAMING_SNAKE_CASE : int = LSUN_256_UNET_CONFIG
elif "test" in ckpt_name:
_SCREAMING_SNAKE_CASE : Union[str, Any] = TEST_UNET_CONFIG
else:
raise ValueError(F"Checkpoint type {ckpt_name} is not currently supported.")
if not args.class_cond:
_SCREAMING_SNAKE_CASE : Union[str, Any] = None
_SCREAMING_SNAKE_CASE : int = con_pt_to_diffuser(args.unet_path, unet_config)
_SCREAMING_SNAKE_CASE : Optional[int] = UNetaDModel(**unet_config)
image_unet.load_state_dict(converted_unet_ckpt)
# Get scheduler config
if "cd" in ckpt_name or "test" in ckpt_name:
_SCREAMING_SNAKE_CASE : Optional[Any] = CD_SCHEDULER_CONFIG
elif "ct" in ckpt_name and "imagenet64" in ckpt_name:
_SCREAMING_SNAKE_CASE : Any = CT_IMAGENET_64_SCHEDULER_CONFIG
elif "ct" in ckpt_name and "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
_SCREAMING_SNAKE_CASE : int = CT_LSUN_256_SCHEDULER_CONFIG
else:
raise ValueError(F"Checkpoint type {ckpt_name} is not currently supported.")
_SCREAMING_SNAKE_CASE : int = CMStochasticIterativeScheduler(**scheduler_config)
_SCREAMING_SNAKE_CASE : str = ConsistencyModelPipeline(unet=image_unet, scheduler=cm_scheduler)
consistency_model.save_pretrained(args.dump_path)
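
    # Example invocation (hypothetical file names):
    #     python convert_consistency_to_diffusers.py \
    #         --unet_path cd_imagenet64_l2.pt --dump_path ./cd_imagenet64_l2 --class_cond True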
| 218 | 0 |
def gray_code(bit_count: int) -> list:
    # bit count represents no. of bits in the gray code
    if bit_count < 0:
        raise ValueError("The given input must be positive")

    # get the generated string sequence
    sequence = gray_code_sequence_string(bit_count)

    # convert them to integers
    for i in range(len(sequence)):
        sequence[i] = int(sequence[i], 2)

    return sequence


def gray_code_sequence_string(bit_count: int) -> list:
    # The approach is a recursive one
    # Base case achieved when either n = 0 or n = 1
    if bit_count == 0:
        return ["0"]

    if bit_count == 1:
        return ["0", "1"]

    seq_len = 1 << bit_count  # defines the length of the sequence
    # 1 << n is equivalent to 2^n

    # recursive answer will generate answer for n-1 bits
    smaller_sequence = gray_code_sequence_string(bit_count - 1)

    sequence = []

    # append 0 to first half of the smaller sequence generated
    for i in range(seq_len // 2):
        generated_no = "0" + smaller_sequence[i]
        sequence.append(generated_no)

    # append 1 to second half ... start from the end of the list
    for i in reversed(range(seq_len // 2)):
        generated_no = "1" + smaller_sequence[i]
        sequence.append(generated_no)

    return sequence
if __name__ == "__main__":
import doctest
doctest.testmod()
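
    # Example: gray_code(2) -> [0, 1, 3, 2]
    # (binary 00, 01, 11, 10; consecutive codes differ in exactly one bit).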
| 151 |
import importlib
import shutil
import threading
import warnings
from typing import List
import fsspec
import fsspec.asyn
from . import compression
from .hffilesystem import HfFileSystem
_has_s3fs = importlib.util.find_spec("s3fs") is not None

if _has_s3fs:
    from .s3filesystem import S3FileSystem  # noqa: F401

COMPRESSION_FILESYSTEMS: List[compression.BaseCompressedFileFileSystem] = [
    compression.Bz2FileSystem,
    compression.GzipFileSystem,
    compression.Lz4FileSystem,
    compression.XzFileSystem,
    compression.ZstdFileSystem,
]

# Register custom filesystems
for fs_class in COMPRESSION_FILESYSTEMS + [HfFileSystem]:
    if fs_class.protocol in fsspec.registry and fsspec.registry[fs_class.protocol] is not fs_class:
        warnings.warn(f"A filesystem protocol was already set for {fs_class.protocol} and will be overwritten.")
    fsspec.register_implementation(fs_class.protocol, fs_class, clobber=True)


def extract_path_from_uri(dataset_path: str) -> str:
    if "://" in dataset_path:
        dataset_path = dataset_path.split("://")[1]
    return dataset_path


def is_remote_filesystem(fs: fsspec.AbstractFileSystem) -> bool:
    if fs is not None and fs.protocol != "file":
        return True
    else:
        return False


def rename(fs: fsspec.AbstractFileSystem, src: str, dst: str):
    is_local = not is_remote_filesystem(fs)
    if is_local:
        # LocalFileSystem.mv does copy + rm, it is more efficient to simply move a local directory
        shutil.move(fs._strip_protocol(src), fs._strip_protocol(dst))
    else:
        fs.mv(src, dst, recursive=True)


def _reset_fsspec_lock() -> None:
    if hasattr(fsspec.asyn, "reset_lock"):
        # for future fsspec>2022.05.0
        fsspec.asyn.reset_lock()
    else:
        fsspec.asyn.iothread[0] = None
        fsspec.asyn.loop[0] = None
        fsspec.asyn.lock = threading.Lock()
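
# Example (illustrative): extract_path_from_uri("s3://bucket/dir/train.csv")
# returns "bucket/dir/train.csv", while a plain local path is returned unchanged.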
| 284 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
if is_sentencepiece_available():
from ..ta.tokenization_ta import TaTokenizer
else:
from ...utils.dummy_sentencepiece_objects import TaTokenizer
MTaTokenizer = TaTokenizer
if is_tokenizers_available():
from ..ta.tokenization_ta_fast import TaTokenizerFast
else:
from ...utils.dummy_tokenizers_objects import TaTokenizerFast
MTaTokenizerFast = TaTokenizerFast
_import_structure = {"configuration_mt5": ["MT5Config", "MT5OnnxConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mt5"] = [
        "MT5EncoderModel",
        "MT5ForConditionalGeneration",
        "MT5ForQuestionAnswering",
        "MT5Model",
        "MT5PreTrainedModel",
        "MT5Stack",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_mt5"] = ["TFMT5EncoderModel", "TFMT5ForConditionalGeneration", "TFMT5Model"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_mt5"] = ["FlaxMT5EncoderModel", "FlaxMT5ForConditionalGeneration", "FlaxMT5Model"]
if TYPE_CHECKING:
from .configuration_mta import MTaConfig, MTaOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mta import (
MTaEncoderModel,
MTaForConditionalGeneration,
MTaForQuestionAnswering,
MTaModel,
MTaPreTrainedModel,
MTaStack,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mta import TFMTaEncoderModel, TFMTaForConditionalGeneration, TFMTaModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_mta import FlaxMTaEncoderModel, FlaxMTaForConditionalGeneration, FlaxMTaModel
else:
import sys
    sys.modules[__name__] = _LazyModule(
__name__,
globals()['__file__'],
_import_structure,
extra_objects={'MT5Tokenizer': MTaTokenizer, 'MT5TokenizerFast': MTaTokenizerFast},
module_spec=__spec__,
)
| 362 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase__ : List[Any] = logging.get_logger(__name__)
lowercase__ : Tuple = {
'microsoft/trocr-base-handwritten': (
'https://huggingface.co/microsoft/trocr-base-handwritten/resolve/main/config.json'
),
# See all TrOCR models at https://huggingface.co/models?filter=trocr
}
class TrOCRConfig(PretrainedConfig):
    model_type = "trocr"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "num_attention_heads": "decoder_attention_heads",
        "hidden_size": "d_model",
        "num_hidden_layers": "decoder_layers",
    }

    def __init__(
        self,
        vocab_size=50265,
        d_model=1024,
        decoder_layers=12,
        decoder_attention_heads=16,
        decoder_ffn_dim=4096,
        activation_function="gelu",
        max_position_embeddings=512,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        decoder_start_token_id=2,
        init_std=0.02,
        decoder_layerdrop=0.0,
        use_cache=True,
        scale_embedding=False,
        use_learned_position_embeddings=True,
        layernorm_embedding=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.activation_function = activation_function
        self.max_position_embeddings = max_position_embeddings
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.init_std = init_std
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.scale_embedding = scale_embedding
        self.use_learned_position_embeddings = use_learned_position_embeddings
        self.layernorm_embedding = layernorm_embedding

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
| 287 | 0 |
from __future__ import annotations
import unittest
from transformers import is_tf_available, is_torch_available
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, SMALL_MODEL_IDENTIFIER, is_pt_tf_cross_test, slow
if is_tf_available():
from transformers import (
AutoConfig,
BertConfig,
GPTaConfig,
TaConfig,
TFAutoModel,
TFAutoModelForCausalLM,
TFAutoModelForMaskedLM,
TFAutoModelForPreTraining,
TFAutoModelForQuestionAnswering,
TFAutoModelForSeqaSeqLM,
TFAutoModelForSequenceClassification,
TFAutoModelWithLMHead,
TFBertForMaskedLM,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertModel,
TFGPTaLMHeadModel,
TFRobertaForMaskedLM,
TFTaForConditionalGeneration,
)
from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.gpta.modeling_tf_gpta import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.ta.modeling_tf_ta import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST
if is_torch_available():
from transformers import (
AutoModel,
AutoModelForCausalLM,
AutoModelForMaskedLM,
AutoModelForPreTraining,
AutoModelForQuestionAnswering,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoModelWithLMHead,
BertForMaskedLM,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
BertModel,
GPTaLMHeadModel,
RobertaForMaskedLM,
TaForConditionalGeneration,
)
@is_pt_tf_cross_test
class TFPTAutoModelTest(unittest.TestCase):
    @slow
    def test_model_from_pretrained(self):
        for model_name in ["bert-base-uncased"]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModel.from_pretrained(model_name, from_pt=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertModel)

            model = AutoModel.from_pretrained(model_name, from_tf=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, BertModel)

    @slow
    def test_model_for_pretraining_from_pretrained(self):
        for model_name in ["bert-base-uncased"]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModelForPreTraining.from_pretrained(model_name, from_pt=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForPreTraining)

            model = AutoModelForPreTraining.from_pretrained(model_name, from_tf=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, BertForPreTraining)

    @slow
    def test_model_for_causal_lm(self):
        for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, GPTaConfig)

            model = TFAutoModelForCausalLM.from_pretrained(model_name, from_pt=True)
            model, loading_info = TFAutoModelForCausalLM.from_pretrained(
                model_name, output_loading_info=True, from_pt=True
            )
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFGPTaLMHeadModel)

            model = AutoModelForCausalLM.from_pretrained(model_name, from_tf=True)
            model, loading_info = AutoModelForCausalLM.from_pretrained(
                model_name, output_loading_info=True, from_tf=True
            )
            self.assertIsNotNone(model)
            self.assertIsInstance(model, GPTaLMHeadModel)

    @slow
    def test_lmhead_model_from_pretrained(self):
        for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModelWithLMHead.from_pretrained(model_name, from_pt=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForMaskedLM)

            model = AutoModelWithLMHead.from_pretrained(model_name, from_tf=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, BertForMaskedLM)

    @slow
    def test_model_for_masked_lm(self):
        for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModelForMaskedLM.from_pretrained(model_name, from_pt=True)
            model, loading_info = TFAutoModelForMaskedLM.from_pretrained(
                model_name, output_loading_info=True, from_pt=True
            )
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForMaskedLM)

            model = AutoModelForMaskedLM.from_pretrained(model_name, from_tf=True)
            model, loading_info = AutoModelForMaskedLM.from_pretrained(
                model_name, output_loading_info=True, from_tf=True
            )
            self.assertIsNotNone(model)
            self.assertIsInstance(model, BertForMaskedLM)

    @slow
    def test_model_for_encoder_decoder_lm(self):
        for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, TaConfig)

            model = TFAutoModelForSeqaSeqLM.from_pretrained(model_name, from_pt=True)
            model, loading_info = TFAutoModelForSeqaSeqLM.from_pretrained(
                model_name, output_loading_info=True, from_pt=True
            )
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFTaForConditionalGeneration)

            model = AutoModelForSeqaSeqLM.from_pretrained(model_name, from_tf=True)
            model, loading_info = AutoModelForSeqaSeqLM.from_pretrained(
                model_name, output_loading_info=True, from_tf=True
            )
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TaForConditionalGeneration)

    @slow
    def test_sequence_classification_model_from_pretrained(self):
        for model_name in ["bert-base-uncased"]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModelForSequenceClassification.from_pretrained(model_name, from_pt=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForSequenceClassification)

            model = AutoModelForSequenceClassification.from_pretrained(model_name, from_tf=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, BertForSequenceClassification)

    @slow
    def test_question_answering_model_from_pretrained(self):
        for model_name in ["bert-base-uncased"]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModelForQuestionAnswering.from_pretrained(model_name, from_pt=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForQuestionAnswering)

            model = AutoModelForQuestionAnswering.from_pretrained(model_name, from_tf=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, BertForQuestionAnswering)

    def test_from_pretrained_identifier(self):
        model = TFAutoModelWithLMHead.from_pretrained(SMALL_MODEL_IDENTIFIER, from_pt=True)
        self.assertIsInstance(model, TFBertForMaskedLM)
        self.assertEqual(model.num_parameters(), 1_4410)
        self.assertEqual(model.num_parameters(only_trainable=True), 1_4410)

        model = AutoModelWithLMHead.from_pretrained(SMALL_MODEL_IDENTIFIER, from_tf=True)
        self.assertIsInstance(model, BertForMaskedLM)
        self.assertEqual(model.num_parameters(), 1_4410)
        self.assertEqual(model.num_parameters(only_trainable=True), 1_4410)

    def test_from_identifier_from_model_type(self):
        model = TFAutoModelWithLMHead.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, from_pt=True)
        self.assertIsInstance(model, TFRobertaForMaskedLM)
        self.assertEqual(model.num_parameters(), 1_4410)
        self.assertEqual(model.num_parameters(only_trainable=True), 1_4410)

        model = AutoModelWithLMHead.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, from_tf=True)
        self.assertIsInstance(model, RobertaForMaskedLM)
        self.assertEqual(model.num_parameters(), 1_4410)
        self.assertEqual(model.num_parameters(only_trainable=True), 1_4410)
| 99 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
    "configuration_wav2vec2": ["WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP", "Wav2Vec2Config"],
    "feature_extraction_wav2vec2": ["Wav2Vec2FeatureExtractor"],
    "processing_wav2vec2": ["Wav2Vec2Processor"],
    "tokenization_wav2vec2": ["Wav2Vec2CTCTokenizer", "Wav2Vec2Tokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_wav2vec2"] = [
"""WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""Wav2Vec2ForAudioFrameClassification""",
"""Wav2Vec2ForCTC""",
"""Wav2Vec2ForMaskedLM""",
"""Wav2Vec2ForPreTraining""",
"""Wav2Vec2ForSequenceClassification""",
"""Wav2Vec2ForXVector""",
"""Wav2Vec2Model""",
"""Wav2Vec2PreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_wav2vec2"] = [
"""TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFWav2Vec2ForCTC""",
"""TFWav2Vec2Model""",
"""TFWav2Vec2PreTrainedModel""",
"""TFWav2Vec2ForSequenceClassification""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_wav2vec2"] = [
"""FlaxWav2Vec2ForCTC""",
"""FlaxWav2Vec2ForPreTraining""",
"""FlaxWav2Vec2Model""",
"""FlaxWav2Vec2PreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_wavaveca import WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP, WavaVecaConfig
from .feature_extraction_wavaveca import WavaVecaFeatureExtractor
from .processing_wavaveca import WavaVecaProcessor
from .tokenization_wavaveca import WavaVecaCTCTokenizer, WavaVecaTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_wavaveca import (
WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
WavaVecaForAudioFrameClassification,
WavaVecaForCTC,
WavaVecaForMaskedLM,
WavaVecaForPreTraining,
WavaVecaForSequenceClassification,
WavaVecaForXVector,
WavaVecaModel,
WavaVecaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_wavaveca import (
TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
TFWavaVecaForCTC,
TFWavaVecaForSequenceClassification,
TFWavaVecaModel,
TFWavaVecaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_flax_wavaveca import (
FlaxWavaVecaForCTC,
FlaxWavaVecaForPreTraining,
FlaxWavaVecaModel,
FlaxWavaVecaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 317 | 0 |
'''simple docstring'''
import fire
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoTokenizer
from utils import SeqaSeqDataset, pickle_save
def save_len_file(
    tokenizer_name, data_dir, max_source_length=1_024, max_target_length=1_024, consider_target=False, **kwargs
):
    """Save max(src_len, tgt_len) for each example to allow dynamic batching."""
    tok = AutoTokenizer.from_pretrained(tokenizer_name)
    train_ds = SeqaSeqDataset(tok, data_dir, max_source_length, max_target_length, type_path="train", **kwargs)
    pad = tok.pad_token_id

    def get_lens(ds):
        dl = tqdm(
            DataLoader(ds, batch_size=512, num_workers=8, shuffle=False, collate_fn=ds.collate_fn),
            desc=str(ds.len_file),
        )
        max_lens = []
        for batch in dl:
            src_lens = batch["input_ids"].ne(pad).sum(1).tolist()
            tgt_lens = batch["labels"].ne(pad).sum(1).tolist()
            if consider_target:
                for src, tgt in zip(src_lens, tgt_lens):
                    max_lens.append(max(src, tgt))
            else:
                max_lens.extend(src_lens)
        return max_lens

    train_lens = get_lens(train_ds)
    val_ds = SeqaSeqDataset(tok, data_dir, max_source_length, max_target_length, type_path="val", **kwargs)
    val_lens = get_lens(val_ds)
    pickle_save(train_lens, train_ds.len_file)
    pickle_save(val_lens, val_ds.len_file)
if __name__ == "__main__":
fire.Fire(save_len_file)
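
    # Example invocation via python-fire (hypothetical paths):
    #     python save_len_file.py --tokenizer_name t5-small --data_dir ./cnn_dm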
| 322 |
'''simple docstring'''
import fire
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoTokenizer
from utils import SeqaSeqDataset, pickle_save
def save_len_file(
    tokenizer_name, data_dir, max_source_length=1_024, max_target_length=1_024, consider_target=False, **kwargs
):
    """Save max(src_len, tgt_len) for each example to allow dynamic batching."""
    tok = AutoTokenizer.from_pretrained(tokenizer_name)
    train_ds = SeqaSeqDataset(tok, data_dir, max_source_length, max_target_length, type_path="train", **kwargs)
    pad = tok.pad_token_id

    def get_lens(ds):
        dl = tqdm(
            DataLoader(ds, batch_size=512, num_workers=8, shuffle=False, collate_fn=ds.collate_fn),
            desc=str(ds.len_file),
        )
        max_lens = []
        for batch in dl:
            src_lens = batch["input_ids"].ne(pad).sum(1).tolist()
            tgt_lens = batch["labels"].ne(pad).sum(1).tolist()
            if consider_target:
                for src, tgt in zip(src_lens, tgt_lens):
                    max_lens.append(max(src, tgt))
            else:
                max_lens.extend(src_lens)
        return max_lens

    train_lens = get_lens(train_ds)
    val_ds = SeqaSeqDataset(tok, data_dir, max_source_length, max_target_length, type_path="val", **kwargs)
    val_lens = get_lens(val_ds)
    pickle_save(train_lens, train_ds.len_file)
    pickle_save(val_lens, val_ds.len_file)
if __name__ == "__main__":
fire.Fire(save_len_file)
| 322 | 1 |
def is_balanced(s: str) -> bool:
    stack = []
    open_brackets = set({"(", "[", "{"})
    closed_brackets = set({")", "]", "}"})
    open_to_closed = {"{": "}", "[": "]", "(": ")"}

    for i in range(len(s)):
        if s[i] in open_brackets:
            stack.append(s[i])
        elif s[i] in closed_brackets and (
            len(stack) == 0 or (len(stack) > 0 and open_to_closed[stack.pop()] != s[i])
        ):
            return False

    return len(stack) == 0


def main() -> None:
    s = input("Enter sequence of brackets: ")
    if is_balanced(s):
        print(s, "is balanced")
    else:
        print(s, "is not balanced")
if __name__ == "__main__":
main()
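
# Examples: is_balanced("{[()]}") -> True, is_balanced("[(])") -> False.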
| 0 |
from __future__ import annotations
UpperCAmelCase__ = "Muhammad Umer Farooq"
UpperCAmelCase__ = "MIT"
UpperCAmelCase__ = "1.0.0"
UpperCAmelCase__ = "Muhammad Umer Farooq"
UpperCAmelCase__ = "[email protected]"
UpperCAmelCase__ = "Alpha"
import re
from html.parser import HTMLParser
from urllib import parse
import requests
class Parser(HTMLParser):
    def __init__(self, domain: str) -> None:
        super().__init__()
        self.urls: list[str] = []
        self.domain = domain

    def handle_starttag(self, tag: str, attrs: list[tuple[str, str | None]]) -> None:
        # Only parse the 'anchor' tag.
        if tag == "a":
            # Check the list of defined attributes.
            for name, value in attrs:
                # If href is defined, and not empty nor # print it.
                if name == "href" and value != "#" and value != "":
                    # If not already in urls.
                    if value not in self.urls:
                        url = parse.urljoin(self.domain, value)
                        self.urls.append(url)


# Get main domain name (example.com)
def get_domain_name(url: str) -> str:
    return ".".join(get_sub_domain_name(url).split(".")[-2:])


# Get sub domain name (sub.example.com)
def get_sub_domain_name(url: str) -> str:
    return parse.urlparse(url).netloc


def emails_from_url(url: str = "https://github.com") -> list[str]:
    # Get the base domain from the url
    domain = get_domain_name(url)

    # Initialize the parser
    parser = Parser(domain)

    try:
        # Open URL
        r = requests.get(url)

        # pass the raw HTML to the parser to get links
        parser.feed(r.text)

        # Get links and loop through
        valid_emails = set()
        for link in parser.urls:
            # open URL.
            try:
                read = requests.get(link)
                # Get the valid email.
                emails = re.findall("[a-zA-Z0-9]+@" + domain, read.text)
                # If not in list then append it.
                for email in emails:
                    valid_emails.add(email)
            except ValueError:
                pass
    except ValueError:
        raise SystemExit(1)

    # Finally return a sorted list of email addresses with no duplicates.
    return sorted(valid_emails)


if __name__ == "__main__":
    emails = emails_from_url("https://github.com")
    print(f"{len(emails)} emails found:")
    print("\n".join(sorted(emails)))
| 0 | 1 |
import inspect
import unittest
from transformers import ViTMSNConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMSNForImageClassification, ViTMSNModel
from transformers.models.vit_msn.modeling_vit_msn import VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class ViTMSNModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope

        # in ViT MSN, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return ViTMSNConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = ViTMSNModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = ViTMSNForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        print(f"Pixel and labels shape: {pixel_values.shape}, {labels.shape}")
        print(f"Labels: {labels}")
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = ViTMSNForImageClassification(config)
        model.to(torch_device)
        model.eval()

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class ViTMSNModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (ViTMSNModel, ViTMSNForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": ViTMSNModel, "image-classification": ViTMSNForImageClassification}
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = ViTMSNModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTMSNConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="ViTMSN does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ViTMSNModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class ViTMSNModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return ViTImageProcessor.from_pretrained("facebook/vit-msn-small") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        torch.manual_seed(2)
        model = ViTMSNForImageClassification.from_pretrained("facebook/vit-msn-small").to(torch_device)
        image_processor = self.default_image_processor

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-0.0803, -0.4454, -0.2375]).to(torch_device)

        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
| 361 |
from __future__ import annotations
import random
import unittest
from transformers import TransfoXLConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFTransfoXLForSequenceClassification,
TFTransfoXLLMHeadModel,
TFTransfoXLModel,
)
class TFTransfoXLModelTester:
    def __init__(
        self,
        parent,
    ):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.mem_len = 30
        self.key_length = self.seq_length + self.mem_len
        self.clamp_len = 15
        self.is_training = True
        self.use_labels = True
        self.vocab_size = 99
        self.cutoffs = [10, 50, 80]
        self.hidden_size = 32
        self.d_embed = 32
        self.num_attention_heads = 4
        self.d_head = 8
        self.d_inner = 128
        self.div_val = 2
        self.num_hidden_layers = 2
        self.scope = None
        self.seed = 1
        self.eos_token_id = 0
        self.num_labels = 3
        self.pad_token_id = self.vocab_size - 1
        self.init_range = 0.01
def __UpperCAmelCase ( self : Dict) -> Optional[int]:
"""simple docstring"""
_UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
_UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
_UpperCamelCase = None
if self.use_labels:
_UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
_UpperCamelCase = TransfoXLConfig(
vocab_size=self.vocab_size , mem_len=self.mem_len , clamp_len=self.clamp_len , cutoffs=self.cutoffs , d_model=self.hidden_size , d_embed=self.d_embed , n_head=self.num_attention_heads , d_head=self.d_head , d_inner=self.d_inner , div_val=self.div_val , n_layer=self.num_hidden_layers , eos_token_id=self.eos_token_id , pad_token_id=self.vocab_size - 1 , init_range=self.init_range , num_labels=self.num_labels , )
return (config, input_ids_a, input_ids_a, lm_labels)
def __UpperCAmelCase ( self : Union[str, Any]) -> Tuple:
"""simple docstring"""
random.seed(self.seed)
tf.random.set_seed(self.seed)
def __UpperCAmelCase ( self : int , lowercase_ : Optional[int] , lowercase_ : Tuple , lowercase_ : Optional[Any] , lowercase_ : Optional[Any]) -> Union[str, Any]:
"""simple docstring"""
_UpperCamelCase = TFTransfoXLModel(lowercase_)
_UpperCamelCase , _UpperCamelCase = model(lowercase_).to_tuple()
_UpperCamelCase = {"input_ids": input_ids_a, "mems": mems_a}
_UpperCamelCase , _UpperCamelCase = model(lowercase_).to_tuple()
self.parent.assertEqual(hidden_states_a.shape , (self.batch_size, self.seq_length, self.hidden_size))
self.parent.assertEqual(hidden_states_a.shape , (self.batch_size, self.seq_length, self.hidden_size))
self.parent.assertListEqual(
[mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
self.parent.assertListEqual(
[mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
def __UpperCAmelCase ( self : Dict , lowercase_ : str , lowercase_ : str , lowercase_ : Dict , lowercase_ : List[Any]) -> Union[str, Any]:
"""simple docstring"""
_UpperCamelCase = TFTransfoXLLMHeadModel(lowercase_)
_UpperCamelCase , _UpperCamelCase = model(lowercase_).to_tuple()
_UpperCamelCase = {"input_ids": input_ids_a, "labels": lm_labels}
_UpperCamelCase , _UpperCamelCase = model(lowercase_).to_tuple()
_UpperCamelCase , _UpperCamelCase = model([input_ids_a, mems_a]).to_tuple()
_UpperCamelCase = {"input_ids": input_ids_a, "mems": mems_a, "labels": lm_labels}
_UpperCamelCase , _UpperCamelCase = model(lowercase_).to_tuple()
self.parent.assertEqual(lm_logits_a.shape , (self.batch_size, self.seq_length, self.vocab_size))
self.parent.assertListEqual(
[mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
self.parent.assertEqual(lm_logits_a.shape , (self.batch_size, self.seq_length, self.vocab_size))
self.parent.assertListEqual(
[mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
def __UpperCAmelCase ( self : Optional[Any] , lowercase_ : List[Any] , lowercase_ : List[Any] , lowercase_ : Optional[Any] , lowercase_ : Dict) -> str:
"""simple docstring"""
_UpperCamelCase = TFTransfoXLForSequenceClassification(lowercase_)
_UpperCamelCase = model(lowercase_)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels))
def __UpperCAmelCase ( self : Dict) -> List[Any]:
"""simple docstring"""
_UpperCamelCase = self.prepare_config_and_inputs()
((_UpperCamelCase) , (_UpperCamelCase) , (_UpperCamelCase) , (_UpperCamelCase)) = config_and_inputs
_UpperCamelCase = {"input_ids": input_ids_a}
return config, inputs_dict
@require_tf
class TFTransfoXLModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (TFTransfoXLModel, TFTransfoXLLMHeadModel, TFTransfoXLForSequenceClassification) if is_tf_available() else ()
    )
    all_generative_model_classes = () if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": TFTransfoXLModel,
            "text-classification": TFTransfoXLForSequenceClassification,
            "text-generation": TFTransfoXLLMHeadModel,
            "zero-shot": TFTransfoXLForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    # TODO: add this test when TFTransfoXLLMHead has a linear output layer implemented
    test_resize_embeddings = False
    test_head_masking = False
    test_onnx = False
    test_mismatched_shapes = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name == "TextGenerationPipelineTests":
            # Get `ValueError: AttributeError: 'NoneType' object has no attribute 'new_ones'` or `AssertionError`.
            # `TransfoXLConfig` was never used in pipeline tests: cannot create a simple
            # tokenizer.
            return True

        return False

    def setUp(self):
        self.model_tester = TFTransfoXLModelTester(self)
        self.config_tester = ConfigTester(self, config_class=TransfoXLConfig, d_embed=37)
def __UpperCAmelCase ( self : Dict) -> Optional[int]:
"""simple docstring"""
self.config_tester.run_common_tests()
def __UpperCAmelCase ( self : Union[str, Any]) -> List[str]:
"""simple docstring"""
self.model_tester.set_seed()
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_transfo_xl_model(*lowercase_)
def __UpperCAmelCase ( self : Optional[Any]) -> List[Any]:
"""simple docstring"""
self.model_tester.set_seed()
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_transfo_xl_lm_head(*lowercase_)
def __UpperCAmelCase ( self : List[str]) -> List[Any]:
"""simple docstring"""
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_transfo_xl_for_sequence_classification(*lowercase_)
def __UpperCAmelCase ( self : Dict) -> int:
"""simple docstring"""
_UpperCamelCase , _UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
_UpperCamelCase = [TFTransfoXLForSequenceClassification]
for model_class in self.all_model_classes:
_UpperCamelCase = model_class(lowercase_)
assert isinstance(model.get_input_embeddings() , tf.keras.layers.Layer)
if model_class in list_other_models_with_output_ebd:
_UpperCamelCase = model.get_output_embeddings()
assert isinstance(lowercase_ , tf.keras.layers.Layer)
_UpperCamelCase = model.get_bias()
assert name is None
else:
_UpperCamelCase = model.get_output_embeddings()
assert x is None
_UpperCamelCase = model.get_bias()
assert name is None
def __UpperCAmelCase ( self : Optional[int]) -> Any:
"""simple docstring"""
pass
    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFTransfoXLModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip(reason="This model doesn't play well with fit() due to not returning a single loss.")
    def test_dataset_conversion(self):
        pass


@require_tf
class TFTransfoXLModelLanguageGenerationTest(unittest.TestCase):
    @unittest.skip("Skip test until #12651 is resolved.")
    @slow
    def test_lm_generate_transfo_xl_wt103(self):
        model = TFTransfoXLLMHeadModel.from_pretrained("transfo-xl-wt103")
# fmt: off
        input_ids = tf.convert_to_tensor([[33,1297,2,1,1009,4,1109,11739,4762,358,5,25,245,22,1706,17,20098,5,3215,21,37,1110,3,13,1041,4,24,603,490,2,71477,20098,104447,2,20961,1,2604,4,1,329,3,6224,831,16002,2,8,603,78967,29546,23,803,20,25,416,5,8,232,4,277,6,1855,4601,3,29546,54,8,3609,5,57211,49,4,1,277,18,8,1755,15691,3,341,25,416,693,42573,71,17,401,94,31,17919,2,29546,7873,18,1,435,23,11011,755,5,5167,3,7983,98,84,2,29546,3267,8,3609,4,1,4865,1075,2,6087,71,6,346,8,5854,3,29546,824,1400,1868,2,19,160,2,311,8,5496,2,20920,17,25,15097,3,24,24,0]], dtype=tf.int32)  # noqa: E231
# fmt: on
# In 1991 , the remains of Russian Tsar Nicholas II and his family
# ( except for Alexei and Maria ) are discovered .
# The voice of Nicholas's young son , Tsarevich Alexei Nikolaevich , narrates the
# remainder of the story . 1883 Western Siberia ,
# a young Grigori Rasputin is asked by his father and a group of men to perform magic .
# Rasputin has a vision and denounces one of the men as a horse thief . Although his
# father initially slaps him for making such an accusation , Rasputin watches as the
# man is chased outside and beaten . Twenty years later , Rasputin sees a vision of
# the Virgin Mary , prompting him to become a priest . Rasputin quickly becomes famous ,
# with people , even a bishop , begging for his blessing . <eod> </s> <eos>
# fmt: off
        expected_output_ids = [33,1297,2,1,1009,4,1109,11739,4762,358,5,25,245,22,1706,17,20098,5,3215,21,37,1110,3,13,1041,4,24,603,490,2,71477,20098,104447,2,20961,1,2604,4,1,329,3,6224,831,16002,2,8,603,78967,29546,23,803,20,25,416,5,8,232,4,277,6,1855,4601,3,29546,54,8,3609,5,57211,49,4,1,277,18,8,1755,15691,3,341,25,416,693,42573,71,17,401,94,31,17919,2,29546,7873,18,1,435,23,11011,755,5,5167,3,7983,98,84,2,29546,3267,8,3609,4,1,4865,1075,2,6087,71,6,346,8,5854,3,29546,824,1400,1868,2,19,160,2,311,8,5496,2,20920,17,25,15097,3,24,24,0,33,1,1857,2,1,1009,4,1109,11739,4762,358,5,25,245,28,1110,3,13,1041,4,24,603,490,2,71477,20098,104447,2,20961,1,2604,4,1,329,3,0]  # noqa: E231
# fmt: on
# In 1991, the remains of Russian Tsar Nicholas II and his family (
# except for Alexei and Maria ) are discovered. The voice of young son,
# Tsarevich Alexei Nikolaevich, narrates the remainder of the story.
# 1883 Western Siberia, a young Grigori Rasputin is asked by his father
# and a group of men to perform magic. Rasputin has a vision and
# denounces one of the men as a horse thief. Although his father initially
# slaps him for making such an accusation, Rasputin watches as the man
# is chased outside and beaten. Twenty years later, Rasputin sees a vision
# of the Virgin Mary, prompting him to become a priest.
# Rasputin quickly becomes famous, with people, even a bishop, begging for
# his blessing. <unk> <unk> <eos> In the 1990s, the remains of Russian Tsar
# Nicholas II and his family were discovered. The voice of <unk> young son,
# Tsarevich Alexei Nikolaevich, narrates the remainder of the story.<eos>
        output_ids = model.generate(input_ids, max_length=200, do_sample=False)
        self.assertListEqual(output_ids[0].numpy().tolist(), expected_output_ids)
| 63 | 0 |
import os
import shutil
from pathlib import Path
from typing import Optional, Union
import numpy as np
from huggingface_hub import hf_hub_download
from ..utils import ONNX_EXTERNAL_WEIGHTS_NAME, ONNX_WEIGHTS_NAME, is_onnx_available, logging
if is_onnx_available():
import onnxruntime as ort
lowercase : Dict = logging.get_logger(__name__)
lowercase : Union[str, Any] = {
"""tensor(bool)""": np.bool_,
"""tensor(int8)""": np.inta,
"""tensor(uint8)""": np.uinta,
"""tensor(int16)""": np.intaa,
"""tensor(uint16)""": np.uintaa,
"""tensor(int32)""": np.intaa,
"""tensor(uint32)""": np.uintaa,
"""tensor(int64)""": np.intaa,
"""tensor(uint64)""": np.uintaa,
"""tensor(float16)""": np.floataa,
"""tensor(float)""": np.floataa,
"""tensor(double)""": np.floataa,
}
class __snake_case :
def __init__( self ,snake_case=None ,**snake_case ):
'''simple docstring'''
logger.info("""`diffusers.OnnxRuntimeModel` is experimental and might change in the future.""" )
lowercase : Any = model
lowercase : Optional[Any] = kwargs.get("""model_save_dir""" ,snake_case )
lowercase : str = kwargs.get("""latest_model_name""" ,snake_case )
def __call__( self ,**snake_case ):
'''simple docstring'''
lowercase : Tuple = {k: np.array(snake_case ) for k, v in kwargs.items()}
return self.model.run(snake_case ,snake_case )
@staticmethod
def _SCREAMING_SNAKE_CASE ( snake_case ,snake_case=None ,snake_case=None ):
'''simple docstring'''
if provider is None:
logger.info("""No onnxruntime provider specified, using CPUExecutionProvider""" )
lowercase : Optional[Any] = """CPUExecutionProvider"""
return ort.InferenceSession(snake_case ,providers=[provider] ,sess_options=snake_case )
    def _save_pretrained(self, save_directory: Union[str, Path], file_name: Optional[str] = None, **kwargs):
        model_file_name = file_name if file_name is not None else ONNX_WEIGHTS_NAME
        src_path = self.model_save_dir.joinpath(self.latest_model_name)
        dst_path = Path(save_directory).joinpath(model_file_name)
        try:
            shutil.copyfile(src_path, dst_path)
        except shutil.SameFileError:
            pass

        # copy external weights (for models >2GB)
        src_path = self.model_save_dir.joinpath(ONNX_EXTERNAL_WEIGHTS_NAME)
        if src_path.exists():
            dst_path = Path(save_directory).joinpath(ONNX_EXTERNAL_WEIGHTS_NAME)
            try:
                shutil.copyfile(src_path, dst_path)
            except shutil.SameFileError:
                pass
    def save_pretrained(self, save_directory: Union[str, os.PathLike], **kwargs):
        if os.path.isfile(save_directory):
            logger.error(f"Provided path ({save_directory}) should be a directory, not a file")
            return

        os.makedirs(save_directory, exist_ok=True)
        # saving model weights/files
        self._save_pretrained(save_directory, **kwargs)
    @classmethod
    def _from_pretrained(
        cls,
        model_id: Union[str, Path],
        use_auth_token: Optional[Union[bool, str]] = None,
        revision: Optional[str] = None,
        force_download: bool = False,
        cache_dir: Optional[str] = None,
        file_name: Optional[str] = None,
        provider: Optional[str] = None,
        sess_options: Optional["ort.SessionOptions"] = None,
        **kwargs,
    ):
        model_file_name = file_name if file_name is not None else ONNX_WEIGHTS_NAME
        # load model from local directory
        if os.path.isdir(model_id):
            model = OnnxRuntimeModel.load_model(
                os.path.join(model_id, model_file_name), provider=provider, sess_options=sess_options
            )
            kwargs["model_save_dir"] = Path(model_id)
        # load model from hub
        else:
            # download model
            model_cache_path = hf_hub_download(
                repo_id=model_id,
                filename=model_file_name,
                use_auth_token=use_auth_token,
                revision=revision,
                cache_dir=cache_dir,
                force_download=force_download,
            )
            kwargs["model_save_dir"] = Path(model_cache_path).parent
            kwargs["latest_model_name"] = Path(model_cache_path).name
            model = OnnxRuntimeModel.load_model(model_cache_path, provider=provider, sess_options=sess_options)
        return cls(model=model, **kwargs)
    @classmethod
    def from_pretrained(
        cls,
        model_id: Union[str, Path],
        force_download: bool = True,
        use_auth_token: Optional[str] = None,
        cache_dir: Optional[str] = None,
        **model_kwargs,
    ):
        revision = None
        if len(str(model_id).split("@")) == 2:
            model_id, revision = model_id.split("@")
        return cls._from_pretrained(
            model_id=model_id,
            revision=revision,
            cache_dir=cache_dir,
            force_download=force_download,
            use_auth_token=use_auth_token,
            **model_kwargs,
        )
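# --- Usage sketch (not part of the original module) ---
# A hedged example of the load/run round trip. "./my_onnx_model" is a hypothetical
# local directory containing a model.onnx file, and "input_ids" is a placeholder
# input name; real input names depend on how the graph was exported.
def _demo_onnx_roundtrip():
    model = OnnxRuntimeModel.from_pretrained("./my_onnx_model", provider="CPUExecutionProvider")
    return model(input_ids=np.ones((1, 8), dtype=np.int64))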
| 20 |
import warnings
from ...utils import logging
from .image_processing_clip import CLIPImageProcessor
logger = logging.get_logger(__name__)


class CLIPFeatureExtractor(CLIPImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class CLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use CLIPImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
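# --- Usage note (not part of the original module) ---
# Instantiating the shim forwards all arguments to CLIPImageProcessor and emits a
# FutureWarning; the sketch below captures that warning to show the migration signal.
def _demo_deprecation_warning():
    with warnings.catch_warnings(record=True) as caught:
        warnings.simplefilter("always")
        CLIPFeatureExtractor()
    return [str(w.message) for w in caught]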
| 218 | 0 |
'''simple docstring'''
import argparse
import pickle
import numpy as np
import torch
from torch import nn
from transformers import ReformerConfig, ReformerModelWithLMHead
from transformers.utils import logging
logging.set_verbosity_info()
def set_param(torch_layer, weight, bias=None):
    # set parameter of one layer
    assert torch_layer.weight.shape == weight.shape, f"{torch_layer} layer.weight does not match"
    torch_layer.weight = nn.Parameter(weight)
    if bias is not None:
        assert torch_layer.bias.shape == bias.shape, f"{torch_layer} layer.bias does not match"
        torch_layer.bias = nn.Parameter(bias)
def set_layer_weights_in_torch_lsh(weights, torch_layer, hidden_size):
    # LSH attention shares query and key, so there are only three weight groups
    np_query_key = np.asarray(weights[0])
    np_value = np.asarray(weights[1])
    np_dense = np.asarray(weights[2])

    set_param(
        torch_layer.self_attention.query_key,
        torch.tensor(np_query_key).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.self_attention.value,
        torch.tensor(np_value).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.output.dense,
        torch.tensor(np_dense).view(-1, hidden_size).contiguous().transpose(0, 1),
    )
def set_layer_weights_in_torch_local(weights, torch_layer, hidden_size):
    # local attention keeps separate query, key and value projections
    np_query = np.asarray(weights[0])
    np_key = np.asarray(weights[1])
    np_value = np.asarray(weights[2])
    np_dense = np.asarray(weights[3])

    set_param(
        torch_layer.self_attention.query,
        torch.tensor(np_query).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.self_attention.key,
        torch.tensor(np_key).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.self_attention.value,
        torch.tensor(np_value).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.output.dense,
        torch.tensor(np_dense).view(-1, hidden_size).contiguous().transpose(0, 1),
    )
def set_block_weights_in_torch(weights, torch_block, hidden_size):
    # layernorm 1
    layer_norm_1 = weights[0][0][0]
    layer_norm_1_weight = np.asarray(layer_norm_1[0])
    layer_norm_1_bias = np.asarray(layer_norm_1[1])
    set_param(
        torch_block.attention.layer_norm, torch.tensor(layer_norm_1_weight), torch.tensor(layer_norm_1_bias)
    )

    # lsh weights + output
    attn_weights = weights[0][1]
    if len(attn_weights) < 4:
        set_layer_weights_in_torch_lsh(attn_weights, torch_block.attention, hidden_size)
    else:
        set_layer_weights_in_torch_local(attn_weights, torch_block.attention, hidden_size)

    # intermediate weighs
    intermediate_weights = weights[2][0][1][2]

    # Chunked Feed Forward
    if len(intermediate_weights) == 4:
        intermediate_weights = intermediate_weights[2]

    # layernorm 2
    layer_norm_2_weight = np.asarray(intermediate_weights[0][0])
    layer_norm_2_bias = np.asarray(intermediate_weights[0][1])
    set_param(
        torch_block.feed_forward.layer_norm, torch.tensor(layer_norm_2_weight), torch.tensor(layer_norm_2_bias)
    )

    # intermediate dense
    inter_dense_weight = np.asarray(intermediate_weights[1][0])
    inter_dense_bias = np.asarray(intermediate_weights[1][1])
    set_param(
        torch_block.feed_forward.dense.dense,
        torch.tensor(inter_dense_weight).transpose(0, 1).contiguous(),
        torch.tensor(inter_dense_bias),
    )

    # intermediate out
    out_dense_weight = np.asarray(intermediate_weights[4][0])
    out_dense_bias = np.asarray(intermediate_weights[4][1])
    set_param(
        torch_block.feed_forward.output.dense,
        torch.tensor(out_dense_weight).transpose(0, 1).contiguous(),
        torch.tensor(out_dense_bias),
    )
def set_model_weights_in_torch(weights, torch_model, hidden_size):
    torch_model_reformer = torch_model.reformer

    # word embeds
    word_embeddings = np.asarray(weights[1])
    set_param(torch_model_reformer.embeddings.word_embeddings, torch.tensor(word_embeddings))

    if isinstance(weights[3], tuple):
        position_embeddings = torch_model_reformer.embeddings.position_embeddings
        for emb_idx in range(len(position_embeddings.weights)):
            emb_weights = np.asarray(weights[3][emb_idx][0])
            assert (
                position_embeddings.weights[emb_idx].shape == emb_weights.shape
            ), f"{position_embeddings[emb_idx]} emb does not match"
            position_embeddings.weights[emb_idx] = nn.Parameter(torch.tensor(emb_weights))

    trax_layer_weights = weights[5]
    assert len(torch_model_reformer.encoder.layers) * 4 == len(
        trax_layer_weights
    ), "HF and trax model do not have the same number of layers"
    for layer_idx, layer in enumerate(torch_model_reformer.encoder.layers):
        block_weights = trax_layer_weights[4 * layer_idx : 4 * (layer_idx + 1)]
        set_block_weights_in_torch(block_weights, layer, hidden_size)

    # output layer norm
    layer_norm_out_weight = np.asarray(weights[7][0])
    layer_norm_out_bias = np.asarray(weights[7][1])
    set_param(
        torch_model_reformer.encoder.layer_norm,
        torch.tensor(layer_norm_out_weight),
        torch.tensor(layer_norm_out_bias),
    )

    # output embeddings
    output_embed_weights = np.asarray(weights[9][0])
    output_embed_bias = np.asarray(weights[9][1])
    set_param(
        torch_model.lm_head.decoder,
        torch.tensor(output_embed_weights).transpose(0, 1).contiguous(),
        torch.tensor(output_embed_bias),
    )
def convert_trax_checkpoint_to_pytorch(trax_model_pkl_path, config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = ReformerConfig.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = ReformerModelWithLMHead(config)

    with open(trax_model_pkl_path, "rb") as f:
        model_weights = pickle.load(f)["weights"]

    set_model_weights_in_torch(model_weights, model, config.hidden_size)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--trax_model_pkl_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--config_file',
default=None,
type=str,
required=True,
help=(
'The config json file corresponding to the pre-trained Reformer model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
    args = parser.parse_args()
convert_trax_checkpoint_to_pytorch(args.trax_model_pkl_path, args.config_file, args.pytorch_dump_path)
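# --- Usage sketch (not part of the original script) ---
# The same conversion invoked directly from Python instead of the CLI; all three
# paths below are hypothetical placeholders.
def _demo_conversion() -> None:
    convert_trax_checkpoint_to_pytorch(
        trax_model_pkl_path="./reformer_trax.pkl",
        config_file="./reformer_config.json",
        pytorch_dump_path="./pytorch_model.bin",
    )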
| 355 |
def nor_gate(input_1: int, input_2: int) -> int:
    """Return 1 only when both inputs are 0 (logical NOR).

    >>> nor_gate(0, 0)
    1
    >>> nor_gate(0, 1)
    0
    >>> nor_gate(1, 1)
    0
    """
    return int(input_1 == input_2 == 0)


def main() -> None:
    print("Truth Table of NOR Gate:")
    print("| Input 1 | Input 2 | Output |")
    print(f"|    0    |    0    |   {nor_gate(0, 0)}    |")
    print(f"|    0    |    1    |   {nor_gate(0, 1)}    |")
    print(f"|    1    |    0    |   {nor_gate(1, 0)}    |")
    print(f"|    1    |    1    |   {nor_gate(1, 1)}    |")
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
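# --- Quick equivalence check (not part of the original file) ---
# nor_gate should agree with the textbook definition NOT (a OR b) on all four
# input pairs.
def _check_nor_equivalence() -> None:
    for a in (0, 1):
        for b in (0, 1):
            assert nor_gate(a, b) == int(not (a or b))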
| 61 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

LILT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"SCUT-DLVCLab/lilt-roberta-en-base": (
"https://huggingface.co/SCUT-DLVCLab/lilt-roberta-en-base/resolve/main/config.json"
),
}
class LiltConfig(PretrainedConfig):
    model_type = "lilt"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        classifier_dropout=None,
        channel_shrink_ratio=4,
        max_2d_position_embeddings=1024,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.classifier_dropout = classifier_dropout
        self.channel_shrink_ratio = channel_shrink_ratio
        self.max_2d_position_embeddings = max_2d_position_embeddings
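# --- Usage sketch (not part of the original module) ---
# Building a smaller hypothetical variant; anything not passed keeps the defaults
# above. LiLT derives its layout-stream width by dividing hidden_size by
# channel_shrink_ratio, so the two should divide evenly.
def _demo_small_config() -> LiltConfig:
    config = LiltConfig(hidden_size=384, num_hidden_layers=6, num_attention_heads=6)
    assert config.hidden_size % config.channel_shrink_ratio == 0  # 384 // 4 == 96
    return config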
| 93 |
from __future__ import annotations
import collections
import pprint
from pathlib import Path
def signature(word: str) -> str:
    # anagrams share the same sorted-character signature
    return "".join(sorted(word))


def anagram(my_word: str) -> list[str]:
    # every word in the word list with the same signature
    return word_by_signature[signature(my_word)]
data = Path(__file__).parent.joinpath("words.txt").read_text(encoding="utf-8")
word_list = sorted({word.strip().lower() for word in data.splitlines()})
word_by_signature = collections.defaultdict(list)
for word in word_list:
word_by_signature[signature(word)].append(word)
if __name__ == "__main__":
    all_anagrams = {word: anagram(word) for word in word_list if len(anagram(word)) > 1}
with open("""anagrams.txt""", """w""") as file:
file.write("""all_anagrams = \n """)
file.write(pprint.pformat(all_anagrams))
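# --- Self-contained example (not part of the original file) ---
# The same signature idea without reading words.txt: group a small in-memory sample.
def _demo_signature_groups() -> dict[str, list[str]]:
    sample = ["listen", "silent", "enlist", "google"]
    groups: dict[str, list[str]] = collections.defaultdict(list)
    for word in sample:
        groups[signature(word)].append(word)
    return dict(groups)  # {"eilnst": ["listen", "silent", "enlist"], "eggloo": ["google"]}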
| 287 | 0 |
"""simple docstring"""
import argparse
import requests
import torch
from PIL import Image
from transformers import SwinConfig, SwinForMaskedImageModeling, ViTImageProcessor
def get_swin_config(model_name):
    config = SwinConfig(image_size=192)

    if "base" in model_name:
        window_size = 6
        embed_dim = 128
        depths = (2, 2, 18, 2)
        num_heads = (4, 8, 16, 32)
    elif "large" in model_name:
        window_size = 12
        embed_dim = 192
        depths = (2, 2, 18, 2)
        num_heads = (6, 12, 24, 48)
    else:
        raise ValueError("Model not supported, only supports base and large variants")

    config.window_size = window_size
    config.embed_dim = embed_dim
    config.depths = depths
    config.num_heads = num_heads

    return config
def rename_key(name):
    if "encoder.mask_token" in name:
        name = name.replace("encoder.mask_token", "embeddings.mask_token")
    if "encoder.patch_embed.proj" in name:
        name = name.replace("encoder.patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "encoder.patch_embed.norm" in name:
        name = name.replace("encoder.patch_embed.norm", "embeddings.norm")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")

    if name == "encoder.norm.weight":
        name = "layernorm.weight"
    if name == "encoder.norm.bias":
        name = "layernorm.bias"

    if "decoder" in name:
        pass
    else:
        name = "swin." + name

    return name
def convert_state_dict(orig_state_dict, model):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "attn_mask" in key:
            pass
        elif "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[2])
            block_num = int(key_split[4])
            dim = model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size

            if "weight" in key:
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.weight"
                ] = val[:dim, :]
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.weight"
                ] = val[dim : dim * 2, :]
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.weight"
                ] = val[-dim:, :]
            else:
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.bias"
                ] = val[:dim]
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.bias"
                ] = val[dim : dim * 2]
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.bias"
                ] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict
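# --- Illustrative check (not part of the original script) ---
# The qkv branch above slices a fused (3*dim, dim) projection into equal
# query/key/value blocks; this tiny self-contained check mirrors that slicing.
def _demo_qkv_split(dim: int = 4) -> None:
    fused = torch.arange(3 * dim * dim, dtype=torch.float32).reshape(3 * dim, dim)
    query, key, value = fused[:dim, :], fused[dim : dim * 2, :], fused[-dim:, :]
    assert torch.equal(torch.cat([query, key, value], dim=0), fused)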
def convert_swin_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path, push_to_hub):
    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]

    config = get_swin_config(model_name)
    model = SwinForMaskedImageModeling(config)
    model.eval()

    new_state_dict = convert_state_dict(state_dict, model)
    model.load_state_dict(new_state_dict)

    url = "http://images.cocodataset.org/val2017/000000039769.jpg"

    image_processor = ViTImageProcessor(size={"height": 192, "width": 192})
    image = Image.open(requests.get(url, stream=True).raw)
    inputs = image_processor(images=image, return_tensors="pt")

    with torch.no_grad():
        outputs = model(**inputs)

    print(outputs.keys())
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)

        print(f"Saving image processor to {pytorch_dump_folder_path}")
        image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print(f"Pushing model and image processor for {model_name} to hub")
        model.push_to_hub(f"microsoft/{model_name}")
        image_processor.push_to_hub(f"microsoft/{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''swin-base-simmim-window6-192''',
type=str,
choices=['''swin-base-simmim-window6-192''', '''swin-large-simmim-window12-192'''],
help='''Name of the Swin SimMIM model you\'d like to convert.''',
)
parser.add_argument(
'''--checkpoint_path''',
default='''/Users/nielsrogge/Documents/SwinSimMIM/simmim_pretrain__swin_base__img192_window6__100ep.pth''',
type=str,
help='''Path to the original PyTorch checkpoint (.pth file).''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
    args = parser.parse_args()
convert_swin_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
| 296 |
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_url
from PIL import Image
from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_dpt_config(checkpoint_url):
    config = DPTConfig()

    if "large" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.backbone_out_indices = [5, 11, 17, 23]
        config.neck_hidden_sizes = [256, 512, 1024, 1024]
        expected_shape = (1, 384, 384)

    if "ade" in checkpoint_url:
        config.use_batch_norm_in_fusion_residual = True
        config.num_labels = 150
        repo_id = "huggingface/label-files"
        filename = "ade20k-id2label.json"
        id2label = json.load(open(cached_download(hf_hub_url(repo_id, filename, repo_type="dataset")), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
        expected_shape = [1, 150, 480, 480]

    return config, expected_shape
def remove_ignore_keys_(state_dict):
    ignore_keys = ["pretrained.model.head.weight", "pretrained.model.head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)
def rename_key(name):
    if (
        "pretrained.model" in name
        and "cls_token" not in name
        and "pos_embed" not in name
        and "patch_embed" not in name
    ):
        name = name.replace("pretrained.model", "dpt.encoder")
    if "pretrained.model" in name:
        name = name.replace("pretrained.model", "dpt.embeddings")
    if "patch_embed" in name:
        name = name.replace("patch_embed", "patch_embeddings")
    if "pos_embed" in name:
        name = name.replace("pos_embed", "position_embeddings")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "proj" in name and "project" not in name:
        name = name.replace("proj", "projection")
    if "blocks" in name:
        name = name.replace("blocks", "layer")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "scratch.output_conv" in name:
        name = name.replace("scratch.output_conv", "head")
    if "scratch" in name:
        name = name.replace("scratch", "neck")
    if "layer1_rn" in name:
        name = name.replace("layer1_rn", "convs.0")
    if "layer2_rn" in name:
        name = name.replace("layer2_rn", "convs.1")
    if "layer3_rn" in name:
        name = name.replace("layer3_rn", "convs.2")
    if "layer4_rn" in name:
        name = name.replace("layer4_rn", "convs.3")
    if "refinenet" in name:
        layer_idx = int(name[len("neck.refinenet") : len("neck.refinenet") + 1])
        # tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3
        name = name.replace(f"refinenet{layer_idx}", f"fusion_stage.layers.{abs(layer_idx-4)}")
    if "out_conv" in name:
        name = name.replace("out_conv", "projection")
    if "resConfUnit1" in name:
        name = name.replace("resConfUnit1", "residual_layer1")
    if "resConfUnit2" in name:
        name = name.replace("resConfUnit2", "residual_layer2")
    if "conv1" in name:
        name = name.replace("conv1", "convolution1")
    if "conv2" in name:
        name = name.replace("conv2", "convolution2")
    # readout blocks
    if "pretrained.act_postprocess1.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess1.0.project.0", "neck.reassemble_stage.readout_projects.0.0")
    if "pretrained.act_postprocess2.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess2.0.project.0", "neck.reassemble_stage.readout_projects.1.0")
    if "pretrained.act_postprocess3.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess3.0.project.0", "neck.reassemble_stage.readout_projects.2.0")
    if "pretrained.act_postprocess4.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess4.0.project.0", "neck.reassemble_stage.readout_projects.3.0")
    # resize blocks
    if "pretrained.act_postprocess1.3" in name:
        name = name.replace("pretrained.act_postprocess1.3", "neck.reassemble_stage.layers.0.projection")
    if "pretrained.act_postprocess1.4" in name:
        name = name.replace("pretrained.act_postprocess1.4", "neck.reassemble_stage.layers.0.resize")
    if "pretrained.act_postprocess2.3" in name:
        name = name.replace("pretrained.act_postprocess2.3", "neck.reassemble_stage.layers.1.projection")
    if "pretrained.act_postprocess2.4" in name:
        name = name.replace("pretrained.act_postprocess2.4", "neck.reassemble_stage.layers.1.resize")
    if "pretrained.act_postprocess3.3" in name:
        name = name.replace("pretrained.act_postprocess3.3", "neck.reassemble_stage.layers.2.projection")
    if "pretrained.act_postprocess4.3" in name:
        name = name.replace("pretrained.act_postprocess4.3", "neck.reassemble_stage.layers.3.projection")
    if "pretrained.act_postprocess4.4" in name:
        name = name.replace("pretrained.act_postprocess4.4", "neck.reassemble_stage.layers.3.resize")
    if "pretrained" in name:
        name = name.replace("pretrained", "dpt")
    if "bn" in name:
        name = name.replace("bn", "batch_norm")
    if "head" in name:
        name = name.replace("head", "head.head")
    if "encoder.norm" in name:
        name = name.replace("encoder.norm", "layernorm")
    if "auxlayer" in name:
        name = name.replace("auxlayer", "auxiliary_head.head")

    return name
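# --- Quick sanity check (not part of the original script) ---
# The refinenet branch above maps checkpoint indices 4..1 onto fusion_stage
# indices 0..3 with abs(layer_idx - 4); spelled out:
assert [abs(layer_idx - 4) for layer_idx in (4, 3, 2, 1)] == [0, 1, 2, 3]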
def read_in_q_k_v(state_dict, config):
    for i in range(config.num_hidden_layers):
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"dpt.encoder.layer.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"dpt.encoder.layer.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[: config.hidden_size, :]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_dpt_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub, model_name):
    config, expected_shape = get_dpt_config(checkpoint_url)
    # load original state_dict from URL
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
    # remove certain keys
    remove_ignore_keys_(state_dict)
    # rename keys
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        state_dict[rename_key(key)] = val
    # read in qkv matrices
    read_in_q_k_v(state_dict, config)

    # load HuggingFace model
    model = DPTForSemanticSegmentation(config) if "ade" in checkpoint_url else DPTForDepthEstimation(config)
    model.load_state_dict(state_dict)
    model.eval()

    # Check outputs on an image
    size = 480 if "ade" in checkpoint_url else 384
    image_processor = DPTImageProcessor(size=size)

    image = prepare_img()
    encoding = image_processor(image, return_tensors="pt")

    # forward pass
    outputs = model(**encoding).logits if "ade" in checkpoint_url else model(**encoding).predicted_depth

    # Assert logits
    expected_slice = torch.tensor([[6.3199, 6.3629, 6.4148], [6.3850, 6.3615, 6.4166], [6.3519, 6.3176, 6.3575]])
    if "ade" in checkpoint_url:
        expected_slice = torch.tensor([[4.0480, 4.2420, 4.4360], [4.3124, 4.5693, 4.8261], [4.5768, 4.8965, 5.2163]])
    assert outputs.shape == torch.Size(expected_shape)
    assert (
        torch.allclose(outputs[0, 0, :3, :3], expected_slice, atol=1e-4)
        if "ade" in checkpoint_url
        else torch.allclose(outputs[0, :3, :3], expected_slice)
    )

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing model to hub...")
        model.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add model",
            use_temp_dir=True,
        )
        image_processor.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add image processor",
            use_temp_dir=True,
        )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--checkpoint_url''',
default='''https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt''',
type=str,
help='''URL of the original DPT checkpoint you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default=None,
type=str,
required=True,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
)
parser.add_argument(
'''--model_name''',
default='''dpt-large''',
type=str,
help='''Name of the model, in case you\'re pushing to the hub.''',
)
    args = parser.parse_args()
convert_dpt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
| 296 | 1 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
KandinskyVaaControlnetImgaImgPipeline,
KandinskyVaaPriorEmbaEmbPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyVaaControlnetImgaImgPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyVaaControlnetImgaImgPipeline
    params = ['image_embeds', 'negative_image_embeds', 'image', 'hint']
    batch_params = ['image_embeds', 'negative_image_embeds', 'image', 'hint']
    required_optional_params = [
        'generator',
        'height',
        'width',
        'strength',
        'guidance_scale',
        'num_inference_steps',
        'return_dict',
        'guidance_scale',
        'num_images_per_prompt',
        'output_type',
        'return_dict',
    ]
    test_xformers_attention = False
    @property
    def text_embedder_hidden_size(self) -> int:
        return 32

    @property
    def time_input_dim(self) -> int:
        return 32

    @property
    def block_out_channels_a(self) -> int:
        return self.time_input_dim

    @property
    def time_embed_dim(self) -> int:
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self) -> int:
        return 100
    @property
    def dummy_unet(self):
        torch.manual_seed(0)
        model_kwargs = {
            'in_channels': 8,
            # Out channels is double in channels because predicts mean and variance
            'out_channels': 8,
            'addition_embed_type': 'image_hint',
            'down_block_types': ('ResnetDownsampleBlock2D', 'SimpleCrossAttnDownBlock2D'),
            'up_block_types': ('SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'),
            'mid_block_type': 'UNetMidBlock2DSimpleCrossAttn',
            'block_out_channels': (self.block_out_channels_a, self.block_out_channels_a * 2),
            'layers_per_block': 1,
            'encoder_hid_dim': self.text_embedder_hidden_size,
            'encoder_hid_dim_type': 'image_proj',
            'cross_attention_dim': self.cross_attention_dim,
            'attention_head_dim': 4,
            'resnet_time_scale_shift': 'scale_shift',
            'class_embed_type': None,
        }
        model = UNetaDConditionModel(**model_kwargs)
        return model
@property
    def dummy_movq_kwargs(self) -> dict:
return {
"block_out_channels": [3_2, 3_2, 6_4, 6_4],
"down_block_types": [
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"AttnDownEncoderBlock2D",
],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 1_2,
"out_channels": 3,
"up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
"vq_embed_dim": 4,
}
    @property
    def dummy_movq(self):
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model
    def get_dummy_components(self):
        unet = self.dummy_unet
        movq = self.dummy_movq

        ddim_config = {
            'num_train_timesteps': 1000,
            'beta_schedule': 'linear',
            'beta_start': 0.00085,
            'beta_end': 0.012,
            'clip_sample': False,
            'set_alpha_to_one': False,
            'steps_offset': 0,
            'prediction_type': 'epsilon',
            'thresholding': False,
        }

        scheduler = DDIMScheduler(**ddim_config)

        components = {
            'unet': unet,
            'scheduler': scheduler,
            'movq': movq,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed + 1)).to(
            device
        )
        # create init_image
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert('RGB').resize((256, 256))
        # create hint
        hint = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)

        if str(device).startswith('mps'):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            'image': init_image,
            'image_embeds': image_embeds,
            'negative_image_embeds': negative_image_embeds,
            'hint': hint,
            'generator': generator,
            'height': 64,
            'width': 64,
            'num_inference_steps': 10,
            'guidance_scale': 7.0,
            'strength': 0.2,
            'output_type': 'np',
        }
        return inputs
    def test_kandinsky_controlnet_img2img(self):
        device = 'cpu'

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images

        image_from_tuple = pipe(
            **self.get_dummy_inputs(device), return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array(
            [0.54985034, 0.55509365, 0.52561504, 0.5570494, 0.5593818, 0.5263979, 0.50285643, 0.5069846, 0.51196736]
        )
        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
@slow
@require_torch_gpu
class KandinskyVaaControlnetImgaImgPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self) -> None:
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_kandinsky_controlnet_img2img(self):
        expected_image = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/kandinskyv22/kandinskyv22_controlnet_img2img_robotcat_fp16.npy'
        )

        init_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/kandinsky/cat.png'
        )
        init_image = init_image.resize((512, 512))

        hint = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/kandinskyv22/hint_image_cat.png'
        )
        hint = torch.from_numpy(np.array(hint)).float() / 255.0
        hint = hint.permute(2, 0, 1).unsqueeze(0)

        prompt = 'A robot, 4k photo'

        pipe_prior = KandinskyVaaPriorEmbaEmbPipeline.from_pretrained(
            'kandinsky-community/kandinsky-2-2-prior', torch_dtype=torch.float16
        )
        pipe_prior.to(torch_device)

        pipeline = KandinskyVaaControlnetImgaImgPipeline.from_pretrained(
            'kandinsky-community/kandinsky-2-2-controlnet-depth', torch_dtype=torch.float16
        )
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)

        generator = torch.Generator(device='cpu').manual_seed(0)

        image_emb, zero_image_emb = pipe_prior(
            prompt, image=init_image, strength=0.85, generator=generator, negative_prompt='',
        ).to_tuple()

        output = pipeline(
            image=init_image,
            image_embeds=image_emb,
            negative_image_embeds=zero_image_emb,
            hint=hint,
            generator=generator,
            num_inference_steps=100,
            height=512,
            width=512,
            strength=0.5,
            output_type='np',
        )

        image = output.images[0]

        assert image.shape == (512, 512, 3)

        assert_mean_pixel_difference(image, expected_image)
| 322 |
from __future__ import annotations
import copy
import inspect
import unittest
import numpy as np
from transformers import is_tf_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
LayoutLMvaConfig,
TFLayoutLMvaForQuestionAnswering,
TFLayoutLMvaForSequenceClassification,
TFLayoutLMvaForTokenClassification,
TFLayoutLMvaModel,
)
if is_vision_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class TFLayoutLMvaModelTester:
    def __init__(
        self,
        parent,
        batch_size=2,
        num_channels=3,
        image_size=4,
        patch_size=2,
        text_seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=36,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        coordinate_size=6,
        shape_size=6,
        num_labels=3,
        num_choices=4,
        scope=None,
        range_bbox=1000,
    ) -> None:
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.patch_size = patch_size
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.coordinate_size = coordinate_size
        self.shape_size = shape_size
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.range_bbox = range_bbox
        # LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
        self.text_seq_length = text_seq_length
        self.image_seq_length = (image_size // patch_size) ** 2 + 1
        self.seq_length = self.text_seq_length + self.image_seq_length
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.text_seq_length], self.vocab_size)

        bbox = ids_tensor([self.batch_size, self.text_seq_length, 4], self.range_bbox)
        bbox = bbox.numpy()
        # Ensure that bbox is legal
        for i in range(bbox.shape[0]):
            for j in range(bbox.shape[1]):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    tmp_coordinate = bbox[i, j, 3]
                    bbox[i, j, 3] = bbox[i, j, 1]
                    bbox[i, j, 1] = tmp_coordinate
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    tmp_coordinate = bbox[i, j, 2]
                    bbox[i, j, 2] = bbox[i, j, 0]
                    bbox[i, j, 0] = tmp_coordinate
        bbox = tf.constant(bbox)

        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.text_seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.text_seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.text_seq_length], self.num_labels)

        config = LayoutLMvaConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            coordinate_size=self.coordinate_size,
            shape_size=self.shape_size,
            input_size=self.image_size,
            patch_size=self.patch_size,
        )

        return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
    def create_and_check_model(self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask):
        model = TFLayoutLMvaModel(config=config)

        # text + image
        result = model(input_ids, pixel_values=pixel_values, training=False)
        result = model(
            input_ids,
            bbox=bbox,
            pixel_values=pixel_values,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            training=False,
        )
        result = model(input_ids, bbox=bbox, pixel_values=pixel_values, training=False)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

        # text only
        result = model(input_ids, training=False)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.text_seq_length, self.hidden_size)
        )

        # image only
        result = model({'pixel_values': pixel_values}, training=False)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.image_seq_length, self.hidden_size)
        )
    def create_and_check_for_sequence_classification(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels
    ):
        config.num_labels = self.num_labels
        model = TFLayoutLMvaForSequenceClassification(config=config)
        result = model(
            input_ids,
            bbox=bbox,
            pixel_values=pixel_values,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=sequence_labels,
            training=False,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def create_and_check_for_token_classification(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, token_labels
    ):
        config.num_labels = self.num_labels
        model = TFLayoutLMvaForTokenClassification(config=config)
        result = model(
            input_ids,
            bbox=bbox,
            pixel_values=pixel_values,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=token_labels,
            training=False,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.text_seq_length, self.num_labels))
    def create_and_check_for_question_answering(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels
    ):
        config.num_labels = 2
        model = TFLayoutLMvaForQuestionAnswering(config=config)
        result = model(
            input_ids,
            bbox=bbox,
            pixel_values=pixel_values,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
            training=False,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = config_and_inputs
        inputs_dict = {
            'input_ids': input_ids,
            'bbox': bbox,
            'pixel_values': pixel_values,
            'token_type_ids': token_type_ids,
            'attention_mask': input_mask,
        }
        return config, inputs_dict
@require_tf
class TFLayoutLMvaModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFLayoutLMvaModel,
            TFLayoutLMvaForQuestionAnswering,
            TFLayoutLMvaForSequenceClassification,
            TFLayoutLMvaForTokenClassification,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {'document-question-answering': TFLayoutLMvaForQuestionAnswering, 'feature-extraction': TFLayoutLMvaModel}
        if is_tf_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = False
    test_onnx = False
    def is_pipeline_test_to_skip(
        self, pipeline_test_case_name, config_class, model_architecture, tokenizer_name, processor_name
    ) -> bool:
        return True
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False) -> dict:
        inputs_dict = copy.deepcopy(inputs_dict)
        if model_class in get_values(TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING):
            inputs_dict = {
                k: tf.tile(tf.expand_dims(v, 1), (1, self.model_tester.num_choices) + (1,) * (v.ndim - 1))
                if isinstance(v, tf.Tensor) and v.ndim > 0
                else v
                for k, v in inputs_dict.items()
            }

        if return_labels:
            if model_class in get_values(TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING):
                inputs_dict['labels'] = tf.ones(self.model_tester.batch_size, dtype=tf.int32)
            elif model_class in get_values(TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING):
                inputs_dict['start_positions'] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32)
                inputs_dict['end_positions'] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32)
            elif model_class in get_values(TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING):
                inputs_dict['labels'] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32)
            elif model_class in get_values(TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING):
                inputs_dict['labels'] = tf.zeros(
                    (self.model_tester.batch_size, self.model_tester.text_seq_length), dtype=tf.int32
                )

        return inputs_dict
    def setUp(self) -> None:
        self.model_tester = TFLayoutLMvaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LayoutLMvaConfig, hidden_size=37)
    def test_config(self) -> None:
        self.config_tester.run_common_tests()
    def test_loss_computation(self) -> None:
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            if getattr(model, 'hf_compute_loss', None):
                # The number of elements in the loss should be the same as the number of elements in the label
                prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)
                added_label = prepared_for_class[
                    sorted(prepared_for_class.keys() - inputs_dict.keys(), reverse=True)[0]
                ]
                expected_loss_size = added_label.shape.as_list()[:1]

                # Test that model correctly compute the loss with kwargs
                prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)
                input_ids = prepared_for_class.pop('input_ids')

                loss = model(input_ids, **prepared_for_class)[0]
                self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1])

                # Test that model correctly compute the loss when we mask some positions
                prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)
                input_ids = prepared_for_class.pop('input_ids')
                if "labels" in prepared_for_class:
                    labels = prepared_for_class['labels'].numpy()
                    if len(labels.shape) > 1 and labels.shape[1] != 1:
                        labels[0] = -100
                        prepared_for_class['labels'] = tf.convert_to_tensor(labels)
                        loss = model(input_ids, **prepared_for_class)[0]
                        self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1])
                        self.assertTrue(not np.any(np.isnan(loss.numpy())))

                # Test that model correctly compute the loss with a dict
                prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)
                loss = model(prepared_for_class)[0]
                self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1])

                # Test that model correctly compute the loss with a tuple
                prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)

                # Get keys that were added with the _prepare_for_class function
                label_keys = prepared_for_class.keys() - inputs_dict.keys()
                signature = inspect.signature(model.call).parameters
                signature_names = list(signature.keys())

                # Create a dictionary holding the location of the tensors in the tuple
                tuple_index_mapping = {0: 'input_ids'}
                for label_key in label_keys:
                    label_key_index = signature_names.index(label_key)
                    tuple_index_mapping[label_key_index] = label_key
                sorted_tuple_index_mapping = sorted(tuple_index_mapping.items())

                # Initialize a list with their default values, update the values and convert to a tuple
                list_input = []
                for name in signature_names:
                    if name != "kwargs":
                        list_input.append(signature[name].default)

                for index, value in sorted_tuple_index_mapping:
                    list_input[index] = prepared_for_class[value]

                tuple_input = tuple(list_input)

                # Send to model
                loss = model(tuple_input[:-1])[0]
                self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1])
    def test_model(self) -> None:
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(config, input_ids, bbox, pixel_values, token_type_ids, input_mask)
    def test_model_various_embeddings(self) -> None:
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config.position_embedding_type = type
            self.model_tester.create_and_check_model(
                config, input_ids, bbox, pixel_values, token_type_ids, input_mask
            )
    def test_for_sequence_classification(self) -> None:
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(
            config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels
        )
    def test_for_token_classification(self) -> None:
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(
            config, input_ids, bbox, pixel_values, token_type_ids, input_mask, token_labels
        )
    def test_for_question_answering(self) -> None:
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(
            config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels
        )
    @slow
    def test_model_from_pretrained(self) -> None:
        for model_name in TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFLayoutLMvaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
    return image
@require_tf
class TFLayoutLMvaModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return LayoutLMvaImageProcessor(apply_ocr=False) if is_vision_available() else None

    @slow
    def test_inference_no_head(self):
        model = TFLayoutLMvaModel.from_pretrained('microsoft/layoutlmv3-base')

        image_processor = self.default_image_processor
        image = prepare_img()
        pixel_values = image_processor(images=image, return_tensors='tf').pixel_values

        input_ids = tf.constant([[1, 2]])
        bbox = tf.expand_dims(tf.constant([[1, 2, 3, 4], [5, 6, 7, 8]]), axis=0)

        # forward pass
        outputs = model(input_ids=input_ids, bbox=bbox, pixel_values=pixel_values, training=False)

        # verify the logits
        expected_shape = (1, 199, 768)
        self.assertEqual(outputs.last_hidden_state.shape, expected_shape)

        expected_slice = tf.constant(
            [[-0.0529, 0.3618, 0.1632], [-0.1587, -0.1667, -0.0400], [-0.1557, -0.1671, -0.0505]]
        )
        self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4))
| 322 | 1 |
'''simple docstring'''
from __future__ import annotations
from PIL import Image
# Define glider example
GLIDER = [
[0, 1, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0],
[1, 1, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
]
# Define blinker example
BLINKER = [[0, 1, 0], [0, 1, 0], [0, 1, 0]]
def new_generation(cells: list[list[int]]) -> list[list[int]]:
    next_generation = []
    for i in range(len(cells)):
        next_generation_row = []
        for j in range(len(cells[i])):
            # Get the number of live neighbours
            neighbour_count = 0
            if i > 0 and j > 0:
                neighbour_count += cells[i - 1][j - 1]
            if i > 0:
                neighbour_count += cells[i - 1][j]
            if i > 0 and j < len(cells[i]) - 1:
                neighbour_count += cells[i - 1][j + 1]
            if j > 0:
                neighbour_count += cells[i][j - 1]
            if j < len(cells[i]) - 1:
                neighbour_count += cells[i][j + 1]
            if i < len(cells) - 1 and j > 0:
                neighbour_count += cells[i + 1][j - 1]
            if i < len(cells) - 1:
                neighbour_count += cells[i + 1][j]
            if i < len(cells) - 1 and j < len(cells[i]) - 1:
                neighbour_count += cells[i + 1][j + 1]

            # Rules of the game of life (excerpt from Wikipedia):
            # 1. Any live cell with two or three live neighbours survives.
            # 2. Any dead cell with three live neighbours becomes a live cell.
            # 3. All other live cells die in the next generation.
            #    Similarly, all other dead cells stay dead.
            alive = cells[i][j] == 1
            if (alive and 2 <= neighbour_count <= 3) or (not alive and neighbour_count == 3):
                next_generation_row.append(1)
            else:
                next_generation_row.append(0)

        next_generation.append(next_generation_row)
    return next_generation
def _lowerCamelCase ( lowerCamelCase_ : list[list[int]] , lowerCamelCase_ : int ):
"""simple docstring"""
UpperCAmelCase_ : int = []
for _ in range(lowerCamelCase_ ):
# Create output image
UpperCAmelCase_ : List[Any] = Image.new('RGB' , (len(cells[0] ), len(lowerCamelCase_ )) )
UpperCAmelCase_ : List[Any] = img.load()
# Save cells to image
for x in range(len(lowerCamelCase_ ) ):
for y in range(len(cells[0] ) ):
UpperCAmelCase_ : Optional[int] = 255 - cells[y][x] * 255
UpperCAmelCase_ : Optional[int] = (colour, colour, colour)
# Save image
images.append(lowerCamelCase_ )
UpperCAmelCase_ : int = new_generation(lowerCamelCase_ )
return images
if __name__ == "__main__":
snake_case__ : str = generate_images(GLIDER, 16)
images[0].save('''out.gif''', save_all=True, append_images=images[1:])
| 274 | '''simple docstring'''
from typing import Optional, Union
import torch
from torch import nn
from ...configuration_utils import ConfigMixin, register_to_config
from ...models.modeling_utils import ModelMixin
class __SCREAMING_SNAKE_CASE ( lowerCamelCase_ , lowerCamelCase_ ):
'''simple docstring'''
@register_to_config
def __init__( self , snake_case_ = 7_6_8 , ):
'''simple docstring'''
super().__init__()
UpperCAmelCase_ : int = nn.Parameter(torch.zeros(1 , snake_case_ ) )
UpperCAmelCase_ : str = nn.Parameter(torch.ones(1 , snake_case_ ) )
def _UpperCamelCase ( self , snake_case_ = None , snake_case_ = None , ):
'''simple docstring'''
UpperCAmelCase_ : int = nn.Parameter(self.mean.to(snake_case_ ).to(snake_case_ ) )
UpperCAmelCase_ : Tuple = nn.Parameter(self.std.to(snake_case_ ).to(snake_case_ ) )
return self
def _UpperCamelCase ( self , snake_case_ ):
'''simple docstring'''
UpperCAmelCase_ : Dict = (embeds - self.mean) * 1.0 / self.std
return embeds
def _UpperCamelCase ( self , snake_case_ ):
'''simple docstring'''
UpperCAmelCase_ : List[Any] = (embeds * self.std) + self.mean
return embeds
| 274 | 1 |
import json
import os
from typing import Dict, List, Optional, Tuple
import regex as re
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
_lowercase: int = logging.get_logger(__name__)
_lowercase: str = {
'vocab_file': 'vocab.json',
'merges_file': 'merges.txt',
'tokenizer_config_file': 'tokenizer_config.json',
}
_lowercase: List[Any] = {
'vocab_file': {
'facebook/blenderbot_small-90M': 'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json'
},
'merges_file': {
'facebook/blenderbot_small-90M': 'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt'
},
'tokenizer_config_file': {
'facebook/blenderbot_small-90M': (
'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json'
)
},
}
_lowercase: str = {'facebook/blenderbot_small-90M': 512}
def a( A : List[Any] ) -> List[str]:
"""simple docstring"""
a = set()
a = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
a = char
a = set(A )
return pairs
class _lowercase ( lowerCamelCase_ ):
"""simple docstring"""
__A = VOCAB_FILES_NAMES
__A = PRETRAINED_VOCAB_FILES_MAP
__A = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__A = ["input_ids", "attention_mask"]
def __init__(self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_="__start__" , lowerCamelCase_="__end__" , lowerCamelCase_="__unk__" , lowerCamelCase_="__null__" , **lowerCamelCase_ , ):
"""simple docstring"""
super().__init__(unk_token=__a , bos_token=__a , eos_token=__a , pad_token=__a , **__a )
with open(__a , encoding="utf-8" ) as vocab_handle:
a = json.load(__a )
a = {v: k for k, v in self.encoder.items()}
with open(__a , encoding="utf-8" ) as merges_handle:
a = merges_handle.read().split("\n" )[1:-1]
a = [tuple(merge.split() ) for merge in merges]
a = dict(zip(__a , range(len(__a ) ) ) )
a = {}
@property
def UpperCamelCase_ (self ):
"""simple docstring"""
return len(self.encoder )
def UpperCamelCase_ (self ):
"""simple docstring"""
return dict(self.encoder , **self.added_tokens_encoder )
def UpperCamelCase_ (self , lowerCamelCase_ ):
"""simple docstring"""
if token in self.cache:
return self.cache[token]
a = re.sub("([.,!?()])" , R" \1" , __a )
a = re.sub("(')" , R" \1 " , __a )
a = re.sub(R"\s{2,}" , " " , __a )
if "\n" in token:
a = token.replace("\n" , " __newln__" )
a = token.split(" " )
a = []
for token in tokens:
if not len(__a ):
continue
a = token.lower()
a = tuple(__a )
a = tuple(list(word[:-1] ) + [word[-1] + "</w>"] )
a = get_pairs(__a )
if not pairs:
words.append(__a )
continue
while True:
a = min(__a , key=lambda lowerCamelCase_ : self.bpe_ranks.get(__a , float("inf" ) ) )
if bigram not in self.bpe_ranks:
break
a , a = bigram
a = []
a = 0
while i < len(__a ):
try:
a = word.index(__a , __a )
new_word.extend(word[i:j] )
a = j
except ValueError:
new_word.extend(word[i:] )
break
if word[i] == first and i < len(__a ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
a = tuple(__a )
a = new_word
if len(__a ) == 1:
break
else:
a = get_pairs(__a )
a = "@@ ".join(__a )
a = word[:-4]
a = word
words.append(__a )
return " ".join(__a )
def UpperCamelCase_ (self , lowerCamelCase_ ):
"""simple docstring"""
a = []
a = re.findall(R"\S+\n?" , __a )
for token in words:
split_tokens.extend(list(self.bpe(__a ).split(" " ) ) )
return split_tokens
def UpperCamelCase_ (self , lowerCamelCase_ ):
"""simple docstring"""
a = token.lower()
return self.encoder.get(__a , self.encoder.get(self.unk_token ) )
def UpperCamelCase_ (self , lowerCamelCase_ ):
"""simple docstring"""
return self.decoder.get(__a , self.unk_token )
def UpperCamelCase_ (self , lowerCamelCase_ ):
"""simple docstring"""
a = " ".join(__a ).replace("@@ " , "" ).strip()
return out_string
def UpperCamelCase_ (self , lowerCamelCase_ , lowerCamelCase_ = None ):
"""simple docstring"""
if not os.path.isdir(__a ):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
return
a = os.path.join(
__a , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
a = os.path.join(
__a , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] )
with open(__a , "w" , encoding="utf-8" ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=__a , ensure_ascii=__a ) + "\n" )
a = 0
with open(__a , "w" , encoding="utf-8" ) as writer:
writer.write("#version: 0.2\n" )
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda lowerCamelCase_ : kv[1] ):
if index != token_index:
logger.warning(
F'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'''
" Please check that the tokenizer is not corrupted!" )
a = token_index
writer.write(" ".join(__a ) + "\n" )
index += 1
return vocab_file, merge_file
| 227 |
'''simple docstring'''
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
lowerCAmelCase_ : Dict = logging.get_logger(__name__)
lowerCAmelCase_ : Optional[int] = {
'ut/deta': 'https://huggingface.co/ut/deta/resolve/main/config.json',
}
class __SCREAMING_SNAKE_CASE (lowerCamelCase_ ):
"""simple docstring"""
__a ='deta'
__a ={
'hidden_size': 'd_model',
'num_attention_heads': 'encoder_attention_heads',
}
def __init__( self : List[str] , __a : List[str]=None , __a : Dict=9_00 , __a : str=20_48 , __a : Tuple=6 , __a : List[str]=20_48 , __a : str=8 , __a : Union[str, Any]=6 , __a : int=10_24 , __a : List[Any]=8 , __a : Dict=0.0 , __a : Tuple=True , __a : Optional[Any]="relu" , __a : Tuple=2_56 , __a : Optional[Any]=0.1 , __a : int=0.0 , __a : List[Any]=0.0 , __a : Optional[int]=0.02 , __a : str=1.0 , __a : Dict=True , __a : Dict=False , __a : Optional[int]="sine" , __a : Any=5 , __a : List[str]=4 , __a : Optional[int]=4 , __a : List[str]=True , __a : str=3_00 , __a : int=True , __a : int=True , __a : Tuple=1 , __a : Optional[int]=5 , __a : Tuple=2 , __a : Dict=1 , __a : Optional[int]=1 , __a : Any=5 , __a : Optional[int]=2 , __a : Dict=0.1 , __a : str=0.25 , **__a : Tuple , ):
if backbone_config is None:
logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone." )
_a = CONFIG_MAPPING["resnet"](out_features=["stage2", "stage3", "stage4"] )
else:
if isinstance(__a , __a ):
_a = backbone_config.pop("model_type" )
_a = CONFIG_MAPPING[backbone_model_type]
_a = config_class.from_dict(__a )
_a = backbone_config
_a = num_queries
_a = max_position_embeddings
_a = d_model
_a = encoder_ffn_dim
_a = encoder_layers
_a = encoder_attention_heads
_a = decoder_ffn_dim
_a = decoder_layers
_a = decoder_attention_heads
_a = dropout
_a = attention_dropout
_a = activation_dropout
_a = activation_function
_a = init_std
_a = init_xavier_std
_a = encoder_layerdrop
_a = auxiliary_loss
_a = position_embedding_type
# deformable attributes
_a = num_feature_levels
_a = encoder_n_points
_a = decoder_n_points
_a = two_stage
_a = two_stage_num_proposals
_a = with_box_refine
_a = assign_first_stage
if two_stage is True and with_box_refine is False:
raise ValueError("If two_stage is True, with_box_refine must be True." )
# Hungarian matcher
_a = class_cost
_a = bbox_cost
_a = giou_cost
# Loss coefficients
_a = mask_loss_coefficient
_a = dice_loss_coefficient
_a = bbox_loss_coefficient
_a = giou_loss_coefficient
_a = eos_coefficient
_a = focal_alpha
super().__init__(is_encoder_decoder=__a , **__a )
@property
def UpperCamelCase__ ( self : Optional[Any] ):
return self.encoder_attention_heads
@property
def UpperCamelCase__ ( self : Dict ):
return self.d_model
def UpperCamelCase__ ( self : List[str] ):
_a = copy.deepcopy(self.__dict__ )
_a = self.backbone_config.to_dict()
_a = self.__class__.model_type
return output
| 63 | 0 |
def UpperCamelCase ( _a , _a , _a , _a , _a , _a ) -> int:
'''simple docstring'''
if index == r:
for j in range(_a ):
print(data[j] , end=''' ''' )
print(''' ''' )
return
# When no more elements are there to put in data[]
if i >= n:
return
# current is included, put next at next location
lowercase_ :Any = arr[i]
combination_util(_a , _a , _a , index + 1 , _a , i + 1 )
# current is excluded, replace it with
# next (Note that i+1 is passed, but
# index is not changed)
combination_util(_a , _a , _a , _a , _a , i + 1 )
# The main function that prints all combinations
# of size r in arr[] of size n. This function
# mainly uses combinationUtil()
def UpperCamelCase ( _a , _a , _a ) -> Optional[Any]:
'''simple docstring'''
lowercase_ :List[Any] = [0] * r
# Print all combination using temporary array 'data[]'
combination_util(_a , _a , _a , 0 , _a , 0 )
if __name__ == "__main__":
# Driver code to check the function above
SCREAMING_SNAKE_CASE : List[str] = [10, 20, 30, 40, 50]
print_combination(arr, len(arr), 3)
# This code is contributed by Ambuj sahu
| 363 |
import argparse
import shlex
import runhouse as rh
if __name__ == "__main__":
# Refer to https://runhouse-docs.readthedocs-hosted.com/en/latest/api/python/cluster.html#hardware-setup for cloud access
# setup instructions, if using on-demand hardware
# If user passes --user <user> --host <host> --key_path <key_path> <example> <args>, fill them in as BYO cluster
# If user passes --instance <instance> --provider <provider> <example> <args>, fill them in as on-demand cluster
# Throw an error if user passes both BYO and on-demand cluster args
# Otherwise, use default values
SCREAMING_SNAKE_CASE : Tuple = argparse.ArgumentParser()
parser.add_argument("--user", type=str, default="ubuntu")
parser.add_argument("--host", type=str, default="localhost")
parser.add_argument("--key_path", type=str, default=None)
parser.add_argument("--instance", type=str, default="V100:1")
parser.add_argument("--provider", type=str, default="cheapest")
parser.add_argument("--use_spot", type=bool, default=False)
parser.add_argument("--example", type=str, default="pytorch/text-generation/run_generation.py")
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : List[Any] = parser.parse_known_args()
if args.host != "localhost":
if args.instance != "V100:1" or args.provider != "cheapest":
raise ValueError("Cannot specify both BYO and on-demand cluster args")
SCREAMING_SNAKE_CASE : int = rh.cluster(
name="rh-cluster", ips=[args.host], ssh_creds={"ssh_user": args.user, "ssh_private_key": args.key_path}
)
else:
SCREAMING_SNAKE_CASE : Optional[Any] = rh.cluster(
name="rh-cluster", instance_type=args.instance, provider=args.provider, use_spot=args.use_spot
)
SCREAMING_SNAKE_CASE : Any = args.example.rsplit("/", 1)[0]
# Set up remote environment
cluster.install_packages(["pip:./"]) # Installs transformers from local source
# Note transformers is copied into the home directory on the remote machine, so we can install from there
cluster.run([f"pip install -r transformers/examples/{example_dir}/requirements.txt"])
cluster.run(["pip install torch --upgrade --extra-index-url https://download.pytorch.org/whl/cu117"])
# Run example. You can bypass the CLI wrapper and paste your own code here.
cluster.run([f"python transformers/examples/{args.example} {' '.join(shlex.quote(arg) for arg in unknown)}"])
# Alternatively, we can just import and run a training function (especially if there's no wrapper CLI):
# from my_script... import train
# reqs = ['pip:./', 'torch', 'datasets', 'accelerate', 'evaluate', 'tqdm', 'scipy', 'scikit-learn', 'tensorboard']
# launch_train_gpu = rh.function(fn=train,
# system=gpu,
# reqs=reqs,
# name='train_bert_glue')
#
# We can pass in arguments just like we would to a function:
# launch_train_gpu(num_epochs = 3, lr = 2e-5, seed = 42, batch_size = 16
# stream_logs=True)
| 252 | 0 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
'''microsoft/cvt-13''': '''https://huggingface.co/microsoft/cvt-13/resolve/main/config.json''',
# See all Cvt models at https://huggingface.co/models?filter=cvt
}
class _lowerCamelCase ( lowercase__ ):
UpperCAmelCase_ = """cvt"""
def __init__(self , __a=3 , __a=[7, 3, 3] , __a=[4, 2, 2] , __a=[2, 1, 1] , __a=[64, 1_92, 3_84] , __a=[1, 3, 6] , __a=[1, 2, 10] , __a=[4.0, 4.0, 4.0] , __a=[0.0, 0.0, 0.0] , __a=[0.0, 0.0, 0.0] , __a=[0.0, 0.0, 0.1] , __a=[True, True, True] , __a=[False, False, True] , __a=["dw_bn", "dw_bn", "dw_bn"] , __a=[3, 3, 3] , __a=[1, 1, 1] , __a=[2, 2, 2] , __a=[1, 1, 1] , __a=[1, 1, 1] , __a=0.02 , __a=1e-1_2 , **__a , ) -> Optional[int]:
super().__init__(**lowercase_ )
UpperCamelCase = num_channels
UpperCamelCase = patch_sizes
UpperCamelCase = patch_stride
UpperCamelCase = patch_padding
UpperCamelCase = embed_dim
UpperCamelCase = num_heads
UpperCamelCase = depth
UpperCamelCase = mlp_ratio
UpperCamelCase = attention_drop_rate
UpperCamelCase = drop_rate
UpperCamelCase = drop_path_rate
UpperCamelCase = qkv_bias
UpperCamelCase = cls_token
UpperCamelCase = qkv_projection_method
UpperCamelCase = kernel_qkv
UpperCamelCase = padding_kv
UpperCamelCase = stride_kv
UpperCamelCase = padding_q
UpperCamelCase = stride_q
UpperCamelCase = initializer_range
UpperCamelCase = layer_norm_eps
| 153 |
"""simple docstring"""
import inspect
import unittest
import warnings
from transformers import DeiTConfig
from transformers.models.auto import get_values
from transformers.testing_utils import (
require_accelerate,
require_torch,
require_torch_gpu,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_MAPPING,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
DeiTModel,
)
from transformers.models.deit.modeling_deit import DEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DeiTImageProcessor
class A_ :
'''simple docstring'''
def __init__( self , lowercase_ , lowercase_=13 , lowercase_=30 , lowercase_=2 , lowercase_=3 , lowercase_=True , lowercase_=True , lowercase_=32 , lowercase_=5 , lowercase_=4 , lowercase_=37 , lowercase_="gelu" , lowercase_=0.1 , lowercase_=0.1 , lowercase_=10 , lowercase_=0.02 , lowercase_=3 , lowercase_=None , lowercase_=2 , ):
"""simple docstring"""
UpperCAmelCase_ : List[str] = parent
UpperCAmelCase_ : int = batch_size
UpperCAmelCase_ : int = image_size
UpperCAmelCase_ : List[Any] = patch_size
UpperCAmelCase_ : Any = num_channels
UpperCAmelCase_ : Optional[int] = is_training
UpperCAmelCase_ : Union[str, Any] = use_labels
UpperCAmelCase_ : Union[str, Any] = hidden_size
UpperCAmelCase_ : str = num_hidden_layers
UpperCAmelCase_ : List[str] = num_attention_heads
UpperCAmelCase_ : str = intermediate_size
UpperCAmelCase_ : str = hidden_act
UpperCAmelCase_ : List[Any] = hidden_dropout_prob
UpperCAmelCase_ : Union[str, Any] = attention_probs_dropout_prob
UpperCAmelCase_ : str = type_sequence_label_size
UpperCAmelCase_ : str = initializer_range
UpperCAmelCase_ : Union[str, Any] = scope
UpperCAmelCase_ : str = encoder_stride
# in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distilation tokens)
UpperCAmelCase_ : int = (image_size // patch_size) ** 2
UpperCAmelCase_ : Optional[Any] = num_patches + 2
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : Any = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCAmelCase_ : Tuple = None
if self.use_labels:
UpperCAmelCase_ : Any = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCAmelCase_ : Union[str, Any] = self.get_config()
return config, pixel_values, labels
def UpperCamelCase__ ( self ):
"""simple docstring"""
return DeiTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=lowercase_ , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
def UpperCamelCase__ ( self , lowercase_ , lowercase_ , lowercase_ ):
"""simple docstring"""
UpperCAmelCase_ : Optional[int] = DeiTModel(config=lowercase_ )
model.to(lowercase_ )
model.eval()
UpperCAmelCase_ : List[Any] = model(lowercase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCamelCase__ ( self , lowercase_ , lowercase_ , lowercase_ ):
"""simple docstring"""
UpperCAmelCase_ : Optional[int] = DeiTForMaskedImageModeling(config=lowercase_ )
model.to(lowercase_ )
model.eval()
UpperCAmelCase_ : List[Any] = model(lowercase_ )
self.parent.assertEqual(
result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
UpperCAmelCase_ : List[str] = 1
UpperCAmelCase_ : Optional[Any] = DeiTForMaskedImageModeling(lowercase_ )
model.to(lowercase_ )
model.eval()
UpperCAmelCase_ : Dict = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
UpperCAmelCase_ : Optional[int] = model(lowercase_ )
self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def UpperCamelCase__ ( self , lowercase_ , lowercase_ , lowercase_ ):
"""simple docstring"""
UpperCAmelCase_ : Tuple = self.type_sequence_label_size
UpperCAmelCase_ : Union[str, Any] = DeiTForImageClassification(lowercase_ )
model.to(lowercase_ )
model.eval()
UpperCAmelCase_ : List[str] = model(lowercase_ , labels=lowercase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
UpperCAmelCase_ : Union[str, Any] = 1
UpperCAmelCase_ : Optional[int] = DeiTForImageClassification(lowercase_ )
model.to(lowercase_ )
model.eval()
UpperCAmelCase_ : Dict = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
UpperCAmelCase_ : List[Any] = model(lowercase_ , labels=lowercase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : List[str] = self.prepare_config_and_inputs()
(
(
UpperCAmelCase_
) , (
UpperCAmelCase_
) , (
UpperCAmelCase_
) ,
) : Dict = config_and_inputs
UpperCAmelCase_ : Optional[int] = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class A_ (lowercase__ ,lowercase__ ,unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Any = (
(
DeiTModel,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
)
if is_torch_available()
else ()
)
SCREAMING_SNAKE_CASE__ : Tuple = (
{
"""feature-extraction""": DeiTModel,
"""image-classification""": (DeiTForImageClassification, DeiTForImageClassificationWithTeacher),
}
if is_torch_available()
else {}
)
SCREAMING_SNAKE_CASE__ : List[Any] = False
SCREAMING_SNAKE_CASE__ : Optional[Any] = False
SCREAMING_SNAKE_CASE__ : List[str] = False
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : Any = DeiTModelTester(self )
UpperCAmelCase_ : Optional[int] = ConfigTester(self , config_class=lowercase_ , has_text_modality=lowercase_ , hidden_size=37 )
def UpperCamelCase__ ( self ):
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason="DeiT does not use inputs_embeds" )
def UpperCamelCase__ ( self ):
"""simple docstring"""
pass
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ , UpperCAmelCase_ : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase_ : List[Any] = model_class(lowercase_ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
UpperCAmelCase_ : Any = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowercase_ , nn.Linear ) )
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ , UpperCAmelCase_ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase_ : Dict = model_class(lowercase_ )
UpperCAmelCase_ : Optional[Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase_ : str = [*signature.parameters.keys()]
UpperCAmelCase_ : Optional[int] = ["pixel_values"]
self.assertListEqual(arg_names[:1] , lowercase_ )
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowercase_ )
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*lowercase_ )
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowercase_ )
def UpperCamelCase__ ( self , lowercase_ , lowercase_ , lowercase_=False ):
"""simple docstring"""
UpperCAmelCase_ : Tuple = super()._prepare_for_class(lowercase_ , lowercase_ , return_labels=lowercase_ )
if return_labels:
if model_class.__name__ == "DeiTForImageClassificationWithTeacher":
del inputs_dict["labels"]
return inputs_dict
def UpperCamelCase__ ( self ):
"""simple docstring"""
if not self.model_tester.is_training:
return
UpperCAmelCase_ , UpperCAmelCase_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase_ : Union[str, Any] = True
for model_class in self.all_model_classes:
# DeiTForImageClassificationWithTeacher supports inference-only
if (
model_class in get_values(lowercase_ )
or model_class.__name__ == "DeiTForImageClassificationWithTeacher"
):
continue
UpperCAmelCase_ : Optional[int] = model_class(lowercase_ )
model.to(lowercase_ )
model.train()
UpperCAmelCase_ : List[Any] = self._prepare_for_class(lowercase_ , lowercase_ , return_labels=lowercase_ )
UpperCAmelCase_ : Dict = model(**lowercase_ ).loss
loss.backward()
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ , UpperCAmelCase_ : int = self.model_tester.prepare_config_and_inputs_for_common()
if not self.model_tester.is_training:
return
UpperCAmelCase_ : Dict = False
UpperCAmelCase_ : Optional[int] = True
for model_class in self.all_model_classes:
if model_class in get_values(lowercase_ ) or not model_class.supports_gradient_checkpointing:
continue
# DeiTForImageClassificationWithTeacher supports inference-only
if model_class.__name__ == "DeiTForImageClassificationWithTeacher":
continue
UpperCAmelCase_ : List[str] = model_class(lowercase_ )
model.gradient_checkpointing_enable()
model.to(lowercase_ )
model.train()
UpperCAmelCase_ : Optional[int] = self._prepare_for_class(lowercase_ , lowercase_ , return_labels=lowercase_ )
UpperCAmelCase_ : Any = model(**lowercase_ ).loss
loss.backward()
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ , UpperCAmelCase_ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase_ : Dict = [
{"title": "multi_label_classification", "num_labels": 2, "dtype": torch.float},
{"title": "single_label_classification", "num_labels": 1, "dtype": torch.long},
{"title": "regression", "num_labels": 1, "dtype": torch.float},
]
for model_class in self.all_model_classes:
if (
model_class
not in [
*get_values(lowercase_ ),
*get_values(lowercase_ ),
]
or model_class.__name__ == "DeiTForImageClassificationWithTeacher"
):
continue
for problem_type in problem_types:
with self.subTest(msg=F"""Testing {model_class} with {problem_type["title"]}""" ):
UpperCAmelCase_ : str = problem_type["title"]
UpperCAmelCase_ : List[Any] = problem_type["num_labels"]
UpperCAmelCase_ : Union[str, Any] = model_class(lowercase_ )
model.to(lowercase_ )
model.train()
UpperCAmelCase_ : int = self._prepare_for_class(lowercase_ , lowercase_ , return_labels=lowercase_ )
if problem_type["num_labels"] > 1:
UpperCAmelCase_ : List[Any] = inputs["labels"].unsqueeze(1 ).repeat(1 , problem_type["num_labels"] )
UpperCAmelCase_ : Tuple = inputs["labels"].to(problem_type["dtype"] )
# This tests that we do not trigger the warning form PyTorch "Using a target size that is different
# to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure
# they have the same size." which is a symptom something in wrong for the regression problem.
# See https://github.com/huggingface/transformers/issues/11780
with warnings.catch_warnings(record=lowercase_ ) as warning_list:
UpperCAmelCase_ : List[str] = model(**lowercase_ ).loss
for w in warning_list:
if "Using a target size that is different to the input size" in str(w.message ):
raise ValueError(
F"""Something is going wrong in the regression problem: intercepted {w.message}""" )
loss.backward()
@slow
def UpperCamelCase__ ( self ):
"""simple docstring"""
for model_name in DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase_ : Union[str, Any] = DeiTModel.from_pretrained(lowercase_ )
self.assertIsNotNone(lowercase_ )
def __a ( ):
UpperCAmelCase_ : Any = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class A_ (unittest.TestCase ):
'''simple docstring'''
@cached_property
def UpperCamelCase__ ( self ):
"""simple docstring"""
return (
DeiTImageProcessor.from_pretrained("facebook/deit-base-distilled-patch16-224" )
if is_vision_available()
else None
)
@slow
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : Tuple = DeiTForImageClassificationWithTeacher.from_pretrained("facebook/deit-base-distilled-patch16-224" ).to(
lowercase_ )
UpperCAmelCase_ : List[str] = self.default_image_processor
UpperCAmelCase_ : List[str] = prepare_img()
UpperCAmelCase_ : int = image_processor(images=lowercase_ , return_tensors="pt" ).to(lowercase_ )
# forward pass
with torch.no_grad():
UpperCAmelCase_ : Dict = model(**lowercase_ )
# verify the logits
UpperCAmelCase_ : List[str] = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , lowercase_ )
UpperCAmelCase_ : str = torch.tensor([-1.02_66, 0.19_12, -1.28_61] ).to(lowercase_ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowercase_ , atol=1E-4 ) )
@slow
@require_accelerate
@require_torch_gpu
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : List[str] = DeiTModel.from_pretrained(
"facebook/deit-base-distilled-patch16-224" , torch_dtype=torch.floataa , device_map="auto" )
UpperCAmelCase_ : str = self.default_image_processor
UpperCAmelCase_ : Union[str, Any] = prepare_img()
UpperCAmelCase_ : List[Any] = image_processor(images=lowercase_ , return_tensors="pt" )
UpperCAmelCase_ : List[str] = inputs.pixel_values.to(lowercase_ )
# forward pass to make sure inference works in fp16
with torch.no_grad():
UpperCAmelCase_ : int = model(lowercase_ )
| 61 | 0 |
import argparse
import os
import sys
from unittest.mock import patch
import pytorch_lightning as pl
import timeout_decorator
import torch
from distillation import SummarizationDistiller, distill_main
from finetune import SummarizationModule, main
from transformers import MarianMTModel
from transformers.file_utils import cached_path
from transformers.testing_utils import TestCasePlus, require_torch_gpu, slow
from utils import load_json
lowercase__ : Optional[int] = 'sshleifer/mar_enro_6_3_student'
class __lowerCAmelCase ( _snake_case ):
"""simple docstring"""
def snake_case__ ( self : Optional[int] ) -> Optional[Any]:
'''simple docstring'''
super().setUp()
_UpperCamelCase = cached_path(
'''https://cdn-datasets.huggingface.co/translation/wmt_en_ro-tr40k-va0.5k-te0.5k.tar.gz''' , extract_compressed_file=UpperCamelCase__ , )
_UpperCamelCase = f"""{data_cached}/wmt_en_ro-tr40k-va0.5k-te0.5k"""
@slow
@require_torch_gpu
def snake_case__ ( self : List[Any] ) -> Union[str, Any]:
'''simple docstring'''
MarianMTModel.from_pretrained(UpperCamelCase__ )
@slow
@require_torch_gpu
def snake_case__ ( self : Dict ) -> Optional[int]:
'''simple docstring'''
_UpperCamelCase = {
'''$MAX_LEN''': 64,
'''$BS''': 64,
'''$GAS''': 1,
'''$ENRO_DIR''': self.data_dir,
'''facebook/mbart-large-cc25''': MARIAN_MODEL,
# "val_check_interval=0.25": "val_check_interval=1.0",
'''--learning_rate=3e-5''': '''--learning_rate 3e-4''',
'''--num_train_epochs 6''': '''--num_train_epochs 1''',
}
# Clean up bash script
_UpperCamelCase = (self.test_file_dir / '''train_mbart_cc25_enro.sh''').open().read().split('''finetune.py''' )[1].strip()
_UpperCamelCase = bash_script.replace('''\\\n''' , '''''' ).strip().replace('''\"$@\"''' , '''''' )
for k, v in env_vars_to_replace.items():
_UpperCamelCase = bash_script.replace(UpperCamelCase__ , str(UpperCamelCase__ ) )
_UpperCamelCase = self.get_auto_remove_tmp_dir()
# bash_script = bash_script.replace("--fp16 ", "")
_UpperCamelCase = f"""
--output_dir {output_dir}
--tokenizer_name Helsinki-NLP/opus-mt-en-ro
--sortish_sampler
--do_predict
--gpus 1
--freeze_encoder
--n_train 40000
--n_val 500
--n_test 500
--fp16_opt_level O1
--num_sanity_val_steps 0
--eval_beams 2
""".split()
# XXX: args.gpus > 1 : handle multi_gpu in the future
_UpperCamelCase = ['''finetune.py'''] + bash_script.split() + args
with patch.object(UpperCamelCase__ , '''argv''' , UpperCamelCase__ ):
_UpperCamelCase = argparse.ArgumentParser()
_UpperCamelCase = pl.Trainer.add_argparse_args(UpperCamelCase__ )
_UpperCamelCase = SummarizationModule.add_model_specific_args(UpperCamelCase__ , os.getcwd() )
_UpperCamelCase = parser.parse_args()
_UpperCamelCase = main(UpperCamelCase__ )
# Check metrics
_UpperCamelCase = load_json(model.metrics_save_path )
_UpperCamelCase = metrics['''val'''][0]
_UpperCamelCase = metrics['''val'''][-1]
self.assertEqual(len(metrics['''val'''] ) , (args.max_epochs / args.val_check_interval) )
assert isinstance(last_step_stats[f"""val_avg_{model.val_metric}"""] , UpperCamelCase__ )
self.assertGreater(last_step_stats['''val_avg_gen_time'''] , 0.01 )
# model hanging on generate. Maybe bad config was saved. (XXX: old comment/assert?)
self.assertLessEqual(last_step_stats['''val_avg_gen_time'''] , 1.0 )
# test learning requirements:
# 1. BLEU improves over the course of training by more than 2 pts
self.assertGreater(last_step_stats['''val_avg_bleu'''] - first_step_stats['''val_avg_bleu'''] , 2 )
# 2. BLEU finishes above 17
self.assertGreater(last_step_stats['''val_avg_bleu'''] , 17 )
# 3. test BLEU and val BLEU within ~1.1 pt.
self.assertLess(abs(metrics['''val'''][-1]['''val_avg_bleu'''] - metrics['''test'''][-1]['''test_avg_bleu'''] ) , 1.1 )
# check lightning ckpt can be loaded and has a reasonable statedict
_UpperCamelCase = os.listdir(UpperCamelCase__ )
_UpperCamelCase = [x for x in contents if x.endswith('''.ckpt''' )][0]
_UpperCamelCase = os.path.join(args.output_dir , UpperCamelCase__ )
_UpperCamelCase = torch.load(UpperCamelCase__ , map_location='''cpu''' )
_UpperCamelCase = '''model.model.decoder.layers.0.encoder_attn_layer_norm.weight'''
assert expected_key in ckpt["state_dict"]
assert ckpt["state_dict"]["model.model.decoder.layers.0.encoder_attn_layer_norm.weight"].dtype == torch.floataa
# TODO: turn on args.do_predict when PL bug fixed.
if args.do_predict:
_UpperCamelCase = {os.path.basename(UpperCamelCase__ ) for p in contents}
assert "test_generations.txt" in contents
assert "test_results.txt" in contents
# assert len(metrics["val"]) == desired_n_evals
assert len(metrics['''test'''] ) == 1
class __lowerCAmelCase ( _snake_case ):
"""simple docstring"""
@timeout_decorator.timeout(600 )
@slow
@require_torch_gpu
def snake_case__ ( self : Union[str, Any] ) -> Optional[int]:
'''simple docstring'''
_UpperCamelCase = f"""{self.test_file_dir_str}/test_data/wmt_en_ro"""
_UpperCamelCase = {
'''--fp16_opt_level=O1''': '''''',
'''$MAX_LEN''': 128,
'''$BS''': 16,
'''$GAS''': 1,
'''$ENRO_DIR''': data_dir,
'''$m''': '''sshleifer/student_marian_en_ro_6_1''',
'''val_check_interval=0.25''': '''val_check_interval=1.0''',
}
# Clean up bash script
_UpperCamelCase = (
(self.test_file_dir / '''distil_marian_no_teacher.sh''').open().read().split('''distillation.py''' )[1].strip()
)
_UpperCamelCase = bash_script.replace('''\\\n''' , '''''' ).strip().replace('''\"$@\"''' , '''''' )
_UpperCamelCase = bash_script.replace('''--fp16 ''' , ''' ''' )
for k, v in env_vars_to_replace.items():
_UpperCamelCase = bash_script.replace(UpperCamelCase__ , str(UpperCamelCase__ ) )
_UpperCamelCase = self.get_auto_remove_tmp_dir()
_UpperCamelCase = bash_script.replace('''--fp16''' , '''''' )
_UpperCamelCase = 6
_UpperCamelCase = (
['''distillation.py''']
+ bash_script.split()
+ [
f"""--output_dir={output_dir}""",
'''--gpus=1''',
'''--learning_rate=1e-3''',
f"""--num_train_epochs={epochs}""",
'''--warmup_steps=10''',
'''--val_check_interval=1.0''',
'''--do_predict''',
]
)
with patch.object(UpperCamelCase__ , '''argv''' , UpperCamelCase__ ):
_UpperCamelCase = argparse.ArgumentParser()
_UpperCamelCase = pl.Trainer.add_argparse_args(UpperCamelCase__ )
_UpperCamelCase = SummarizationDistiller.add_model_specific_args(UpperCamelCase__ , os.getcwd() )
_UpperCamelCase = parser.parse_args()
# assert args.gpus == gpus THIS BREAKS for multi_gpu
_UpperCamelCase = distill_main(UpperCamelCase__ )
# Check metrics
_UpperCamelCase = load_json(model.metrics_save_path )
_UpperCamelCase = metrics['''val'''][0]
_UpperCamelCase = metrics['''val'''][-1]
assert len(metrics['''val'''] ) >= (args.max_epochs / args.val_check_interval) # +1 accounts for val_sanity_check
assert last_step_stats["val_avg_gen_time"] >= 0.01
assert first_step_stats["val_avg_bleu"] < last_step_stats["val_avg_bleu"] # model learned nothing
assert 1.0 >= last_step_stats["val_avg_gen_time"] # model hanging on generate. Maybe bad config was saved.
assert isinstance(last_step_stats[f"""val_avg_{model.val_metric}"""] , UpperCamelCase__ )
# check lightning ckpt can be loaded and has a reasonable statedict
_UpperCamelCase = os.listdir(UpperCamelCase__ )
_UpperCamelCase = [x for x in contents if x.endswith('''.ckpt''' )][0]
_UpperCamelCase = os.path.join(args.output_dir , UpperCamelCase__ )
_UpperCamelCase = torch.load(UpperCamelCase__ , map_location='''cpu''' )
_UpperCamelCase = '''model.model.decoder.layers.0.encoder_attn_layer_norm.weight'''
assert expected_key in ckpt["state_dict"]
assert ckpt["state_dict"]["model.model.decoder.layers.0.encoder_attn_layer_norm.weight"].dtype == torch.floataa
# TODO: turn on args.do_predict when PL bug fixed.
if args.do_predict:
_UpperCamelCase = {os.path.basename(UpperCamelCase__ ) for p in contents}
assert "test_generations.txt" in contents
assert "test_results.txt" in contents
# assert len(metrics["val"]) == desired_n_evals
assert len(metrics['''test'''] ) == 1
| 363 |
'''simple docstring'''
import unittest
from transformers import CamembertTokenizer, CamembertTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
lowercase__ : Optional[Any] = get_tests_dir('fixtures/test_sentencepiece.model')
lowercase__ : Any = get_tests_dir('fixtures/test_sentencepiece_bpe.model')
lowercase__ : Tuple = 'pt' if is_torch_available() else 'tf'
@require_sentencepiece
@require_tokenizers
class __lowerCAmelCase ( __magic_name__ , unittest.TestCase ):
"""simple docstring"""
_snake_case : Union[str, Any] = CamembertTokenizer
_snake_case : str = CamembertTokenizerFast
_snake_case : int = True
_snake_case : List[str] = True
def snake_case__ ( self : Dict ) -> Any:
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
_UpperCamelCase = CamembertTokenizer(lowerCAmelCase__ )
tokenizer.save_pretrained(self.tmpdirname )
def snake_case__ ( self : Optional[int] ) -> List[Any]:
'''simple docstring'''
_UpperCamelCase = '''<pad>'''
_UpperCamelCase = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowerCAmelCase__ ) , lowerCAmelCase__ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowerCAmelCase__ ) , lowerCAmelCase__ )
def snake_case__ ( self : Dict ) -> List[Any]:
'''simple docstring'''
_UpperCamelCase = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '''<s>NOTUSED''' )
self.assertEqual(vocab_keys[1] , '''<pad>''' )
self.assertEqual(vocab_keys[-1] , '''<mask>''' )
self.assertEqual(len(lowerCAmelCase__ ) , 1004 )
def snake_case__ ( self : Optional[Any] ) -> List[str]:
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size , 1005 )
def snake_case__ ( self : int ) -> Tuple:
'''simple docstring'''
_UpperCamelCase = CamembertTokenizer(lowerCAmelCase__ )
tokenizer.save_pretrained(self.tmpdirname )
_UpperCamelCase = CamembertTokenizerFast.from_pretrained(self.tmpdirname )
_UpperCamelCase = '''I was born in 92000, and this is falsé.'''
_UpperCamelCase = tokenizer.encode(lowerCAmelCase__ )
_UpperCamelCase = rust_tokenizer.encode(lowerCAmelCase__ )
self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ )
_UpperCamelCase = tokenizer.encode(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ )
_UpperCamelCase = rust_tokenizer.encode(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ )
self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ )
# <unk> tokens are not the same for `rust` than for `slow`.
# Because spm gives back raw token instead of `unk` in EncodeAsPieces
# tokens = tokenizer.tokenize(sequence)
_UpperCamelCase = tokenizer.convert_ids_to_tokens(lowerCAmelCase__ )
_UpperCamelCase = rust_tokenizer.tokenize(lowerCAmelCase__ )
self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ )
def snake_case__ ( self : Optional[int] ) -> List[Any]:
'''simple docstring'''
if not self.test_rust_tokenizer:
return
_UpperCamelCase = self.get_tokenizer()
_UpperCamelCase = self.get_rust_tokenizer()
_UpperCamelCase = '''I was born in 92000, and this is falsé.'''
_UpperCamelCase = tokenizer.tokenize(lowerCAmelCase__ )
_UpperCamelCase = rust_tokenizer.tokenize(lowerCAmelCase__ )
self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ )
_UpperCamelCase = tokenizer.encode(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ )
_UpperCamelCase = rust_tokenizer.encode(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ )
self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ )
_UpperCamelCase = self.get_rust_tokenizer()
_UpperCamelCase = tokenizer.encode(lowerCAmelCase__ )
_UpperCamelCase = rust_tokenizer.encode(lowerCAmelCase__ )
self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ )
@slow
def snake_case__ ( self : Any ) -> Optional[Any]:
'''simple docstring'''
_UpperCamelCase = {'''input_ids''': [[5, 54, 7196, 297, 30, 23, 776, 18, 11, 3215, 3705, 8252, 22, 3164, 1181, 2116, 29, 16, 813, 25, 791, 3314, 20, 3446, 38, 27575, 120, 6, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [5, 468, 17, 11, 9088, 20, 1517, 8, 22804, 18818, 10, 38, 629, 607, 607, 142, 19, 7196, 867, 56, 10326, 24, 2267, 20, 416, 5072, 15612, 233, 734, 7, 2399, 27, 16, 3015, 1649, 7, 24, 20, 4338, 2399, 27, 13, 3400, 14, 13, 6189, 8, 930, 9, 6]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
# camembert is a french model. So we also use french texts.
_UpperCamelCase = [
'''Le transformeur est un modèle d\'apprentissage profond introduit en 2017, '''
'''utilisé principalement dans le domaine du traitement automatique des langues (TAL).''',
'''À l\'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus '''
'''pour gérer des données séquentielles, telles que le langage naturel, pour des tâches '''
'''telles que la traduction et la synthèse de texte.''',
]
self.tokenizer_integration_test_util(
expected_encoding=lowerCAmelCase__ , model_name='''camembert-base''' , revision='''3a0641d9a1aeb7e848a74299e7e4c4bca216b4cf''' , sequences=lowerCAmelCase__ , )
| 287 | 0 |
import json
import logging
import os
import re
import sys
from dataclasses import dataclass, field
from typing import Any, Dict, List, Optional, Union
import datasets
import numpy as np
import torch
import torchaudio
from packaging import version
from torch import nn
import transformers
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaForCTC,
WavaVecaProcessor,
is_apex_available,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint, is_main_process
if is_apex_available():
from apex import amp
if version.parse(version.parse(torch.__version__).base_version) >= version.parse("""1.6"""):
SCREAMING_SNAKE_CASE_ = True
from torch.cuda.amp import autocast
SCREAMING_SNAKE_CASE_ = logging.getLogger(__name__)
def __lowercase ( _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None ) -> str:
'''simple docstring'''
return field(default_factory=lambda: default , metadata=_SCREAMING_SNAKE_CASE )
@dataclass
class UpperCamelCase__ :
'''simple docstring'''
__snake_case : str = field(
metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} )
__snake_case : Optional[str] = field(
default=lowerCAmelCase_ , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , )
__snake_case : Optional[bool] = field(
default=lowerCAmelCase_ , metadata={"help": "Whether to freeze the feature extractor layers of the model."} )
__snake_case : Optional[float] = field(
default=0.1 , metadata={"help": "The dropout ratio for the attention probabilities."} )
__snake_case : Optional[float] = field(
default=0.1 , metadata={"help": "The dropout ratio for activations inside the fully connected layer."} )
__snake_case : Optional[float] = field(
default=0.1 , metadata={
"help": "The dropout probabilitiy for all fully connected layers in the embeddings, encoder, and pooler."
} , )
__snake_case : Optional[float] = field(
default=0.1 , metadata={"help": "The dropout probabilitiy for all 1D convolutional layers in feature extractor."} , )
__snake_case : Optional[float] = field(
default=0.05 , metadata={
"help": (
"Propability of each feature vector along the time axis to be chosen as the start of the vector"
"span to be masked. Approximately ``mask_time_prob * sequence_length // mask_time_length`` feature"
"vectors will be masked along the time axis. This is only relevant if ``apply_spec_augment is True``."
)
} , )
__snake_case : Optional[float] = field(default=0.0 , metadata={"help": "The LayerDrop probability."} )
@dataclass
class UpperCamelCase__ :
'''simple docstring'''
__snake_case : Optional[str] = field(
default=lowerCAmelCase_ , metadata={"help": "The configuration name of the dataset to use (via the datasets library)."} )
__snake_case : Optional[str] = field(
default="train+validation" , metadata={
"help": "The name of the training data set split to use (via the datasets library). Defaults to 'train'"
} , )
__snake_case : bool = field(
default=lowerCAmelCase_ , metadata={"help": "Overwrite the cached preprocessed datasets or not."} )
__snake_case : Optional[int] = field(
default=lowerCAmelCase_ , metadata={"help": "The number of processes to use for the preprocessing."} , )
__snake_case : Optional[int] = field(
default=lowerCAmelCase_ , metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of training examples to this "
"value if set."
)
} , )
__snake_case : Optional[int] = field(
default=lowerCAmelCase_ , metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of validation examples to this "
"value if set."
)
} , )
__snake_case : List[str] = list_field(
default=[",", "?", ".", "!", "-", ";", ":", "\"\"", "%", "'", "\"", "�"] , metadata={"help": "A list of characters to remove from the transcripts."} , )
@dataclass
class UpperCamelCase__ :
'''simple docstring'''
__snake_case : WavaVecaProcessor
__snake_case : Union[bool, str] = True
__snake_case : Optional[int] = None
__snake_case : Optional[int] = None
__snake_case : Optional[int] = None
__snake_case : Optional[int] = None
def __call__( self : Optional[int] ,lowerCamelCase__ : List[Dict[str, Union[List[int], torch.Tensor]]] ) -> Dict[str, torch.Tensor]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = [{"""input_values""": feature["""input_values"""]} for feature in features]
SCREAMING_SNAKE_CASE = [{"""input_ids""": feature["""labels"""]} for feature in features]
SCREAMING_SNAKE_CASE = self.processor.pad(
lowerCamelCase__ ,padding=self.padding ,max_length=self.max_length ,pad_to_multiple_of=self.pad_to_multiple_of ,return_tensors="""pt""" ,)
SCREAMING_SNAKE_CASE = self.processor.pad(
labels=lowerCamelCase__ ,padding=self.padding ,max_length=self.max_length_labels ,pad_to_multiple_of=self.pad_to_multiple_of_labels ,return_tensors="""pt""" ,)
# replace padding with -100 to ignore loss correctly
SCREAMING_SNAKE_CASE = labels_batch["""input_ids"""].masked_fill(labels_batch.attention_mask.ne(1 ) ,-100 )
SCREAMING_SNAKE_CASE = labels
return batch
class UpperCamelCase__ ( lowerCAmelCase_ ):
'''simple docstring'''
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ,lowerCamelCase__ : nn.Module ,lowerCamelCase__ : Dict[str, Union[torch.Tensor, Any]] ) -> torch.Tensor:
'''simple docstring'''
model.train()
SCREAMING_SNAKE_CASE = self._prepare_inputs(lowerCamelCase__ )
if self.use_amp:
with autocast():
SCREAMING_SNAKE_CASE = self.compute_loss(lowerCamelCase__ ,lowerCamelCase__ )
else:
SCREAMING_SNAKE_CASE = self.compute_loss(lowerCamelCase__ ,lowerCamelCase__ )
if self.args.n_gpu > 1:
if model.module.config.ctc_loss_reduction == "mean":
SCREAMING_SNAKE_CASE = loss.mean()
elif model.module.config.ctc_loss_reduction == "sum":
SCREAMING_SNAKE_CASE = loss.sum() / (inputs["""labels"""] >= 0).sum()
else:
raise ValueError(F"""{model.config.ctc_loss_reduction} is not valid. Choose one of ['mean', 'sum']""" )
if self.args.gradient_accumulation_steps > 1:
SCREAMING_SNAKE_CASE = loss / self.args.gradient_accumulation_steps
if self.use_amp:
self.scaler.scale(lowerCamelCase__ ).backward()
elif self.use_apex:
with amp.scale_loss(lowerCamelCase__ ,self.optimizer ) as scaled_loss:
scaled_loss.backward()
elif self.deepspeed:
self.deepspeed.backward(lowerCamelCase__ )
else:
loss.backward()
return loss.detach()
def __lowercase ( ) -> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith(""".json""" ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE = parser.parse_args_into_dataclasses()
# Detecting last checkpoint.
SCREAMING_SNAKE_CASE = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
SCREAMING_SNAKE_CASE = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F"""Output directory ({training_args.output_dir}) already exists and is not empty. """
"""Use --overwrite_output_dir to overcome.""" )
elif last_checkpoint is not None:
logger.info(
F"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
"""the `--output_dir` or add `--overwrite_output_dir` to train from scratch.""" )
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , handlers=[logging.StreamHandler(sys.stdout )] , )
logger.setLevel(logging.INFO if is_main_process(training_args.local_rank ) else logging.WARN )
# Log on each process the small summary:
logger.warning(
F"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"""
+ F"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
logger.info("""Training/evaluation parameters %s""" , _SCREAMING_SNAKE_CASE )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets:
SCREAMING_SNAKE_CASE = datasets.load_dataset(
"""common_voice""" , data_args.dataset_config_name , split=data_args.train_split_name )
SCREAMING_SNAKE_CASE = datasets.load_dataset("""common_voice""" , data_args.dataset_config_name , split="""test""" )
# Create and save tokenizer
SCREAMING_SNAKE_CASE = F"""[{''.join(data_args.chars_to_ignore )}]"""
def remove_special_characters(_SCREAMING_SNAKE_CASE ):
SCREAMING_SNAKE_CASE = re.sub(_SCREAMING_SNAKE_CASE , """""" , batch["""sentence"""] ).lower() + """ """
return batch
SCREAMING_SNAKE_CASE = train_dataset.map(_SCREAMING_SNAKE_CASE , remove_columns=["""sentence"""] )
SCREAMING_SNAKE_CASE = eval_dataset.map(_SCREAMING_SNAKE_CASE , remove_columns=["""sentence"""] )
def extract_all_chars(_SCREAMING_SNAKE_CASE ):
SCREAMING_SNAKE_CASE = """ """.join(batch["""text"""] )
SCREAMING_SNAKE_CASE = list(set(_SCREAMING_SNAKE_CASE ) )
return {"vocab": [vocab], "all_text": [all_text]}
SCREAMING_SNAKE_CASE = train_dataset.map(
_SCREAMING_SNAKE_CASE , batched=_SCREAMING_SNAKE_CASE , batch_size=-1 , keep_in_memory=_SCREAMING_SNAKE_CASE , remove_columns=train_dataset.column_names , )
SCREAMING_SNAKE_CASE = train_dataset.map(
_SCREAMING_SNAKE_CASE , batched=_SCREAMING_SNAKE_CASE , batch_size=-1 , keep_in_memory=_SCREAMING_SNAKE_CASE , remove_columns=eval_dataset.column_names , )
SCREAMING_SNAKE_CASE = list(set(vocab_train["""vocab"""][0] ) | set(vocab_test["""vocab"""][0] ) )
SCREAMING_SNAKE_CASE = {v: k for k, v in enumerate(_SCREAMING_SNAKE_CASE )}
SCREAMING_SNAKE_CASE = vocab_dict[""" """]
del vocab_dict[" "]
SCREAMING_SNAKE_CASE = len(_SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE = len(_SCREAMING_SNAKE_CASE )
with open("""vocab.json""" , """w""" ) as vocab_file:
json.dump(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
SCREAMING_SNAKE_CASE = WavaVecaCTCTokenizer(
"""vocab.json""" , unk_token="""[UNK]""" , pad_token="""[PAD]""" , word_delimiter_token="""|""" , )
SCREAMING_SNAKE_CASE = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=1_60_00 , padding_value=0.0 , do_normalize=_SCREAMING_SNAKE_CASE , return_attention_mask=_SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE = WavaVecaProcessor(feature_extractor=_SCREAMING_SNAKE_CASE , tokenizer=_SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE = WavaVecaForCTC.from_pretrained(
model_args.model_name_or_path , cache_dir=model_args.cache_dir , activation_dropout=model_args.activation_dropout , attention_dropout=model_args.attention_dropout , hidden_dropout=model_args.hidden_dropout , feat_proj_dropout=model_args.feat_proj_dropout , mask_time_prob=model_args.mask_time_prob , gradient_checkpointing=training_args.gradient_checkpointing , layerdrop=model_args.layerdrop , ctc_loss_reduction="""mean""" , pad_token_id=processor.tokenizer.pad_token_id , vocab_size=len(processor.tokenizer ) , )
if data_args.max_train_samples is not None:
SCREAMING_SNAKE_CASE = min(len(_SCREAMING_SNAKE_CASE ) , data_args.max_train_samples )
SCREAMING_SNAKE_CASE = train_dataset.select(range(_SCREAMING_SNAKE_CASE ) )
if data_args.max_val_samples is not None:
SCREAMING_SNAKE_CASE = eval_dataset.select(range(data_args.max_val_samples ) )
SCREAMING_SNAKE_CASE = torchaudio.transforms.Resample(4_80_00 , 1_60_00 )
# Preprocessing the datasets.
# We need to read the aduio files as arrays and tokenize the targets.
def speech_file_to_array_fn(_SCREAMING_SNAKE_CASE ):
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE = torchaudio.load(batch["""path"""] )
SCREAMING_SNAKE_CASE = resampler(_SCREAMING_SNAKE_CASE ).squeeze().numpy()
SCREAMING_SNAKE_CASE = 1_60_00
SCREAMING_SNAKE_CASE = batch["""text"""]
return batch
SCREAMING_SNAKE_CASE = train_dataset.map(
_SCREAMING_SNAKE_CASE , remove_columns=train_dataset.column_names , num_proc=data_args.preprocessing_num_workers , )
SCREAMING_SNAKE_CASE = eval_dataset.map(
_SCREAMING_SNAKE_CASE , remove_columns=eval_dataset.column_names , num_proc=data_args.preprocessing_num_workers , )
    def prepare_dataset(batch):
        # check that all files have the correct sampling rate
        assert (
            len(set(batch["sampling_rate"])) == 1
        ), f"Make sure all inputs have the same sampling rate of {processor.feature_extractor.sampling_rate}."
        processed_batch = processor(
            audio=batch["speech"], text=batch["target_text"], sampling_rate=batch["sampling_rate"][0])
        batch.update(processed_batch)
        return batch

    train_dataset = train_dataset.map(
        prepare_dataset, remove_columns=train_dataset.column_names, batch_size=training_args.per_device_train_batch_size, batched=True, num_proc=data_args.preprocessing_num_workers, )
    eval_dataset = eval_dataset.map(
        prepare_dataset, remove_columns=eval_dataset.column_names, batch_size=training_args.per_device_train_batch_size, batched=True, num_proc=data_args.preprocessing_num_workers, )
# Metric
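    # Word error rate (WER) is the standard metric for speech recognition; lower is better.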
    wer_metric = datasets.load_metric("wer")

    def compute_metrics(pred):
        pred_logits = pred.predictions
        pred_ids = np.argmax(pred_logits, axis=-1)
        # replace the -100 label padding with the pad token id so the labels can be decoded
        pred.label_ids[pred.label_ids == -100] = processor.tokenizer.pad_token_id
        pred_str = processor.batch_decode(pred_ids)
        # we do not want to group tokens when computing the metrics
        label_str = processor.batch_decode(pred.label_ids, group_tokens=False)
        wer = wer_metric.compute(predictions=pred_str, references=label_str)
        return {"wer": wer}
    if model_args.freeze_feature_extractor:
        model.freeze_feature_extractor()
    # Data collator
    data_collator = DataCollatorCTCWithPadding(processor=processor, padding=True)
    # Initialize our Trainer
    trainer = CTCTrainer(
        model=model, data_collator=data_collator, args=training_args, compute_metrics=compute_metrics, train_dataset=train_dataset if training_args.do_train else None, eval_dataset=eval_dataset if training_args.do_eval else None, tokenizer=processor.feature_extractor, )
# Training
    if training_args.do_train:
        if last_checkpoint is not None:
            checkpoint = last_checkpoint
        elif os.path.isdir(model_args.model_name_or_path):
            checkpoint = model_args.model_name_or_path
        else:
            checkpoint = None
        # Save the feature_extractor and the tokenizer
        if is_main_process(training_args.local_rank):
            processor.save_pretrained(training_args.output_dir)
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()
        metrics = train_result.metrics
        max_train_samples = (
            data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset)
        )
        metrics["train_samples"] = min(max_train_samples, len(train_dataset))
        trainer.log_metrics("train", metrics)
        trainer.save_metrics("train", metrics)
        trainer.save_state()
# Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")
        metrics = trainer.evaluate()
        max_val_samples = data_args.max_val_samples if data_args.max_val_samples is not None else len(eval_dataset)
        metrics["eval_samples"] = min(max_val_samples, len(eval_dataset))
        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)
    return results
if __name__ == "__main__":
main()
| 296 |
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

GIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/git-base": "https://huggingface.co/microsoft/git-base/resolve/main/config.json",
}
class GitVisionConfig(PretrainedConfig):
    """simple docstring"""

    model_type = "git_vision_model"

    def __init__(
        self,
        hidden_size=768,
        intermediate_size=3072,
        num_hidden_layers=12,
        num_attention_heads=12,
        num_channels=3,
        image_size=224,
        patch_size=16,
        hidden_act="quick_gelu",
        layer_norm_eps=1e-5,
        attention_dropout=0.0,
        initializer_range=0.02,
        **kwargs,
    ):
        """simple docstring"""
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        """simple docstring"""
        cls._set_token_in_kwargs(kwargs)
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        # get the vision config dict if we are loading from GITConfig
        if config_dict.get("model_type") == "git":
            config_dict = config_dict["vision_config"]
        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )
        return cls.from_dict(config_dict, **kwargs)
class GitConfig(PretrainedConfig):
    """simple docstring"""

    model_type = "git"

    def __init__(
        self,
        vision_config=None,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=6,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1024,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        use_cache=True,
        tie_word_embeddings=False,
        bos_token_id=101,
        eos_token_id=102,
        num_image_with_embedding=None,
        **kwargs,
    ):
        """simple docstring"""
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, pad_token_id=pad_token_id, **kwargs)
        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. initializing the GitVisionConfig with default values.")
        self.vision_config = GitVisionConfig(**vision_config)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.tie_word_embeddings = tie_word_embeddings
        self.num_image_with_embedding = num_image_with_embedding
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

    def to_dict(self):
        """simple docstring"""
        output = copy.deepcopy(self.__dict__)
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
| 296 | 1 |
def solution(n: int = 1000) -> int:
    return sum(2 * a * ((a - 1) // 2) for a in range(3, n + 1))
if __name__ == "__main__":
print(solution())
| 238 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    'configuration_git': ['GIT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'GitConfig', 'GitVisionConfig'],
    'processing_git': ['GitProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_git'] = [
        'GIT_PRETRAINED_MODEL_ARCHIVE_LIST',
        'GitForCausalLM',
        'GitModel',
        'GitPreTrainedModel',
        'GitVisionModel',
    ]
if TYPE_CHECKING:
from .configuration_git import GIT_PRETRAINED_CONFIG_ARCHIVE_MAP, GitConfig, GitVisionConfig
from .processing_git import GitProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_git import (
GIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GitForCausalLM,
GitModel,
GitPreTrainedModel,
GitVisionModel,
)
else:
import sys
snake_case_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 238 | 1 |
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from accelerate import PartialState
from accelerate.utils.operations import broadcast, gather, gather_object, pad_across_processes, reduce
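# Each helper below creates a small, rank-dependent tensor on every process and
# checks one collective operation (gather, broadcast, pad, reduce) across processes.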
def create_tensor(state):
    """simple docstring"""
    return (torch.arange(state.num_processes) + 1.0 + (state.num_processes * state.process_index)).to(state.device)


def test_gather(state):
    """simple docstring"""
    tensor = create_tensor(state)
    gathered_tensor = gather(tensor)
    assert gathered_tensor.tolist() == list(range(1, state.num_processes**2 + 1))


def test_gather_object(state):
    """simple docstring"""
    obj = [state.process_index]
    gathered_obj = gather_object(obj)
    assert len(gathered_obj) == state.num_processes, f"{gathered_obj}, {len(gathered_obj)} != {state.num_processes}"
    assert gathered_obj == list(range(state.num_processes)), f"{gathered_obj} != {list(range(state.num_processes))}"


def test_broadcast(state):
    """simple docstring"""
    tensor = create_tensor(state)
    broadcasted_tensor = broadcast(tensor)
    assert broadcasted_tensor.shape == torch.Size([state.num_processes])
    assert broadcasted_tensor.tolist() == list(range(1, state.num_processes + 1))


def test_pad_across_processes(state):
    """simple docstring"""
    # the main process holds one extra element, so the shapes differ across ranks
    if state.is_main_process:
        tensor = torch.arange(state.num_processes + 1).to(state.device)
    else:
        tensor = torch.arange(state.num_processes).to(state.device)
    padded_tensor = pad_across_processes(tensor)
    assert padded_tensor.shape == torch.Size([state.num_processes + 1])
    if not state.is_main_process:
        assert padded_tensor.tolist() == list(range(0, state.num_processes)) + [0]


def test_reduce_sum(state):
    """simple docstring"""
    # this check is written for exactly two processes
    if state.num_processes != 2:
        return
    tensor = create_tensor(state)
    reduced_tensor = reduce(tensor, "sum")
    truth_tensor = torch.tensor([4.0, 6]).to(state.device)
    assert torch.allclose(reduced_tensor, truth_tensor), f"{reduced_tensor} != {truth_tensor}"


def test_reduce_mean(state):
    """simple docstring"""
    if state.num_processes != 2:
        return
    tensor = create_tensor(state)
    reduced_tensor = reduce(tensor, "mean")
    truth_tensor = torch.tensor([2.0, 3]).to(state.device)
    assert torch.allclose(reduced_tensor, truth_tensor), f"{reduced_tensor} != {truth_tensor}"


def _mp_fn(index):
    """simple docstring"""
    main()


def main():
    """simple docstring"""
    state = PartialState()
    state.print(f"State: {state}")
    state.print("testing gather")
    test_gather(state)
    state.print("testing gather_object")
    test_gather_object(state)
    state.print("testing broadcast")
    test_broadcast(state)
    state.print("testing pad_across_processes")
    test_pad_across_processes(state)
    state.print("testing reduce_sum")
    test_reduce_sum(state)
    state.print("testing reduce_mean")
    test_reduce_mean(state)
if __name__ == "__main__":
main()
| 274 |
import argparse
import csv
import logging
import os
import random
import numpy as np
import torch
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset
from tqdm import tqdm, trange
from transformers import (
CONFIG_NAME,
WEIGHTS_NAME,
AdamW,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTTokenizer,
get_linear_schedule_with_warmup,
)
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''', datefmt='''%m/%d/%Y %H:%M:%S''', level=logging.INFO
)
logger = logging.getLogger(__name__)
def accuracy(out, labels):
    """simple docstring"""
    outputs = np.argmax(out, axis=1)
    return np.sum(outputs == labels)
def load_rocstories_dataset(dataset_path):
    """simple docstring"""
    with open(dataset_path, encoding="utf_8") as f:
        f = csv.reader(f)
        output = []
        next(f)  # skip the first line
        for line in tqdm(f):
            output.append((" ".join(line[1:5]), line[5], line[6], int(line[-1]) - 1))
    return output
def pre_process_datasets(encoded_datasets, input_len, cap_length, start_token, delimiter_token, clf_token):
    """simple docstring"""
    tensor_datasets = []
    for dataset in encoded_datasets:
        n_batch = len(dataset)
        input_ids = np.zeros((n_batch, 2, input_len), dtype=np.int64)
        mc_token_ids = np.zeros((n_batch, 2), dtype=np.int64)
        lm_labels = np.full((n_batch, 2, input_len), fill_value=-100, dtype=np.int64)
        mc_labels = np.zeros((n_batch,), dtype=np.int64)
        for i, (story, cont1, cont2, mc_label) in enumerate(dataset):
            with_cont1 = [start_token] + story[:cap_length] + [delimiter_token] + cont1[:cap_length] + [clf_token]
            with_cont2 = [start_token] + story[:cap_length] + [delimiter_token] + cont2[:cap_length] + [clf_token]
            input_ids[i, 0, : len(with_cont1)] = with_cont1
            input_ids[i, 1, : len(with_cont2)] = with_cont2
            mc_token_ids[i, 0] = len(with_cont1) - 1
            mc_token_ids[i, 1] = len(with_cont2) - 1
            lm_labels[i, 0, : len(with_cont1)] = with_cont1
            lm_labels[i, 1, : len(with_cont2)] = with_cont2
            mc_labels[i] = mc_label
        all_inputs = (input_ids, mc_token_ids, lm_labels, mc_labels)
        tensor_datasets.append(tuple(torch.tensor(t) for t in all_inputs))
    return tensor_datasets
def main():
    """simple docstring"""
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_name", type=str, default="openai-gpt", help="pretrained model name")
    parser.add_argument("--do_train", action="store_true", help="Whether to run training.")
    parser.add_argument("--do_eval", action="store_true", help="Whether to run eval on the dev set.")
    parser.add_argument(
        "--output_dir", default=None, type=str, required=True, help="The output directory where the model predictions and checkpoints will be written.", )
    parser.add_argument("--train_dataset", type=str, default="")
    parser.add_argument("--eval_dataset", type=str, default="")
    parser.add_argument("--seed", type=int, default=42)
    parser.add_argument("--num_train_epochs", type=int, default=3)
    parser.add_argument("--train_batch_size", type=int, default=8)
    parser.add_argument("--eval_batch_size", type=int, default=16)
    parser.add_argument("--adam_epsilon", default=1e-8, type=float, help="Epsilon for Adam optimizer.")
    parser.add_argument("--max_grad_norm", type=int, default=1)
    parser.add_argument(
        "--max_steps", default=-1, type=int, help=(
            "If > 0: set total number of training steps to perform. Override num_train_epochs."
        ), )
    parser.add_argument(
        "--gradient_accumulation_steps", type=int, default=1, help="Number of updates steps to accumulate before performing a backward/update pass.", )
    parser.add_argument("--learning_rate", type=float, default=6.25e-5)
    parser.add_argument("--warmup_steps", default=0, type=int, help="Linear warmup over warmup_steps.")
    parser.add_argument("--lr_schedule", type=str, default="warmup_linear")
    parser.add_argument("--weight_decay", type=float, default=0.01)
    parser.add_argument("--lm_coef", type=float, default=0.9)
    parser.add_argument("--n_valid", type=int, default=374)
    parser.add_argument("--server_ip", type=str, default="", help="Can be used for distant debugging.")
    parser.add_argument("--server_port", type=str, default="", help="Can be used for distant debugging.")
    args = parser.parse_args()
    print(args)
    if args.server_ip and args.server_port:
        # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
        import ptvsd

        print("Waiting for debugger attach")
        ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
        ptvsd.wait_for_attach()
    random.seed(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed_all(args.seed)
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    n_gpu = torch.cuda.device_count()
    logger.info("device: {}, n_gpu {}".format(device, n_gpu))
if not args.do_train and not args.do_eval:
raise ValueError("""At least one of `do_train` or `do_eval` must be True.""" )
if not os.path.exists(args.output_dir ):
os.makedirs(args.output_dir )
# Load tokenizer and model
# This loading functions also add new tokens and embeddings called `special tokens`
# These new embeddings will be fine-tuned on the RocStories dataset
A__ = ["""_start_""", """_delimiter_""", """_classify_"""]
A__ = OpenAIGPTTokenizer.from_pretrained(args.model_name )
tokenizer.add_tokens(__a )
A__ = tokenizer.convert_tokens_to_ids(__a )
A__ = OpenAIGPTDoubleHeadsModel.from_pretrained(args.model_name )
model.resize_token_embeddings(len(__a ) )
model.to(__a )
# Load and encode the datasets
def tokenize_and_encode(__a :Tuple ):
if isinstance(__a , __a ):
return tokenizer.convert_tokens_to_ids(tokenizer.tokenize(__a ) )
elif isinstance(__a , __a ):
return obj
return [tokenize_and_encode(__a ) for o in obj]
logger.info("""Encoding dataset...""" )
A__ = load_rocstories_dataset(args.train_dataset )
A__ = load_rocstories_dataset(args.eval_dataset )
A__ = (train_dataset, eval_dataset)
A__ = tokenize_and_encode(__a )
# Compute the max input length for the Transformer
A__ = model.config.n_positions // 2 - 2
A__ = max(
len(story[:max_length] ) + max(len(conta[:max_length] ) , len(conta[:max_length] ) ) + 3
for dataset in encoded_datasets
for story, conta, conta, _ in dataset )
A__ = min(__a , model.config.n_positions ) # Max size of input for the pre-trained model
# Prepare inputs tensors and dataloaders
A__ = pre_process_datasets(__a , __a , __a , *__a )
A__ , A__ = tensor_datasets[0], tensor_datasets[1]
A__ = TensorDataset(*__a )
A__ = RandomSampler(__a )
A__ = DataLoader(__a , sampler=__a , batch_size=args.train_batch_size )
A__ = TensorDataset(*__a )
A__ = SequentialSampler(__a )
A__ = DataLoader(__a , sampler=__a , batch_size=args.eval_batch_size )
# Prepare optimizer
    if args.do_train:
        if args.max_steps > 0:
            t_total = args.max_steps
            args.num_train_epochs = args.max_steps // (len(train_dataloader) // args.gradient_accumulation_steps) + 1
        else:
            t_total = len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs
        param_optimizer = list(model.named_parameters())
        no_decay = ["bias", "LayerNorm.bias", "LayerNorm.weight"]
        optimizer_grouped_parameters = [
            {
                "params": [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)],
                "weight_decay": args.weight_decay,
            },
            {"params": [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], "weight_decay": 0.0},
        ]
        optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)
        scheduler = get_linear_schedule_with_warmup(
            optimizer, num_warmup_steps=args.warmup_steps, num_training_steps=t_total)
    if args.do_train:
        nb_tr_steps, tr_loss, exp_average_loss = 0, 0, None
        model.train()
        for _ in trange(int(args.num_train_epochs), desc="Epoch"):
            tr_loss = 0
            nb_tr_steps = 0
            tqdm_bar = tqdm(train_dataloader, desc="Training")
            for step, batch in enumerate(tqdm_bar):
                batch = tuple(t.to(device) for t in batch)
                input_ids, mc_token_ids, lm_labels, mc_labels = batch
                losses = model(input_ids, mc_token_ids=mc_token_ids, lm_labels=lm_labels, mc_labels=mc_labels)
                loss = args.lm_coef * losses[0] + losses[1]
                loss.backward()
                optimizer.step()
                scheduler.step()
                optimizer.zero_grad()
                tr_loss += loss.item()
                exp_average_loss = (
                    loss.item() if exp_average_loss is None else 0.7 * exp_average_loss + 0.3 * loss.item()
                )
                nb_tr_steps += 1
                tqdm_bar.desc = "Training loss: {:.2e} lr: {:.2e}".format(exp_average_loss, scheduler.get_lr()[0])
    # Save a trained model
    if args.do_train:
        # Save a trained model, configuration and tokenizer
        model_to_save = model.module if hasattr(model, "module") else model  # Only save the model itself
        # If we save using the predefined names, we can load using `from_pretrained`
        output_model_file = os.path.join(args.output_dir, WEIGHTS_NAME)
        output_config_file = os.path.join(args.output_dir, CONFIG_NAME)
        torch.save(model_to_save.state_dict(), output_model_file)
        model_to_save.config.to_json_file(output_config_file)
        tokenizer.save_vocabulary(args.output_dir)
        # Load a trained model and vocabulary that you have fine-tuned
        model = OpenAIGPTDoubleHeadsModel.from_pretrained(args.output_dir)
        tokenizer = OpenAIGPTTokenizer.from_pretrained(args.output_dir)
        model.to(device)
    if args.do_eval:
        model.eval()
        eval_loss, eval_accuracy = 0, 0
        nb_eval_steps, nb_eval_examples = 0, 0
        for batch in tqdm(eval_dataloader, desc="Evaluating"):
            batch = tuple(t.to(device) for t in batch)
            input_ids, mc_token_ids, lm_labels, mc_labels = batch
            with torch.no_grad():
                _, mc_loss, _, mc_logits = model(
                    input_ids, mc_token_ids=mc_token_ids, lm_labels=lm_labels, mc_labels=mc_labels)
            mc_logits = mc_logits.detach().cpu().numpy()
            mc_labels = mc_labels.to("cpu").numpy()
            tmp_eval_accuracy = accuracy(mc_logits, mc_labels)
            eval_loss += mc_loss.mean().item()
            eval_accuracy += tmp_eval_accuracy
            nb_eval_examples += input_ids.size(0)
            nb_eval_steps += 1
        eval_loss = eval_loss / nb_eval_steps
        eval_accuracy = eval_accuracy / nb_eval_examples
        train_loss = tr_loss / nb_tr_steps if args.do_train else None
        result = {"eval_loss": eval_loss, "eval_accuracy": eval_accuracy, "train_loss": train_loss}
        output_eval_file = os.path.join(args.output_dir, "eval_results.txt")
        with open(output_eval_file, "w") as writer:
            logger.info("***** Eval results *****")
            for key in sorted(result.keys()):
                logger.info("  %s = %s", key, str(result[key]))
                writer.write("%s = %s\n" % (key, str(result[key])))
if __name__ == "__main__":
main()
| 274 | 1 |
import os
from tempfile import TemporaryDirectory
from unittest import TestCase
import pytest
from absl.testing import parameterized
from datasets import config
from datasets.arrow_reader import HF_GCP_BASE_URL
from datasets.builder import DatasetBuilder
from datasets.dataset_dict import IterableDatasetDict
from datasets.iterable_dataset import IterableDataset
from datasets.load import dataset_module_factory, import_main_class
from datasets.utils.file_utils import cached_path
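# The dataset/config pairs below are expected to have preprocessed copies on the HF GCP bucket.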
DATASETS_ON_HF_GCP = [
{"dataset": "wikipedia", "config_name": "20220301.de"},
{"dataset": "wikipedia", "config_name": "20220301.en"},
{"dataset": "wikipedia", "config_name": "20220301.fr"},
{"dataset": "wikipedia", "config_name": "20220301.frr"},
{"dataset": "wikipedia", "config_name": "20220301.it"},
{"dataset": "wikipedia", "config_name": "20220301.simple"},
{"dataset": "snli", "config_name": "plain_text"},
{"dataset": "eli5", "config_name": "LFQA_reddit"},
{"dataset": "wiki40b", "config_name": "en"},
{"dataset": "wiki_dpr", "config_name": "psgs_w100.nq.compressed"},
{"dataset": "wiki_dpr", "config_name": "psgs_w100.nq.no_index"},
{"dataset": "wiki_dpr", "config_name": "psgs_w100.multiset.no_index"},
{"dataset": "natural_questions", "config_name": "default"},
]
def list_datasets_on_hf_gcp_parameters(with_config=True):
"""simple docstring"""
if with_config:
return [
{
"testcase_name": d["dataset"] + "/" + d["config_name"],
"dataset": d["dataset"],
"config_name": d["config_name"],
}
for d in DATASETS_ON_HF_GCP
]
else:
return [
{"testcase_name": dataset, "dataset": dataset} for dataset in {d["dataset"] for d in DATASETS_ON_HF_GCP}
]
@parameterized.named_parameters(list_datasets_on_hf_gcp_parameters(with_config=True))
class TestDatasetOnHfGcp(TestCase):
    """simple docstring"""

    dataset = None
    config_name = None

    def test_dataset_info_available(self, dataset, config_name):
        """simple docstring"""
        with TemporaryDirectory() as tmp_dir:
            dataset_module = dataset_module_factory(dataset, cache_dir=tmp_dir)
            builder_cls = import_main_class(dataset_module.module_path, dataset=True)
            builder_instance: DatasetBuilder = builder_cls(
                cache_dir=tmp_dir, config_name=config_name, hash=dataset_module.hash, )
            dataset_info_url = "/".join(
                [
                    HF_GCP_BASE_URL,
                    builder_instance._relative_data_dir(with_hash=False).replace(os.sep, "/"),
                    config.DATASET_INFO_FILENAME,
                ])
            dataset_info_path = cached_path(dataset_info_url, cache_dir=tmp_dir)
            self.assertTrue(os.path.exists(dataset_info_path))
@pytest.mark.integration
def test_wikipedia_frr(tmp_path_factory):
    """simple docstring"""
    tmp_dir = tmp_path_factory.mktemp("test_hf_gcp") / "test_wikipedia_simple"
    dataset_module = dataset_module_factory("wikipedia", cache_dir=tmp_dir)
    builder_cls = import_main_class(dataset_module.module_path)
    builder_instance: DatasetBuilder = builder_cls(
        cache_dir=tmp_dir, config_name="20220301.frr", hash=dataset_module.hash, )
    # use the HF cloud storage, not the original download_and_prepare that uses apache-beam
    builder_instance._download_and_prepare = None
    builder_instance.download_and_prepare()
    ds = builder_instance.as_dataset()
    assert ds
@pytest.mark.integration
def test_as_streaming_dataset(tmp_path):
    """simple docstring"""
    dataset_module = dataset_module_factory("wikipedia", cache_dir=tmp_path)
    builder_cls = import_main_class(dataset_module.module_path, dataset=True)
    builder_instance: DatasetBuilder = builder_cls(
        cache_dir=tmp_path, config_name="20220301.frr", hash=dataset_module.hash, )
    ds = builder_instance.as_streaming_dataset()
    assert ds
    assert isinstance(ds, IterableDatasetDict)
    assert "train" in ds
    assert isinstance(ds["train"], IterableDataset)
    assert next(iter(ds["train"]))
| 365 |
import torch
from diffusers import StableDiffusionPipeline
_lowercase : Optional[int] ="path-to-your-trained-model"
_lowercase : Union[str, Any] =StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.floataa).to("cuda")
_lowercase : Any ="A photo of sks dog in a bucket"
_lowercase : Any =pipe(prompt, num_inference_steps=50, guidance_scale=7.5).images[0]
image.save("dog-bucket.png")
| 266 | 0 |
def match_pattern(input_string: str, pattern: str) -> bool:
    len_string = len(input_string) + 1
    len_pattern = len(pattern) + 1
    # dp is a 2d matrix where dp[i][j] denotes whether the prefix string of
    # length i of input_string matches with the prefix string of length j of
    # the given pattern.
    # "dp" stands for dynamic programming.
    dp = [[0 for i in range(len_pattern)] for j in range(len_string)]
    # a string of zero length matches a pattern of zero length
    dp[0][0] = 1
    # a pattern of zero length will never match a string of non-zero length
    for i in range(1, len_string):
        dp[i][0] = 0
    # a string of zero length will match a pattern where there
    # is at least one * alternatively
    for j in range(1, len_pattern):
        dp[0][j] = dp[0][j - 2] if pattern[j - 1] == "*" else 0
    # now use a bottom-up approach to fill in all remaining lengths
    for i in range(1, len_string):
        for j in range(1, len_pattern):
            if input_string[i - 1] == pattern[j - 1] or pattern[j - 1] == ".":
                dp[i][j] = dp[i - 1][j - 1]
            elif pattern[j - 1] == "*":
                if dp[i][j - 2] == 1:
                    dp[i][j] = 1
                elif pattern[j - 2] in (input_string[i - 1], "."):
                    dp[i][j] = dp[i - 1][j]
                else:
                    dp[i][j] = 0
            else:
                dp[i][j] = 0
    return bool(dp[-1][-1])
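# The DP table has (len(input_string) + 1) * (len(pattern) + 1) cells, so time and space are O(m * n).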
if __name__ == "__main__":
import doctest
doctest.testmod()
# inputing the strings
# input_string = input("input a string :")
# pattern = input("input a pattern :")
_snake_case = "aab"
_snake_case = "c*a*b"
# using function to check whether given string matches the given pattern
if match_pattern(input_string, pattern):
print(f"""{input_string} matches the given pattern {pattern}""")
else:
print(f"""{input_string} does not match with the given pattern {pattern}""")
| 26 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
    "configuration_swiftformer": [
        "SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "SwiftFormerConfig",
        "SwiftFormerOnnxConfig",
    ]
}
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_swiftformer"] = [
        "SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "SwiftFormerForImageClassification",
        "SwiftFormerModel",
        "SwiftFormerPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_swiftformer import (
SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
SwiftFormerConfig,
SwiftFormerOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swiftformer import (
SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
SwiftFormerForImageClassification,
SwiftFormerModel,
SwiftFormerPreTrainedModel,
)
else:
import sys
UpperCAmelCase : List[str] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 252 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    'configuration_pegasus_x': ['PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP', 'PegasusXConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_pegasus_x'] = [
        'PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST',
        'PegasusXForConditionalGeneration',
        'PegasusXModel',
        'PegasusXPreTrainedModel',
    ]
if TYPE_CHECKING:
from .configuration_pegasus_x import PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP, PegasusXConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_pegasus_x import (
PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST,
PegasusXForConditionalGeneration,
PegasusXModel,
PegasusXPreTrainedModel,
)
else:
import sys
lowerCAmelCase_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 302 |
"""simple docstring"""
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision import transforms
from transformers import BitImageProcessor, FocalNetConfig, FocalNetForImageClassification
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
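# The conversion below rebuilds a FocalNetConfig from the checkpoint name, renames the
# original state-dict keys to the transformers layout, and verifies the logits on a
# COCO test image before saving or pushing the model.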
def get_focalnet_config(model_name):
    depths = [2, 2, 6, 2] if "tiny" in model_name else [2, 2, 18, 2]
    use_conv_embed = True if "large" in model_name or "huge" in model_name else False
    use_post_layernorm = True if "large" in model_name or "huge" in model_name else False
    use_layerscale = True if "large" in model_name or "huge" in model_name else False

    if "large" in model_name or "xlarge" in model_name or "huge" in model_name:
        if "fl3" in model_name:
            focal_levels = [3, 3, 3, 3]
            focal_windows = [5, 5, 5, 5]
        elif "fl4" in model_name:
            focal_levels = [4, 4, 4, 4]
            focal_windows = [3, 3, 3, 3]
    if "tiny" in model_name or "small" in model_name or "base" in model_name:
        focal_windows = [3, 3, 3, 3]
        if "lrf" in model_name:
            focal_levels = [3, 3, 3, 3]
        else:
            focal_levels = [2, 2, 2, 2]

    if "tiny" in model_name:
        embed_dim = 96
    elif "small" in model_name:
        embed_dim = 96
    elif "base" in model_name:
        embed_dim = 128
    elif "large" in model_name:
        embed_dim = 192
    elif "xlarge" in model_name:
        embed_dim = 256
    elif "huge" in model_name:
        embed_dim = 352

    # set label information
    repo_id = "huggingface/label-files"
    if "large" in model_name or "huge" in model_name:
        filename = "imagenet-22k-id2label.json"
    else:
        filename = "imagenet-1k-id2label.json"

    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    config = FocalNetConfig(
        embed_dim=embed_dim, depths=depths, focal_levels=focal_levels, focal_windows=focal_windows, use_conv_embed=use_conv_embed, id2label=id2label, label2id=label2id, use_post_layernorm=use_post_layernorm, use_layerscale=use_layerscale, )
    return config
def rename_key(name):
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "embeddings.norm")
    if "layers" in name:
        name = "encoder." + name
    if "encoder.layers" in name:
        name = name.replace("encoder.layers", "encoder.stages")
    if "downsample.proj" in name:
        name = name.replace("downsample.proj", "downsample.projection")
    if "blocks" in name:
        name = name.replace("blocks", "layers")
    if "modulation.f.weight" in name or "modulation.f.bias" in name:
        name = name.replace("modulation.f", "modulation.projection_in")
    if "modulation.h.weight" in name or "modulation.h.bias" in name:
        name = name.replace("modulation.h", "modulation.projection_context")
    if "modulation.proj.weight" in name or "modulation.proj.bias" in name:
        name = name.replace("modulation.proj", "modulation.projection_out")
    if name == "norm.weight":
        name = "layernorm.weight"
    if name == "norm.bias":
        name = "layernorm.bias"
    if "head" in name:
        name = name.replace("head", "classifier")
    else:
        name = "focalnet." + name
    return name
def convert_focalnet_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub=False):
    # fmt: off
    model_name_to_url = {
        "focalnet-tiny": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_srf.pth",
        "focalnet-tiny-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_lrf.pth",
        "focalnet-small": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_srf.pth",
        "focalnet-small-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_lrf.pth",
        "focalnet-base": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_srf.pth",
        "focalnet-base-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_lrf.pth",
        "focalnet-large-lrf-fl3": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384.pth",
        "focalnet-large-lrf-fl4": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384_fl4.pth",
        "focalnet-xlarge-lrf-fl3": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384.pth",
        "focalnet-xlarge-lrf-fl4": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384_fl4.pth",
    }
    # fmt: on

    checkpoint_url = model_name_to_url[model_name]
    print("Checkpoint URL: ", checkpoint_url)
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["model"]

    # rename keys
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        state_dict[rename_key(key)] = val

    config = get_focalnet_config(model_name)
    model = FocalNetForImageClassification(config)
    model.eval()

    # load state dict
    model.load_state_dict(state_dict)

    # verify conversion
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    processor = BitImageProcessor(
        do_resize=True, size={"shortest_edge": 256}, resample=PILImageResampling.BILINEAR, do_center_crop=True, crop_size=224, do_normalize=True, image_mean=IMAGENET_DEFAULT_MEAN, image_std=IMAGENET_DEFAULT_STD, )
    image = Image.open(requests.get(url, stream=True).raw)
    inputs = processor(images=image, return_tensors="pt")

    image_transforms = transforms.Compose(
        [
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
        ])
    original_pixel_values = image_transforms(image).unsqueeze(0)

    # verify pixel_values
    assert torch.allclose(inputs.pixel_values, original_pixel_values, atol=1e-4)

    outputs = model(**inputs)
    predicted_class_idx = outputs.logits.argmax(-1).item()
    print("Predicted class:", model.config.id2label[predicted_class_idx])
    print("First values of logits:", outputs.logits[0, :3])

    if model_name == "focalnet-tiny":
        expected_slice = torch.tensor([0.2166, -0.4368, 0.2191])
    elif model_name == "focalnet-tiny-lrf":
        expected_slice = torch.tensor([1.1669, 0.0125, -0.1695])
    elif model_name == "focalnet-small":
        expected_slice = torch.tensor([0.4917, -0.0430, 0.1341])
    elif model_name == "focalnet-small-lrf":
        expected_slice = torch.tensor([-0.2588, -0.5342, -0.2331])
    elif model_name == "focalnet-base":
        expected_slice = torch.tensor([-0.1655, -0.4090, -0.1730])
    elif model_name == "focalnet-base-lrf":
        expected_slice = torch.tensor([0.5306, -0.0483, -0.3928])
    assert torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        print(f"Saving model and processor of {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print(f"Pushing model and processor of {model_name} to the hub...")
        model.push_to_hub(f"{model_name}")
        processor.push_to_hub(f"{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='focalnet-tiny',
type=str,
help='Name of the FocalNet model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Whether to push the model and processor to the hub.',
)
    args = parser.parse_args()
convert_focalnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 302 | 1 |
import numpy as np
SQUARE = [
    ["a", "b", "c", "d", "e"],
    ["f", "g", "h", "i", "k"],
    ["l", "m", "n", "o", "p"],
    ["q", "r", "s", "t", "u"],
    ["v", "w", "x", "y", "z"],
]
class BifidCipher:
    def __init__(self):
        # 5x5 Polybius square; "j" is folded into "i" so 25 cells cover the alphabet
        self.SQUARE = np.array(SQUARE)

    def letter_to_numbers(self, letter):
        index1, index2 = np.where(letter == self.SQUARE)
        indexes = np.concatenate([index1 + 1, index2 + 1])
        return indexes

    def numbers_to_letter(self, index1, index2):
        letter = self.SQUARE[index1 - 1, index2 - 1]
        return letter

    def encode(self, message):
        message = message.lower()
        message = message.replace(" ", "")
        message = message.replace("j", "i")
        first_step = np.empty((2, len(message)))
        for letter_index in range(len(message)):
            numbers = self.letter_to_numbers(message[letter_index])
            first_step[0, letter_index] = numbers[0]
            first_step[1, letter_index] = numbers[1]
        second_step = first_step.reshape(2 * len(message))
        encoded_message = ""
        for numbers_index in range(len(message)):
            index1 = int(second_step[numbers_index * 2])
            index2 = int(second_step[(numbers_index * 2) + 1])
            letter = self.numbers_to_letter(index1, index2)
            encoded_message = encoded_message + letter
        return encoded_message

    def decode(self, message):
        message = message.lower()
        message = message.replace(" ", "")
        first_step = np.empty(2 * len(message))
        for letter_index in range(len(message)):
            numbers = self.letter_to_numbers(message[letter_index])
            first_step[letter_index * 2] = numbers[0]
            first_step[letter_index * 2 + 1] = numbers[1]
        second_step = first_step.reshape((2, len(message)))
        decoded_message = ""
        for numbers_index in range(len(message)):
            index1 = int(second_step[0, numbers_index])
            index2 = int(second_step[1, numbers_index])
            letter = self.numbers_to_letter(index1, index2)
            decoded_message = decoded_message + letter
        return decoded_message
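
# Minimal usage sketch (this driver block is an illustration added here, not part
# of the original module):
if __name__ == "__main__":
    cipher = BifidCipher()
    encoded = cipher.encode("testmessage")
    print(encoded)
    print(cipher.decode(encoded))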
| 94 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)
class ConvNextImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize=True,
        size=None,
        crop_pct=None,
        resample=PILImageResampling.BILINEAR,
        do_rescale=True,
        rescale_factor=1 / 255,
        do_normalize=True,
        image_mean=None,
        image_std=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 384}
        size = get_size_dict(size, default_to_square=False)
        self.do_resize = do_resize
        self.size = size
        # Default value set here for backwards compatibility where the value in config is None
        self.crop_pct = crop_pct if crop_pct is not None else 224 / 256
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(self, image, size, crop_pct, resample=PILImageResampling.BICUBIC, data_format=None, **kwargs):
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"Size dictionary must contain 'shortest_edge' key. Got {size.keys()}")
        shortest_edge = size["shortest_edge"]
        if shortest_edge < 384:
            # maintain same ratio, resizing shortest edge to shortest_edge/crop_pct
            resize_shortest_edge = int(shortest_edge / crop_pct)
            resize_size = get_resize_output_image_size(image, size=resize_shortest_edge, default_to_square=False)
            image = resize(image=image, size=resize_size, resample=resample, data_format=data_format, **kwargs)
            # then crop to (shortest_edge, shortest_edge)
            return center_crop(image=image, size=(shortest_edge, shortest_edge), data_format=data_format, **kwargs)
        else:
            # warping (no cropping) when evaluated at 384 or larger
            return resize(
                image, size=(shortest_edge, shortest_edge), resample=resample, data_format=data_format, **kwargs)

    def rescale(self, image, scale, data_format=None, **kwargs):
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image, mean, std, data_format=None, **kwargs):
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images,
        do_resize=None,
        size=None,
        crop_pct=None,
        resample=None,
        do_rescale=None,
        rescale_factor=None,
        do_normalize=None,
        image_mean=None,
        image_std=None,
        return_tensors=None,
        data_format=ChannelDimension.FIRST,
        **kwargs,
    ):
        do_resize = do_resize if do_resize is not None else self.do_resize
        crop_pct = crop_pct if crop_pct is not None else self.crop_pct
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray.")
        if do_resize and size is None or resample is None:
            raise ValueError("Size and resample must be specified if do_resize is True.")
        if do_resize and size["shortest_edge"] < 384 and crop_pct is None:
            raise ValueError("crop_pct must be specified if size < 384.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            images = [self.resize(image=image, size=size, crop_pct=crop_pct, resample=resample) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
| 287 | 0 |
"""simple docstring"""
import argparse
import fairseq
import torch
from transformers import UniSpeechSatConfig, UniSpeechSatForCTC, UniSpeechSatForPreTraining, logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

MAPPING = {
"post_extract_proj": "feature_projection.projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
"fc2": "encoder.layers.*.feed_forward.output_dense",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "encoder.layer_norm",
"encoder.layer_norm_for_extract": "layer_norm_for_extract",
"w2v_model.layer_norm": "feature_projection.layer_norm",
"quantizer.weight_proj": "quantizer.weight_proj",
"quantizer.vars": "quantizer.codevectors",
"project_q": "project_q",
"final_proj": "project_hid",
"w2v_encoder.proj": "lm_head",
"label_embs_concat": "label_embeddings_concat",
"mask_emb": "masked_spec_embed",
"spk_proj": "speaker_proj",
}
TOP_LEVEL_KEYS = [
"lm_head",
"quantizer.weight_proj",
"quantizer.codevectors",
"project_q",
"project_hid",
"label_embeddings_concat",
"speaker_proj",
"layer_norm_for_extract",
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)
    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape
    if hf_shape != value.shape:
        raise ValueError(
            f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
            f" {value.shape} for {full_name}")
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value
    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
def recursively_load_weights(fairseq_model, hf_model):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.unispeech_sat.feature_extractor
    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name, value, feature_extractor, unused_weights, hf_model.config.feat_extract_norm == "group", )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = "unispeech_sat." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    if "layer_norm_for_extract" in name and (".".join(name.split(".")[:-1]) != key):
                        # special case since naming is very similar
                        continue
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)
    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])
    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.")
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.")
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.")
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.")
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_unispeech_sat_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True):
    if config_path is not None:
        config = UniSpeechSatConfig.from_pretrained(config_path)
    else:
        config = UniSpeechSatConfig()
    dict_path = ""
    if is_finetuned:
        hf_wavavec = UniSpeechSatForCTC(config)
    else:
        hf_wavavec = UniSpeechSatForPreTraining(config)
    model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
        [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])})
    model = model[0].eval()
    recursively_load_weights(model, hf_wavavec)
    hf_wavavec.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
parser.add_argument(
'''--not_finetuned''', action='''store_true''', help='''Whether the model to convert is a fine-tuned model or not'''
)
    args = parser.parse_args()
convert_unispeech_sat_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
| 359 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
__SCREAMING_SNAKE_CASE : Dict = {
'''configuration_mega''': ['''MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MegaConfig''', '''MegaOnnxConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE : Union[str, Any] = [
'''MEGA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MegaForCausalLM''',
'''MegaForMaskedLM''',
'''MegaForMultipleChoice''',
'''MegaForQuestionAnswering''',
'''MegaForSequenceClassification''',
'''MegaForTokenClassification''',
'''MegaModel''',
'''MegaPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_mega import MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP, MegaConfig, MegaOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mega import (
MEGA_PRETRAINED_MODEL_ARCHIVE_LIST,
MegaForCausalLM,
MegaForMaskedLM,
MegaForMultipleChoice,
MegaForQuestionAnswering,
MegaForSequenceClassification,
MegaForTokenClassification,
MegaModel,
MegaPreTrainedModel,
)
else:
import sys
__SCREAMING_SNAKE_CASE : List[Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 73 | 0 |
"""simple docstring"""
from ...utils import (
    OptionalDependencyNotAvailable,
    is_note_seq_available,
    is_torch_available,
    is_transformers_available,
)


try:
    if not (is_transformers_available() and is_torch_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_torch_and_transformers_objects import *  # noqa F403
else:
    from .notes_encoder import SpectrogramNotesEncoder
    from .continous_encoder import SpectrogramContEncoder
    from .pipeline_spectrogram_diffusion import (
        SpectrogramContEncoder,
        SpectrogramDiffusionPipeline,
        T5FilmDecoder,
    )

try:
    if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_transformers_and_torch_and_note_seq_objects import *  # noqa F403
else:
    from .midi_utils import MidiProcessor
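
# A minimal usage sketch (the checkpoint name is a real hub repo, the MIDI path
# is hypothetical; requires torch, transformers, and note_seq):
#
#   from diffusers import SpectrogramDiffusionPipeline, MidiProcessor
#
#   pipe = SpectrogramDiffusionPipeline.from_pretrained("google/music-spectrogram-diffusion")
#   processor = MidiProcessor()
#   output = pipe(processor("beethoven_hammerklavier_2.mid"))
#   audio = output.audios[0]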
| 238 |
"""simple docstring"""
from __future__ import annotations
ELECTRON_CHARGE = 1.6021e-19  # units = C


def electric_conductivity(
    conductivity: float,
    electron_conc: float,
    mobility: float,
) -> tuple[str, float]:
    """
    Solve sigma = n * e * mu for whichever of conductivity, electron
    concentration, or mobility is passed as 0; exactly one of the three
    inputs must be 0.
    """
    if (conductivity, electron_conc, mobility).count(0) != 1:
        raise ValueError("Exactly one of conductivity, electron_conc, or mobility must be 0")
    elif conductivity < 0:
        raise ValueError("Conductivity cannot be negative")
    elif electron_conc < 0:
        raise ValueError("Electron concentration cannot be negative")
    elif mobility < 0:
        raise ValueError("Mobility cannot be negative")
    elif conductivity == 0:
        return (
            "conductivity",
            mobility * electron_conc * ELECTRON_CHARGE,
        )
    elif electron_conc == 0:
        return (
            "electron_conc",
            conductivity / (mobility * ELECTRON_CHARGE),
        )
    else:
        return (
            "mobility",
            conductivity / (electron_conc * ELECTRON_CHARGE),
        )
if __name__ == "__main__":
import doctest
doctest.testmod()
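
# A quick worked example (illustrative numbers): with conductivity = 25 and
# electron_conc = 100, passing mobility as 0 solves sigma = n * e * mu for
# mobility = 25 / (100 * 1.6021e-19), roughly 1.56e+18:
#
#   name, value = electric_conductivity(conductivity=25, electron_conc=100, mobility=0)
#   print(name, value)  # mobility 1.56...e+18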
| 238 | 1 |
"""simple docstring"""
from json import JSONDecodeError # Workaround for requests.exceptions.JSONDecodeError
import requests
def get_openlibrary_data(olid: str = "isbn/0140328726") -> dict:
    """Given an 'isbn/0140328726'-style olid, return the book data from Open Library as a dict."""
    new_olid = olid.strip().strip("/")  # Remove leading/trailing whitespace & slashes
    if new_olid.count("/") != 1:
        msg = f"{olid} is not a valid Open Library olid"
        raise ValueError(msg)
    return requests.get(f"https://openlibrary.org/{new_olid}.json").json()


def summarize_book(ol_book_data: dict) -> dict:
    """Given Open Library book data, return a human-readable summary dict."""
    desired_keys = {
        "title": "Title",
        "publish_date": "Publish date",
        "authors": "Authors",
        "number_of_pages": "Number of pages:",
        "first_sentence": "First sentence",
        "isbn_10": "ISBN (10)",
        "isbn_13": "ISBN (13)",
    }
    data = {better_key: ol_book_data[key] for key, better_key in desired_keys.items()}
    data["Authors"] = [
        get_openlibrary_data(author["key"])["name"] for author in data["Authors"]
    ]
    data["First sentence"] = data["First sentence"]["value"]
    for key, value in data.items():
        if isinstance(value, list):
            data[key] = ", ".join(value)
    return data
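
# A usage sketch (performs live HTTP requests against openlibrary.org; the
# default olid "isbn/0140328726" resolves to Roald Dahl's "Matilda"):
#
#   book = summarize_book(get_openlibrary_data("isbn/0140328726"))
#   print(book["Title"])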
if __name__ == "__main__":
import doctest
doctest.testmod()
while True:
UpperCamelCase_ =input("""\nEnter the ISBN code to search (or \'quit\' to stop): """).strip()
if isbn.lower() in ("", "q", "quit", "exit", "stop"):
break
if len(isbn) not in (10, 13) or not isbn.isdigit():
print(F"Sorry, {isbn} is not a valid ISBN. Please, input a valid ISBN.")
continue
print(F"\nSearching Open Library for ISBN: {isbn}...\n")
try:
UpperCamelCase_ =summarize_book(get_openlibrary_data(F"isbn/{isbn}"))
print("""\n""".join(F"{key}: {value}" for key, value in book_summary.items()))
except JSONDecodeError: # Workaround for requests.exceptions.RequestException:
print(F"Sorry, there are no results for ISBN: {isbn}.")
| 351 |
"""simple docstring"""
import numpy as np
import torch
from imwatermark import WatermarkEncoder
# Copied from https://github.com/Stability-AI/generative-models/blob/613af104c6b85184091d42d374fef420eddb356d/scripts/demo/streamlit_helpers.py#L66
WATERMARK_MESSAGE = 0b101100111110110010010000011110111011000110011110
# bin(x)[2:] gives bits of x as str, use int to convert them to 0/1
WATERMARK_BITS = [int(bit) for bit in bin(WATERMARK_MESSAGE)[2:]]


class StableDiffusionXLWatermarker:
    def __init__(self):
        self.watermark = WATERMARK_BITS
        self.encoder = WatermarkEncoder()

        self.encoder.set_watermark("bits", self.watermark)

    def apply_watermark(self, images: torch.FloatTensor):
        # can't encode images that are smaller than 256 pixels
        if images.shape[-1] < 256:
            return images

        images = (255 * (images / 2 + 0.5)).cpu().permute(0, 2, 3, 1).float().numpy()

        images = [self.encoder.encode(image, "dwtDct") for image in images]

        images = torch.from_numpy(np.array(images)).permute(0, 3, 1, 2)

        images = torch.clamp(2 * (images / 255 - 0.5), min=-1.0, max=1.0)
        return images
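
# A minimal usage sketch (assumes the `invisible-watermark` package is
# installed and `images` is a float tensor in [-1, 1] of shape
# [batch, 3, H, W] with H, W >= 256):
#
#   watermarker = StableDiffusionXLWatermarker()
#   images = watermarker.apply_watermark(images)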
| 128 | 0 |
"""simple docstring"""
def stooge_sort(arr: list) -> list:
    stooge(arr, 0, len(arr) - 1)
    return arr


def stooge(arr: list, i: int, h: int) -> None:
    if i >= h:
        return

    # If the first element is larger than the last, swap them
    if arr[i] > arr[h]:
        arr[i], arr[h] = arr[h], arr[i]

    # If there are more than 2 elements in the array
    if h - i + 1 > 2:
        t = (h - i + 1) // 3

        # Recursively sort the first 2/3 of the elements
        stooge(arr, i, h - t)

        # Recursively sort the last 2/3 of the elements
        stooge(arr, i + t, h)

        # Recursively sort the first 2/3 of the elements again
        stooge(arr, i, h - t)
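
# A quick sanity check (stooge sort is a teaching algorithm: correct, but very
# slow at roughly O(n^2.71), so use it only on tiny inputs):
#
#   print(stooge_sort([2, 4, 5, 3, 1]))  # [1, 2, 3, 4, 5]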
if __name__ == "__main__":
lowercase__ = input("""Enter numbers separated by a comma:\n""").strip()
lowercase__ = [int(item) for item in user_input.split(""",""")]
print(stooge_sort(unsorted)) | 96 |
"""simple docstring"""
from typing import List, Optional, Union
import numpy as np
import torch
import torchaudio.compliance.kaldi as ta_kaldi
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
logger = logging.get_logger(__name__)
class Speech2TextFeatureExtractor(SequenceFeatureExtractor):
    """Speech2Text feature extractor: log-mel filter-bank features with utterance-level CMVN."""

    model_input_names = ["input_features", "attention_mask"]

    def __init__(
        self,
        feature_size=80,
        sampling_rate=16000,
        num_mel_bins=80,
        padding_value=0.0,
        do_ceptral_normalize=True,
        normalize_means=True,
        normalize_vars=True,
        **kwargs,
    ):
        super().__init__(feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs)
        self.num_mel_bins = num_mel_bins
        self.do_ceptral_normalize = do_ceptral_normalize
        self.normalize_means = normalize_means
        self.normalize_vars = normalize_vars
        self.return_attention_mask = True

    def _extract_fbank_features(self, waveform: np.ndarray) -> np.ndarray:
        waveform = waveform * (2**15)  # Kaldi compliance: 16-bit signed integers
        waveform = torch.from_numpy(waveform).unsqueeze(0)
        features = ta_kaldi.fbank(waveform, num_mel_bins=self.num_mel_bins, sample_frequency=self.sampling_rate)
        return features.numpy()

    @staticmethod
    def utterance_cmvn(
        x: np.ndarray,
        input_length: int,
        normalize_means: Optional[bool] = True,
        normalize_vars: Optional[bool] = True,
        padding_value: float = 0.0,
    ) -> np.ndarray:
        # make sure we normalize float32 arrays
        if normalize_means:
            mean = x[:input_length].mean(axis=0)
            x = np.subtract(x, mean)
        if normalize_vars:
            std = x[:input_length].std(axis=0)
            x = np.divide(x, std)

        if input_length < x.shape[0]:
            x[input_length:] = padding_value

        # make sure array is in float32
        x = x.astype(np.float32)

        return x

    def normalize(
        self, input_features: List[np.ndarray], attention_mask: Optional[np.ndarray] = None
    ) -> List[np.ndarray]:
        lengths = attention_mask.sum(-1) if attention_mask is not None else [x.shape[0] for x in input_features]
        return [
            self.utterance_cmvn(x, n, self.normalize_means, self.normalize_vars, self.padding_value)
            for x, n in zip(input_features, lengths)
        ]

    def __call__(
        self,
        raw_speech: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]],
        padding: Union[bool, str, PaddingStrategy] = False,
        max_length: Optional[int] = None,
        truncation: bool = False,
        pad_to_multiple_of: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        sampling_rate: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
        **kwargs,
    ) -> BatchFeature:
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"The model corresponding to this feature extractor: {self} was trained using a sampling rate of"
                    f" {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with"
                    f" {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )

        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )

        if is_batched:
            raw_speech = [np.asarray(speech, dtype=np.float32) for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float32)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float32)

        # always return batch
        if not is_batched:
            raw_speech = [raw_speech]

        # extract fbank features
        features = [self._extract_fbank_features(waveform) for waveform in raw_speech]

        # convert into correct format for padding
        encoded_inputs = BatchFeature({"input_features": features})

        padded_inputs = self.pad(
            encoded_inputs,
            padding=padding,
            max_length=max_length,
            truncation=truncation,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask,
            **kwargs,
        )

        # make sure list is in array format
        input_features = padded_inputs.get("input_features")
        if isinstance(input_features[0], list):
            padded_inputs["input_features"] = [np.asarray(feature, dtype=np.float32) for feature in input_features]

        attention_mask = padded_inputs.get("attention_mask")
        if attention_mask is not None:
            padded_inputs["attention_mask"] = [np.asarray(array, dtype=np.int32) for array in attention_mask]

        # Utterance-level cepstral mean and variance normalization
        if self.do_ceptral_normalize:
            attention_mask = (
                np.array(attention_mask, dtype=np.int32)
                if self._get_padding_strategies(padding, max_length=max_length) is not PaddingStrategy.DO_NOT_PAD
                else None
            )
            padded_inputs["input_features"] = self.normalize(
                padded_inputs["input_features"], attention_mask=attention_mask
            )

        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors)

        return padded_inputs
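
# A minimal usage sketch (the waveform below is random noise purely for
# illustration; real inputs should be 16 kHz mono audio):
#
#   extractor = Speech2TextFeatureExtractor()
#   waveform = np.random.randn(16000).astype(np.float32)  # 1 second of "audio"
#   inputs = extractor(waveform, sampling_rate=16000, padding=True, return_tensors="np")
#   print(inputs["input_features"][0].shape)  # (num_frames, 80)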
| 266 | 0 |
"""simple docstring"""
import argparse
import os
import transformers
from .convert_slow_tokenizer import SLOW_TO_FAST_CONVERTERS
from .utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

TOKENIZER_CLASSES = {name: getattr(transformers, name + "Fast") for name in SLOW_TO_FAST_CONVERTERS}
def convert_slow_checkpoint_to_fast(tokenizer_name, checkpoint_name, dump_path, force_download):
    if tokenizer_name is not None and tokenizer_name not in TOKENIZER_CLASSES:
        raise ValueError(f"Unrecognized tokenizer name, should be one of {list(TOKENIZER_CLASSES.keys())}.")

    if tokenizer_name is None:
        tokenizer_names = TOKENIZER_CLASSES
    else:
        tokenizer_names = {tokenizer_name: getattr(transformers, tokenizer_name + "Fast")}

    logger.info(f"Loading tokenizer classes: {tokenizer_names}")

    for tokenizer_name in tokenizer_names:
        tokenizer_class = TOKENIZER_CLASSES[tokenizer_name]

        add_prefix = True
        if checkpoint_name is None:
            checkpoint_names = list(tokenizer_class.max_model_input_sizes.keys())
        else:
            checkpoint_names = [checkpoint_name]

        logger.info(f"For tokenizer {tokenizer_class.__class__.__name__} loading checkpoints: {checkpoint_names}")

        for checkpoint in checkpoint_names:
            logger.info(f"Loading {tokenizer_class.__class__.__name__} {checkpoint}")

            # Load tokenizer
            tokenizer = tokenizer_class.from_pretrained(checkpoint, force_download=force_download)

            # Save fast tokenizer
            logger.info(f"Save fast tokenizer to {dump_path} with prefix {checkpoint} add_prefix {add_prefix}")

            # For organization names we create sub-directories
            if "/" in checkpoint:
                checkpoint_directory, checkpoint_prefix_name = checkpoint.split("/")
                dump_path_full = os.path.join(dump_path, checkpoint_directory)
            elif add_prefix:
                checkpoint_prefix_name = checkpoint
                dump_path_full = dump_path
            else:
                checkpoint_prefix_name = None
                dump_path_full = dump_path

            logger.info(f"=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}")

            if checkpoint in list(tokenizer.pretrained_vocab_files_map.values())[0]:
                file_path = list(tokenizer.pretrained_vocab_files_map.values())[0][checkpoint]
                next_char = file_path.split(checkpoint)[-1][0]
                if next_char == "/":
                    dump_path_full = os.path.join(dump_path_full, checkpoint_prefix_name)
                    checkpoint_prefix_name = None

                logger.info(f"=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}")

            file_names = tokenizer.save_pretrained(
                dump_path_full, legacy_format=False, filename_prefix=checkpoint_prefix_name
            )
            logger.info(f"=> File names {file_names}")

            for file_name in file_names:
                if not file_name.endswith("tokenizer.json"):
                    os.remove(file_name)
                    logger.info(f"=> removing {file_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--dump_path", default=None, type=str, required=True, help="Path to output generated fast tokenizer files."
    )
    parser.add_argument(
        "--tokenizer_name",
        default=None,
        type=str,
        help=(
            f"Optional tokenizer type selected in the list of {list(TOKENIZER_CLASSES.keys())}. If not given, will "
            "download and convert all the checkpoints from AWS."
        ),
    )
    parser.add_argument(
        "--checkpoint_name",
        default=None,
        type=str,
        help="Optional checkpoint name. If not given, will download and convert the canonical checkpoints from AWS.",
    )
    parser.add_argument(
        "--force_download",
        action="store_true",
        help="Re-download checkpoints.",
    )
    args = parser.parse_args()

    convert_slow_checkpoint_to_fast(args.tokenizer_name, args.checkpoint_name, args.dump_path, args.force_download)
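
# Example invocation (a sketch; "bert-base-uncased" is a real hub checkpoint,
# the script filename and output directory are hypothetical):
#
#   python convert_slow_tokenizers_checkpoints_to_fast.py \
#       --tokenizer_name BertTokenizer \
#       --checkpoint_name bert-base-uncased \
#       --dump_path ./fast_tokenizers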
| 362 |
"""simple docstring"""
TEXT_TO_IMAGE_PARAMS = frozenset(
    [
        "prompt",
        "height",
        "width",
        "guidance_scale",
        "negative_prompt",
        "prompt_embeds",
        "negative_prompt_embeds",
        "cross_attention_kwargs",
    ]
)

TEXT_TO_IMAGE_BATCH_PARAMS = frozenset(["prompt", "negative_prompt"])

TEXT_TO_IMAGE_IMAGE_PARAMS = frozenset([])

IMAGE_TO_IMAGE_IMAGE_PARAMS = frozenset(["image"])

IMAGE_VARIATION_PARAMS = frozenset(
    [
        "image",
        "height",
        "width",
        "guidance_scale",
    ]
)

IMAGE_VARIATION_BATCH_PARAMS = frozenset(["image"])

TEXT_GUIDED_IMAGE_VARIATION_PARAMS = frozenset(
    [
        "prompt",
        "image",
        "height",
        "width",
        "guidance_scale",
        "negative_prompt",
        "prompt_embeds",
        "negative_prompt_embeds",
    ]
)

TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS = frozenset(["prompt", "image", "negative_prompt"])

TEXT_GUIDED_IMAGE_INPAINTING_PARAMS = frozenset(
    [
        # Text guided image variation with an image mask
        "prompt",
        "image",
        "mask_image",
        "height",
        "width",
        "guidance_scale",
        "negative_prompt",
        "prompt_embeds",
        "negative_prompt_embeds",
    ]
)

TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS = frozenset(["prompt", "image", "mask_image", "negative_prompt"])

IMAGE_INPAINTING_PARAMS = frozenset(
    [
        # image variation with an image mask
        "image",
        "mask_image",
        "height",
        "width",
        "guidance_scale",
    ]
)

IMAGE_INPAINTING_BATCH_PARAMS = frozenset(["image", "mask_image"])

IMAGE_GUIDED_IMAGE_INPAINTING_PARAMS = frozenset(
    [
        "example_image",
        "image",
        "mask_image",
        "height",
        "width",
        "guidance_scale",
    ]
)

IMAGE_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS = frozenset(["example_image", "image", "mask_image"])

CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS = frozenset(["class_labels"])

CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS = frozenset(["class_labels"])

UNCONDITIONAL_IMAGE_GENERATION_PARAMS = frozenset(["batch_size"])

UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS = frozenset([])

UNCONDITIONAL_AUDIO_GENERATION_PARAMS = frozenset(["batch_size"])

UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS = frozenset([])

TEXT_TO_AUDIO_PARAMS = frozenset(
    [
        "prompt",
        "audio_length_in_s",
        "guidance_scale",
        "negative_prompt",
        "prompt_embeds",
        "negative_prompt_embeds",
        "cross_attention_kwargs",
    ]
)

TEXT_TO_AUDIO_BATCH_PARAMS = frozenset(["prompt", "negative_prompt"])

TOKENS_TO_AUDIO_GENERATION_PARAMS = frozenset(["input_tokens"])

TOKENS_TO_AUDIO_GENERATION_BATCH_PARAMS = frozenset(["input_tokens"])
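
# Usage note (a sketch of how pipeline test suites typically consume these
# sets; the test class name is hypothetical): a tester mixin declares which
# __call__ arguments its pipeline family accepts, e.g.
#
#   class MyTextToImagePipelineFastTests(PipelineTesterMixin, unittest.TestCase):
#       params = TEXT_TO_IMAGE_PARAMS
#       batch_params = TEXT_TO_IMAGE_BATCH_PARAMS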
| 326 | 0 |