| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| string (lengths 81–54k) | int64 (0–721) | string (lengths 91–41.9k) | int64 (0–699) | int64 (0–1) |
"""Project Euler 22: Names scores."""
import os


def solution() -> int:
    """Return the total of all name scores in p022_names.txt."""
    with open(os.path.dirname(__file__) + "/p022_names.txt") as file:
        names = str(file.readlines()[0])
    names = names.replace('"', "").split(",")
    names.sort()

    name_score = 0
    total_score = 0
    for i, name in enumerate(names):
        for letter in name:
            name_score += ord(letter) - 64  # 'A' -> 1, 'B' -> 2, ...
        total_score += (i + 1) * name_score
        name_score = 0  # reset before scoring the next name
    return total_score


if __name__ == "__main__":
    print(solution())
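As a sanity check on the scoring rule, the worked example from the problem statement: COLIN has alphabetical value 3 + 15 + 12 + 9 + 14 = 53, and at position 938 in the sorted list scores 938 × 53 = 49714. A minimal sketch (hypothetical helper, not part of the sample above):

```python
def name_value(name: str) -> int:
    # Alphabetical value: 'A' -> 1, ..., 'Z' -> 26.
    return sum(ord(letter) - 64 for letter in name)


assert name_value("COLIN") == 53
assert 938 * name_value("COLIN") == 49714
```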
"""Pure-Python bisection utilities mirroring the standard-library bisect module."""
from __future__ import annotations

import bisect


def bisect_left(sorted_collection: list[int], item: int, lo: int = 0, hi: int = -1) -> int:
    """Leftmost index at which item can be inserted to keep the list sorted."""
    if hi < 0:
        hi = len(sorted_collection)
    while lo < hi:
        mid = lo + (hi - lo) // 2
        if sorted_collection[mid] < item:
            lo = mid + 1
        else:
            hi = mid
    return lo


def bisect_right(sorted_collection: list[int], item: int, lo: int = 0, hi: int = -1) -> int:
    """Rightmost index at which item can be inserted to keep the list sorted."""
    if hi < 0:
        hi = len(sorted_collection)
    while lo < hi:
        mid = lo + (hi - lo) // 2
        if sorted_collection[mid] <= item:
            lo = mid + 1
        else:
            hi = mid
    return lo


def insort_left(sorted_collection: list[int], item: int, lo: int = 0, hi: int = -1) -> None:
    """Insert item before any existing equal entries."""
    sorted_collection.insert(bisect_left(sorted_collection, item, lo, hi), item)


def insort_right(sorted_collection: list[int], item: int, lo: int = 0, hi: int = -1) -> None:
    """Insert item after any existing equal entries."""
    sorted_collection.insert(bisect_right(sorted_collection, item, lo, hi), item)


def binary_search(sorted_collection: list[int], item: int) -> int | None:
    """Iterative binary search; returns an index of item, or None if absent."""
    left = 0
    right = len(sorted_collection) - 1
    while left <= right:
        midpoint = left + (right - left) // 2
        current_item = sorted_collection[midpoint]
        if current_item == item:
            return midpoint
        elif item < current_item:
            right = midpoint - 1
        else:
            left = midpoint + 1
    return None


def binary_search_std_lib(sorted_collection: list[int], item: int) -> int | None:
    """Binary search delegating to the standard-library bisect module."""
    index = bisect.bisect_left(sorted_collection, item)
    if index != len(sorted_collection) and sorted_collection[index] == item:
        return index
    return None


def binary_search_by_recursion(sorted_collection: list[int], item: int, left: int, right: int) -> int | None:
    """Recursive binary search on sorted_collection[left : right + 1]."""
    if right < left:
        return None
    midpoint = left + (right - left) // 2
    if sorted_collection[midpoint] == item:
        return midpoint
    elif sorted_collection[midpoint] > item:
        return binary_search_by_recursion(sorted_collection, item, left, midpoint - 1)
    else:
        return binary_search_by_recursion(sorted_collection, item, midpoint + 1, right)


if __name__ == "__main__":
    user_input = input("Enter numbers separated by comma:\n").strip()
    collection = sorted(int(item) for item in user_input.split(","))
    target = int(input("Enter a single number to be found in the list:\n"))
    result = binary_search(collection, target)
    if result is None:
        print(f"{target} was not found in {collection}.")
    else:
        print(f"{target} was found at position {result} in {collection}.")
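A short usage sketch (assuming the functions above are importable from the same module): the hand-rolled helpers agree with the standard-library bisect on duplicates, hits, and misses.

```python
data = [0, 1, 2, 2, 2, 5, 8]

# bisect_left / bisect_right bracket the run of equal elements.
assert bisect_left(data, 2) == 2
assert bisect_right(data, 2) == 5

# All three search variants agree on a hit ...
assert binary_search(data, 5) == 5
assert binary_search_std_lib(data, 5) == 5
assert binary_search_by_recursion(data, 5, 0, len(data) - 1) == 5

# ... and on a miss.
assert binary_search(data, 3) is None
```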
'''simple docstring'''
import unittest
from datasets import load_dataset
from transformers import BloomTokenizerFast
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class SCREAMING_SNAKE_CASE__ ( __UpperCamelCase , unittest.TestCase ):
lowercase__ = None
lowercase__ = BloomTokenizerFast
lowercase__ = BloomTokenizerFast
lowercase__ = True
lowercase__ = False
lowercase__ = "tokenizer_file"
lowercase__ = {"bos_token": "<s>", "eos_token": "</s>", "unk_token": "<unk>", "pad_token": "<pad>"}
def __lowerCamelCase ( self ):
'''simple docstring'''
super().setUp()
__a : Optional[Any] = BloomTokenizerFast.from_pretrained("""bigscience/tokenizer""" )
tokenizer.save_pretrained(self.tmpdirname )
def __lowerCamelCase ( self , **__UpperCamelCase ):
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return BloomTokenizerFast.from_pretrained(self.tmpdirname , **__UpperCamelCase )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : str = self.get_rust_tokenizer()
__a : Dict = ["""The quick brown fox</s>""", """jumps over the lazy dog</s>"""]
__a : Dict = [[2175, 2_3714, 7_3173, 14_4252, 2], [77, 13_2619, 3478, 368, 10_9586, 3_5433, 2]]
__a : str = tokenizer.batch_encode_plus(__UpperCamelCase )["""input_ids"""]
self.assertListEqual(__UpperCamelCase , __UpperCamelCase )
__a : int = tokenizer.batch_decode(__UpperCamelCase )
self.assertListEqual(__UpperCamelCase , __UpperCamelCase )
def __lowerCamelCase ( self , __UpperCamelCase=6 ):
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
__a : Dict = self.rust_tokenizer_class.from_pretrained(__UpperCamelCase , **__UpperCamelCase )
# tokenizer_r.pad_token = None # Hotfixing padding = None
# Simple input
__a : List[Any] = """This is a simple input"""
__a : Dict = ["""This is a simple input 1""", """This is a simple input 2"""]
__a : str = ("""This is a simple input""", """This is a pair""")
__a : str = [
("""This is a simple input 1""", """This is a simple input 2"""),
("""This is a simple pair 1""", """This is a simple pair 2"""),
]
# Simple input tests
try:
tokenizer_r.encode(__UpperCamelCase , max_length=__UpperCamelCase )
tokenizer_r.encode_plus(__UpperCamelCase , max_length=__UpperCamelCase )
tokenizer_r.batch_encode_plus(__UpperCamelCase , max_length=__UpperCamelCase )
tokenizer_r.encode(__UpperCamelCase , max_length=__UpperCamelCase )
tokenizer_r.batch_encode_plus(__UpperCamelCase , max_length=__UpperCamelCase )
except ValueError:
self.fail("""Bloom Tokenizer should be able to deal with padding""" )
__a : Dict = None # Hotfixing padding = None
self.assertRaises(__UpperCamelCase , tokenizer_r.encode , __UpperCamelCase , max_length=__UpperCamelCase , padding="""max_length""" )
# Simple input
self.assertRaises(__UpperCamelCase , tokenizer_r.encode_plus , __UpperCamelCase , max_length=__UpperCamelCase , padding="""max_length""" )
# Simple input
self.assertRaises(
__UpperCamelCase , tokenizer_r.batch_encode_plus , __UpperCamelCase , max_length=__UpperCamelCase , padding="""max_length""" , )
# Pair input
self.assertRaises(__UpperCamelCase , tokenizer_r.encode , __UpperCamelCase , max_length=__UpperCamelCase , padding="""max_length""" )
# Pair input
self.assertRaises(__UpperCamelCase , tokenizer_r.encode_plus , __UpperCamelCase , max_length=__UpperCamelCase , padding="""max_length""" )
# Pair input
self.assertRaises(
__UpperCamelCase , tokenizer_r.batch_encode_plus , __UpperCamelCase , max_length=__UpperCamelCase , padding="""max_length""" , )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Union[str, Any] = self.get_rust_tokenizer()
__a : Optional[int] = load_dataset("""xnli""" , """all_languages""" , split="""test""" , streaming=__UpperCamelCase )
__a : Tuple = next(iter(__UpperCamelCase ) )["""premise"""] # pick up one data
__a : str = list(sample_data.values() )
__a : str = list(map(tokenizer.encode , __UpperCamelCase ) )
__a : List[str] = [tokenizer.decode(__UpperCamelCase , clean_up_tokenization_spaces=__UpperCamelCase ) for x in output_tokens]
self.assertListEqual(__UpperCamelCase , __UpperCamelCase )
def __lowerCamelCase ( self ):
'''simple docstring'''
self.assertGreaterEqual(len(self.tokenizer_class.pretrained_vocab_files_map ) , 1 )
self.assertGreaterEqual(len(list(self.tokenizer_class.pretrained_vocab_files_map.values() )[0] ) , 1 )
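The encode/decode round-trip the test above asserts can be sketched against the public transformers API (hedged: downloading the `bigscience/tokenizer` checkpoint is assumed to succeed):

```python
from transformers import BloomTokenizerFast

tokenizer = BloomTokenizerFast.from_pretrained("bigscience/tokenizer")
texts = ["The quick brown fox</s>", "jumps over the lazy dog</s>"]

# Encoding then decoding should reproduce the inputs exactly.
ids = tokenizer.batch_encode_plus(texts)["input_ids"]
assert tokenizer.batch_decode(ids) == texts
```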
"""Project Euler 205: Dice Game."""
from itertools import product


def total_frequency_distribution(sides_number: int, dice_number: int) -> list[int]:
    """Frequency of every possible total when rolling dice_number fair dice
    with sides_number faces each."""
    max_face_number = sides_number
    max_total = max_face_number * dice_number
    totals_frequencies = [0] * (max_total + 1)

    min_face_number = 1
    face_numbers = range(min_face_number, max_face_number + 1)
    for dice_numbers in product(face_numbers, repeat=dice_number):
        total = sum(dice_numbers)
        totals_frequencies[total] += 1

    return totals_frequencies


def solution() -> float:
    """Probability that Peter (nine 4-sided dice) beats Colin (six 6-sided
    dice), rounded to seven decimal places."""
    peter_totals_frequencies = total_frequency_distribution(sides_number=4, dice_number=9)
    colin_totals_frequencies = total_frequency_distribution(sides_number=6, dice_number=6)

    peter_wins_count = 0
    min_peter_total = 9
    max_peter_total = 4 * 9
    min_colin_total = 6
    for peter_total in range(min_peter_total, max_peter_total + 1):
        # Peter wins whenever Colin's total is strictly smaller.
        peter_wins_count += peter_totals_frequencies[peter_total] * sum(
            colin_totals_frequencies[min_colin_total:peter_total]
        )

    total_games_number = (4**9) * (6**6)
    peter_win_probability = peter_wins_count / total_games_number

    return round(peter_win_probability, ndigits=7)


if __name__ == "__main__":
    print(f"{solution() = }")
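A minimal check of the distribution helper on two ordinary 6-sided dice (the expected values follow from basic counting):

```python
freqs = total_frequency_distribution(sides_number=6, dice_number=2)

assert freqs[2] == 1       # only (1, 1)
assert freqs[7] == 6       # (1,6), (2,5), (3,4), (4,3), (5,2), (6,1)
assert sum(freqs) == 6**2  # 36 equally likely outcomes
```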
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_pegasus import PegasusTokenizer
else:
__SCREAMING_SNAKE_CASE : Optional[int] = None
__SCREAMING_SNAKE_CASE : List[str] = logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE : Optional[int] = '▁'
__SCREAMING_SNAKE_CASE : str = {'vocab_file': 'spiece.model', 'tokenizer_file': 'tokenizer.json'}
__SCREAMING_SNAKE_CASE : Optional[Any] = {
'vocab_file': {'google/pegasus-xsum': 'https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model'},
'tokenizer_file': {
'google/pegasus-xsum': 'https://huggingface.co/google/pegasus-xsum/resolve/main/tokenizer.json'
},
}
__SCREAMING_SNAKE_CASE : Optional[Any] = {
'google/pegasus-xsum': 512,
}
class SCREAMING_SNAKE_CASE__ ( __UpperCamelCase ):
lowercase__ = VOCAB_FILES_NAMES
lowercase__ = PRETRAINED_VOCAB_FILES_MAP
lowercase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase__ = PegasusTokenizer
lowercase__ = ["input_ids", "attention_mask"]
def __init__( self , __UpperCamelCase=None , __UpperCamelCase=None , __UpperCamelCase="<pad>" , __UpperCamelCase="</s>" , __UpperCamelCase="<unk>" , __UpperCamelCase="<mask_2>" , __UpperCamelCase="<mask_1>" , __UpperCamelCase=None , __UpperCamelCase=103 , **__UpperCamelCase , ):
'''simple docstring'''
__a : Tuple = offset
if additional_special_tokens is not None:
if not isinstance(__UpperCamelCase , __UpperCamelCase ):
raise TypeError(
f"""additional_special_tokens should be of type {type(__UpperCamelCase )}, but is"""
f""" {type(__UpperCamelCase )}""" )
__a : int = (
([mask_token_sent] + additional_special_tokens)
if mask_token_sent not in additional_special_tokens and mask_token_sent is not None
else additional_special_tokens
)
# fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken
additional_special_tokens_extended += [
f"""<unk_{i}>""" for i in range(len(__UpperCamelCase ) , self.offset - 1 )
]
if len(set(__UpperCamelCase ) ) != len(__UpperCamelCase ):
raise ValueError(
"""Please make sure that the provided additional_special_tokens do not contain an incorrectly"""
f""" shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}.""" )
__a : Optional[Any] = additional_special_tokens_extended
else:
__a : Optional[Any] = [mask_token_sent] if mask_token_sent is not None else []
additional_special_tokens += [f"""<unk_{i}>""" for i in range(2 , self.offset )]
super().__init__(
__UpperCamelCase , tokenizer_file=__UpperCamelCase , pad_token=__UpperCamelCase , eos_token=__UpperCamelCase , unk_token=__UpperCamelCase , mask_token=__UpperCamelCase , mask_token_sent=__UpperCamelCase , offset=__UpperCamelCase , additional_special_tokens=__UpperCamelCase , **__UpperCamelCase , )
__a : int = vocab_file
__a : Union[str, Any] = False if not self.vocab_file else True
def __lowerCamelCase ( self , __UpperCamelCase ):
'''simple docstring'''
__a : Any = set(self.all_special_ids ) # call it once instead of inside list comp
all_special_ids.remove(self.unk_token_id ) # <unk> is only sometimes special
if all_special_ids != set(range(len(self.additional_special_tokens ) + 3 ) ):
raise ValueError(
"""There should be 3 special tokens: mask_token, pad_token, and eos_token +"""
f""" {len(self.additional_special_tokens )} additional_special_tokens, but got {all_special_ids}""" )
return [1 if x in all_special_ids else 0 for x in seq]
def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase = None , __UpperCamelCase = False ):
'''simple docstring'''
if already_has_special_tokens:
return self._special_token_mask(__UpperCamelCase )
elif token_ids_a is None:
return self._special_token_mask(__UpperCamelCase ) + [1]
else:
return self._special_token_mask(token_ids_a + token_ids_a ) + [1]
def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase=None ):
'''simple docstring'''
if token_ids_a is None:
return token_ids_a + [self.eos_token_id]
# We don't expect to process pairs, but leave the pair logic for API consistency
return token_ids_a + token_ids_a + [self.eos_token_id]
def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase = None ):
'''simple docstring'''
if not self.can_save_slow_tokenizer:
raise ValueError(
"""Your fast tokenizer does not have the necessary information to save the vocabulary for a slow """
"""tokenizer.""" )
if not os.path.isdir(__UpperCamelCase ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
__a : List[str] = os.path.join(
__UpperCamelCase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__UpperCamelCase ):
copyfile(self.vocab_file , __UpperCamelCase )
return (out_vocab_file,)
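The pair/EOS rule implemented by the tokenizer above is simple enough to restate standalone (hypothetical helper; Pegasus's eos_token_id of 1 is an assumption here):

```python
def build_inputs_with_special_tokens(token_ids_a, token_ids_b=None, eos_token_id=1):
    # Pegasus appends a single EOS token; a pair is concatenated before it.
    if token_ids_b is None:
        return token_ids_a + [eos_token_id]
    return token_ids_a + token_ids_b + [eos_token_id]


assert build_inputs_with_special_tokens([5, 6]) == [5, 6, 1]
assert build_inputs_with_special_tokens([5, 6], [7]) == [5, 6, 7, 1]
```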
'''simple docstring'''
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import DiffusionPipeline
from diffusers.models import AutoencoderKL, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import logging
__SCREAMING_SNAKE_CASE : Union[str, Any] = logging.get_logger(__name__) # pylint: disable=invalid-name
class SCREAMING_SNAKE_CASE__ ( __UpperCamelCase ):
def __init__( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , ):
'''simple docstring'''
super().__init__()
self.register_modules(
vae=__UpperCamelCase , text_encoder=__UpperCamelCase , tokenizer=__UpperCamelCase , unet=__UpperCamelCase , scheduler=__UpperCamelCase , safety_checker=__UpperCamelCase , feature_extractor=__UpperCamelCase , )
def __lowerCamelCase ( self , __UpperCamelCase = "auto" ):
'''simple docstring'''
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
__a : Union[str, Any] = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(__UpperCamelCase )
def __lowerCamelCase ( self ):
'''simple docstring'''
self.enable_attention_slicing(__UpperCamelCase )
@torch.no_grad()
def __call__( self , __UpperCamelCase , __UpperCamelCase = 512 , __UpperCamelCase = 512 , __UpperCamelCase = 50 , __UpperCamelCase = 7.5 , __UpperCamelCase = None , __UpperCamelCase = 1 , __UpperCamelCase = 0.0 , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = "pil" , __UpperCamelCase = True , __UpperCamelCase = None , __UpperCamelCase = 1 , __UpperCamelCase = None , **__UpperCamelCase , ):
'''simple docstring'''
if isinstance(__UpperCamelCase , __UpperCamelCase ):
__a : Union[str, Any] = 1
elif isinstance(__UpperCamelCase , __UpperCamelCase ):
__a : Tuple = len(__UpperCamelCase )
else:
raise ValueError(f"""`prompt` has to be of type `str` or `list` but is {type(__UpperCamelCase )}""" )
if height % 8 != 0 or width % 8 != 0:
raise ValueError(f"""`height` and `width` have to be divisible by 8 but are {height} and {width}.""" )
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(__UpperCamelCase , __UpperCamelCase ) or callback_steps <= 0)
):
raise ValueError(
f"""`callback_steps` has to be a positive integer but is {callback_steps} of type"""
f""" {type(__UpperCamelCase )}.""" )
# get prompt text embeddings
__a : Tuple = self.tokenizer(
__UpperCamelCase , padding="""max_length""" , max_length=self.tokenizer.model_max_length , return_tensors="""pt""" , )
__a : Union[str, Any] = text_inputs.input_ids
if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
__a : str = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
logger.warning(
"""The following part of your input was truncated because CLIP can only handle sequences up to"""
f""" {self.tokenizer.model_max_length} tokens: {removed_text}""" )
__a : Optional[int] = text_input_ids[:, : self.tokenizer.model_max_length]
if text_embeddings is None:
__a : int = self.text_encoder(text_input_ids.to(self.device ) )[0]
# duplicate text embeddings for each generation per prompt, using mps friendly method
__a , __a , __a : Union[str, Any] = text_embeddings.shape
__a : Optional[Any] = text_embeddings.repeat(1 , __UpperCamelCase , 1 )
__a : Union[str, Any] = text_embeddings.view(bs_embed * num_images_per_prompt , __UpperCamelCase , -1 )
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
__a : Any = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
__a : List[str]
if negative_prompt is None:
__a : Optional[Any] = [""""""]
elif type(__UpperCamelCase ) is not type(__UpperCamelCase ):
raise TypeError(
f"""`negative_prompt` should be the same type to `prompt`, but got {type(__UpperCamelCase )} !="""
f""" {type(__UpperCamelCase )}.""" )
elif isinstance(__UpperCamelCase , __UpperCamelCase ):
__a : Any = [negative_prompt]
elif batch_size != len(__UpperCamelCase ):
raise ValueError(
f"""`negative_prompt`: {negative_prompt} has batch size {len(__UpperCamelCase )}, but `prompt`:"""
f""" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"""
""" the batch size of `prompt`.""" )
else:
__a : Tuple = negative_prompt
__a : Any = text_input_ids.shape[-1]
__a : List[str] = self.tokenizer(
__UpperCamelCase , padding="""max_length""" , max_length=__UpperCamelCase , truncation=__UpperCamelCase , return_tensors="""pt""" , )
__a : str = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
__a : List[str] = uncond_embeddings.shape[1]
__a : List[Any] = uncond_embeddings.repeat(__UpperCamelCase , __UpperCamelCase , 1 )
__a : Tuple = uncond_embeddings.view(batch_size * num_images_per_prompt , __UpperCamelCase , -1 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
__a : List[Any] = torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
__a : Tuple = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
__a : List[Any] = (batch_size * num_images_per_prompt, self.unet.config.in_channels, 64, 64)
__a : int = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not exist on mps
__a : Any = torch.randn(
__UpperCamelCase , generator=__UpperCamelCase , device="""cpu""" , dtype=__UpperCamelCase ).to(self.device )
__a : Optional[Any] = torch.randn(__UpperCamelCase , generator=__UpperCamelCase , device="""cpu""" , dtype=__UpperCamelCase ).to(
self.device )
else:
__a : Optional[int] = torch.randn(
__UpperCamelCase , generator=__UpperCamelCase , device=self.device , dtype=__UpperCamelCase )
__a : str = torch.randn(__UpperCamelCase , generator=__UpperCamelCase , device=self.device , dtype=__UpperCamelCase )
else:
if latents_reference.shape != latents_shape:
raise ValueError(f"""Unexpected latents shape, got {latents.shape}, expected {latents_shape}""" )
__a : Optional[Any] = latents_reference.to(self.device )
__a : str = latents.to(self.device )
# This is the key part of the pipeline where we
# try to ensure that the generated images w/ the same seed
# but different sizes actually result in similar images
__a : List[str] = (latents_shape[3] - latents_shape_reference[3]) // 2
__a : int = (latents_shape[2] - latents_shape_reference[2]) // 2
__a : int = latents_shape_reference[3] if dx >= 0 else latents_shape_reference[3] + 2 * dx
__a : Tuple = latents_shape_reference[2] if dy >= 0 else latents_shape_reference[2] + 2 * dy
__a : Optional[Any] = 0 if dx < 0 else dx
__a : Optional[Any] = 0 if dy < 0 else dy
__a : Optional[int] = max(-dx , 0 )
__a : Optional[Any] = max(-dy , 0 )
# import pdb
# pdb.set_trace()
__a : Optional[int] = latents_reference[:, :, dy : dy + h, dx : dx + w]
# set timesteps
self.scheduler.set_timesteps(__UpperCamelCase )
# Some schedulers like PNDM have timesteps as arrays
# It's more optimized to move all timesteps to correct device beforehand
__a : Dict = self.scheduler.timesteps.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
__a : Any = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
__a : List[Any] = """eta""" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
__a : Optional[Any] = {}
if accepts_eta:
__a : Union[str, Any] = eta
for i, t in enumerate(self.progress_bar(__UpperCamelCase ) ):
# expand the latents if we are doing classifier free guidance
__a : List[str] = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
__a : Tuple = self.scheduler.scale_model_input(__UpperCamelCase , __UpperCamelCase )
# predict the noise residual
__a : Union[str, Any] = self.unet(__UpperCamelCase , __UpperCamelCase , encoder_hidden_states=__UpperCamelCase ).sample
# perform guidance
if do_classifier_free_guidance:
__a , __a : List[str] = noise_pred.chunk(2 )
__a : Optional[int] = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# compute the previous noisy sample x_t -> x_t-1
__a : List[Any] = self.scheduler.step(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , **__UpperCamelCase ).prev_sample
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
__a : Optional[Any] = 1 / 0.1_8_2_1_5 * latents
__a : Optional[int] = self.vae.decode(__UpperCamelCase ).sample
__a : List[str] = (image / 2 + 0.5).clamp(0 , 1 )
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
__a : int = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if self.safety_checker is not None:
__a : List[str] = self.feature_extractor(self.numpy_to_pil(__UpperCamelCase ) , return_tensors="""pt""" ).to(
self.device )
__a , __a : int = self.safety_checker(
images=__UpperCamelCase , clip_input=safety_checker_input.pixel_values.to(text_embeddings.dtype ) )
else:
__a : Optional[int] = None
if output_type == "pil":
__a : str = self.numpy_to_pil(__UpperCamelCase )
if not return_dict:
return (image, has_nsfw_concept)
return StableDiffusionPipelineOutput(images=__UpperCamelCase , nsfw_content_detected=__UpperCamelCase )
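The classifier-free guidance step at the core of the denoising loop above is worth isolating. A minimal sketch with random tensors standing in for real UNet outputs; `guidance_scale = 1` recovers the conditional prediction unchanged:

```python
import torch


def classifier_free_guidance(noise_pred_uncond, noise_pred_text, guidance_scale):
    # w * (conditional - unconditional) added to the unconditional prediction,
    # following the guidance weight w of the Imagen paper (eq. 2).
    return noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)


uncond = torch.randn(1, 4, 64, 64)
text = torch.randn(1, 4, 64, 64)
assert classifier_free_guidance(uncond, text, 7.5).shape == uncond.shape
assert torch.allclose(classifier_free_guidance(uncond, text, 1.0), text)
```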
"""LiLT model configuration."""
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

LILT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "SCUT-DLVCLab/lilt-roberta-en-base": (
        "https://huggingface.co/SCUT-DLVCLab/lilt-roberta-en-base/resolve/main/config.json"
    ),
}


class LiltConfig(PretrainedConfig):
    model_type = "lilt"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        classifier_dropout=None,
        channel_shrink_ratio=4,
        max_2d_position_embeddings=1024,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.classifier_dropout = classifier_dropout
        self.channel_shrink_ratio = channel_shrink_ratio
        self.max_2d_position_embeddings = max_2d_position_embeddings
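A quick usage sketch (LiltConfig ships with recent transformers releases, so the top-level import below is assumed to be available):

```python
from transformers import LiltConfig

config = LiltConfig(num_hidden_layers=6)
assert config.model_type == "lilt"
assert config.num_hidden_layers == 6
assert config.channel_shrink_ratio == 4  # default from the signature above
```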
"""Max pooling and average pooling over square matrices (e.g. images)."""
import numpy as np
from PIL import Image


def maxpooling(arr: np.ndarray, size: int, stride: int) -> np.ndarray:
    """Slide a size x size window with the given stride and keep the maximum."""
    arr = np.array(arr)
    if arr.shape[0] != arr.shape[1]:
        raise ValueError("The input array is not a square matrix")
    i = 0
    j = 0
    mat_i = 0
    mat_j = 0

    # compute the shape of the output matrix
    maxpool_shape = (arr.shape[0] - size) // stride + 1
    # initialize the output matrix with zeros of shape maxpool_shape
    updated_arr = np.zeros((maxpool_shape, maxpool_shape))

    while i < arr.shape[0]:
        if i + size > arr.shape[0]:
            # if the end of the matrix is reached, break
            break
        while j < arr.shape[1]:
            # if the end of the matrix is reached, break
            if j + size > arr.shape[1]:
                break
            # compute the maximum of the pooling matrix
            updated_arr[mat_i][mat_j] = np.max(arr[i : i + size, j : j + size])
            # shift the pooling matrix by stride of column pixels
            j += stride
            mat_j += 1
        # shift the pooling matrix by stride of row pixels
        i += stride
        mat_i += 1
        # reset the column index to 0
        j = 0
        mat_j = 0

    return updated_arr


def avgpooling(arr: np.ndarray, size: int, stride: int) -> np.ndarray:
    """Slide a size x size window with the given stride and keep the truncated mean."""
    arr = np.array(arr)
    if arr.shape[0] != arr.shape[1]:
        raise ValueError("The input array is not a square matrix")
    i = 0
    j = 0
    mat_i = 0
    mat_j = 0

    # compute the shape of the output matrix
    avgpool_shape = (arr.shape[0] - size) // stride + 1
    # initialize the output matrix with zeros of shape avgpool_shape
    updated_arr = np.zeros((avgpool_shape, avgpool_shape))

    while i < arr.shape[0]:
        # if the end of the matrix is reached, break
        if i + size > arr.shape[0]:
            break
        while j < arr.shape[1]:
            # if the end of the matrix is reached, break
            if j + size > arr.shape[1]:
                break
            # compute the average of the pooling matrix
            updated_arr[mat_i][mat_j] = int(np.average(arr[i : i + size, j : j + size]))
            # shift the pooling matrix by stride of column pixels
            j += stride
            mat_j += 1
        # shift the pooling matrix by stride of row pixels
        i += stride
        mat_i += 1
        # reset the column index to 0
        j = 0
        mat_j = 0

    return updated_arr


# Main Function
if __name__ == "__main__":
    from doctest import testmod

    testmod(name="avgpooling", verbose=True)

    # Loading the image
    image = Image.open("path_to_image")

    # Converting the image to numpy array and maxpooling, displaying the result
    # Ensure that the image is a square matrix
    Image.fromarray(maxpooling(np.array(image), size=3, stride=2)).show()

    # Converting the image to numpy array and averagepooling, displaying the result
    # Ensure that the image is a square matrix
    Image.fromarray(avgpooling(np.array(image), size=3, stride=2)).show()
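A worked check on a 4×4 matrix with a 2×2 window and stride 2 (values verified by hand; note that avgpooling truncates each window mean to an int):

```python
import numpy as np

mat = np.array([[ 1,  2,  3,  4],
                [ 5,  6,  7,  8],
                [ 9, 10, 11, 12],
                [13, 14, 15, 16]])

# Window maxima: 6, 8 / 14, 16; truncated window means: 3, 5 / 11, 13.
assert np.array_equal(maxpooling(mat, size=2, stride=2), [[6, 8], [14, 16]])
assert np.array_equal(avgpooling(mat, size=2, stride=2), [[3, 5], [11, 13]])
```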
'''simple docstring'''
import warnings
from diffusers import StableDiffusionInpaintPipeline as StableDiffusionInpaintPipeline # noqa F401
warnings.warn(
'The `inpainting.py` script is outdated. Please use directly `from diffusers import'
' StableDiffusionInpaintPipeline` instead.'
)
"""Measure a single qubit in the computational basis and return the counts."""
import qiskit


def single_qubit_measure(qubits: int, classical_bits: int) -> qiskit.result.counts.Counts:
    # Use Aer's simulator backend
    simulator = qiskit.Aer.get_backend("aer_simulator")

    # Create a Quantum Circuit acting on the q register
    circuit = qiskit.QuantumCircuit(qubits, classical_bits)

    # Map the quantum measurement to the classical bits
    circuit.measure([0], [0])

    # Execute the circuit on the simulator
    job = qiskit.execute(circuit, simulator, shots=1000)

    # Return the histogram data of the results of the experiment.
    return job.result().get_counts(circuit)


if __name__ == "__main__":
    print(f"Total count for various states are: {single_qubit_measure(1, 1)}")
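For contrast, a hedged sketch (same legacy qiskit API as the sample above; hypothetical function name) that applies a Hadamard gate first, so roughly half of the 1000 shots measure '0' and half '1' instead of all '0':

```python
import qiskit


def superposition_measure(qubits: int = 1, classical_bits: int = 1):
    simulator = qiskit.Aer.get_backend("aer_simulator")
    circuit = qiskit.QuantumCircuit(qubits, classical_bits)
    circuit.h(0)  # Hadamard: |0> -> (|0> + |1>) / sqrt(2)
    circuit.measure([0], [0])
    job = qiskit.execute(circuit, simulator, shots=1000)
    return job.result().get_counts(circuit)
```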
"""Arithmetic-series helpers: progression check and arithmetic mean."""


def is_arithmetic_series(series: list) -> bool:
    """Return True if the list is an arithmetic progression."""
    if not isinstance(series, list):
        raise ValueError("Input series is not valid, valid series - [2, 4, 6]")
    if len(series) == 0:
        raise ValueError("Input list must be a non empty list")
    if len(series) == 1:
        return True

    common_diff = series[1] - series[0]
    for index in range(len(series) - 1):
        if series[index + 1] - series[index] != common_diff:
            return False
    return True


def arithmetic_mean(series: list) -> float:
    """Return the arithmetic mean of a non-empty list of numbers."""
    if not isinstance(series, list):
        raise ValueError("Input series is not valid, valid series - [2, 4, 6]")
    if len(series) == 0:
        raise ValueError("Input list must be a non empty list")

    answer = 0
    for val in series:
        answer += val
    return answer / len(series)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
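Example calls (assuming the two function names above):

```python
assert is_arithmetic_series([2, 4, 6]) is True   # common difference 2
assert is_arithmetic_series([2, 4, 7]) is False  # 7 - 4 != 4 - 2
assert arithmetic_mean([2, 4, 6]) == 4.0
```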
'''simple docstring'''
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
WavaVecaConformerConfig,
WavaVecaConformerForCTC,
WavaVecaConformerForPreTraining,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
__SCREAMING_SNAKE_CASE : str = logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE : Any = {
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.linear_k': 'encoder.layers.*.self_attn.linear_k',
'self_attn.linear_v': 'encoder.layers.*.self_attn.linear_v',
'self_attn.linear_q': 'encoder.layers.*.self_attn.linear_q',
'self_attn.pos_bias_u': 'encoder.layers.*.self_attn.pos_bias_u',
'self_attn.pos_bias_v': 'encoder.layers.*.self_attn.pos_bias_v',
'self_attn.linear_out': 'encoder.layers.*.self_attn.linear_out',
'self_attn.linear_pos': 'encoder.layers.*.self_attn.linear_pos',
'self_attn.rotary_emb': 'encoder.embed_positions',
'self_attn_layer_norm': 'encoder.layers.*.self_attn_layer_norm',
'conv_module.pointwise_conv1': 'encoder.layers.*.conv_module.pointwise_conv1',
'conv_module.pointwise_conv2': 'encoder.layers.*.conv_module.pointwise_conv2',
'conv_module.depthwise_conv': 'encoder.layers.*.conv_module.depthwise_conv',
'conv_module.batch_norm': 'encoder.layers.*.conv_module.batch_norm',
'conv_module.layer_norm': 'encoder.layers.*.conv_module.layer_norm',
'ffn1.w_1': 'encoder.layers.*.ffn1.intermediate_dense',
'ffn1.w_2': 'encoder.layers.*.ffn1.output_dense',
'ffn1.layer_norm': 'encoder.layers.*.ffn1_layer_norm',
'ffn2.w_1': 'encoder.layers.*.ffn2.intermediate_dense',
'ffn2.w_2': 'encoder.layers.*.ffn2.output_dense',
'ffn2.layer_norm': 'encoder.layers.*.ffn2_layer_norm',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'quantizer.weight_proj': 'quantizer.weight_proj',
'quantizer.vars': 'quantizer.codevectors',
'project_q': 'project_q',
'final_proj': 'project_hid',
'w2v_encoder.proj': 'lm_head',
'mask_emb': 'masked_spec_embed',
}
__SCREAMING_SNAKE_CASE : Optional[Any] = [
'lm_head',
'quantizer.weight_proj',
'quantizer.codevectors',
'project_q',
'project_hid',
]
def _snake_case ( lowercase , lowercase , lowercase , lowercase , lowercase ) -> List[Any]:
for attribute in key.split(""".""" ):
__a : str = getattr(lowercase , lowercase )
if weight_type is not None:
__a : Dict = getattr(lowercase , lowercase ).shape
else:
__a : Dict = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
F"""Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be"""
F""" {value.shape} for {full_name}""" )
if weight_type == "weight":
__a : Any = value
elif weight_type == "weight_g":
__a : int = value
elif weight_type == "weight_v":
__a : int = value
elif weight_type == "bias":
__a : List[Any] = value
elif weight_type == "running_mean":
__a : Union[str, Any] = value
elif weight_type == "running_var":
__a : Tuple = value
elif weight_type == "num_batches_tracked":
__a : Optional[int] = value
elif weight_type == "inv_freq":
__a : List[str] = value
else:
__a : List[str] = value
logger.info(F"""{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.""" )
def _snake_case ( lowercase , lowercase , lowercase ) -> Dict:
__a : Dict = []
__a : Dict = fairseq_model.state_dict()
__a : Tuple = hf_model.wavaveca_conformer.feature_extractor
for name, value in fairseq_dict.items():
__a : int = False
if "conv_layers" in name:
load_conv_layer(
lowercase , lowercase , lowercase , lowercase , hf_model.config.feat_extract_norm == """group""" , )
__a : List[Any] = True
else:
for key, mapped_key in MAPPING.items():
__a : Optional[int] = """wav2vec2_conformer.""" + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split("""w2v_model.""" )[-1] == name.split(""".""" )[0]:
__a : str = True
if "*" in mapped_key:
__a : Optional[int] = name.split(lowercase )[0].split(""".""" )[-2]
__a : List[Any] = mapped_key.replace("""*""" , lowercase )
if "pos_bias_u" in name:
__a : Union[str, Any] = None
elif "pos_bias_v" in name:
__a : List[Any] = None
elif "weight_g" in name:
__a : List[Any] = """weight_g"""
elif "weight_v" in name:
__a : List[Any] = """weight_v"""
elif "bias" in name:
__a : Optional[int] = """bias"""
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
__a : str = """weight"""
elif "running_mean" in name:
__a : List[str] = """running_mean"""
elif "inv_freq" in name:
__a : Dict = """inv_freq"""
elif "running_var" in name:
__a : Union[str, Any] = """running_var"""
elif "num_batches_tracked" in name:
__a : int = """num_batches_tracked"""
else:
__a : Optional[int] = None
set_recursively(lowercase , lowercase , lowercase , lowercase , lowercase )
continue
if not is_used:
unused_weights.append(lowercase )
logger.warning(F"""Unused weights: {unused_weights}""" )
def _snake_case ( lowercase , lowercase , lowercase , lowercase , lowercase ) -> List[str]:
__a : Optional[Any] = full_name.split("""conv_layers.""" )[-1]
__a : Union[str, Any] = name.split(""".""" )
__a : Optional[Any] = int(items[0] )
__a : int = int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" )
__a : Dict = value
logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" )
__a : str = value
logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.""" )
__a : Dict = value
logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.""" )
__a : Union[str, Any] = value
logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
else:
unused_weights.append(lowercase )
@torch.no_grad()
def _snake_case ( lowercase , lowercase , lowercase=None , lowercase=None , lowercase=True ) -> Optional[Any]:
if config_path is not None:
__a : Any = WavaVecaConformerConfig.from_pretrained(lowercase , hidden_act="""swish""" )
else:
__a : Optional[int] = WavaVecaConformerConfig()
if "rope" in checkpoint_path:
__a : Optional[Any] = """rotary"""
if is_finetuned:
if dict_path:
__a : List[Any] = Dictionary.load(lowercase )
# important change bos & pad token id since CTC symbol is <pad> and
# not <s> as in fairseq
__a : int = target_dict.pad_index
__a : List[str] = target_dict.bos_index
__a : str = target_dict.eos_index
__a : Dict = len(target_dict.symbols )
__a : Any = os.path.join(lowercase , """vocab.json""" )
if not os.path.isdir(lowercase ):
logger.error("""--pytorch_dump_folder_path ({}) should be a directory""".format(lowercase ) )
return
os.makedirs(lowercase , exist_ok=lowercase )
__a : Dict = target_dict.indices
# fairseq has the <pad> and <s> switched
__a : Optional[Any] = 0
__a : List[Any] = 1
with open(lowercase , """w""" , encoding="""utf-8""" ) as vocab_handle:
json.dump(lowercase , lowercase )
__a : int = WavaVecaCTCTokenizer(
lowercase , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token="""|""" , do_lower_case=lowercase , )
__a : Optional[int] = True if config.feat_extract_norm == """layer""" else False
__a : Dict = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=1_6_0_0_0 , padding_value=0 , do_normalize=lowercase , return_attention_mask=lowercase , )
__a : str = WavaVecaProcessor(feature_extractor=lowercase , tokenizer=lowercase )
processor.save_pretrained(lowercase )
__a : List[str] = WavaVecaConformerForCTC(lowercase )
else:
__a : Optional[int] = WavaVecaConformerForPreTraining(lowercase )
if is_finetuned:
__a , __a , __a : Dict = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={"""data""": """/""".join(dict_path.split("""/""" )[:-1] )} )
else:
__a : Optional[int] = argparse.Namespace(task="""audio_pretraining""" )
__a : Tuple = fairseq.tasks.setup_task(lowercase )
__a , __a , __a : int = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] , task=lowercase )
__a : Any = model[0].eval()
recursively_load_weights(lowercase , lowercase , not is_finetuned )
hf_wavavec.save_pretrained(lowercase )
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE : Dict = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--not_finetuned', action='store_true', help='Whether the model to convert is a fine-tuned model or not'
)
__SCREAMING_SNAKE_CASE : int = parser.parse_args()
convert_wavaveca_conformer_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
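The conversion above repeatedly walks dotted keys like `encoder.layers.0.self_attn.linear_q` through nested module attributes with getattr; that pattern in isolation (minimal stand-in classes, hypothetical names):

```python
def get_by_dotted_path(root, dotted_key: str):
    # Walk "a.b.c" attribute by attribute, as set_recursively does with its key.
    pointer = root
    for attribute in dotted_key.split("."):
        pointer = getattr(pointer, attribute)
    return pointer


class Leaf:
    weight = 42


class Root:
    leaf = Leaf()


assert get_by_dotted_path(Root(), "leaf.weight") == 42
```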
'''simple docstring'''
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
__SCREAMING_SNAKE_CASE : List[Any] = logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE : Tuple = {
'microsoft/layoutlmv3-base': 'https://huggingface.co/microsoft/layoutlmv3-base/resolve/main/config.json',
}
class SCREAMING_SNAKE_CASE__ ( __UpperCamelCase ):
lowercase__ = "layoutlmv3"
def __init__( self , __UpperCamelCase=5_0265 , __UpperCamelCase=768 , __UpperCamelCase=12 , __UpperCamelCase=12 , __UpperCamelCase=3072 , __UpperCamelCase="gelu" , __UpperCamelCase=0.1 , __UpperCamelCase=0.1 , __UpperCamelCase=512 , __UpperCamelCase=2 , __UpperCamelCase=0.0_2 , __UpperCamelCase=1E-5 , __UpperCamelCase=1 , __UpperCamelCase=0 , __UpperCamelCase=2 , __UpperCamelCase=1024 , __UpperCamelCase=128 , __UpperCamelCase=128 , __UpperCamelCase=True , __UpperCamelCase=32 , __UpperCamelCase=128 , __UpperCamelCase=64 , __UpperCamelCase=256 , __UpperCamelCase=True , __UpperCamelCase=True , __UpperCamelCase=True , __UpperCamelCase=224 , __UpperCamelCase=3 , __UpperCamelCase=16 , __UpperCamelCase=None , **__UpperCamelCase , ):
'''simple docstring'''
super().__init__(
vocab_size=__UpperCamelCase , hidden_size=__UpperCamelCase , num_hidden_layers=__UpperCamelCase , num_attention_heads=__UpperCamelCase , intermediate_size=__UpperCamelCase , hidden_act=__UpperCamelCase , hidden_dropout_prob=__UpperCamelCase , attention_probs_dropout_prob=__UpperCamelCase , max_position_embeddings=__UpperCamelCase , type_vocab_size=__UpperCamelCase , initializer_range=__UpperCamelCase , layer_norm_eps=__UpperCamelCase , pad_token_id=__UpperCamelCase , bos_token_id=__UpperCamelCase , eos_token_id=__UpperCamelCase , **__UpperCamelCase , )
__a : Optional[int] = max_ad_position_embeddings
__a : Optional[int] = coordinate_size
__a : Optional[int] = shape_size
__a : List[Any] = has_relative_attention_bias
__a : List[Any] = rel_pos_bins
__a : Union[str, Any] = max_rel_pos
__a : List[str] = has_spatial_attention_bias
__a : List[str] = rel_ad_pos_bins
__a : Any = max_rel_ad_pos
__a : Tuple = text_embed
__a : Dict = visual_embed
__a : Dict = input_size
__a : Dict = num_channels
__a : Union[str, Any] = patch_size
__a : Optional[int] = classifier_dropout
class SCREAMING_SNAKE_CASE__ ( __UpperCamelCase ):
lowercase__ = version.parse("1.12" )
@property
def __lowerCamelCase ( self ):
'''simple docstring'''
if self.task in ["question-answering", "sequence-classification"]:
return OrderedDict(
[
("""input_ids""", {0: """batch""", 1: """sequence"""}),
("""attention_mask""", {0: """batch""", 1: """sequence"""}),
("""bbox""", {0: """batch""", 1: """sequence"""}),
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
else:
return OrderedDict(
[
("""input_ids""", {0: """batch""", 1: """sequence"""}),
("""bbox""", {0: """batch""", 1: """sequence"""}),
("""attention_mask""", {0: """batch""", 1: """sequence"""}),
("""pixel_values""", {0: """batch""", 1: """num_channels"""}),
] )
@property
def __lowerCamelCase ( self ):
'''simple docstring'''
return 1E-5
@property
def __lowerCamelCase ( self ):
'''simple docstring'''
return 12
def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase = -1 , __UpperCamelCase = -1 , __UpperCamelCase = False , __UpperCamelCase = None , __UpperCamelCase = 3 , __UpperCamelCase = 40 , __UpperCamelCase = 40 , ):
'''simple docstring'''
setattr(processor.image_processor , """apply_ocr""" , __UpperCamelCase )
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
__a : str = compute_effective_axis_dimension(
__UpperCamelCase , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
__a : int = processor.tokenizer.num_special_tokens_to_add(__UpperCamelCase )
__a : Any = compute_effective_axis_dimension(
__UpperCamelCase , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=__UpperCamelCase )
# Generate dummy inputs according to compute batch and sequence
__a : Union[str, Any] = [[""" """.join([processor.tokenizer.unk_token] ) * seq_length]] * batch_size
# Generate dummy bounding boxes
__a : Optional[Any] = [[[48, 84, 73, 128]]] * batch_size
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
# batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch)
__a : int = self._generate_dummy_images(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
__a : Tuple = dict(
processor(
__UpperCamelCase , text=__UpperCamelCase , boxes=__UpperCamelCase , return_tensors=__UpperCamelCase , ) )
return inputs
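The dummy-input generation above hinges on one rule: a dynamic axis (-1) is replaced by a small fixed size so ONNX export cannot specialize on it. A sketch of that rule as a hypothetical standalone helper (mirroring what transformers' compute_effective_axis_dimension is used for here):

```python
def effective_axis_dimension(dimension: int, fixed_dimension: int, num_token_to_add: int = 0) -> int:
    # A dynamic axis (<= 0) falls back to the fixed default; an explicit size
    # is kept. Either way, room is reserved for special tokens added later.
    if dimension <= 0:
        dimension = fixed_dimension
    return dimension - num_token_to_add


assert effective_axis_dimension(-1, fixed_dimension=2) == 2                      # dynamic batch
assert effective_axis_dimension(-1, fixed_dimension=8, num_token_to_add=2) == 6  # dynamic sequence
assert effective_axis_dimension(16, fixed_dimension=8) == 16                     # explicit size kept
```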
"""Decorator flagging a callable as experimental."""
import warnings
from functools import wraps
from typing import Callable


def experimental(fn: Callable) -> Callable:
    """Wrap fn so every call emits an experimental-API warning."""

    @wraps(fn)
    def _inner_fn(*args, **kwargs):
        warnings.warn(
            f"'{fn.__name__}' is experimental and might be subject to breaking changes in the future.",
            UserWarning,
        )
        return fn(*args, **kwargs)

    return _inner_fn
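Usage sketch: the decorated function behaves normally but emits a UserWarning on every call.

```python
@experimental
def new_feature(x: int) -> int:
    return x * 2


assert new_feature(21) == 42  # runs normally; warning emitted as a side effect
```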
'''simple docstring'''
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion_safe import StableDiffusionPipelineSafe as StableDiffusionPipeline
from diffusers.utils import floats_tensor, nightly, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
def __lowerCamelCase ( self ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Union[str, Any] = 1
__a : Tuple = 3
__a : Optional[int] = (32, 32)
__a : str = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(__UpperCamelCase )
return image
@property
def __lowerCamelCase ( self ):
'''simple docstring'''
torch.manual_seed(0 )
__a : str = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , )
return model
@property
def __lowerCamelCase ( self ):
'''simple docstring'''
torch.manual_seed(0 )
__a : Optional[Any] = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , )
return model
@property
def __lowerCamelCase ( self ):
'''simple docstring'''
torch.manual_seed(0 )
__a : Union[str, Any] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
return CLIPTextModel(__UpperCamelCase )
@property
def __lowerCamelCase ( self ):
'''simple docstring'''
def extract(*__UpperCamelCase , **__UpperCamelCase ):
class SCREAMING_SNAKE_CASE__ :
def __init__( self ):
'''simple docstring'''
__a : List[Any] = torch.ones([0] )
def __lowerCamelCase ( self , __UpperCamelCase ):
'''simple docstring'''
self.pixel_values.to(__UpperCamelCase )
return self
return Out()
return extract
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Union[str, Any] = """cpu""" # ensure determinism for the device-dependent torch.Generator
__a : List[str] = self.dummy_cond_unet
__a : Optional[int] = DDIMScheduler(
beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule="""scaled_linear""" , clip_sample=__UpperCamelCase , set_alpha_to_one=__UpperCamelCase , )
__a : int = self.dummy_vae
__a : Dict = self.dummy_text_encoder
__a : str = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
# make sure here that pndm scheduler skips prk
__a : Any = StableDiffusionPipeline(
unet=__UpperCamelCase , scheduler=__UpperCamelCase , vae=__UpperCamelCase , text_encoder=__UpperCamelCase , tokenizer=__UpperCamelCase , safety_checker=__UpperCamelCase , feature_extractor=self.dummy_extractor , )
__a : Optional[Any] = sd_pipe.to(__UpperCamelCase )
sd_pipe.set_progress_bar_config(disable=__UpperCamelCase )
__a : str = """A painting of a squirrel eating a burger"""
__a : List[str] = torch.Generator(device=__UpperCamelCase ).manual_seed(0 )
__a : List[str] = sd_pipe([prompt] , generator=__UpperCamelCase , guidance_scale=6.0 , num_inference_steps=2 , output_type="""np""" )
__a : List[Any] = output.images
__a : List[str] = torch.Generator(device=__UpperCamelCase ).manual_seed(0 )
__a : str = sd_pipe(
[prompt] , generator=__UpperCamelCase , guidance_scale=6.0 , num_inference_steps=2 , output_type="""np""" , return_dict=__UpperCamelCase , )[0]
__a : Union[str, Any] = image[0, -3:, -3:, -1]
__a : Optional[int] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
__a : int = np.array([0.5_7_5_6, 0.6_1_1_8, 0.5_0_0_5, 0.5_0_4_1, 0.5_4_7_1, 0.4_7_2_6, 0.4_9_7_6, 0.4_8_6_5, 0.4_8_6_4] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : str = """cpu""" # ensure determinism for the device-dependent torch.Generator
__a : Optional[Any] = self.dummy_cond_unet
__a : Optional[int] = PNDMScheduler(skip_prk_steps=__UpperCamelCase )
__a : Union[str, Any] = self.dummy_vae
__a : List[str] = self.dummy_text_encoder
__a : Tuple = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
# make sure here that pndm scheduler skips prk
__a : Union[str, Any] = StableDiffusionPipeline(
unet=__UpperCamelCase , scheduler=__UpperCamelCase , vae=__UpperCamelCase , text_encoder=__UpperCamelCase , tokenizer=__UpperCamelCase , safety_checker=__UpperCamelCase , feature_extractor=self.dummy_extractor , )
__a : List[Any] = sd_pipe.to(__UpperCamelCase )
sd_pipe.set_progress_bar_config(disable=__UpperCamelCase )
__a : Any = """A painting of a squirrel eating a burger"""
__a : int = torch.Generator(device=__UpperCamelCase ).manual_seed(0 )
__a : Optional[int] = sd_pipe([prompt] , generator=__UpperCamelCase , guidance_scale=6.0 , num_inference_steps=2 , output_type="""np""" )
__a : Dict = output.images
__a : Dict = torch.Generator(device=__UpperCamelCase ).manual_seed(0 )
__a : Any = sd_pipe(
[prompt] , generator=__UpperCamelCase , guidance_scale=6.0 , num_inference_steps=2 , output_type="""np""" , return_dict=__UpperCamelCase , )[0]
__a : Optional[Any] = image[0, -3:, -3:, -1]
__a : List[str] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
__a : List[str] = np.array([0.5_1_2_5, 0.5_7_1_6, 0.4_8_2_8, 0.5_0_6_0, 0.5_6_5_0, 0.4_7_6_8, 0.5_1_8_5, 0.4_8_9_5, 0.4_9_9_3] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : List[str] = StableDiffusionPipeline.from_pretrained(
"""hf-internal-testing/tiny-stable-diffusion-lms-pipe""" , safety_checker=__UpperCamelCase )
assert isinstance(__UpperCamelCase , __UpperCamelCase )
assert isinstance(pipe.scheduler , __UpperCamelCase )
assert pipe.safety_checker is None
__a : List[str] = pipe("""example prompt""" , num_inference_steps=2 ).images[0]
assert image is not None
# check that there's no error when saving a pipeline with one of the models being None
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(__UpperCamelCase )
__a : List[str] = StableDiffusionPipeline.from_pretrained(__UpperCamelCase )
# sanity check that the pipeline still works
assert pipe.safety_checker is None
__a : Dict = pipe("""example prompt""" , num_inference_steps=2 ).images[0]
assert image is not None
@unittest.skipIf(torch_device != """cuda""" , """This test requires a GPU""" )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Union[str, Any] = self.dummy_cond_unet
__a : Union[str, Any] = PNDMScheduler(skip_prk_steps=__UpperCamelCase )
__a : Union[str, Any] = self.dummy_vae
__a : Any = self.dummy_text_encoder
__a : List[Any] = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
# put models in fp16
__a : int = unet.half()
__a : Union[str, Any] = vae.half()
__a : Optional[int] = bert.half()
# make sure here that pndm scheduler skips prk
__a : List[str] = StableDiffusionPipeline(
unet=__UpperCamelCase , scheduler=__UpperCamelCase , vae=__UpperCamelCase , text_encoder=__UpperCamelCase , tokenizer=__UpperCamelCase , safety_checker=__UpperCamelCase , feature_extractor=self.dummy_extractor , )
__a : Optional[int] = sd_pipe.to(__UpperCamelCase )
sd_pipe.set_progress_bar_config(disable=__UpperCamelCase )
__a : Any = """A painting of a squirrel eating a burger"""
__a : Tuple = sd_pipe([prompt] , num_inference_steps=2 , output_type="""np""" ).images
assert image.shape == (1, 64, 64, 3)
@nightly
@require_torch_gpu
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
def __lowerCamelCase ( self ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Optional[Any] = StableDiffusionPipeline.from_pretrained("""runwayml/stable-diffusion-v1-5""" , safety_checker=__UpperCamelCase )
__a : Union[str, Any] = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config )
__a : str = sd_pipe.to(__UpperCamelCase )
sd_pipe.set_progress_bar_config(disable=__UpperCamelCase )
__a : Optional[int] = (
"""portrait of girl with smokey eyes makeup in abandoned hotel, grange clothes, redshift, wide high angle"""
""" coloured polaroid photograph with flash, kodak film, hyper real, stunning moody cinematography, with"""
""" anamorphic lenses, by maripol, fallen angels by wong kar - wai, style of suspiria and neon demon and"""
""" children from bahnhof zoo, detailed """
)
__a : Optional[int] = 40_0366_0346
__a : Optional[Any] = 7
# without safety guidance (sld_guidance_scale = 0)
__a : Tuple = torch.manual_seed(__UpperCamelCase )
__a : List[str] = sd_pipe(
[prompt] , generator=__UpperCamelCase , guidance_scale=__UpperCamelCase , num_inference_steps=50 , output_type="""np""" , width=512 , height=512 , sld_guidance_scale=0 , )
__a : Any = output.images
__a : Tuple = image[0, -3:, -3:, -1]
__a : Dict = [0.2_2_7_8, 0.2_2_3_1, 0.2_2_4_9, 0.2_3_3_3, 0.2_3_0_3, 0.1_8_8_5, 0.2_2_7_3, 0.2_1_4_4, 0.2_1_7_6]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
# without safety guidance (strong configuration)
__a : int = torch.manual_seed(__UpperCamelCase )
__a : List[str] = sd_pipe(
[prompt] , generator=__UpperCamelCase , guidance_scale=__UpperCamelCase , num_inference_steps=50 , output_type="""np""" , width=512 , height=512 , sld_guidance_scale=2000 , sld_warmup_steps=7 , sld_threshold=0.0_2_5 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , )
__a : Dict = output.images
__a : Any = image[0, -3:, -3:, -1]
__a : List[str] = [0.2_3_8_3, 0.2_2_7_6, 0.2_3_6, 0.2_1_9_2, 0.2_1_8_6, 0.2_0_5_3, 0.1_9_7_1, 0.1_9_0_1, 0.1_7_1_9]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Tuple = StableDiffusionPipeline.from_pretrained("""runwayml/stable-diffusion-v1-5""" , safety_checker=__UpperCamelCase )
__a : Tuple = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config )
__a : Optional[int] = sd_pipe.to(__UpperCamelCase )
sd_pipe.set_progress_bar_config(disable=__UpperCamelCase )
__a : Any = """padme amidala taking a bath artwork, safe for work, no nudity"""
__a : List[str] = 27_3497_1755
__a : Tuple = 7
__a : List[Any] = torch.manual_seed(__UpperCamelCase )
__a : List[Any] = sd_pipe(
[prompt] , generator=__UpperCamelCase , guidance_scale=__UpperCamelCase , num_inference_steps=50 , output_type="""np""" , width=512 , height=512 , sld_guidance_scale=0 , )
__a : str = output.images
__a : Any = image[0, -3:, -3:, -1]
__a : Optional[int] = [0.3_5_0_2, 0.3_6_2_2, 0.3_3_9_6, 0.3_6_4_2, 0.3_4_7_8, 0.3_3_1_8, 0.3_5, 0.3_3_4_8, 0.3_2_9_7]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
__a : Any = torch.manual_seed(__UpperCamelCase )
__a : Optional[Any] = sd_pipe(
[prompt] , generator=__UpperCamelCase , guidance_scale=__UpperCamelCase , num_inference_steps=50 , output_type="""np""" , width=512 , height=512 , sld_guidance_scale=2000 , sld_warmup_steps=7 , sld_threshold=0.0_2_5 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , )
__a : int = output.images
__a : Dict = image[0, -3:, -3:, -1]
__a : int = [0.5_5_3_1, 0.5_2_0_6, 0.4_8_9_5, 0.5_1_5_6, 0.5_1_8_2, 0.4_7_5_1, 0.4_8_0_2, 0.4_8_0_3, 0.4_4_4_3]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def test_nudity_safetychecker_safe_stable_diffusion( self ):
'''simple docstring'''
__a : List[Any] = StableDiffusionPipeline.from_pretrained("""runwayml/stable-diffusion-v1-5""" )
__a : List[str] = sd_pipe.to(__UpperCamelCase )
sd_pipe.set_progress_bar_config(disable=__UpperCamelCase )
__a : Optional[int] = (
"""the four horsewomen of the apocalypse, painting by tom of finland, gaston bussiere, craig mullins, j. c."""
""" leyendecker"""
)
__a : Tuple = 10_4435_5234
__a : Optional[int] = 12
__a : str = torch.manual_seed(__UpperCamelCase )
__a : Dict = sd_pipe(
[prompt] , generator=__UpperCamelCase , guidance_scale=__UpperCamelCase , num_inference_steps=50 , output_type="""np""" , width=512 , height=512 , sld_guidance_scale=0 , )
__a : Dict = output.images
__a : List[Any] = image[0, -3:, -3:, -1]
__a : Any = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0] )
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-7
__a : Tuple = torch.manual_seed(__UpperCamelCase )
__a : int = sd_pipe(
[prompt] , generator=__UpperCamelCase , guidance_scale=__UpperCamelCase , num_inference_steps=50 , output_type="""np""" , width=512 , height=512 , sld_guidance_scale=2000 , sld_warmup_steps=7 , sld_threshold=0.0_2_5 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , )
__a : Optional[Any] = output.images
__a : List[Any] = image[0, -3:, -3:, -1]
__a : Tuple = np.array([0.5_8_1_8, 0.6_2_8_5, 0.6_8_3_5, 0.6_0_1_9, 0.6_2_5, 0.6_7_5_4, 0.6_0_9_6, 0.6_3_3_4, 0.6_5_6_1] )
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 | 697 |
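# Hedged aside (not part of the tests above): diffusers reference tests pin a
# 3x3 corner of the generated image and bound the max absolute deviation. A
# minimal, framework-free restatement of that check:
import numpy as np

def slice_matches(image: np.ndarray, expected_slice: np.ndarray, atol: float = 1e-2) -> bool:
    # `image` is (batch, height, width, channels); compare the bottom-right
    # 3x3 patch of the last channel against the pinned reference values.
    actual = image[0, -3:, -3:, -1].flatten()
    return bool(np.abs(actual - expected_slice).max() < atol)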
'''simple docstring'''
from typing import List, Optional, Union
import numpy as np
from ....audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function
from ....feature_extraction_sequence_utils import SequenceFeatureExtractor
from ....feature_extraction_utils import BatchFeature
from ....file_utils import PaddingStrategy, TensorType
from ....utils import logging
__SCREAMING_SNAKE_CASE : Tuple = logging.get_logger(__name__)
class SCREAMING_SNAKE_CASE__ ( __UpperCamelCase ):
lowercase__ = ["input_features", "attention_mask"]
def __init__( self , feature_size=80 , sampling_rate=16_000 , padding_value=0.0 , hop_length=10 , win_length=25 , win_function="hamming_window" , frame_signal_scale=32_768.0 , preemphasis_coeff=0.97 , mel_floor=1.0 , normalize_means=True , normalize_vars=True , return_attention_mask=False , **kwargs , ):
    '''simple docstring'''
    super().__init__(feature_size=feature_size , sampling_rate=sampling_rate , padding_value=padding_value , **kwargs )
    self.feature_size = feature_size
    self.sampling_rate = sampling_rate
    self.padding_value = padding_value
    self.hop_length = hop_length
    self.win_length = win_length
    self.frame_signal_scale = frame_signal_scale
    self.preemphasis_coeff = preemphasis_coeff
    self.mel_floor = mel_floor
    self.normalize_means = normalize_means
    self.normalize_vars = normalize_vars
    self.win_function = win_function
    self.return_attention_mask = return_attention_mask
    self.sample_size = win_length * sampling_rate // 1000
    self.sample_stride = hop_length * sampling_rate // 1000
    self.n_fft = optimal_fft_length(self.sample_size )
    self.n_freqs = (self.n_fft // 2) + 1
def _extract_mfsc_features( self , one_waveform ):
    '''simple docstring'''
    if self.win_function == "hamming_window":
        window = window_function(window_length=self.sample_size , name=self.win_function , periodic=False )
    else:
        window = window_function(window_length=self.sample_size , name=self.win_function )
    fbanks = mel_filter_bank(
        num_frequency_bins=self.n_freqs , num_mel_filters=self.feature_size , min_frequency=0.0 , max_frequency=self.sampling_rate / 2.0 , sampling_rate=self.sampling_rate , )
    msfc_features = spectrogram(
        one_waveform * self.frame_signal_scale , window=window , frame_length=self.sample_size , hop_length=self.sample_stride , fft_length=self.n_fft , center=False , preemphasis=self.preemphasis_coeff , mel_filters=fbanks , mel_floor=self.mel_floor , log_mel="""log""" , )
    return msfc_features.T
def _normalize_one( self , x , input_length , padding_value ):
    '''simple docstring'''
    if self.normalize_means:
        mean = x[:input_length].mean(axis=0 )
        x = np.subtract(x , mean )
    if self.normalize_vars:
        std = x[:input_length].std(axis=0 )
        x = np.divide(x , std )
    if input_length < x.shape[0]:
        x[input_length:] = padding_value
    # make sure array is in float32
    x = x.astype(np.float32 )
    return x
def normalize( self , input_features , attention_mask = None ):
    '''simple docstring'''
    lengths = attention_mask.sum(-1 ) if attention_mask is not None else [x.shape[0] for x in input_features]
    return [self._normalize_one(x , n , self.padding_value ) for x, n in zip(input_features , lengths )]
def __call__( self , raw_speech , padding = False , max_length = None , truncation = False , pad_to_multiple_of = None , return_attention_mask = None , return_tensors = None , sampling_rate = None , **kwargs , ):
    '''simple docstring'''
    if sampling_rate is not None:
        if sampling_rate != self.sampling_rate:
            raise ValueError(
                f"""The model corresponding to this feature extractor: {self} was trained using a sampling rate of"""
                f""" {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with"""
                f""" {self.sampling_rate} and not {sampling_rate}.""" )
    else:
        logger.warning(
            """It is strongly recommended to pass the ``sampling_rate`` argument to this function. """
            """Failing to do so can result in silent errors that might be hard to debug.""" )
    is_batched_numpy = isinstance(raw_speech , np.ndarray ) and len(raw_speech.shape ) > 1
    if is_batched_numpy and len(raw_speech.shape ) > 2:
        raise ValueError(f"""Only mono-channel audio is supported for input to {self}""" )
    is_batched = is_batched_numpy or (
        isinstance(raw_speech , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
    )
    if is_batched:
        raw_speech = [np.asarray(speech , dtype=np.float32 ) for speech in raw_speech]
    elif not is_batched and not isinstance(raw_speech , np.ndarray ):
        raw_speech = np.asarray(raw_speech , dtype=np.float32 )
    elif isinstance(raw_speech , np.ndarray ) and raw_speech.dtype is np.dtype(np.float64 ):
        raw_speech = raw_speech.astype(np.float32 )
    # always return batch
    if not is_batched:
        raw_speech = [raw_speech]
    # extract MFSC features
    features = [self._extract_mfsc_features(one_waveform ) for one_waveform in raw_speech]
    # convert into correct format for padding
    encoded_inputs = BatchFeature({"""input_features""": features} )
    padded_inputs = self.pad(
        encoded_inputs , padding=padding , max_length=max_length , truncation=truncation , pad_to_multiple_of=pad_to_multiple_of , return_attention_mask=return_attention_mask , **kwargs , )
    # make sure list is in array format
    input_features = padded_inputs.get("""input_features""" )
    if isinstance(input_features[0] , list ):
        padded_inputs["""input_features"""] = [np.asarray(feature , dtype=np.float32 ) for feature in input_features]
    attention_mask = padded_inputs.get("""attention_mask""" )
    if attention_mask is not None:
        padded_inputs["""attention_mask"""] = [np.asarray(array , dtype=np.int32 ) for array in attention_mask]
    if self.normalize_means or self.normalize_vars:
        attention_mask = (
            np.array(attention_mask , dtype=np.int32 )
            if self._get_padding_strategies(padding , max_length=max_length ) is not PaddingStrategy.DO_NOT_PAD
            and padding
            else None
        )
        padded_inputs["""input_features"""] = self.normalize(
            padded_inputs["""input_features"""] , attention_mask=attention_mask )
    if return_tensors is not None:
        padded_inputs = padded_inputs.convert_to_tensors(return_tensors )
    return padded_inputs
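# Quick, standalone arithmetic check of the framing above (illustrative; it
# assumes optimal_fft_length rounds up to the next power of two): with the
# defaults (25 ms windows, 10 ms hops, 16 kHz audio) the extractor uses
# 400-sample windows, 160-sample strides, a 512-point FFT, and 257 bins.
def frame_params(win_length_ms: int = 25, hop_length_ms: int = 10, sampling_rate: int = 16_000):
    sample_size = win_length_ms * sampling_rate // 1000    # 400
    sample_stride = hop_length_ms * sampling_rate // 1000  # 160
    n_fft = 1 << (sample_size - 1).bit_length()            # next power of two: 512
    return sample_size, sample_stride, (n_fft // 2) + 1    # (400, 160, 257)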
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES, BertTokenizer
from transformers.testing_utils import require_tokenizers, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import VisionTextDualEncoderProcessor, ViTImageProcessor
@require_tokenizers
@require_vision
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
def setUp( self ):
    '''simple docstring'''
    self.tmpdirname = tempfile.mkdtemp()
    # fmt: off
    vocab_tokens = ["""[UNK]""", """[CLS]""", """[SEP]""", """[PAD]""", """[MASK]""", """want""", """##want""", """##ed""", """wa""", """un""", """runn""", """##ing""", """,""", """low""", """lowest"""]
    # fmt: on
    self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
    with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer:
        vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
    image_processor_map = {
        """do_resize""": True,
        """size""": {"""height""": 18, """width""": 18},
        """do_normalize""": True,
        """image_mean""": [0.5, 0.5, 0.5],
        """image_std""": [0.5, 0.5, 0.5],
    }
    self.image_processor_file = os.path.join(self.tmpdirname , IMAGE_PROCESSOR_NAME )
    with open(self.image_processor_file , """w""" , encoding="""utf-8""" ) as fp:
        json.dump(image_processor_map , fp )
def get_tokenizer( self , **kwargs ):
    '''simple docstring'''
    return BertTokenizer.from_pretrained(self.tmpdirname , **kwargs )
def get_image_processor( self , **kwargs ):
    '''simple docstring'''
    return ViTImageProcessor.from_pretrained(self.tmpdirname , **kwargs )
def tearDown( self ):
    '''simple docstring'''
    shutil.rmtree(self.tmpdirname )
def prepare_image_inputs( self ):
    '''simple docstring'''
    image_inputs = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uint8 )]
    image_inputs = [Image.fromarray(np.moveaxis(x , 0 , -1 ) ) for x in image_inputs]
    return image_inputs
def test_save_load_pretrained_default( self ):
'''simple docstring'''
__a : Optional[int] = self.get_tokenizer()
__a : Optional[int] = self.get_image_processor()
__a : Tuple = VisionTextDualEncoderProcessor(tokenizer=__UpperCamelCase , image_processor=__UpperCamelCase )
processor.save_pretrained(self.tmpdirname )
__a : List[str] = VisionTextDualEncoderProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(processor.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor.image_processor , __UpperCamelCase )
def test_save_load_pretrained_additional_features( self ):
'''simple docstring'''
__a : Optional[Any] = VisionTextDualEncoderProcessor(
tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
__a : str = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" )
__a : Tuple = self.get_image_processor(do_normalize=__UpperCamelCase , padding_value=1.0 )
__a : str = VisionTextDualEncoderProcessor.from_pretrained(
self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=__UpperCamelCase , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , __UpperCamelCase )
def test_image_processor( self ):
'''simple docstring'''
__a : Optional[int] = self.get_image_processor()
__a : Dict = self.get_tokenizer()
__a : Union[str, Any] = VisionTextDualEncoderProcessor(tokenizer=__UpperCamelCase , image_processor=__UpperCamelCase )
__a : List[str] = self.prepare_image_inputs()
__a : Optional[int] = image_processor(__UpperCamelCase , return_tensors="""np""" )
__a : Union[str, Any] = processor(images=__UpperCamelCase , return_tensors="""np""" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
def test_tokenizer( self ):
'''simple docstring'''
__a : Tuple = self.get_image_processor()
__a : Tuple = self.get_tokenizer()
__a : Dict = VisionTextDualEncoderProcessor(tokenizer=__UpperCamelCase , image_processor=__UpperCamelCase )
__a : List[str] = """lower newer"""
__a : Tuple = processor(text=__UpperCamelCase )
__a : Union[str, Any] = tokenizer(__UpperCamelCase )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def test_processor( self ):
'''simple docstring'''
__a : str = self.get_image_processor()
__a : Tuple = self.get_tokenizer()
__a : int = VisionTextDualEncoderProcessor(tokenizer=__UpperCamelCase , image_processor=__UpperCamelCase )
__a : Any = """lower newer"""
__a : Dict = self.prepare_image_inputs()
__a : List[str] = processor(text=__UpperCamelCase , images=__UpperCamelCase )
self.assertListEqual(list(inputs.keys() ) , ["""input_ids""", """token_type_ids""", """attention_mask""", """pixel_values"""] )
# test if it raises when no input is passed
with self.assertRaises(__UpperCamelCase ):
processor()
def test_tokenizer_decode( self ):
'''simple docstring'''
__a : int = self.get_image_processor()
__a : List[Any] = self.get_tokenizer()
__a : Optional[int] = VisionTextDualEncoderProcessor(tokenizer=__UpperCamelCase , image_processor=__UpperCamelCase )
__a : List[Any] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
__a : Any = processor.batch_decode(__UpperCamelCase )
__a : List[str] = tokenizer.batch_decode(__UpperCamelCase )
self.assertListEqual(__UpperCamelCase , __UpperCamelCase )
def test_model_input_names( self ):
'''simple docstring'''
__a : Union[str, Any] = self.get_image_processor()
__a : str = self.get_tokenizer()
__a : List[Any] = VisionTextDualEncoderProcessor(tokenizer=__UpperCamelCase , image_processor=__UpperCamelCase )
__a : List[Any] = """lower newer"""
__a : str = self.prepare_image_inputs()
__a : Union[str, Any] = processor(text=__UpperCamelCase , images=__UpperCamelCase )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names ) | 697 |
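# Hedged sketch (an assumption, not the transformers API) of the contract the
# tests above exercise: a processor fans text out to the tokenizer and images
# out to the image processor, then merges both encodings into one dict.
class ToyProcessor:
    def __init__(self, tokenizer, image_processor):
        self.tokenizer = tokenizer
        self.image_processor = image_processor

    def __call__(self, text=None, images=None, **kwargs):
        if text is None and images is None:
            raise ValueError("You have to specify either text or images.")
        encoding = {}
        if text is not None:
            encoding.update(self.tokenizer(text, **kwargs))
        if images is not None:
            encoding.update(self.image_processor(images, **kwargs))
        return encoding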
'''simple docstring'''
g = 9.80665
def _snake_case ( fluid_density , volume , gravity = g ) -> float:
if fluid_density <= 0:
raise ValueError("""Impossible fluid density""" )
if volume < 0:
raise ValueError("""Impossible Object volume""" )
if gravity <= 0:
raise ValueError("""Impossible Gravity""" )
return fluid_density * gravity * volume
if __name__ == "__main__":
import doctest
# run doctest
doctest.testmod() | 697 | 1 |
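# Worked example for the buoyant-force formula above (illustrative, not in the
# original): a fully submerged 0.5 m^3 body in water (fluid_density = 1000
# kg/m^3) under standard gravity displaces F = rho * g * V = 1000 * 9.80665 * 0.5.
assert abs(_snake_case(1000, 0.5) - 4903.325) < 1e-6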
'''simple docstring'''
import unittest
from transformers import load_tool
from .test_tools_common import ToolTesterMixin
TEXT = '\nHugging Face was founded in 2016 by French entrepreneurs Clément Delangue, Julien Chaumond, and Thomas Wolf originally as a company that developed a chatbot app targeted at teenagers.[2] After open-sourcing the model behind the chatbot, the company pivoted to focus on being a platform for machine learning.\n\nIn March 2021, Hugging Face raised $40 million in a Series B funding round.[3]\n\nOn April 28, 2021, the company launched the BigScience Research Workshop in collaboration with several other research groups to release an open large language model.[4] In 2022, the workshop concluded with the announcement of BLOOM, a multilingual large language model with 176 billion parameters.[5]\n'
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase , ToolTesterMixin ):
    def setUp( self ):
        '''simple docstring'''
        self.tool = load_tool("""text-question-answering""" )
        self.tool.setup()
        self.remote_tool = load_tool("""text-question-answering""" , remote=True )
    def test_exact_match_arg( self ):
        '''simple docstring'''
        result = self.tool(TEXT , """What did Hugging Face do in April 2021?""" )
        self.assertEqual(result , """launched the BigScience Research Workshop""" )
    def test_exact_match_arg_remote( self ):
        '''simple docstring'''
        result = self.remote_tool(TEXT , """What did Hugging Face do in April 2021?""" )
        self.assertEqual(result , """launched the BigScience Research Workshop""" )
    def test_exact_match_kwarg( self ):
        '''simple docstring'''
        result = self.tool(text=TEXT , question="""What did Hugging Face do in April 2021?""" )
        self.assertEqual(result , """launched the BigScience Research Workshop""" )
    def test_exact_match_kwarg_remote( self ):
        '''simple docstring'''
        result = self.remote_tool(text=TEXT , question="""What did Hugging Face do in April 2021?""" )
        self.assertEqual(result , """launched the BigScience Research Workshop""" )
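# Hedged aside (not part of the test above): the "text-question-answering"
# tool wraps an extractive QA model; the same question can be answered with
# the plain pipeline API (a default checkpoint download is implied).
from transformers import pipeline

def answer(question: str, context: str) -> str:
    qa = pipeline("question-answering")
    return qa(question=question, context=context)["answer"]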
'''simple docstring'''
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DetrImageProcessor
class DetrImageProcessingTester ( unittest.TestCase ):
def __init__( self , parent , batch_size=7 , num_channels=3 , min_resolution=30 , max_resolution=400 , do_resize=True , size=None , do_rescale=True , rescale_factor=1 / 255 , do_normalize=True , image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] , do_pad=True , ):
    '''simple docstring'''
    size = size if size is not None else {"""shortest_edge""": 18, """longest_edge""": 1333}
    self.parent = parent
    self.batch_size = batch_size
    self.num_channels = num_channels
    self.min_resolution = min_resolution
    self.max_resolution = max_resolution
    self.do_resize = do_resize
    self.size = size
    self.do_rescale = do_rescale
    self.rescale_factor = rescale_factor
    self.do_normalize = do_normalize
    self.image_mean = image_mean
    self.image_std = image_std
    self.do_pad = do_pad
def prepare_image_processor_dict( self ):
'''simple docstring'''
return {
"do_resize": self.do_resize,
"size": self.size,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_pad": self.do_pad,
}
def get_expected_values( self , image_inputs , batched=False ):
    '''simple docstring'''
    if not batched:
        image = image_inputs[0]
        if isinstance(image , Image.Image ):
            w , h = image.size
        else:
            h , w = image.shape[1], image.shape[2]
        if w < h:
            expected_height = int(self.size["""shortest_edge"""] * h / w )
            expected_width = self.size["""shortest_edge"""]
        elif w > h:
            expected_height = self.size["""shortest_edge"""]
            expected_width = int(self.size["""shortest_edge"""] * w / h )
        else:
            expected_height = self.size["""shortest_edge"""]
            expected_width = self.size["""shortest_edge"""]
    else:
        expected_values = []
        for image in image_inputs:
            expected_height , expected_width = self.get_expected_values([image] )
            expected_values.append((expected_height, expected_width) )
        expected_height = max(expected_values , key=lambda item : item[0] )[0]
        expected_width = max(expected_values , key=lambda item : item[1] )[1]
    return expected_height, expected_width
@require_torch
@require_vision
class SCREAMING_SNAKE_CASE__ ( __UpperCamelCase , unittest.TestCase ):
lowercase__ = DetrImageProcessor if is_vision_available() else None
def setUp( self ):
    '''simple docstring'''
    self.image_processor_tester = DetrImageProcessingTester(self )
@property
def image_processor_dict( self ):
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def test_image_processor_properties( self ):
'''simple docstring'''
__a : Optional[int] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__UpperCamelCase , """image_mean""" ) )
self.assertTrue(hasattr(__UpperCamelCase , """image_std""" ) )
self.assertTrue(hasattr(__UpperCamelCase , """do_normalize""" ) )
self.assertTrue(hasattr(__UpperCamelCase , """do_rescale""" ) )
self.assertTrue(hasattr(__UpperCamelCase , """rescale_factor""" ) )
self.assertTrue(hasattr(__UpperCamelCase , """do_resize""" ) )
self.assertTrue(hasattr(__UpperCamelCase , """size""" ) )
self.assertTrue(hasattr(__UpperCamelCase , """do_pad""" ) )
def test_image_processor_from_dict_with_kwargs( self ):
'''simple docstring'''
__a : Optional[Any] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"""shortest_edge""": 18, """longest_edge""": 1333} )
self.assertEqual(image_processor.do_pad , __UpperCamelCase )
__a : List[Any] = self.image_processing_class.from_dict(
self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=__UpperCamelCase )
self.assertEqual(image_processor.size , {"""shortest_edge""": 42, """longest_edge""": 84} )
self.assertEqual(image_processor.do_pad , __UpperCamelCase )
def test_batch_feature( self ):
'''simple docstring'''
pass
def test_call_pil( self ):
'''simple docstring'''
__a : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__a : Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__UpperCamelCase )
for image in image_inputs:
self.assertIsInstance(__UpperCamelCase , Image.Image )
# Test not batched input
__a : Optional[Any] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
__a , __a : Any = self.image_processor_tester.get_expected_values(__UpperCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
__a , __a : Optional[int] = self.image_processor_tester.get_expected_values(__UpperCamelCase , batched=__UpperCamelCase )
__a : Any = image_processing(__UpperCamelCase , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def test_call_numpy( self ):
'''simple docstring'''
__a : Dict = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__a : List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__UpperCamelCase , numpify=__UpperCamelCase )
for image in image_inputs:
self.assertIsInstance(__UpperCamelCase , np.ndarray )
# Test not batched input
__a : Dict = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
__a , __a : Any = self.image_processor_tester.get_expected_values(__UpperCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
__a : List[str] = image_processing(__UpperCamelCase , return_tensors="""pt""" ).pixel_values
__a , __a : str = self.image_processor_tester.get_expected_values(__UpperCamelCase , batched=__UpperCamelCase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def test_call_pytorch( self ):
'''simple docstring'''
__a : Any = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__a : Union[str, Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__UpperCamelCase , torchify=__UpperCamelCase )
for image in image_inputs:
self.assertIsInstance(__UpperCamelCase , torch.Tensor )
# Test not batched input
__a : Any = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
__a , __a : Any = self.image_processor_tester.get_expected_values(__UpperCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
__a : List[str] = image_processing(__UpperCamelCase , return_tensors="""pt""" ).pixel_values
__a , __a : Any = self.image_processor_tester.get_expected_values(__UpperCamelCase , batched=__UpperCamelCase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
def test_call_pytorch_with_coco_detection_annotations( self ):
'''simple docstring'''
__a : List[Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
with open("""./tests/fixtures/tests_samples/COCO/coco_annotations.txt""" , """r""" ) as f:
__a : Dict = json.loads(f.read() )
__a : Optional[int] = {"""image_id""": 3_9769, """annotations""": target}
# encode them
__a : List[str] = DetrImageProcessor.from_pretrained("""facebook/detr-resnet-50""" )
__a : Tuple = image_processing(images=__UpperCamelCase , annotations=__UpperCamelCase , return_tensors="""pt""" )
# verify pixel values
__a : Union[str, Any] = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding["""pixel_values"""].shape , __UpperCamelCase )
__a : List[str] = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] )
self.assertTrue(torch.allclose(encoding["""pixel_values"""][0, 0, 0, :3] , __UpperCamelCase , atol=1E-4 ) )
# verify area
__a : List[Any] = torch.tensor([5_8_8_7.9_6_0_0, 1_1_2_5_0.2_0_6_1, 4_8_9_3_5_3.8_4_3_8, 8_3_7_1_2_2.7_5_0_0, 1_4_7_9_6_7.5_1_5_6, 1_6_5_7_3_2.3_4_3_8] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""area"""] , __UpperCamelCase ) )
# verify boxes
__a : Optional[int] = torch.Size([6, 4] )
self.assertEqual(encoding["""labels"""][0]["""boxes"""].shape , __UpperCamelCase )
__a : Any = torch.tensor([0.5_5_0_3, 0.2_7_6_5, 0.0_6_0_4, 0.2_2_1_5] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""boxes"""][0] , __UpperCamelCase , atol=1E-3 ) )
# verify image_id
__a : Union[str, Any] = torch.tensor([3_9769] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""image_id"""] , __UpperCamelCase ) )
# verify is_crowd
__a : List[Any] = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""iscrowd"""] , __UpperCamelCase ) )
# verify class_labels
__a : Any = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""class_labels"""] , __UpperCamelCase ) )
# verify orig_size
__a : Any = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""orig_size"""] , __UpperCamelCase ) )
# verify size
__a : str = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""size"""] , __UpperCamelCase ) )
@slow
def test_call_pytorch_with_coco_panoptic_annotations( self ):
'''simple docstring'''
__a : List[Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
with open("""./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt""" , """r""" ) as f:
__a : Tuple = json.loads(f.read() )
__a : str = {"""file_name""": """000000039769.png""", """image_id""": 3_9769, """segments_info""": target}
__a : int = pathlib.Path("""./tests/fixtures/tests_samples/COCO/coco_panoptic""" )
# encode them
__a : List[str] = DetrImageProcessor.from_pretrained("""facebook/detr-resnet-50-panoptic""" )
__a : Tuple = image_processing(images=__UpperCamelCase , annotations=__UpperCamelCase , masks_path=__UpperCamelCase , return_tensors="""pt""" )
# verify pixel values
__a : List[str] = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding["""pixel_values"""].shape , __UpperCamelCase )
__a : Any = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] )
self.assertTrue(torch.allclose(encoding["""pixel_values"""][0, 0, 0, :3] , __UpperCamelCase , atol=1E-4 ) )
# verify area
__a : Optional[Any] = torch.tensor([1_4_7_9_7_9.6_8_7_5, 1_6_5_5_2_7.0_4_6_9, 4_8_4_6_3_8.5_9_3_8, 1_1_2_9_2.9_3_7_5, 5_8_7_9.6_5_6_2, 7_6_3_4.1_1_4_7] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""area"""] , __UpperCamelCase ) )
# verify boxes
__a : Optional[Any] = torch.Size([6, 4] )
self.assertEqual(encoding["""labels"""][0]["""boxes"""].shape , __UpperCamelCase )
__a : List[str] = torch.tensor([0.2_6_2_5, 0.5_4_3_7, 0.4_6_8_8, 0.8_6_2_5] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""boxes"""][0] , __UpperCamelCase , atol=1E-3 ) )
# verify image_id
__a : List[str] = torch.tensor([3_9769] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""image_id"""] , __UpperCamelCase ) )
# verify is_crowd
__a : Optional[int] = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""iscrowd"""] , __UpperCamelCase ) )
# verify class_labels
__a : Optional[int] = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""class_labels"""] , __UpperCamelCase ) )
# verify masks
__a : Union[str, Any] = 82_2873
self.assertEqual(encoding["""labels"""][0]["""masks"""].sum().item() , __UpperCamelCase )
# verify orig_size
__a : str = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""orig_size"""] , __UpperCamelCase ) )
# verify size
__a : List[Any] = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""size"""] , __UpperCamelCase ) ) | 697 | 1 |
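# Standalone restatement (hedged) of the shortest-edge resize rule that
# get_expected_values mirrors above: scale so the short side reaches
# `shortest_edge` while preserving aspect ratio; square images stay square.
def expected_resize(h: int, w: int, shortest_edge: int = 18) -> tuple[int, int]:
    if w < h:
        return int(shortest_edge * h / w), shortest_edge
    if w > h:
        return shortest_edge, int(shortest_edge * w / h)
    return shortest_edge, shortest_edge  # returns (expected_height, expected_width)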
'''simple docstring'''
import unicodedata
from dataclasses import dataclass
from typing import Optional, Union
import numpy as np
from transformers.data.data_collator import DataCollatorMixin
from transformers.file_utils import PaddingStrategy
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
def padding_tensor( sequences , padding_value , padding_side , sequence_length ):
    if isinstance(padding_value , tuple ):
        out_tensor = np.full((len(sequences ), sequence_length, 2) , padding_value )
    else:
        out_tensor = np.full((len(sequences ), sequence_length) , padding_value )
    for i, tensor in enumerate(sequences ):
        if padding_side == "right":
            # write the (possibly truncated) row at the start, padding on the right
            if isinstance(padding_value , tuple ):
                out_tensor[i, : len(tensor[:sequence_length] ), :2] = tensor[:sequence_length]
            else:
                out_tensor[i, : len(tensor[:sequence_length] )] = tensor[:sequence_length]
        else:
            # write the (possibly truncated) row at the end, padding on the left
            if isinstance(padding_value , tuple ):
                out_tensor[i, -len(tensor[:sequence_length] ) :, :2] = tensor[:sequence_length]
            else:
                out_tensor[i, -len(tensor[:sequence_length] ) :] = tensor[:sequence_length]
    return out_tensor.tolist()
def is_punctuation( char ) -> bool:
    cp = ord(char )
    if (cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or (cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126):
        return True
    cat = unicodedata.category(char )
    if cat.startswith("""P""" ):
        return True
    return False
@dataclass
class SCREAMING_SNAKE_CASE__ ( __UpperCamelCase ):
lowercase__ = 42
lowercase__ = True
lowercase__ = None
lowercase__ = None
lowercase__ = -1_00
lowercase__ = "pt"
def torch_call( self , features ):
    '''simple docstring'''
    import torch
    label_name = """label""" if """label""" in features[0].keys() else """labels"""
    labels = [feature[label_name] for feature in features] if label_name in features[0].keys() else None
    batch = self.tokenizer.pad(
        features , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors="""pt""" if labels is None else None , )
    if labels is None:
        return batch
    sequence_length = torch.tensor(batch["""entity_ids"""] ).shape[1]
    padding_side = self.tokenizer.padding_side
    if padding_side == "right":
        batch[label_name] = [
            list(label ) + [self.label_pad_token_id] * (sequence_length - len(label )) for label in labels
        ]
    else:
        batch[label_name] = [
            [self.label_pad_token_id] * (sequence_length - len(label )) + list(label ) for label in labels
        ]
    ner_tags = [feature["""ner_tags"""] for feature in features]
    batch["""ner_tags"""] = padding_tensor(ner_tags , -1 , padding_side , sequence_length )
    original_entity_spans = [feature["""original_entity_spans"""] for feature in features]
    batch["""original_entity_spans"""] = padding_tensor(original_entity_spans , (-1, -1) , padding_side , sequence_length )
    batch = {k: torch.tensor(v , dtype=torch.int64 ) for k, v in batch.items()}
    return batch
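# Illustrative sanity check for padding_tensor above (not in the original
# module): right-pad two ragged label rows to length 4 with -100.
if __name__ == "__main__":
    padded = padding_tensor([[1, 2], [3]], -100, "right", 4)
    assert padded == [[1, 2, -100, -100], [3, -100, -100, -100]]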
'''simple docstring'''
import argparse
import logging
import os
import time
import timeit
import datasets
import numpy as np
import pycuda.autoinit # noqa: F401
import pycuda.driver as cuda
import tensorrt as trt
import torch
from absl import logging as absl_logging
from accelerate import Accelerator
from datasets import load_dataset, load_metric
from torch.utils.data import DataLoader
from utils_qa import postprocess_qa_predictions
import transformers
from transformers import AutoTokenizer, EvalPrediction, default_data_collator, set_seed
from transformers.trainer_pt_utils import nested_concat, nested_truncate
TRT_LOGGER = trt.Logger(trt.Logger.WARNING)
absl_logger = absl_logging.get_absl_logger()
absl_logger.setLevel(logging.WARNING)
logger = logging.getLogger(__name__)
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--onnx_model_path',
default=None,
type=str,
required=True,
help='Path to ONNX model: ',
)
parser.add_argument(
'--output_dir',
default=None,
type=str,
required=True,
help='The output directory where the model checkpoints and predictions will be written.',
)
# Other parameters
parser.add_argument(
'--tokenizer_name',
default='',
type=str,
required=True,
help='Pretrained tokenizer name or path if not the same as model_name',
)
parser.add_argument(
'--version_2_with_negative',
action='store_true',
help='If true, the SQuAD examples contain some that do not have an answer.',
)
parser.add_argument(
'--null_score_diff_threshold',
type=float,
default=0.0,
help='If null_score - best_non_null is greater than the threshold predict null.',
)
parser.add_argument(
'--max_seq_length',
default=384,
type=int,
help=(
'The maximum total input sequence length after WordPiece tokenization. Sequences '
'longer than this will be truncated, and sequences shorter than this will be padded.'
),
)
parser.add_argument(
'--doc_stride',
default=128,
type=int,
help='When splitting up a long document into chunks, how much stride to take between chunks.',
)
parser.add_argument('--per_device_eval_batch_size', default=8, type=int, help='Batch size per GPU/CPU for evaluation.')
parser.add_argument(
'--n_best_size',
default=20,
type=int,
help='The total number of n-best predictions to generate in the nbest_predictions.json output file.',
)
parser.add_argument(
'--max_answer_length',
default=30,
type=int,
help=(
'The maximum length of an answer that can be generated. This is needed because the start '
'and end predictions are not conditioned on one another.'
),
)
parser.add_argument('--seed', type=int, default=42, help='random seed for initialization')
parser.add_argument(
'--dataset_name',
type=str,
default=None,
required=True,
help='The name of the dataset to use (via the datasets library).',
)
parser.add_argument(
'--dataset_config_name',
type=str,
default=None,
help='The configuration name of the dataset to use (via the datasets library).',
)
parser.add_argument(
'--preprocessing_num_workers', type=int, default=4, help='A csv or a json file containing the training data.'
)
parser.add_argument('--overwrite_cache', action='store_true', help='Overwrite the cached training and evaluation sets')
parser.add_argument(
'--fp16',
action='store_true',
help='Whether to use 16-bit (mixed) precision instead of 32-bit',
)
parser.add_argument(
'--int8',
action='store_true',
help='Whether to use INT8',
)
args = parser.parse_args()
if args.tokenizer_name:
    tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name, use_fast=True)
else:
raise ValueError(
'You are instantiating a new tokenizer from scratch. This is not supported by this script.'
'You can do it from another script, save it, and load it from here, using --tokenizer_name.'
)
logger.info('Training/evaluation parameters %s', args)
args.eval_batch_size = args.per_device_eval_batch_size
INPUT_SHAPE = (args.eval_batch_size, args.max_seq_length)
# TRT Engine properties
STRICT_TYPES = True
engine_name = 'temp_engine/bert-fp32.engine'
if args.fp16:
    engine_name = 'temp_engine/bert-fp16.engine'
if args.int8:
    engine_name = 'temp_engine/bert-int8.engine'
# import ONNX file
if not os.path.exists('temp_engine'):
os.makedirs('temp_engine')
EXPLICIT_BATCH = 1 << (int)(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)
with trt.Builder(TRT_LOGGER) as builder, builder.create_network(EXPLICIT_BATCH) as network, trt.OnnxParser(
network, TRT_LOGGER
) as parser:
with open(args.onnx_model_path, 'rb') as model:
if not parser.parse(model.read()):
for error in range(parser.num_errors):
print(parser.get_error(error))
# Query input names and shapes from parsed TensorRT network
network_inputs = [network.get_input(i) for i in range(network.num_inputs)]
input_names = [_input.name for _input in network_inputs]  # ex: ["actual_input1"]
with builder.create_builder_config() as config:
config.max_workspace_size = 1 << 50
if STRICT_TYPES:
config.set_flag(trt.BuilderFlag.STRICT_TYPES)
if args.fp16:
    config.set_flag(trt.BuilderFlag.FP16)
if args.int8:
    config.set_flag(trt.BuilderFlag.INT8)
profile = builder.create_optimization_profile()
config.add_optimization_profile(profile)
for i in range(len(input_names)):
profile.set_shape(input_names[i], INPUT_SHAPE, INPUT_SHAPE, INPUT_SHAPE)
engine = builder.build_engine(network, config)
# serialize_engine and store in file (can be directly loaded and deserialized):
with open(engine_name, 'wb') as f:
f.write(engine.serialize())
def model_infer( inputs , context , d_inputs , h_output0 , h_output1 , d_output0 , d_output1 , stream ):
    input_ids = np.asarray(inputs["""input_ids"""] , dtype=np.int32 )
    attention_mask = np.asarray(inputs["""attention_mask"""] , dtype=np.int32 )
    token_type_ids = np.asarray(inputs["""token_type_ids"""] , dtype=np.int32 )
    # Copy inputs
    cuda.memcpy_htod_async(d_inputs[0] , input_ids.ravel() , stream )
    cuda.memcpy_htod_async(d_inputs[1] , attention_mask.ravel() , stream )
    cuda.memcpy_htod_async(d_inputs[2] , token_type_ids.ravel() , stream )
    # start time
    start_time = time.time()
    # Run inference
    context.execute_async(
        bindings=[int(d_inp ) for d_inp in d_inputs] + [int(d_output0 ), int(d_output1 )] , stream_handle=stream.handle )
    # Transfer predictions back from GPU
    cuda.memcpy_dtoh_async(h_output0 , d_output0 , stream )
    cuda.memcpy_dtoh_async(h_output1 , d_output1 , stream )
    # Synchronize the stream and take time
    stream.synchronize()
    # end time
    end_time = time.time()
    infer_time = end_time - start_time
    outputs = (h_output0, h_output1)
    # print(outputs)
    return outputs, infer_time
# Initialize the accelerator. We will let the accelerator handle device placement for us in this example.
accelerator = Accelerator()
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
datefmt='%m/%d/%Y %H:%M:%S',
level=logging.INFO,
)
# Setup logging, we only want one process per machine to log things on the screen.
# accelerator.is_local_main_process is only True for one process per machine.
logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR)
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_info()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
# If passed along, set the training seed now.
if args.seed is not None:
set_seed(args.seed)
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
if args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
    raw_datasets = load_dataset(args.dataset_name, args.dataset_config_name)
else:
raise ValueError('Evaluation requires a dataset name')
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Preprocessing the datasets.
# Preprocessing is slighlty different for training and evaluation.
column_names = raw_datasets['validation'].column_names
question_column_name = 'question' if 'question' in column_names else column_names[0]
context_column_name = 'context' if 'context' in column_names else column_names[1]
answer_column_name = 'answers' if 'answers' in column_names else column_names[2]
# Padding side determines if we do (question|context) or (context|question).
pad_on_right = tokenizer.padding_side == 'right'
if args.max_seq_length > tokenizer.model_max_length:
logger.warning(
f'''The max_seq_length passed ({args.max_seq_length}) is larger than the maximum length for the'''
f'''model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.'''
)
max_seq_length = min(args.max_seq_length, tokenizer.model_max_length)
def prepare_validation_features( examples ):
    # Some of the questions have lots of whitespace on the left, which is not useful and will make the
    # truncation of the context fail (the tokenized question will take a lots of space). So we remove that
    # left whitespace
    examples[question_column_name] = [q.lstrip() for q in examples[question_column_name]]
    # Tokenize our examples with truncation and maybe padding, but keep the overflows using a stride. This results
    # in one example possible giving several features when a context is long, each of those features having a
    # context that overlaps a bit the context of the previous feature.
    tokenized_examples = tokenizer(
        examples[question_column_name if pad_on_right else context_column_name] , examples[context_column_name if pad_on_right else question_column_name] , truncation="""only_second""" if pad_on_right else """only_first""" , max_length=max_seq_length , stride=args.doc_stride , return_overflowing_tokens=True , return_offsets_mapping=True , padding="""max_length""" , )
    # Since one example might give us several features if it has a long context, we need a map from a feature to
    # its corresponding example. This key gives us just that.
    sample_mapping = tokenized_examples.pop("""overflow_to_sample_mapping""" )
    # For evaluation, we will need to convert our predictions to substrings of the context, so we keep the
    # corresponding example_id and we will store the offset mappings.
    tokenized_examples["""example_id"""] = []
    for i in range(len(tokenized_examples["""input_ids"""] ) ):
        # Grab the sequence corresponding to that example (to know what is the context and what is the question).
        sequence_ids = tokenized_examples.sequence_ids(i )
        context_index = 1 if pad_on_right else 0
        # One example can give several spans, this is the index of the example containing this span of text.
        sample_index = sample_mapping[i]
        tokenized_examples["""example_id"""].append(examples["""id"""][sample_index] )
        # Set to None the offset_mapping that are not part of the context so it's easy to determine if a token
        # position is part of the context or not.
        tokenized_examples["""offset_mapping"""][i] = [
            (o if sequence_ids[k] == context_index else None)
            for k, o in enumerate(tokenized_examples["""offset_mapping"""][i] )
        ]
    return tokenized_examples
eval_examples = raw_datasets['validation']
# Validation Feature Creation
eval_dataset = eval_examples.map(
prepare_validation_features,
batched=True,
num_proc=args.preprocessing_num_workers,
remove_columns=column_names,
load_from_cache_file=not args.overwrite_cache,
desc='Running tokenizer on validation dataset',
)
data_collator = default_data_collator
eval_dataset_for_model = eval_dataset.remove_columns(['example_id', 'offset_mapping'])
eval_dataloader = DataLoader(
eval_dataset_for_model, collate_fn=data_collator, batch_size=args.per_device_eval_batch_size
)
def post_processing_function( examples , features , predictions , stage="eval" ):
    # Post-processing: we match the start logits and end logits to answers in the original context.
    predictions = postprocess_qa_predictions(
        examples=examples , features=features , predictions=predictions , version_2_with_negative=args.version_2_with_negative , n_best_size=args.n_best_size , max_answer_length=args.max_answer_length , null_score_diff_threshold=args.null_score_diff_threshold , output_dir=args.output_dir , prefix=stage , )
    # Format the result to the format the metric expects.
    if args.version_2_with_negative:
        formatted_predictions = [
            {"""id""": k, """prediction_text""": v, """no_answer_probability""": 0.0} for k, v in predictions.items()
        ]
    else:
        formatted_predictions = [{"""id""": k, """prediction_text""": v} for k, v in predictions.items()]
    references = [{"""id""": ex["""id"""], """answers""": ex[answer_column_name]} for ex in examples]
    return EvalPrediction(predictions=formatted_predictions , label_ids=references )
metric = load_metric('squad_v2' if args.version_2_with_negative else 'squad')
# Evaluation!
logger.info('Loading ONNX model %s for evaluation', args.onnx_model_path)
with open(engine_name, 'rb') as f, trt.Runtime(TRT_LOGGER) as runtime, runtime.deserialize_cuda_engine(
f.read()
) as engine, engine.create_execution_context() as context:
# setup for TRT inferrence
for i in range(len(input_names)):
context.set_binding_shape(i, INPUT_SHAPE)
assert context.all_binding_shapes_specified
    def binding_nbytes( binding ):
        return trt.volume(engine.get_binding_shape(binding ) ) * engine.get_binding_dtype(binding ).itemsize
# Allocate device memory for inputs and outputs.
    d_inputs = [cuda.mem_alloc(binding_nbytes(binding)) for binding in engine if engine.binding_is_input(binding)]
# Allocate output buffer
    h_output0 = cuda.pagelocked_empty(tuple(context.get_binding_shape(3)), dtype=np.float32)
    h_output1 = cuda.pagelocked_empty(tuple(context.get_binding_shape(4)), dtype=np.float32)
    d_output0 = cuda.mem_alloc(h_output0.nbytes)
    d_output1 = cuda.mem_alloc(h_output1.nbytes)
    # Create a stream in which to copy inputs/outputs and run inference.
    stream = cuda.Stream()
# Evaluation
logger.info('***** Running Evaluation *****')
logger.info(f''' Num examples = {len(eval_dataset)}''')
logger.info(f''' Batch size = {args.per_device_eval_batch_size}''')
    total_time = 0.0
    niter = 0
    start_time = timeit.default_timer()
    all_preds = None
    for step, batch in enumerate(eval_dataloader):
        outputs, infer_time = model_infer(batch, context, d_inputs, h_output0, h_output1, d_output0, d_output1, stream)
        total_time += infer_time
        niter += 1
        start_logits, end_logits = outputs
        start_logits = torch.tensor(start_logits)
        end_logits = torch.tensor(end_logits)
        # necessary to pad predictions and labels for being gathered
        start_logits = accelerator.pad_across_processes(start_logits, dim=1, pad_index=-100)
        end_logits = accelerator.pad_across_processes(end_logits, dim=1, pad_index=-100)
        logits = (accelerator.gather(start_logits).cpu().numpy(), accelerator.gather(end_logits).cpu().numpy())
        all_preds = logits if all_preds is None else nested_concat(all_preds, logits, padding_index=-100)
    if all_preds is not None:
        all_preds = nested_truncate(all_preds, len(eval_dataset))
    evalTime = timeit.default_timer() - start_time
    logger.info(' Evaluation done in total %f secs (%f sec per example)', evalTime, evalTime / len(eval_dataset))
    # Inference time from TRT
    logger.info('Average Inference Time = {:.3f} ms'.format(total_time * 1000 / niter))
    logger.info('Total Inference Time = {:.3f} ms'.format(total_time * 1000))
    logger.info('Total Number of Inference = %d', niter)
    prediction = post_processing_function(eval_examples, eval_dataset, all_preds)
    eval_metric = metric.compute(predictions=prediction.predictions, references=prediction.label_ids)
logger.info(f'''Evaluation metrics: {eval_metric}''') | 697 | 1 |
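# Hedged aside (an illustration, not part of the script): the host/device
# buffer pattern above in one helper — a page-locked host array paired with a
# device allocation of the same size, enabling async copies on a CUDA stream.
def make_output_buffers(shape, dtype=np.float32):
    host = cuda.pagelocked_empty(shape, dtype=dtype)  # pinned host memory
    device = cuda.mem_alloc(host.nbytes)              # matching device buffer
    return host, device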
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class SCREAMING_SNAKE_CASE__ ( metaclass=__UpperCamelCase ):
lowercase__ = ["flax", "transformers"]
def __init__( self , *__UpperCamelCase , **__UpperCamelCase ):
'''simple docstring'''
requires_backends(self , ["""flax""", """transformers"""] )
@classmethod
def __lowerCamelCase ( cls , *__UpperCamelCase , **__UpperCamelCase ):
'''simple docstring'''
requires_backends(cls , ["""flax""", """transformers"""] )
@classmethod
def __lowerCamelCase ( cls , *__UpperCamelCase , **__UpperCamelCase ):
'''simple docstring'''
requires_backends(cls , ["""flax""", """transformers"""] )
class SCREAMING_SNAKE_CASE__ ( metaclass=__UpperCamelCase ):
lowercase__ = ["flax", "transformers"]
def __init__( self , *__UpperCamelCase , **__UpperCamelCase ):
'''simple docstring'''
requires_backends(self , ["""flax""", """transformers"""] )
@classmethod
def __lowerCamelCase ( cls , *__UpperCamelCase , **__UpperCamelCase ):
'''simple docstring'''
requires_backends(cls , ["""flax""", """transformers"""] )
@classmethod
def __lowerCamelCase ( cls , *__UpperCamelCase , **__UpperCamelCase ):
'''simple docstring'''
requires_backends(cls , ["""flax""", """transformers"""] )
class SCREAMING_SNAKE_CASE__ ( metaclass=__UpperCamelCase ):
lowercase__ = ["flax", "transformers"]
def __init__( self , *__UpperCamelCase , **__UpperCamelCase ):
'''simple docstring'''
requires_backends(self , ["""flax""", """transformers"""] )
@classmethod
def __lowerCamelCase ( cls , *__UpperCamelCase , **__UpperCamelCase ):
'''simple docstring'''
requires_backends(cls , ["""flax""", """transformers"""] )
@classmethod
def __lowerCamelCase ( cls , *__UpperCamelCase , **__UpperCamelCase ):
'''simple docstring'''
requires_backends(cls , ["""flax""", """transformers"""] )
class SCREAMING_SNAKE_CASE__ ( metaclass=__UpperCamelCase ):
lowercase__ = ["flax", "transformers"]
def __init__( self , *__UpperCamelCase , **__UpperCamelCase ):
'''simple docstring'''
requires_backends(self , ["""flax""", """transformers"""] )
@classmethod
def __lowerCamelCase ( cls , *__UpperCamelCase , **__UpperCamelCase ):
'''simple docstring'''
requires_backends(cls , ["""flax""", """transformers"""] )
@classmethod
def __lowerCamelCase ( cls , *__UpperCamelCase , **__UpperCamelCase ):
'''simple docstring'''
requires_backends(cls , ["""flax""", """transformers"""] ) | 697 |
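# Hedged sketch of the mechanism behind the placeholder classes above: a
# metaclass routes instantiation and attribute access through
# requires_backends, which raises an informative ImportError when the listed
# extras ("flax", "transformers") are missing. A minimal standalone analogue:
class _RequiresFlax(type):
    def __getattr__(cls, name):
        raise ImportError(f"{cls.__name__} requires the flax and transformers backends.")

class PlaceholderPipeline(metaclass=_RequiresFlax):
    def __init__(self, *args, **kwargs):
        raise ImportError(f"{type(self).__name__} requires the flax and transformers backends.")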
'''simple docstring'''
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNetaDModel
from ...schedulers import KarrasVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class SCREAMING_SNAKE_CASE__ ( __UpperCamelCase ):
lowercase__ = 42
lowercase__ = 42
def __init__( self , unet , scheduler ):
    '''simple docstring'''
    super().__init__()
    self.register_modules(unet=unet , scheduler=scheduler )
@torch.no_grad()
def __call__( self , batch_size = 1 , num_inference_steps = 50 , generator = None , output_type = "pil" , return_dict = True , **kwargs , ):
    '''simple docstring'''
    img_size = self.unet.config.sample_size
    shape = (batch_size, 3, img_size, img_size)
    model = self.unet
    # sample x_0 ~ N(0, sigma_0^2 * I)
    sample = randn_tensor(shape , generator=generator , device=self.device ) * self.scheduler.init_noise_sigma
    self.scheduler.set_timesteps(num_inference_steps )
    for t in self.progress_bar(self.scheduler.timesteps ):
        # here sigma_t == t_i from the paper
        sigma = self.scheduler.schedule[t]
        sigma_prev = self.scheduler.schedule[t - 1] if t > 0 else 0
        # 1. Select temporarily increased noise level sigma_hat
        # 2. Add new noise to move from sample_i to sample_hat
        sample_hat , sigma_hat = self.scheduler.add_noise_to_input(sample , sigma , generator=generator )
        # 3. Predict the noise residual given the noise magnitude `sigma_hat`
        # The model inputs and output are adjusted by following eq. (213) in [1].
        model_output = (sigma_hat / 2) * model((sample_hat + 1) / 2 , sigma_hat / 2 ).sample
        # 4. Evaluate dx/dt at sigma_hat
        # 5. Take Euler step from sigma to sigma_prev
        step_output = self.scheduler.step(model_output , sigma_hat , sigma_prev , sample_hat )
        if sigma_prev != 0:
            # 6. Apply 2nd order correction
            # The model inputs and output are adjusted by following eq. (213) in [1].
            model_output = (sigma_prev / 2) * model((step_output.prev_sample + 1) / 2 , sigma_prev / 2 ).sample
            step_output = self.scheduler.step_correct(
                model_output , sigma_hat , sigma_prev , sample_hat , step_output.prev_sample , step_output["""derivative"""] , )
        sample = step_output.prev_sample
    sample = (sample / 2 + 0.5).clamp(0 , 1 )
    image = sample.cpu().permute(0 , 2 , 3 , 1 ).numpy()
    if output_type == "pil":
        image = self.numpy_to_pil(image )
    if not return_dict:
        return (image,)
    return ImagePipelineOutput(images=image )
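# Hypothetical usage of the pipeline above (upstream this class is diffusers'
# KarrasVePipeline; the checkpoint name below is an assumption):
#
#   pipe = KarrasVePipeline.from_pretrained("google/ncsnpp-celebahq-256")
#   image = pipe(batch_size=1, num_inference_steps=50).images[0]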
'''simple docstring'''
import math
import sys
def _snake_case ( number: int ) -> int:
    if number != int(number ):
        raise ValueError("""the value of input must be a natural number""" )
    if number < 0:
        raise ValueError("""the value of input must not be a negative number""" )
    if number == 0:
        return 1
    answers = [-1] * (number + 1)
    answers[0] = 0
    for i in range(1 , number + 1 ):
        answer = sys.maxsize
        root = int(math.sqrt(i ) )
        for j in range(1 , root + 1 ):
            current_answer = 1 + answers[i - (j**2)]
            answer = min(answer , current_answer )
        answers[i] = answer
    return answers[number]
if __name__ == "__main__":
import doctest
doctest.testmod() | 697 |
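# Illustrative checks (not in the original): 12 = 4 + 4 + 4 needs three
# squares and 13 = 4 + 9 needs two; Lagrange's four-square theorem caps the
# answer at 4 for every natural number.
assert _snake_case(12) == 3
assert _snake_case(13) == 2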
'''simple docstring'''
def check_bouncy ( num ) -> bool:
    if not isinstance(num , int ):
        raise ValueError("""check_bouncy() accepts only integer arguments""" )
    str_n = str(num )
    sorted_str_n = """""".join(sorted(str_n ) )
    return sorted_str_n != str_n and sorted_str_n[::-1] != str_n
def solution ( percent = 99 ) -> int:
    if not 0 < percent < 100:
        raise ValueError("""solution() only accepts values from 0 to 100""" )
    bouncy_num = 0
    num = 1
    while True:
        if check_bouncy(num ):
            bouncy_num += 1
        if (bouncy_num / num) * 100 >= percent:
            return num
        num += 1
if __name__ == "__main__":
from doctest import testmod
testmod()
print(f'''{solution(99)}''') | 697 | 1 |
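# Worked examples (Project Euler 112): 134468 is increasing and 66420 is
# decreasing, so neither is bouncy, while 155349 is bouncy; solution(99) is
# expected to return 1587000.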
'''simple docstring'''
import pickle
import numpy as np
from matplotlib import pyplot as plt
class SCREAMING_SNAKE_CASE__ :
def __init__( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase=0.2 , __UpperCamelCase=0.2 ):
'''simple docstring'''
__a : int = bp_numa
__a : int = bp_numa
__a : int = bp_numa
__a : Dict = conva_get[:2]
__a : Tuple = conva_get[2]
__a : Tuple = size_pa
__a : int = rate_w
__a : Dict = rate_t
__a : Optional[Any] = [
np.mat(-1 * np.random.rand(self.conva[0] , self.conva[0] ) + 0.5 )
for i in range(self.conva[1] )
]
__a : Any = np.mat(-1 * np.random.rand(self.num_bpa , self.num_bpa ) + 0.5 )
__a : List[Any] = np.mat(-1 * np.random.rand(self.num_bpa , self.num_bpa ) + 0.5 )
__a : List[str] = -2 * np.random.rand(self.conva[1] ) + 1
__a : List[Any] = -2 * np.random.rand(self.num_bpa ) + 1
__a : Tuple = -2 * np.random.rand(self.num_bpa ) + 1
def __lowerCamelCase ( self , __UpperCamelCase ):
'''simple docstring'''
__a : Tuple = {
"""num_bp1""": self.num_bpa,
"""num_bp2""": self.num_bpa,
"""num_bp3""": self.num_bpa,
"""conv1""": self.conva,
"""step_conv1""": self.step_conva,
"""size_pooling1""": self.size_poolinga,
"""rate_weight""": self.rate_weight,
"""rate_thre""": self.rate_thre,
"""w_conv1""": self.w_conva,
"""wkj""": self.wkj,
"""vji""": self.vji,
"""thre_conv1""": self.thre_conva,
"""thre_bp2""": self.thre_bpa,
"""thre_bp3""": self.thre_bpa,
}
with open(__UpperCamelCase , """wb""" ) as f:
pickle.dump(__UpperCamelCase , __UpperCamelCase )
print(f"""Model saved: {save_path}""" )
@classmethod
def __lowerCamelCase ( cls , __UpperCamelCase ):
'''simple docstring'''
with open(__UpperCamelCase , """rb""" ) as f:
__a : Any = pickle.load(__UpperCamelCase ) # noqa: S301
__a : Union[str, Any] = model_dic.get("""conv1""" )
conv_get.append(model_dic.get("""step_conv1""" ) )
__a : Optional[int] = model_dic.get("""size_pooling1""" )
__a : int = model_dic.get("""num_bp1""" )
__a : Any = model_dic.get("""num_bp2""" )
__a : Tuple = model_dic.get("""num_bp3""" )
__a : Dict = model_dic.get("""rate_weight""" )
__a : int = model_dic.get("""rate_thre""" )
# create model instance
__a : Union[str, Any] = CNN(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
# modify model parameter
__a : Optional[int] = model_dic.get("""w_conv1""" )
__a : int = model_dic.get("""wkj""" )
__a : List[str] = model_dic.get("""vji""" )
__a : List[str] = model_dic.get("""thre_conv1""" )
__a : Union[str, Any] = model_dic.get("""thre_bp2""" )
__a : Optional[int] = model_dic.get("""thre_bp3""" )
return conv_ins
def __lowerCamelCase ( self , __UpperCamelCase ):
'''simple docstring'''
return 1 / (1 + np.exp(-1 * x ))
def __lowerCamelCase ( self , __UpperCamelCase ):
'''simple docstring'''
return round(__UpperCamelCase , 3 )
def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ):
'''simple docstring'''
__a : Optional[int] = convs[0]
__a : List[str] = convs[1]
__a : Tuple = np.shape(__UpperCamelCase )[0]
# get the data slice of original image data, data_focus
__a : Union[str, Any] = []
for i_focus in range(0 , size_data - size_conv + 1 , __UpperCamelCase ):
for j_focus in range(0 , size_data - size_conv + 1 , __UpperCamelCase ):
__a : int = data[
i_focus : i_focus + size_conv, j_focus : j_focus + size_conv
]
data_focus.append(__UpperCamelCase )
        # calculate the feature map of every single kernel, saved as a list of matrices
__a : int = []
__a : List[Any] = int((size_data - size_conv) / conv_step + 1 )
for i_map in range(__UpperCamelCase ):
__a : List[Any] = []
for i_focus in range(len(__UpperCamelCase ) ):
__a : Optional[Any] = (
np.sum(np.multiply(data_focus[i_focus] , w_convs[i_map] ) )
- thre_convs[i_map]
)
featuremap.append(self.sig(__UpperCamelCase ) )
__a : Any = np.asmatrix(__UpperCamelCase ).reshape(
__UpperCamelCase , __UpperCamelCase )
data_featuremap.append(__UpperCamelCase )
        # expand the data slice to one dimension
__a : Optional[int] = []
for each_focus in data_focus:
focusa_list.extend(self.Expand_Mat(__UpperCamelCase ) )
__a : str = np.asarray(__UpperCamelCase )
return focus_list, data_featuremap
def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase="average_pool" ):
'''simple docstring'''
__a : Union[str, Any] = len(featuremaps[0] )
__a : str = int(size_map / size_pooling )
__a : str = []
for i_map in range(len(__UpperCamelCase ) ):
__a : Optional[Any] = featuremaps[i_map]
__a : Optional[Any] = []
for i_focus in range(0 , __UpperCamelCase , __UpperCamelCase ):
for j_focus in range(0 , __UpperCamelCase , __UpperCamelCase ):
__a : List[str] = feature_map[
i_focus : i_focus + size_pooling,
j_focus : j_focus + size_pooling,
]
if pooling_type == "average_pool":
# average pooling
map_pooled.append(np.average(__UpperCamelCase ) )
elif pooling_type == "max_pooling":
# max pooling
map_pooled.append(np.max(__UpperCamelCase ) )
__a : List[Any] = np.asmatrix(__UpperCamelCase ).reshape(__UpperCamelCase , __UpperCamelCase )
featuremap_pooled.append(__UpperCamelCase )
return featuremap_pooled
def __lowerCamelCase ( self , __UpperCamelCase ):
'''simple docstring'''
__a : Tuple = []
for i in range(len(__UpperCamelCase ) ):
__a : Optional[Any] = np.shape(data[i] )
__a : Dict = data[i].reshape(1 , shapes[0] * shapes[1] )
__a : Dict = data_listed.getA().tolist()[0]
data_expanded.extend(__UpperCamelCase )
__a : List[Any] = np.asarray(__UpperCamelCase )
return data_expanded
def __lowerCamelCase ( self , __UpperCamelCase ):
'''simple docstring'''
__a : Dict = np.asarray(__UpperCamelCase )
__a : Tuple = np.shape(__UpperCamelCase )
__a : Tuple = data_mat.reshape(1 , shapes[0] * shapes[1] )
return data_expanded
def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ):
'''simple docstring'''
__a : List[str] = []
__a : Optional[Any] = 0
for i_map in range(__UpperCamelCase ):
__a : str = np.ones((size_map, size_map) )
for i in range(0 , __UpperCamelCase , __UpperCamelCase ):
for j in range(0 , __UpperCamelCase , __UpperCamelCase ):
__a : Optional[int] = pd_pool[
i_pool
]
__a : Optional[Any] = i_pool + 1
__a : Optional[int] = np.multiply(
__UpperCamelCase , np.multiply(out_map[i_map] , (1 - out_map[i_map]) ) )
pd_all.append(__UpperCamelCase )
return pd_all
def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase=bool ):
'''simple docstring'''
print("""----------------------Start Training-------------------------""" )
print((""" - - Shape: Train_Data """, np.shape(__UpperCamelCase )) )
print((""" - - Shape: Teach_Data """, np.shape(__UpperCamelCase )) )
__a : Union[str, Any] = 0
__a : List[str] = []
__a : Optional[int] = 1_0000
while rp < n_repeat and mse >= error_accuracy:
__a : Dict = 0
print(f"""-------------Learning Time {rp}--------------""" )
for p in range(len(__UpperCamelCase ) ):
# print('------------Learning Image: %d--------------'%p)
__a : Optional[int] = np.asmatrix(datas_train[p] )
__a : Dict = np.asarray(datas_teach[p] )
__a , __a : List[str] = self.convolute(
__UpperCamelCase , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , )
__a : Tuple = self.pooling(__UpperCamelCase , self.size_poolinga )
__a : int = np.shape(__UpperCamelCase )
__a : Tuple = self._expand(__UpperCamelCase )
__a : Tuple = data_bp_input
__a : Dict = np.dot(__UpperCamelCase , self.vji.T ) - self.thre_bpa
__a : int = self.sig(__UpperCamelCase )
__a : List[Any] = np.dot(__UpperCamelCase , self.wkj.T ) - self.thre_bpa
__a : str = self.sig(__UpperCamelCase )
                # --------------Model Learning ------------------------
# calculate error and gradient---------------
__a : List[str] = np.multiply(
(data_teach - bp_outa) , np.multiply(__UpperCamelCase , (1 - bp_outa) ) )
__a : List[str] = np.multiply(
np.dot(__UpperCamelCase , self.wkj ) , np.multiply(__UpperCamelCase , (1 - bp_outa) ) )
__a : str = np.dot(__UpperCamelCase , self.vji )
__a : Optional[int] = pd_i_all / (self.size_poolinga * self.size_poolinga)
__a : str = pd_conva_pooled.T.getA().tolist()
__a : Tuple = self._calculate_gradient_from_pool(
__UpperCamelCase , __UpperCamelCase , shape_featuremapa[0] , shape_featuremapa[1] , self.size_poolinga , )
# weight and threshold learning process---------
# convolution layer
for k_conv in range(self.conva[1] ):
__a : str = self._expand_mat(pd_conva_all[k_conv] )
__a : Dict = self.rate_weight * np.dot(__UpperCamelCase , __UpperCamelCase )
__a : int = self.w_conva[k_conv] + delta_w.reshape(
(self.conva[0], self.conva[0]) )
__a : Tuple = (
self.thre_conva[k_conv]
- np.sum(pd_conva_all[k_conv] ) * self.rate_thre
)
# all connected layer
__a : Tuple = self.wkj + pd_k_all.T * bp_outa * self.rate_weight
__a : List[Any] = self.vji + pd_j_all.T * bp_outa * self.rate_weight
__a : Any = self.thre_bpa - pd_k_all * self.rate_thre
__a : Dict = self.thre_bpa - pd_j_all * self.rate_thre
                # calculate the summed error over this single image
__a : int = np.sum(abs(data_teach - bp_outa ) )
error_count += errors
# print(' ----Teach ',data_teach)
# print(' ----BP_output ',bp_out3)
__a : List[Any] = rp + 1
__a : Dict = error_count / patterns
all_mse.append(__UpperCamelCase )
def draw_error():
__a : List[Any] = [error_accuracy for i in range(int(n_repeat * 1.2 ) )]
plt.plot(__UpperCamelCase , """+-""" )
plt.plot(__UpperCamelCase , """r--""" )
plt.xlabel("""Learning Times""" )
plt.ylabel("""All_mse""" )
plt.grid(__UpperCamelCase , alpha=0.5 )
plt.show()
print("""------------------Training Complished---------------------""" )
print((""" - - Training epoch: """, rp, f""" - - Mse: {mse:.6f}""") )
if draw_e:
draw_error()
return mse
def __lowerCamelCase ( self , __UpperCamelCase ):
'''simple docstring'''
__a : Optional[int] = []
print("""-------------------Start Testing-------------------------""" )
print((""" - - Shape: Test_Data """, np.shape(__UpperCamelCase )) )
for p in range(len(__UpperCamelCase ) ):
__a : Tuple = np.asmatrix(datas_test[p] )
__a , __a : Tuple = self.convolute(
__UpperCamelCase , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , )
__a : Optional[int] = self.pooling(__UpperCamelCase , self.size_poolinga )
__a : Any = self._expand(__UpperCamelCase )
__a : Tuple = data_bp_input
__a : Union[str, Any] = bp_outa * self.vji.T - self.thre_bpa
__a : Tuple = self.sig(__UpperCamelCase )
__a : Any = bp_outa * self.wkj.T - self.thre_bpa
__a : List[str] = self.sig(__UpperCamelCase )
produce_out.extend(bp_outa.getA().tolist() )
__a : str = [list(map(self.do_round , __UpperCamelCase ) ) for each in produce_out]
return np.asarray(__UpperCamelCase )
def __lowerCamelCase ( self , __UpperCamelCase ):
'''simple docstring'''
__a : Dict = np.asmatrix(__UpperCamelCase )
__a , __a : int = self.convolute(
__UpperCamelCase , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , )
__a : Optional[int] = self.pooling(__UpperCamelCase , self.size_poolinga )
return data_conveda, data_pooleda
if __name__ == "__main__":
pass | 697 |
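# --- Illustrative usage sketch (not part of the original file) ---
# Argument order mirrors the classmethod loader above (conv spec
# [kernel_size, n_kernels, conv_step], pooling size, three BP layer widths,
# then the two learning rates); every concrete value is a placeholder
# assumption, and the BP widths must match the flattened pooled feature size.
#
#   cnn = CNN([3, 2, 1], 4, 20, 10, 9, 0.2, 0.2)
#   mse = cnn.train(len(datas_train), datas_train, datas_teach, 100, 0.05)
#   # (`train`/`predict` are the upstream names of the obfuscated methods above)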
'''simple docstring'''
import argparse
import torch
from transformers import GPTaConfig, GPTaModel, load_tf_weights_in_gpta
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def _snake_case ( lowercase , lowercase , lowercase ) -> Any:
# Construct model
if gpta_config_file == "":
__a : Dict = GPTaConfig()
else:
__a : Optional[Any] = GPTaConfig.from_json_file(lowercase )
__a : Union[str, Any] = GPTaModel(lowercase )
# Load weights from numpy
load_tf_weights_in_gpta(lowercase , lowercase , lowercase )
# Save pytorch-model
__a : Optional[int] = pytorch_dump_folder_path + """/""" + WEIGHTS_NAME
__a : Dict = pytorch_dump_folder_path + """/""" + CONFIG_NAME
print(F"""Save PyTorch model to {pytorch_weights_dump_path}""" )
torch.save(model.state_dict() , lowercase )
print(F"""Save configuration file to {pytorch_config_dump_path}""" )
with open(lowercase , """w""" , encoding="""utf-8""" ) as f:
f.write(config.to_json_string() )
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE : Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--gpt2_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--gpt2_config_file',
default='',
type=str,
help=(
'An optional config json file corresponding to the pre-trained OpenAI model. \n'
'This specifies the model architecture.'
),
)
__SCREAMING_SNAKE_CASE : Dict = parser.parse_args()
    convert_gpta_checkpoint_to_pytorch(args.gpt2_checkpoint_path, args.gpt2_config_file, args.pytorch_dump_folder_path)
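# Example invocation (the script filename and all paths are placeholder
# assumptions; the flag names come from the argparse setup above):
#   python convert_gpt2_checkpoint_to_pytorch.py \
#       --gpt2_checkpoint_path /path/to/tf_checkpoint \
#       --pytorch_dump_folder_path /path/to/output_dir \
#       --gpt2_config_file /path/to/config.json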
'''simple docstring'''
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...file_utils import TensorType, is_torch_available
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
__SCREAMING_SNAKE_CASE : Dict = logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE : List[str] = {
'facebook/blenderbot_small-90M': 'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/config.json',
# See all BlenderbotSmall models at https://huggingface.co/models?filter=blenderbot_small
}
class SCREAMING_SNAKE_CASE__ ( __UpperCamelCase ):
lowercase__ = "blenderbot-small"
lowercase__ = ["past_key_values"]
lowercase__ = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}
def __init__( self , __UpperCamelCase=5_0265 , __UpperCamelCase=512 , __UpperCamelCase=8 , __UpperCamelCase=2048 , __UpperCamelCase=16 , __UpperCamelCase=8 , __UpperCamelCase=2048 , __UpperCamelCase=16 , __UpperCamelCase=0.0 , __UpperCamelCase=0.0 , __UpperCamelCase=True , __UpperCamelCase=True , __UpperCamelCase="gelu" , __UpperCamelCase=512 , __UpperCamelCase=0.1 , __UpperCamelCase=0.0 , __UpperCamelCase=0.0 , __UpperCamelCase=0.0_2 , __UpperCamelCase=1 , __UpperCamelCase=False , __UpperCamelCase=0 , __UpperCamelCase=1 , __UpperCamelCase=2 , __UpperCamelCase=2 , **__UpperCamelCase , ):
'''simple docstring'''
__a : str = vocab_size
__a : int = max_position_embeddings
__a : List[str] = d_model
__a : List[str] = encoder_ffn_dim
__a : List[Any] = encoder_layers
__a : int = encoder_attention_heads
__a : Tuple = decoder_ffn_dim
__a : str = decoder_layers
__a : Dict = decoder_attention_heads
__a : Optional[int] = dropout
__a : str = attention_dropout
__a : Dict = activation_dropout
__a : Optional[Any] = activation_function
__a : List[Any] = init_std
__a : Any = encoder_layerdrop
__a : List[Any] = decoder_layerdrop
__a : Optional[Any] = use_cache
__a : Optional[int] = encoder_layers
__a : Optional[Any] = scale_embedding # scale factor will be sqrt(d_model) if True
super().__init__(
pad_token_id=__UpperCamelCase , bos_token_id=__UpperCamelCase , eos_token_id=__UpperCamelCase , is_encoder_decoder=__UpperCamelCase , decoder_start_token_id=__UpperCamelCase , forced_eos_token_id=__UpperCamelCase , **__UpperCamelCase , )
class SCREAMING_SNAKE_CASE__ ( __UpperCamelCase ):
@property
def __lowerCamelCase ( self ):
'''simple docstring'''
if self.task in ["default", "seq2seq-lm"]:
__a : str = OrderedDict(
[
("""input_ids""", {0: """batch""", 1: """encoder_sequence"""}),
("""attention_mask""", {0: """batch""", 1: """encoder_sequence"""}),
] )
if self.use_past:
__a : Optional[int] = {0: """batch"""}
__a : Union[str, Any] = {0: """batch""", 1: """past_decoder_sequence + sequence"""}
else:
__a : List[Any] = {0: """batch""", 1: """decoder_sequence"""}
__a : List[str] = {0: """batch""", 1: """decoder_sequence"""}
if self.use_past:
self.fill_with_past_key_values_(__UpperCamelCase , direction="""inputs""" )
elif self.task == "causal-lm":
# TODO: figure this case out.
__a : Tuple = OrderedDict(
[
("""input_ids""", {0: """batch""", 1: """encoder_sequence"""}),
("""attention_mask""", {0: """batch""", 1: """encoder_sequence"""}),
] )
if self.use_past:
__a , __a : Union[str, Any] = self.num_layers
for i in range(__UpperCamelCase ):
__a : Tuple = {0: """batch""", 2: """past_sequence + sequence"""}
__a : Optional[int] = {0: """batch""", 2: """past_sequence + sequence"""}
else:
__a : Optional[Any] = OrderedDict(
[
("""input_ids""", {0: """batch""", 1: """encoder_sequence"""}),
("""attention_mask""", {0: """batch""", 1: """encoder_sequence"""}),
("""decoder_input_ids""", {0: """batch""", 1: """decoder_sequence"""}),
("""decoder_attention_mask""", {0: """batch""", 1: """decoder_sequence"""}),
] )
return common_inputs
@property
def __lowerCamelCase ( self ):
'''simple docstring'''
if self.task in ["default", "seq2seq-lm"]:
__a : Optional[Any] = super().outputs
else:
__a : Any = super(__UpperCamelCase , self ).outputs
if self.use_past:
__a , __a : int = self.num_layers
for i in range(__UpperCamelCase ):
__a : Any = {0: """batch""", 2: """past_sequence + sequence"""}
__a : Union[str, Any] = {0: """batch""", 2: """past_sequence + sequence"""}
return common_outputs
def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase = -1 , __UpperCamelCase = -1 , __UpperCamelCase = False , __UpperCamelCase = None , ):
'''simple docstring'''
__a : Any = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
# Generate decoder inputs
__a : Optional[int] = seq_length if not self.use_past else 1
__a : List[Any] = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
__a : Optional[int] = {f"""decoder_{name}""": tensor for name, tensor in decoder_inputs.items()}
__a : Dict = dict(**__UpperCamelCase , **__UpperCamelCase )
if self.use_past:
if not is_torch_available():
raise ValueError("""Cannot generate dummy past_keys inputs without PyTorch installed.""" )
else:
import torch
__a , __a : Any = common_inputs["""input_ids"""].shape
__a : str = common_inputs["""decoder_input_ids"""].shape[1]
__a , __a : Dict = self.num_attention_heads
__a : int = (
batch,
num_encoder_attention_heads,
encoder_seq_length,
self._config.hidden_size // num_encoder_attention_heads,
)
__a : Dict = decoder_seq_length + 3
__a : Optional[int] = (
batch,
num_decoder_attention_heads,
decoder_past_length,
self._config.hidden_size // num_decoder_attention_heads,
)
__a : str = torch.cat(
[common_inputs["""decoder_attention_mask"""], torch.ones(__UpperCamelCase , __UpperCamelCase )] , dim=1 )
__a : Union[str, Any] = []
                # If the numbers of encoder and decoder layers are present in the model configuration, both are considered
__a , __a : int = self.num_layers
__a : Dict = min(__UpperCamelCase , __UpperCamelCase )
__a : Optional[Any] = max(__UpperCamelCase , __UpperCamelCase ) - min_num_layers
__a : Optional[int] = """encoder""" if num_encoder_layers > num_decoder_layers else """decoder"""
for _ in range(__UpperCamelCase ):
common_inputs["past_key_values"].append(
(
torch.zeros(__UpperCamelCase ),
torch.zeros(__UpperCamelCase ),
torch.zeros(__UpperCamelCase ),
torch.zeros(__UpperCamelCase ),
) )
# TODO: test this.
__a : List[Any] = encoder_shape if remaining_side_name == """encoder""" else decoder_shape
for _ in range(__UpperCamelCase , __UpperCamelCase ):
common_inputs["past_key_values"].append((torch.zeros(__UpperCamelCase ), torch.zeros(__UpperCamelCase )) )
return common_inputs
def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase = -1 , __UpperCamelCase = -1 , __UpperCamelCase = False , __UpperCamelCase = None , ):
'''simple docstring'''
__a : Union[str, Any] = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
if self.use_past:
if not is_torch_available():
raise ValueError("""Cannot generate dummy past_keys inputs without PyTorch installed.""" )
else:
import torch
__a , __a : Tuple = common_inputs["""input_ids"""].shape
# Not using the same length for past_key_values
__a : List[Any] = seqlen + 2
__a , __a : Union[str, Any] = self.num_layers
__a , __a : List[str] = self.num_attention_heads
__a : int = (
batch,
num_encoder_attention_heads,
past_key_values_length,
self._config.hidden_size // num_encoder_attention_heads,
)
__a : Optional[Any] = common_inputs["""attention_mask"""].dtype
__a : Any = torch.cat(
[common_inputs["""attention_mask"""], torch.ones(__UpperCamelCase , __UpperCamelCase , dtype=__UpperCamelCase )] , dim=1 )
__a : int = [
(torch.zeros(__UpperCamelCase ), torch.zeros(__UpperCamelCase )) for _ in range(__UpperCamelCase )
]
return common_inputs
def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase = -1 , __UpperCamelCase = -1 , __UpperCamelCase = False , __UpperCamelCase = None , ):
'''simple docstring'''
__a : Optional[int] = compute_effective_axis_dimension(
__UpperCamelCase , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
__a : Dict = tokenizer.num_special_tokens_to_add(__UpperCamelCase )
__a : Tuple = compute_effective_axis_dimension(
__UpperCamelCase , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=__UpperCamelCase )
        # Generate dummy inputs according to the computed batch and sequence lengths
__a : Optional[Any] = [""" """.join([tokenizer.unk_token] ) * seq_length] * batch_size
__a : Dict = dict(tokenizer(__UpperCamelCase , return_tensors=__UpperCamelCase ) )
return common_inputs
def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase = -1 , __UpperCamelCase = -1 , __UpperCamelCase = False , __UpperCamelCase = None , ):
'''simple docstring'''
if self.task in ["default", "seq2seq-lm"]:
__a : Tuple = self._generate_dummy_inputs_for_default_and_seqaseq_lm(
__UpperCamelCase , batch_size=__UpperCamelCase , seq_length=__UpperCamelCase , is_pair=__UpperCamelCase , framework=__UpperCamelCase )
elif self.task == "causal-lm":
__a : str = self._generate_dummy_inputs_for_causal_lm(
__UpperCamelCase , batch_size=__UpperCamelCase , seq_length=__UpperCamelCase , is_pair=__UpperCamelCase , framework=__UpperCamelCase )
else:
__a : Any = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
__UpperCamelCase , batch_size=__UpperCamelCase , seq_length=__UpperCamelCase , is_pair=__UpperCamelCase , framework=__UpperCamelCase )
return common_inputs
def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ):
'''simple docstring'''
if self.task in ["default", "seq2seq-lm"]:
__a : Union[str, Any] = super()._flatten_past_key_values_(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
else:
__a : Union[str, Any] = super(__UpperCamelCase , self )._flatten_past_key_values_(
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) | 697 |
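# --- Illustrative export sketch (not part of the original file) ---
# One way to drive the seq2seq ONNX config above; every concrete name here is
# an assumption rather than the canonical export recipe:
#
#   from pathlib import Path
#   from transformers.onnx import export
#   onnx_config = SCREAMING_SNAKE_CASE__(model.config, task="seq2seq-lm")
#   export(tokenizer, model, onnx_config, onnx_config.default_onnx_opset, Path("bbs.onnx"))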
'''simple docstring'''
import unittest
from transformers import (
MODEL_FOR_OBJECT_DETECTION_MAPPING,
AutoFeatureExtractor,
AutoModelForObjectDetection,
ObjectDetectionPipeline,
is_vision_available,
pipeline,
)
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_pytesseract,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class SCREAMING_SNAKE_CASE__ :
@staticmethod
def __lowerCamelCase ( *__UpperCamelCase , **__UpperCamelCase ):
'''simple docstring'''
pass
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
lowercase__ = MODEL_FOR_OBJECT_DETECTION_MAPPING
def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ):
'''simple docstring'''
__a : Optional[Any] = ObjectDetectionPipeline(model=__UpperCamelCase , image_processor=__UpperCamelCase )
return object_detector, ["./tests/fixtures/tests_samples/COCO/000000039769.png"]
def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase ):
'''simple docstring'''
__a : List[str] = object_detector("""./tests/fixtures/tests_samples/COCO/000000039769.png""" , threshold=0.0 )
self.assertGreater(len(__UpperCamelCase ) , 0 )
for detected_object in outputs:
self.assertEqual(
__UpperCamelCase , {
"""score""": ANY(__UpperCamelCase ),
"""label""": ANY(__UpperCamelCase ),
"""box""": {"""xmin""": ANY(__UpperCamelCase ), """ymin""": ANY(__UpperCamelCase ), """xmax""": ANY(__UpperCamelCase ), """ymax""": ANY(__UpperCamelCase )},
} , )
import datasets
__a : Optional[int] = datasets.load_dataset("""hf-internal-testing/fixtures_image_utils""" , """image""" , split="""test""" )
__a : Tuple = [
Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ),
"""http://images.cocodataset.org/val2017/000000039769.jpg""",
# RGBA
dataset[0]["""file"""],
# LA
dataset[1]["""file"""],
# L
dataset[2]["""file"""],
]
__a : Any = object_detector(__UpperCamelCase , threshold=0.0 )
self.assertEqual(len(__UpperCamelCase ) , len(__UpperCamelCase ) )
for outputs in batch_outputs:
self.assertGreater(len(__UpperCamelCase ) , 0 )
for detected_object in outputs:
self.assertEqual(
__UpperCamelCase , {
"""score""": ANY(__UpperCamelCase ),
"""label""": ANY(__UpperCamelCase ),
"""box""": {"""xmin""": ANY(__UpperCamelCase ), """ymin""": ANY(__UpperCamelCase ), """xmax""": ANY(__UpperCamelCase ), """ymax""": ANY(__UpperCamelCase )},
} , )
@require_tf
@unittest.skip("""Object detection not implemented in TF""" )
def __lowerCamelCase ( self ):
'''simple docstring'''
pass
@require_torch
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Optional[Any] = """hf-internal-testing/tiny-detr-mobilenetsv3"""
__a : Dict = AutoModelForObjectDetection.from_pretrained(__UpperCamelCase )
__a : Optional[Any] = AutoFeatureExtractor.from_pretrained(__UpperCamelCase )
__a : str = ObjectDetectionPipeline(model=__UpperCamelCase , feature_extractor=__UpperCamelCase )
__a : Optional[int] = object_detector("""http://images.cocodataset.org/val2017/000000039769.jpg""" , threshold=0.0 )
self.assertEqual(
nested_simplify(__UpperCamelCase , decimals=4 ) , [
{"""score""": 0.3_3_7_6, """label""": """LABEL_0""", """box""": {"""xmin""": 159, """ymin""": 120, """xmax""": 480, """ymax""": 359}},
{"""score""": 0.3_3_7_6, """label""": """LABEL_0""", """box""": {"""xmin""": 159, """ymin""": 120, """xmax""": 480, """ymax""": 359}},
] , )
__a : Union[str, Any] = object_detector(
[
"""http://images.cocodataset.org/val2017/000000039769.jpg""",
"""http://images.cocodataset.org/val2017/000000039769.jpg""",
] , threshold=0.0 , )
self.assertEqual(
nested_simplify(__UpperCamelCase , decimals=4 ) , [
[
{"""score""": 0.3_3_7_6, """label""": """LABEL_0""", """box""": {"""xmin""": 159, """ymin""": 120, """xmax""": 480, """ymax""": 359}},
{"""score""": 0.3_3_7_6, """label""": """LABEL_0""", """box""": {"""xmin""": 159, """ymin""": 120, """xmax""": 480, """ymax""": 359}},
],
[
{"""score""": 0.3_3_7_6, """label""": """LABEL_0""", """box""": {"""xmin""": 159, """ymin""": 120, """xmax""": 480, """ymax""": 359}},
{"""score""": 0.3_3_7_6, """label""": """LABEL_0""", """box""": {"""xmin""": 159, """ymin""": 120, """xmax""": 480, """ymax""": 359}},
],
] , )
@require_torch
@slow
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : str = """facebook/detr-resnet-50"""
__a : Dict = AutoModelForObjectDetection.from_pretrained(__UpperCamelCase )
__a : int = AutoFeatureExtractor.from_pretrained(__UpperCamelCase )
__a : int = ObjectDetectionPipeline(model=__UpperCamelCase , feature_extractor=__UpperCamelCase )
__a : Any = object_detector("""http://images.cocodataset.org/val2017/000000039769.jpg""" )
self.assertEqual(
nested_simplify(__UpperCamelCase , decimals=4 ) , [
{"""score""": 0.9_9_8_2, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 70, """xmax""": 175, """ymax""": 117}},
{"""score""": 0.9_9_6_0, """label""": """remote""", """box""": {"""xmin""": 333, """ymin""": 72, """xmax""": 368, """ymax""": 187}},
{"""score""": 0.9_9_5_5, """label""": """couch""", """box""": {"""xmin""": 0, """ymin""": 1, """xmax""": 639, """ymax""": 473}},
{"""score""": 0.9_9_8_8, """label""": """cat""", """box""": {"""xmin""": 13, """ymin""": 52, """xmax""": 314, """ymax""": 470}},
{"""score""": 0.9_9_8_7, """label""": """cat""", """box""": {"""xmin""": 345, """ymin""": 23, """xmax""": 640, """ymax""": 368}},
] , )
__a : Optional[Any] = object_detector(
[
"""http://images.cocodataset.org/val2017/000000039769.jpg""",
"""http://images.cocodataset.org/val2017/000000039769.jpg""",
] )
self.assertEqual(
nested_simplify(__UpperCamelCase , decimals=4 ) , [
[
{"""score""": 0.9_9_8_2, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 70, """xmax""": 175, """ymax""": 117}},
{"""score""": 0.9_9_6_0, """label""": """remote""", """box""": {"""xmin""": 333, """ymin""": 72, """xmax""": 368, """ymax""": 187}},
{"""score""": 0.9_9_5_5, """label""": """couch""", """box""": {"""xmin""": 0, """ymin""": 1, """xmax""": 639, """ymax""": 473}},
{"""score""": 0.9_9_8_8, """label""": """cat""", """box""": {"""xmin""": 13, """ymin""": 52, """xmax""": 314, """ymax""": 470}},
{"""score""": 0.9_9_8_7, """label""": """cat""", """box""": {"""xmin""": 345, """ymin""": 23, """xmax""": 640, """ymax""": 368}},
],
[
{"""score""": 0.9_9_8_2, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 70, """xmax""": 175, """ymax""": 117}},
{"""score""": 0.9_9_6_0, """label""": """remote""", """box""": {"""xmin""": 333, """ymin""": 72, """xmax""": 368, """ymax""": 187}},
{"""score""": 0.9_9_5_5, """label""": """couch""", """box""": {"""xmin""": 0, """ymin""": 1, """xmax""": 639, """ymax""": 473}},
{"""score""": 0.9_9_8_8, """label""": """cat""", """box""": {"""xmin""": 13, """ymin""": 52, """xmax""": 314, """ymax""": 470}},
{"""score""": 0.9_9_8_7, """label""": """cat""", """box""": {"""xmin""": 345, """ymin""": 23, """xmax""": 640, """ymax""": 368}},
],
] , )
@require_torch
@slow
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : int = """facebook/detr-resnet-50"""
__a : Optional[int] = pipeline("""object-detection""" , model=__UpperCamelCase )
__a : Optional[int] = object_detector("""http://images.cocodataset.org/val2017/000000039769.jpg""" )
self.assertEqual(
nested_simplify(__UpperCamelCase , decimals=4 ) , [
{"""score""": 0.9_9_8_2, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 70, """xmax""": 175, """ymax""": 117}},
{"""score""": 0.9_9_6_0, """label""": """remote""", """box""": {"""xmin""": 333, """ymin""": 72, """xmax""": 368, """ymax""": 187}},
{"""score""": 0.9_9_5_5, """label""": """couch""", """box""": {"""xmin""": 0, """ymin""": 1, """xmax""": 639, """ymax""": 473}},
{"""score""": 0.9_9_8_8, """label""": """cat""", """box""": {"""xmin""": 13, """ymin""": 52, """xmax""": 314, """ymax""": 470}},
{"""score""": 0.9_9_8_7, """label""": """cat""", """box""": {"""xmin""": 345, """ymin""": 23, """xmax""": 640, """ymax""": 368}},
] , )
__a : List[str] = object_detector(
[
"""http://images.cocodataset.org/val2017/000000039769.jpg""",
"""http://images.cocodataset.org/val2017/000000039769.jpg""",
] )
self.assertEqual(
nested_simplify(__UpperCamelCase , decimals=4 ) , [
[
{"""score""": 0.9_9_8_2, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 70, """xmax""": 175, """ymax""": 117}},
{"""score""": 0.9_9_6_0, """label""": """remote""", """box""": {"""xmin""": 333, """ymin""": 72, """xmax""": 368, """ymax""": 187}},
{"""score""": 0.9_9_5_5, """label""": """couch""", """box""": {"""xmin""": 0, """ymin""": 1, """xmax""": 639, """ymax""": 473}},
{"""score""": 0.9_9_8_8, """label""": """cat""", """box""": {"""xmin""": 13, """ymin""": 52, """xmax""": 314, """ymax""": 470}},
{"""score""": 0.9_9_8_7, """label""": """cat""", """box""": {"""xmin""": 345, """ymin""": 23, """xmax""": 640, """ymax""": 368}},
],
[
{"""score""": 0.9_9_8_2, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 70, """xmax""": 175, """ymax""": 117}},
{"""score""": 0.9_9_6_0, """label""": """remote""", """box""": {"""xmin""": 333, """ymin""": 72, """xmax""": 368, """ymax""": 187}},
{"""score""": 0.9_9_5_5, """label""": """couch""", """box""": {"""xmin""": 0, """ymin""": 1, """xmax""": 639, """ymax""": 473}},
{"""score""": 0.9_9_8_8, """label""": """cat""", """box""": {"""xmin""": 13, """ymin""": 52, """xmax""": 314, """ymax""": 470}},
{"""score""": 0.9_9_8_7, """label""": """cat""", """box""": {"""xmin""": 345, """ymin""": 23, """xmax""": 640, """ymax""": 368}},
],
] , )
@require_torch
@slow
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Union[str, Any] = 0.9_9_8_5
__a : Union[str, Any] = """facebook/detr-resnet-50"""
__a : Optional[int] = pipeline("""object-detection""" , model=__UpperCamelCase )
__a : Union[str, Any] = object_detector("""http://images.cocodataset.org/val2017/000000039769.jpg""" , threshold=__UpperCamelCase )
self.assertEqual(
nested_simplify(__UpperCamelCase , decimals=4 ) , [
{"""score""": 0.9_9_8_8, """label""": """cat""", """box""": {"""xmin""": 13, """ymin""": 52, """xmax""": 314, """ymax""": 470}},
{"""score""": 0.9_9_8_7, """label""": """cat""", """box""": {"""xmin""": 345, """ymin""": 23, """xmax""": 640, """ymax""": 368}},
] , )
@require_torch
@require_pytesseract
@slow
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : str = """Narsil/layoutlmv3-finetuned-funsd"""
__a : List[Any] = 0.9_9_9_3
__a : Dict = pipeline("""object-detection""" , model=__UpperCamelCase , threshold=__UpperCamelCase )
__a : List[str] = object_detector(
"""https://huggingface.co/spaces/impira/docquery/resolve/2359223c1837a7587402bda0f2643382a6eefeab/invoice.png""" )
self.assertEqual(
nested_simplify(__UpperCamelCase , decimals=4 ) , [
{"""score""": 0.9_9_9_3, """label""": """I-ANSWER""", """box""": {"""xmin""": 294, """ymin""": 254, """xmax""": 343, """ymax""": 264}},
{"""score""": 0.9_9_9_3, """label""": """I-ANSWER""", """box""": {"""xmin""": 294, """ymin""": 254, """xmax""": 343, """ymax""": 264}},
] , ) | 697 | 1 |
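# Illustrative: the suite above is collected by pytest, e.g.
#   python -m pytest <path-to-this-file>
# (the @slow tests additionally need RUN_SLOW=1 in the environment).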
'''simple docstring'''
import logging
import os
from typing import List, Tuple
import numpy as np
import psutil
import torch
import torch.distributed as dist
from transformers import RagRetriever
__SCREAMING_SNAKE_CASE : Any = logging.getLogger(__name__)
class SCREAMING_SNAKE_CASE__ ( __UpperCamelCase ):
def __init__( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase=None ):
'''simple docstring'''
super().__init__(
__UpperCamelCase , question_encoder_tokenizer=__UpperCamelCase , generator_tokenizer=__UpperCamelCase , index=__UpperCamelCase , init_retrieval=__UpperCamelCase , )
__a : int = None
def __lowerCamelCase ( self , __UpperCamelCase ):
'''simple docstring'''
logger.info("""initializing retrieval""" )
# initializing a separate process group for retrieval as the default
# nccl backend doesn't support gather/scatter operations while gloo
# is too slow to replace nccl for the core gpu communication
if dist.is_initialized():
logger.info("""dist initialized""" )
# needs to be set manually
__a : Any = self._infer_socket_ifname()
# avoid clash with the NCCL port
__a : Any = str(distributed_port + 1 )
__a : Optional[int] = dist.new_group(ranks=__UpperCamelCase , backend="""gloo""" )
# initialize retriever only on the main worker
if not dist.is_initialized() or self._is_main():
logger.info("""dist not initialized / main""" )
self.index.init_index()
        # all processes wait until the retriever is initialized by the main process
if dist.is_initialized():
torch.distributed.barrier(group=self.process_group )
def __lowerCamelCase ( self ):
'''simple docstring'''
return dist.get_rank(group=self.process_group ) == 0
def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase=torch.floataa ):
'''simple docstring'''
__a : Tuple = torch.empty(__UpperCamelCase , dtype=__UpperCamelCase )
dist.scatter(__UpperCamelCase , src=0 , scatter_list=__UpperCamelCase , group=self.process_group )
return target_tensor
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : int = psutil.net_if_addrs()
# a hacky way to deal with varying network interface names
__a : int = next((addr for addr in addrs if addr.startswith("""e""" )) , __UpperCamelCase )
return ifname
def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase ):
'''simple docstring'''
if not dist.is_initialized():
__a , __a : List[Any] = self._main_retrieve(__UpperCamelCase , __UpperCamelCase )
return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(__UpperCamelCase )
# distributed training
__a : List[Any] = dist.get_world_size(group=self.process_group )
# gather logic
__a : str = None
if self._is_main():
__a : List[str] = [torch.empty(question_hidden_states.shape , dtype=torch.floataa ) for _ in range(__UpperCamelCase )]
dist.gather(torch.tensor(__UpperCamelCase ) , dst=0 , gather_list=__UpperCamelCase , group=self.process_group )
# scatter logic
__a : Optional[Any] = question_hidden_states.shape[0]
__a : List[str] = []
__a : List[Any] = []
if self._is_main():
assert len(__UpperCamelCase ) == world_size
__a , __a : Tuple = self._main_retrieve(torch.cat(__UpperCamelCase ).numpy() , __UpperCamelCase )
__a , __a : Optional[Any] = torch.tensor(__UpperCamelCase ), torch.tensor(__UpperCamelCase )
__a : str = self._chunk_tensor(__UpperCamelCase , __UpperCamelCase )
__a : Any = self._chunk_tensor(__UpperCamelCase , __UpperCamelCase )
__a : Union[str, Any] = self._scattered(__UpperCamelCase , [n_queries, n_docs] , target_type=torch.intaa )
__a : Any = self._scattered(__UpperCamelCase , [n_queries, n_docs, question_hidden_states.shape[1]] )
return retrieved_doc_embeds.numpy(), doc_ids.numpy(), self.index.get_doc_dicts(__UpperCamelCase ) | 697 |
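# --- Illustrative usage sketch (not part of the original file) ---
# Rank 0 owns the index; the other ranks ship their question embeddings to it
# over the dedicated gloo group created above. All names below are assumptions
# (`init_retrieval`/`retrieve` are the upstream names of the obfuscated methods):
#
#   retriever = SCREAMING_SNAKE_CASE__(config, question_tok, generator_tok, index=index)
#   retriever.init_retrieval(distributed_port)   # must run on every rank
#   doc_embeds, doc_ids, doc_dicts = retriever.retrieve(question_hidden_states, n_docs)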
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
__SCREAMING_SNAKE_CASE : List[str] = {
'configuration_blenderbot_small': [
'BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP',
'BlenderbotSmallConfig',
'BlenderbotSmallOnnxConfig',
],
'tokenization_blenderbot_small': ['BlenderbotSmallTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE : Union[str, Any] = ['BlenderbotSmallTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE : List[str] = [
'BLENDERBOT_SMALL_PRETRAINED_MODEL_ARCHIVE_LIST',
'BlenderbotSmallForCausalLM',
'BlenderbotSmallForConditionalGeneration',
'BlenderbotSmallModel',
'BlenderbotSmallPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE : Optional[int] = [
'TFBlenderbotSmallForConditionalGeneration',
'TFBlenderbotSmallModel',
'TFBlenderbotSmallPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE : Optional[Any] = [
'FlaxBlenderbotSmallForConditionalGeneration',
'FlaxBlenderbotSmallModel',
'FlaxBlenderbotSmallPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_blenderbot_small import (
BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlenderbotSmallConfig,
BlenderbotSmallOnnxConfig,
)
from .tokenization_blenderbot_small import BlenderbotSmallTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_blenderbot_small_fast import BlenderbotSmallTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blenderbot_small import (
BLENDERBOT_SMALL_PRETRAINED_MODEL_ARCHIVE_LIST,
BlenderbotSmallForCausalLM,
BlenderbotSmallForConditionalGeneration,
BlenderbotSmallModel,
BlenderbotSmallPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blenderbot_small import (
TFBlenderbotSmallForConditionalGeneration,
TFBlenderbotSmallModel,
TFBlenderbotSmallPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_blenderbot_small import (
FlaxBlenderbotSmallForConditionalGeneration,
FlaxBlenderbotSmallModel,
FlaxBlenderbotSmallPreTrainedModel,
)
else:
import sys
__SCREAMING_SNAKE_CASE : List[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) | 697 | 1 |
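# Illustrative note: the _LazyModule indirection above defers the heavy
# framework imports, so a line like
#   from transformers.models.blenderbot_small import BlenderbotSmallTokenizer
# resolves the symbol on first access instead of importing torch/tf/flax eagerly.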
'''simple docstring'''
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, BertTokenizer, BlipImageProcessor, BlipProcessor, PreTrainedTokenizerFast
@require_vision
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Tuple = tempfile.mkdtemp()
__a : Any = BlipImageProcessor()
__a : List[Any] = BertTokenizer.from_pretrained("""hf-internal-testing/tiny-random-BertModel""" )
__a : str = BlipProcessor(__UpperCamelCase , __UpperCamelCase )
processor.save_pretrained(self.tmpdirname )
def __lowerCamelCase ( self , **__UpperCamelCase ):
'''simple docstring'''
return AutoProcessor.from_pretrained(self.tmpdirname , **__UpperCamelCase ).tokenizer
def __lowerCamelCase ( self , **__UpperCamelCase ):
'''simple docstring'''
return AutoProcessor.from_pretrained(self.tmpdirname , **__UpperCamelCase ).image_processor
def __lowerCamelCase ( self ):
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : List[Any] = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
__a : Dict = [Image.fromarray(np.moveaxis(__UpperCamelCase , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Optional[int] = BlipProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
__a : Optional[Any] = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" )
__a : str = self.get_image_processor(do_normalize=__UpperCamelCase , padding_value=1.0 )
__a : Any = BlipProcessor.from_pretrained(
self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=__UpperCamelCase , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , __UpperCamelCase )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , __UpperCamelCase )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Dict = self.get_image_processor()
__a : Dict = self.get_tokenizer()
__a : Optional[Any] = BlipProcessor(tokenizer=__UpperCamelCase , image_processor=__UpperCamelCase )
__a : List[Any] = self.prepare_image_inputs()
__a : List[str] = image_processor(__UpperCamelCase , return_tensors="""np""" )
__a : Optional[int] = processor(images=__UpperCamelCase , return_tensors="""np""" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Tuple = self.get_image_processor()
__a : Tuple = self.get_tokenizer()
__a : Tuple = BlipProcessor(tokenizer=__UpperCamelCase , image_processor=__UpperCamelCase )
__a : Optional[Any] = """lower newer"""
__a : Optional[int] = processor(text=__UpperCamelCase )
__a : List[str] = tokenizer(__UpperCamelCase , return_token_type_ids=__UpperCamelCase )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : List[str] = self.get_image_processor()
__a : List[str] = self.get_tokenizer()
__a : List[str] = BlipProcessor(tokenizer=__UpperCamelCase , image_processor=__UpperCamelCase )
__a : Tuple = """lower newer"""
__a : Any = self.prepare_image_inputs()
__a : Optional[int] = processor(text=__UpperCamelCase , images=__UpperCamelCase )
self.assertListEqual(list(inputs.keys() ) , ["""pixel_values""", """input_ids""", """attention_mask"""] )
# test if it raises when no input is passed
with pytest.raises(__UpperCamelCase ):
processor()
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : str = self.get_image_processor()
__a : Dict = self.get_tokenizer()
__a : List[str] = BlipProcessor(tokenizer=__UpperCamelCase , image_processor=__UpperCamelCase )
__a : Union[str, Any] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
__a : Dict = processor.batch_decode(__UpperCamelCase )
__a : Optional[int] = tokenizer.batch_decode(__UpperCamelCase )
self.assertListEqual(__UpperCamelCase , __UpperCamelCase )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Union[str, Any] = self.get_image_processor()
__a : Optional[int] = self.get_tokenizer()
__a : str = BlipProcessor(tokenizer=__UpperCamelCase , image_processor=__UpperCamelCase )
__a : Dict = """lower newer"""
__a : List[str] = self.prepare_image_inputs()
__a : List[Any] = processor(text=__UpperCamelCase , images=__UpperCamelCase )
# For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask']
self.assertListEqual(list(inputs.keys() ) , ["""pixel_values""", """input_ids""", """attention_mask"""] ) | 697 |
'''simple docstring'''
import numpy as np
import torch
from torch.utils.data import Dataset
from utils import logger
class SCREAMING_SNAKE_CASE__ ( __UpperCamelCase ):
def __init__( self , __UpperCamelCase , __UpperCamelCase ):
'''simple docstring'''
__a : Any = params
__a : Optional[Any] = np.array(__UpperCamelCase )
__a : Union[str, Any] = np.array([len(__UpperCamelCase ) for t in data] )
self.check()
self.remove_long_sequences()
self.remove_empty_sequences()
self.remove_unknown_sequences()
self.check()
self.print_statistics()
def __getitem__( self , __UpperCamelCase ):
'''simple docstring'''
return (self.token_ids[index], self.lengths[index])
def __len__( self ):
'''simple docstring'''
return len(self.lengths )
def __lowerCamelCase ( self ):
'''simple docstring'''
assert len(self.token_ids ) == len(self.lengths )
assert all(self.lengths[i] == len(self.token_ids[i] ) for i in range(len(self.lengths ) ) )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Tuple = self.params.max_model_input_size
__a : Union[str, Any] = self.lengths > max_len
logger.info(f"""Splitting {sum(__UpperCamelCase )} too long sequences.""" )
def divide_chunks(__UpperCamelCase , __UpperCamelCase ):
return [l[i : i + n] for i in range(0 , len(__UpperCamelCase ) , __UpperCamelCase )]
__a : int = []
__a : Union[str, Any] = []
if self.params.mlm:
__a , __a : Any = self.params.special_tok_ids["""cls_token"""], self.params.special_tok_ids["""sep_token"""]
else:
__a , __a : str = self.params.special_tok_ids["""bos_token"""], self.params.special_tok_ids["""eos_token"""]
for seq_, len_ in zip(self.token_ids , self.lengths ):
assert (seq_[0] == cls_id) and (seq_[-1] == sep_id), seq_
if len_ <= max_len:
new_tok_ids.append(seq_ )
new_lengths.append(len_ )
else:
__a : Any = []
for sub_s in divide_chunks(seq_ , max_len - 2 ):
if sub_s[0] != cls_id:
__a : int = np.insert(__UpperCamelCase , 0 , __UpperCamelCase )
if sub_s[-1] != sep_id:
__a : str = np.insert(__UpperCamelCase , len(__UpperCamelCase ) , __UpperCamelCase )
assert len(__UpperCamelCase ) <= max_len
assert (sub_s[0] == cls_id) and (sub_s[-1] == sep_id), sub_s
sub_seqs.append(__UpperCamelCase )
new_tok_ids.extend(__UpperCamelCase )
new_lengths.extend([len(__UpperCamelCase ) for l in sub_seqs] )
__a : Dict = np.array(__UpperCamelCase )
__a : Tuple = np.array(__UpperCamelCase )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : List[str] = len(self )
__a : List[str] = self.lengths > 11
__a : int = self.token_ids[indices]
__a : Union[str, Any] = self.lengths[indices]
__a : Any = len(self )
logger.info(f"""Remove {init_size - new_size} too short (<=11 tokens) sequences.""" )
def __lowerCamelCase ( self ):
'''simple docstring'''
if "unk_token" not in self.params.special_tok_ids:
return
else:
__a : List[str] = self.params.special_tok_ids["""unk_token"""]
__a : str = len(self )
__a : str = np.array([np.count_nonzero(a == unk_token_id ) for a in self.token_ids] )
__a : Optional[Any] = (unk_occs / self.lengths) < 0.5
__a : List[str] = self.token_ids[indices]
__a : Optional[int] = self.lengths[indices]
__a : Any = len(self )
logger.info(f"""Remove {init_size - new_size} sequences with a high level of unknown tokens (50%).""" )
def __lowerCamelCase ( self ):
'''simple docstring'''
if not self.params.is_master:
return
logger.info(f"""{len(self )} sequences""" )
# data_len = sum(self.lengths)
# nb_unique_tokens = len(Counter(list(chain(*self.token_ids))))
# logger.info(f'{data_len} tokens ({nb_unique_tokens} unique)')
# unk_idx = self.params.special_tok_ids['unk_token']
# nb_unknown = sum([(t==unk_idx).sum() for t in self.token_ids])
# logger.info(f'{nb_unknown} unknown tokens (covering {100*nb_unknown/data_len:.2f}% of the data)')
def __lowerCamelCase ( self , __UpperCamelCase ):
'''simple docstring'''
__a : List[str] = [t[0] for t in batch]
__a : str = [t[1] for t in batch]
assert len(__UpperCamelCase ) == len(__UpperCamelCase )
# Max for paddings
__a : Optional[int] = max(__UpperCamelCase )
# Pad token ids
if self.params.mlm:
__a : int = self.params.special_tok_ids["""pad_token"""]
else:
__a : Tuple = self.params.special_tok_ids["""unk_token"""]
__a : Any = [list(t.astype(__UpperCamelCase ) ) + [pad_idx] * (max_seq_len_ - len(__UpperCamelCase )) for t in token_ids]
assert len(tk_ ) == len(__UpperCamelCase )
assert all(len(__UpperCamelCase ) == max_seq_len_ for t in tk_ )
__a : Any = torch.tensor(tk_ ) # (bs, max_seq_len_)
__a : Optional[Any] = torch.tensor(__UpperCamelCase ) # (bs)
return tk_t, lg_t | 697 | 1 |
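# --- Illustrative usage sketch (not part of the original file) ---
# The final method above is a padding collate function (`batch_sequences`
# upstream); names below are assumptions:
#
#   from torch.utils.data import DataLoader
#   loader = DataLoader(dataset, batch_size=32, collate_fn=dataset.batch_sequences)
#   token_ids, lengths = next(iter(loader))   # shapes: (bs, max_seq_len_), (bs,)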
'''simple docstring'''
import math
import qiskit
def _snake_case ( lowercase = 1 , lowercase = 1 , lowercase = 1 ) -> qiskit.result.counts.Counts:
if (
        isinstance(lowercase , str )
        or isinstance(lowercase , str )
        or isinstance(lowercase , str )
):
raise TypeError("""inputs must be integers.""" )
if (input_a < 0) or (input_a < 0) or (carry_in < 0):
raise ValueError("""inputs must be positive.""" )
if (
(math.floor(lowercase ) != input_a)
or (math.floor(lowercase ) != input_a)
or (math.floor(lowercase ) != carry_in)
):
raise ValueError("""inputs must be exact integers.""" )
if (input_a > 2) or (input_a > 2) or (carry_in > 2):
raise ValueError("""inputs must be less or equal to 2.""" )
# build registers
__a : Any = qiskit.QuantumRegister(4 , """qr""" )
__a : str = qiskit.ClassicalRegister(2 , """cr""" )
# list the entries
__a : Dict = [input_a, input_a, carry_in]
__a : int = qiskit.QuantumCircuit(lowercase , lowercase )
for i in range(0 , 3 ):
if entry[i] == 2:
quantum_circuit.h(lowercase ) # for hadamard entries
elif entry[i] == 1:
quantum_circuit.x(lowercase ) # for 1 entries
elif entry[i] == 0:
quantum_circuit.i(lowercase ) # for 0 entries
# build the circuit
quantum_circuit.ccx(0 , 1 , 3 ) # ccx = toffoli gate
quantum_circuit.cx(0 , 1 )
quantum_circuit.ccx(1 , 2 , 3 )
quantum_circuit.cx(1 , 2 )
quantum_circuit.cx(0 , 1 )
quantum_circuit.measure([2, 3] , lowercase ) # measure the last two qbits
__a : Optional[int] = qiskit.Aer.get_backend("""aer_simulator""" )
__a : List[Any] = qiskit.execute(lowercase , lowercase , shots=1_0_0_0 )
return job.result().get_counts(lowercase )
if __name__ == "__main__":
print(f'''Total sum count for state is: {quantum_full_adder(1, 1, 1)}''') | 697 |
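# Worked example: inputs (1, 1, 1) sum to 3 == 0b11, so on the noiseless
# aer_simulator all 1000 shots should land on '11', i.e. counts == {'11': 1000}.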
'''simple docstring'''
from pathlib import PurePosixPath
from typing import Optional
import fsspec
from fsspec import AbstractFileSystem
from huggingface_hub.hf_api import DatasetInfo
from ..utils.file_utils import get_authentication_headers_for_url
from ..utils.hub import hf_hub_url
class SCREAMING_SNAKE_CASE__ ( __UpperCamelCase ):
lowercase__ = ""
lowercase__ = "hf-legacy" # "hf://"" is reserved for hffs
def __init__( self , __UpperCamelCase = None , __UpperCamelCase = None , **__UpperCamelCase , ):
'''simple docstring'''
super().__init__(self , **__UpperCamelCase )
__a : int = repo_info
__a : int = token
__a : Any = None
def __lowerCamelCase ( self ):
'''simple docstring'''
if self.dir_cache is None:
__a : Union[str, Any] = {}
for hf_file in self.repo_info.siblings:
# TODO(QL): add sizes
__a : List[str] = {
"""name""": hf_file.rfilename,
"""size""": None,
"""type""": """file""",
}
self.dir_cache.update(
{
str(__UpperCamelCase ): {"""name""": str(__UpperCamelCase ), """size""": None, """type""": """directory"""}
for d in list(PurePosixPath(hf_file.rfilename ).parents )[:-1]
} )
def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase = "rb" , **__UpperCamelCase , ):
'''simple docstring'''
if not isinstance(self.repo_info , __UpperCamelCase ):
raise NotImplementedError(f"""Open is only implemented for dataset repositories, but got {self.repo_info}""" )
__a : Any = hf_hub_url(self.repo_info.id , __UpperCamelCase , revision=self.repo_info.sha )
return fsspec.open(
__UpperCamelCase , mode=__UpperCamelCase , headers=get_authentication_headers_for_url(__UpperCamelCase , use_auth_token=self.token ) , client_kwargs={"""trust_env""": True} , ).open()
def __lowerCamelCase ( self , __UpperCamelCase , **__UpperCamelCase ):
'''simple docstring'''
self._get_dirs()
__a : str = self._strip_protocol(__UpperCamelCase )
if path in self.dir_cache:
return self.dir_cache[path]
else:
raise FileNotFoundError(__UpperCamelCase )
def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase=False , **__UpperCamelCase ):
'''simple docstring'''
self._get_dirs()
__a : int = PurePosixPath(path.strip("""/""" ) )
__a : List[str] = {}
for p, f in self.dir_cache.items():
__a : str = PurePosixPath(p.strip("""/""" ) )
__a : Optional[int] = p.parent
if root == path:
__a : List[str] = f
__a : str = list(paths.values() )
if detail:
return out
else:
return sorted(f["""name"""] for f in out ) | 697 | 1 |
'''simple docstring'''
def _snake_case ( lowercase = 1_0_0_0 ) -> int:
__a : str = 2**power
__a : List[Any] = 0
while n:
__a , __a : Any = r + n % 1_0, n // 1_0
return r
if __name__ == "__main__":
print(solution(int(str(input()).strip()))) | 697 |
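# Worked example: solution(15) == 26, since 2**15 == 32768 and 3+2+7+6+8 == 26.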
'''simple docstring'''
import inspect
import unittest
from transformers import DPTConfig
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MODEL_MAPPING, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTModel
from transformers.models.dpt.modeling_dpt import DPT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class SCREAMING_SNAKE_CASE__ :
def __init__( self , __UpperCamelCase , __UpperCamelCase=2 , __UpperCamelCase=32 , __UpperCamelCase=16 , __UpperCamelCase=3 , __UpperCamelCase=True , __UpperCamelCase=True , __UpperCamelCase=32 , __UpperCamelCase=4 , __UpperCamelCase=[0, 1, 2, 3] , __UpperCamelCase=4 , __UpperCamelCase=37 , __UpperCamelCase="gelu" , __UpperCamelCase=0.1 , __UpperCamelCase=0.1 , __UpperCamelCase=0.0_2 , __UpperCamelCase=3 , __UpperCamelCase=[1, 384, 24, 24] , __UpperCamelCase=True , __UpperCamelCase=None , ):
'''simple docstring'''
__a : List[str] = parent
__a : Tuple = batch_size
__a : str = image_size
__a : int = patch_size
__a : Dict = num_channels
__a : int = is_training
__a : Dict = use_labels
__a : Union[str, Any] = hidden_size
__a : Dict = num_hidden_layers
__a : Dict = backbone_out_indices
__a : Optional[int] = num_attention_heads
__a : List[str] = intermediate_size
__a : Optional[Any] = hidden_act
__a : Dict = hidden_dropout_prob
__a : Tuple = attention_probs_dropout_prob
__a : Any = initializer_range
__a : Any = num_labels
__a : Optional[Any] = backbone_featmap_shape
__a : List[Any] = scope
__a : List[str] = is_hybrid
# sequence length of DPT = num_patches + 1 (we add 1 for the [CLS] token)
__a : Union[str, Any] = (image_size // patch_size) ** 2
__a : List[str] = num_patches + 1
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Dict = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__a : Union[str, Any] = None
if self.use_labels:
__a : str = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
__a : Tuple = self.get_config()
return config, pixel_values, labels
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : List[str] = {
"""global_padding""": """same""",
"""layer_type""": """bottleneck""",
"""depths""": [3, 4, 9],
"""out_features""": ["""stage1""", """stage2""", """stage3"""],
"""embedding_dynamic_padding""": True,
"""hidden_sizes""": [96, 192, 384, 768],
"""num_groups""": 2,
}
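        # test-sized config for the small convolutional backbone used by the hybrid DPT variant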
return DPTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , backbone_out_indices=self.backbone_out_indices , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__UpperCamelCase , initializer_range=self.initializer_range , is_hybrid=self.is_hybrid , backbone_config=__UpperCamelCase , backbone_featmap_shape=self.backbone_featmap_shape , )
def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ):
'''simple docstring'''
__a : Optional[Any] = DPTModel(config=__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
__a : List[str] = model(__UpperCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ):
'''simple docstring'''
__a : List[str] = self.num_labels
__a : Union[str, Any] = DPTForDepthEstimation(__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
__a : Tuple = model(__UpperCamelCase )
self.parent.assertEqual(result.predicted_depth.shape , (self.batch_size, self.image_size, self.image_size) )
def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ):
'''simple docstring'''
__a : Dict = self.num_labels
__a : Tuple = DPTForSemanticSegmentation(__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
__a : str = model(__UpperCamelCase , labels=__UpperCamelCase )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size, self.image_size) )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Optional[int] = self.prepare_config_and_inputs()
__a , __a , __a : Tuple = config_and_inputs
__a : List[str] = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class SCREAMING_SNAKE_CASE__ ( __UpperCamelCase , __UpperCamelCase , unittest.TestCase ):
lowercase__ = (DPTModel, DPTForDepthEstimation, DPTForSemanticSegmentation) if is_torch_available() else ()
lowercase__ = (
{
"depth-estimation": DPTForDepthEstimation,
"feature-extraction": DPTModel,
"image-segmentation": DPTForSemanticSegmentation,
}
if is_torch_available()
else {}
)
lowercase__ = False
lowercase__ = False
lowercase__ = False
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Optional[int] = DPTModelTester(self )
__a : List[Any] = ConfigTester(self , config_class=__UpperCamelCase , has_text_modality=__UpperCamelCase , hidden_size=37 )
def __lowerCamelCase ( self ):
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason="""DPT does not use inputs_embeds""" )
def __lowerCamelCase ( self ):
'''simple docstring'''
pass
def __lowerCamelCase ( self ):
'''simple docstring'''
__a , __a : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__a : str = model_class(__UpperCamelCase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
__a : Any = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__UpperCamelCase , nn.Linear ) )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a , __a : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__a : Any = model_class(__UpperCamelCase )
__a : List[str] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__a : int = [*signature.parameters.keys()]
__a : List[str] = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , __UpperCamelCase )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__UpperCamelCase )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_depth_estimation(*__UpperCamelCase )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*__UpperCamelCase )
def __lowerCamelCase ( self ):
'''simple docstring'''
for model_class in self.all_model_classes:
if model_class.__name__ == "DPTForDepthEstimation":
continue
__a , __a : Dict = self.model_tester.prepare_config_and_inputs_for_common()
__a : List[Any] = True
if model_class in get_values(__UpperCamelCase ):
continue
__a : str = model_class(__UpperCamelCase )
model.to(__UpperCamelCase )
model.train()
__a : Union[str, Any] = self._prepare_for_class(__UpperCamelCase , __UpperCamelCase , return_labels=__UpperCamelCase )
__a : List[Any] = model(**__UpperCamelCase ).loss
loss.backward()
def __lowerCamelCase ( self ):
'''simple docstring'''
for model_class in self.all_model_classes:
if model_class.__name__ == "DPTForDepthEstimation":
continue
__a , __a : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
__a : Any = False
__a : Dict = True
if model_class in get_values(__UpperCamelCase ) or not model_class.supports_gradient_checkpointing:
continue
__a : Any = model_class(__UpperCamelCase )
model.to(__UpperCamelCase )
model.gradient_checkpointing_enable()
model.train()
__a : List[str] = self._prepare_for_class(__UpperCamelCase , __UpperCamelCase , return_labels=__UpperCamelCase )
__a : Dict = model(**__UpperCamelCase ).loss
loss.backward()
def __lowerCamelCase ( self ):
'''simple docstring'''
__a , __a : Any = self.model_tester.prepare_config_and_inputs_for_common()
__a : Any = _config_zero_init(__UpperCamelCase )
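        # with all initializer ranges zeroed out, every checked parameter must land exactly on 0.0 or 1.0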
for model_class in self.all_model_classes:
__a : Any = model_class(config=__UpperCamelCase )
# Skip the check for the backbone
__a : Optional[Any] = []
for name, module in model.named_modules():
if module.__class__.__name__ == "DPTViTHybridEmbeddings":
__a : Optional[int] = [f"""{name}.{key}""" for key in module.state_dict().keys()]
break
for name, param in model.named_parameters():
if param.requires_grad:
if name in backbone_params:
continue
self.assertIn(
((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=f"""Parameter {name} of model {model_class} seems not properly initialized""" , )
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def __lowerCamelCase ( self ):
'''simple docstring'''
pass
@slow
def __lowerCamelCase ( self ):
'''simple docstring'''
for model_name in DPT_PRETRAINED_MODEL_ARCHIVE_LIST[1:]:
__a : int = DPTModel.from_pretrained(__UpperCamelCase )
self.assertIsNotNone(__UpperCamelCase )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a , __a : int = self.model_tester.prepare_config_and_inputs_for_common()
__a : Optional[int] = """add"""
with self.assertRaises(__UpperCamelCase ):
__a : int = DPTForDepthEstimation(__UpperCamelCase )
def _snake_case ( ) -> Any:
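    # fixed COCO sample image shipped with the test fixtures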
__a : Dict = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
@slow
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : int = DPTImageProcessor.from_pretrained("""Intel/dpt-hybrid-midas""" )
__a : int = DPTForDepthEstimation.from_pretrained("""Intel/dpt-hybrid-midas""" ).to(__UpperCamelCase )
__a : Union[str, Any] = prepare_img()
__a : Any = image_processor(images=__UpperCamelCase , return_tensors="""pt""" ).to(__UpperCamelCase )
# forward pass
with torch.no_grad():
__a : Optional[Any] = model(**__UpperCamelCase )
__a : int = outputs.predicted_depth
# verify the predicted depth
__a : Any = torch.Size((1, 384, 384) )
self.assertEqual(predicted_depth.shape , __UpperCamelCase )
__a : int = torch.tensor(
[[[5.6_4_3_7, 5.6_1_4_6, 5.6_5_1_1], [5.4_3_7_1, 5.5_6_4_9, 5.5_9_5_8], [5.5_2_1_5, 5.5_1_8_4, 5.5_2_9_3]]] ).to(__UpperCamelCase )
self.assertTrue(torch.allclose(outputs.predicted_depth[:3, :3, :3] / 100 , __UpperCamelCase , atol=1E-4 ) ) | 697 | 1 |
'''simple docstring'''
def _snake_case ( lowercase ) -> list:
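    # binary insertion sort: binary-search each element's insertion slot, then shift the tail right by one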
    n = len(lowercase )
    for i in range(1 , n ):
        val = lowercase[i]
        low = 0
        high = i - 1
        while low <= high:
            mid = (low + high) // 2
            if val < lowercase[mid]:
                high = mid - 1
            else:
                low = mid + 1
        for j in range(i , low , -1 ):
            lowercase[j] = lowercase[j - 1]
        lowercase[low] = val
    return lowercase
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE : Tuple = input('Enter numbers separated by a comma:\n').strip()
__SCREAMING_SNAKE_CASE : Dict = [int(item) for item in user_input.split(',')]
print(binary_insertion_sort(unsorted)) | 697 |
'''simple docstring'''
import unittest
from .lib import (
Matrix,
Vector,
axpy,
square_zero_matrix,
unit_basis_vector,
zero_vector,
)
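# unit tests for the small hand-rolled linear algebra library in .lib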
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Dict = Vector([1, 2, 3] )
self.assertEqual(x.component(0 ) , 1 )
self.assertEqual(x.component(2 ) , 3 )
__a : Optional[int] = Vector()
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Any = Vector([0, 0, 0, 0, 0, 1] )
self.assertEqual(str(__UpperCamelCase ) , """(0,0,0,0,0,1)""" )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Tuple = Vector([1, 2, 3, 4] )
self.assertEqual(len(__UpperCamelCase ) , 4 )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Optional[Any] = Vector([1, 2] )
__a : List[str] = Vector([1, 2, 3, 4, 5] )
__a : Optional[int] = Vector([0, 0, 0, 0, 0, 0, 0, 0, 0, 0] )
__a : Dict = Vector([1, -1, 1, -1, 2, -3, 4, -5] )
self.assertAlmostEqual(x.euclidean_length() , 2.2_3_6 , 3 )
self.assertAlmostEqual(y.euclidean_length() , 7.4_1_6 , 3 )
self.assertEqual(z.euclidean_length() , 0 )
self.assertAlmostEqual(w.euclidean_length() , 7.6_1_6 , 3 )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Dict = Vector([1, 2, 3] )
__a : Union[str, Any] = Vector([1, 1, 1] )
self.assertEqual((x + y).component(0 ) , 2 )
self.assertEqual((x + y).component(1 ) , 3 )
self.assertEqual((x + y).component(2 ) , 4 )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : List[str] = Vector([1, 2, 3] )
__a : Any = Vector([1, 1, 1] )
self.assertEqual((x - y).component(0 ) , 0 )
self.assertEqual((x - y).component(1 ) , 1 )
self.assertEqual((x - y).component(2 ) , 2 )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Tuple = Vector([1, 2, 3] )
__a : Optional[Any] = Vector([2, -1, 4] ) # for test of dot product
__a : Union[str, Any] = Vector([1, -2, -1] )
self.assertEqual(str(x * 3.0 ) , """(3.0,6.0,9.0)""" )
self.assertEqual((a * b) , 0 )
def __lowerCamelCase ( self ):
'''simple docstring'''
self.assertEqual(str(zero_vector(10 ) ).count("""0""" ) , 10 )
def __lowerCamelCase ( self ):
'''simple docstring'''
self.assertEqual(str(unit_basis_vector(3 , 1 ) ) , """(0,1,0)""" )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Dict = Vector([1, 2, 3] )
__a : Optional[int] = Vector([1, 0, 1] )
self.assertEqual(str(axpy(2 , __UpperCamelCase , __UpperCamelCase ) ) , """(3,4,7)""" )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : int = Vector([1, 0, 0, 0, 0, 0] )
__a : Any = x.copy()
self.assertEqual(str(__UpperCamelCase ) , str(__UpperCamelCase ) )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Union[str, Any] = Vector([1, 0, 0] )
x.change_component(0 , 0 )
x.change_component(1 , 1 )
self.assertEqual(str(__UpperCamelCase ) , """(0,1,0)""" )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Dict = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
self.assertEqual("""|1,2,3|\n|2,4,5|\n|6,7,8|\n""" , str(__UpperCamelCase ) )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : List[Any] = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
__a : List[Any] = [[-3, -14, -10], [-5, -10, -5], [-2, -1, 0]]
for x in range(a.height() ):
for y in range(a.width() ):
self.assertEqual(minors[x][y] , a.minor(__UpperCamelCase , __UpperCamelCase ) )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : List[Any] = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
__a : Any = [[-3, 14, -10], [5, -10, 5], [-2, 1, 0]]
for x in range(a.height() ):
for y in range(a.width() ):
self.assertEqual(cofactors[x][y] , a.cofactor(__UpperCamelCase , __UpperCamelCase ) )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : List[Any] = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
self.assertEqual(-5 , a.determinant() )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Any = Matrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]] , 3 , 3 )
__a : List[Any] = Vector([1, 2, 3] )
self.assertEqual("""(14,32,50)""" , str(a * x ) )
self.assertEqual("""|2,4,6|\n|8,10,12|\n|14,16,18|\n""" , str(a * 2 ) )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : List[str] = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
a.change_component(0 , 2 , 5 )
self.assertEqual("""|1,2,5|\n|2,4,5|\n|6,7,8|\n""" , str(__UpperCamelCase ) )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Tuple = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
self.assertEqual(7 , a.component(2 , 1 ) , 0.0_1 )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Dict = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
__a : Union[str, Any] = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]] , 3 , 3 )
self.assertEqual("""|2,4,10|\n|4,8,10|\n|12,14,18|\n""" , str(a + b ) )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : List[Any] = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
__a : List[str] = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]] , 3 , 3 )
self.assertEqual("""|0,0,-4|\n|0,0,0|\n|0,0,-2|\n""" , str(a - b ) )
def __lowerCamelCase ( self ):
'''simple docstring'''
self.assertEqual(
"""|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n""" , str(square_zero_matrix(5 ) ) , )
if __name__ == "__main__":
unittest.main() | 697 | 1 |
'''simple docstring'''
import inspect
import unittest
from transformers import MobileViTConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileViTForImageClassification, MobileViTForSemanticSegmentation, MobileViTModel
from transformers.models.mobilevit.modeling_mobilevit import MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class SCREAMING_SNAKE_CASE__ ( __UpperCamelCase ):
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Optional[int] = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(__UpperCamelCase , """hidden_sizes""" ) )
self.parent.assertTrue(hasattr(__UpperCamelCase , """neck_hidden_sizes""" ) )
self.parent.assertTrue(hasattr(__UpperCamelCase , """num_attention_heads""" ) )
class SCREAMING_SNAKE_CASE__ :
def __init__( self , __UpperCamelCase , __UpperCamelCase=13 , __UpperCamelCase=32 , __UpperCamelCase=2 , __UpperCamelCase=3 , __UpperCamelCase=640 , __UpperCamelCase=4 , __UpperCamelCase="silu" , __UpperCamelCase=3 , __UpperCamelCase=32 , __UpperCamelCase=0.1 , __UpperCamelCase=0.1 , __UpperCamelCase=0.1 , __UpperCamelCase=0.0_2 , __UpperCamelCase=True , __UpperCamelCase=True , __UpperCamelCase=10 , __UpperCamelCase=None , ):
'''simple docstring'''
__a : int = parent
__a : List[Any] = batch_size
__a : Tuple = image_size
__a : List[str] = patch_size
__a : List[Any] = num_channels
__a : List[Any] = last_hidden_size
__a : List[Any] = num_attention_heads
__a : Any = hidden_act
__a : Dict = conv_kernel_size
__a : List[str] = output_stride
__a : Optional[int] = hidden_dropout_prob
__a : Optional[Any] = attention_probs_dropout_prob
__a : Optional[Any] = classifier_dropout_prob
__a : Optional[Any] = use_labels
__a : Tuple = is_training
__a : Optional[int] = num_labels
__a : List[str] = initializer_range
__a : List[str] = scope
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Optional[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__a : Union[str, Any] = None
__a : Union[str, Any] = None
if self.use_labels:
__a : Any = ids_tensor([self.batch_size] , self.num_labels )
__a : Optional[int] = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
__a : List[Any] = self.get_config()
return config, pixel_values, labels, pixel_labels
def __lowerCamelCase ( self ):
'''simple docstring'''
return MobileViTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , num_attention_heads=self.num_attention_heads , hidden_act=self.hidden_act , conv_kernel_size=self.conv_kernel_size , output_stride=self.output_stride , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , )
def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ):
'''simple docstring'''
__a : Union[str, Any] = MobileViTModel(config=__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
__a : Union[str, Any] = model(__UpperCamelCase )
self.parent.assertEqual(
result.last_hidden_state.shape , (
self.batch_size,
self.last_hidden_size,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ):
'''simple docstring'''
__a : Union[str, Any] = self.num_labels
__a : Union[str, Any] = MobileViTForImageClassification(__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
__a : Tuple = model(__UpperCamelCase , labels=__UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ):
'''simple docstring'''
__a : List[Any] = self.num_labels
__a : Dict = MobileViTForSemanticSegmentation(__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
__a : List[str] = model(__UpperCamelCase )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
__a : Union[str, Any] = model(__UpperCamelCase , labels=__UpperCamelCase )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Dict = self.prepare_config_and_inputs()
__a , __a , __a , __a : Optional[int] = config_and_inputs
__a : int = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class SCREAMING_SNAKE_CASE__ ( __UpperCamelCase , __UpperCamelCase , unittest.TestCase ):
lowercase__ = (
(MobileViTModel, MobileViTForImageClassification, MobileViTForSemanticSegmentation)
if is_torch_available()
else ()
)
lowercase__ = (
{
"feature-extraction": MobileViTModel,
"image-classification": MobileViTForImageClassification,
"image-segmentation": MobileViTForSemanticSegmentation,
}
if is_torch_available()
else {}
)
lowercase__ = False
lowercase__ = False
lowercase__ = False
lowercase__ = False
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : List[str] = MobileViTModelTester(self )
__a : Optional[Any] = MobileViTConfigTester(self , config_class=__UpperCamelCase , has_text_modality=__UpperCamelCase )
def __lowerCamelCase ( self ):
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason="""MobileViT does not use inputs_embeds""" )
def __lowerCamelCase ( self ):
'''simple docstring'''
pass
@unittest.skip(reason="""MobileViT does not support input and output embeddings""" )
def __lowerCamelCase ( self ):
'''simple docstring'''
pass
@unittest.skip(reason="""MobileViT does not output attentions""" )
def __lowerCamelCase ( self ):
'''simple docstring'''
pass
def __lowerCamelCase ( self ):
'''simple docstring'''
__a , __a : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__a : List[Any] = model_class(__UpperCamelCase )
__a : Optional[Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__a : str = [*signature.parameters.keys()]
__a : Optional[int] = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , __UpperCamelCase )
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def __lowerCamelCase ( self ):
'''simple docstring'''
pass
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__UpperCamelCase )
def __lowerCamelCase ( self ):
'''simple docstring'''
def check_hidden_states_output(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ):
__a : Optional[Any] = model_class(__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
with torch.no_grad():
__a : Tuple = model(**self._prepare_for_class(__UpperCamelCase , __UpperCamelCase ) )
__a : int = outputs.hidden_states
__a : List[Any] = 5
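            # the test configuration is expected to expose five stage-level feature maps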
self.assertEqual(len(__UpperCamelCase ) , __UpperCamelCase )
# MobileViT's feature maps are of shape (batch_size, num_channels, height, width)
# with the width and height being successively divided by 2.
__a : List[str] = 2
for i in range(len(__UpperCamelCase ) ):
self.assertListEqual(
list(hidden_states[i].shape[-2:] ) , [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor] , )
divisor *= 2
self.assertEqual(self.model_tester.output_stride , divisor // 2 )
__a , __a : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__a : List[str] = True
check_hidden_states_output(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__a : Union[str, Any] = True
check_hidden_states_output(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__UpperCamelCase )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*__UpperCamelCase )
@slow
def __lowerCamelCase ( self ):
'''simple docstring'''
for model_name in MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__a : int = MobileViTModel.from_pretrained(__UpperCamelCase )
self.assertIsNotNone(__UpperCamelCase )
def _snake_case ( ) -> Dict:
__a : Tuple = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
@cached_property
def __lowerCamelCase ( self ):
'''simple docstring'''
return MobileViTImageProcessor.from_pretrained("""apple/mobilevit-xx-small""" ) if is_vision_available() else None
@slow
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : int = MobileViTForImageClassification.from_pretrained("""apple/mobilevit-xx-small""" ).to(__UpperCamelCase )
__a : str = self.default_image_processor
__a : List[Any] = prepare_img()
__a : Union[str, Any] = image_processor(images=__UpperCamelCase , return_tensors="""pt""" ).to(__UpperCamelCase )
# forward pass
with torch.no_grad():
__a : Any = model(**__UpperCamelCase )
# verify the logits
__a : Optional[Any] = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , __UpperCamelCase )
__a : Any = torch.tensor([-1.9_3_6_4, -1.2_3_2_7, -0.4_6_5_3] ).to(__UpperCamelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __UpperCamelCase , atol=1E-4 ) )
@slow
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : str = MobileViTForSemanticSegmentation.from_pretrained("""apple/deeplabv3-mobilevit-xx-small""" )
__a : Optional[int] = model.to(__UpperCamelCase )
__a : Tuple = MobileViTImageProcessor.from_pretrained("""apple/deeplabv3-mobilevit-xx-small""" )
__a : List[str] = prepare_img()
__a : Tuple = image_processor(images=__UpperCamelCase , return_tensors="""pt""" ).to(__UpperCamelCase )
# forward pass
with torch.no_grad():
__a : int = model(**__UpperCamelCase )
__a : Any = outputs.logits
# verify the logits
__a : List[str] = torch.Size((1, 21, 32, 32) )
self.assertEqual(logits.shape , __UpperCamelCase )
__a : Dict = torch.tensor(
[
[[6.9_7_1_3, 6.9_7_8_6, 7.2_4_2_2], [7.2_8_9_3, 7.2_8_2_5, 7.4_4_4_6], [7.6_5_8_0, 7.8_7_9_7, 7.9_4_2_0]],
[[-1_0.6_8_6_9, -1_0.3_2_5_0, -1_0.3_4_7_1], [-1_0.4_2_2_8, -9.9_8_6_8, -9.7_1_3_2], [-1_1.0_4_0_5, -1_1.0_2_2_1, -1_0.7_3_1_8]],
[[-3.3_0_8_9, -2.8_5_3_9, -2.6_7_4_0], [-3.2_7_0_6, -2.5_6_2_1, -2.5_1_0_8], [-3.2_5_3_4, -2.6_6_1_5, -2.6_6_5_1]],
] , device=__UpperCamelCase , )
self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , __UpperCamelCase , atol=1E-4 ) )
@slow
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : int = MobileViTForSemanticSegmentation.from_pretrained("""apple/deeplabv3-mobilevit-xx-small""" )
__a : Tuple = model.to(__UpperCamelCase )
__a : List[str] = MobileViTImageProcessor.from_pretrained("""apple/deeplabv3-mobilevit-xx-small""" )
__a : Optional[Any] = prepare_img()
__a : str = image_processor(images=__UpperCamelCase , return_tensors="""pt""" ).to(__UpperCamelCase )
# forward pass
with torch.no_grad():
__a : Any = model(**__UpperCamelCase )
__a : Any = outputs.logits.detach().cpu()
__a : List[str] = image_processor.post_process_semantic_segmentation(outputs=__UpperCamelCase , target_sizes=[(50, 60)] )
__a : int = torch.Size((50, 60) )
self.assertEqual(segmentation[0].shape , __UpperCamelCase )
__a : Optional[Any] = image_processor.post_process_semantic_segmentation(outputs=__UpperCamelCase )
__a : Union[str, Any] = torch.Size((32, 32) )
self.assertEqual(segmentation[0].shape , __UpperCamelCase ) | 697 |
'''simple docstring'''
import os
from itertools import chain
from random import randrange, shuffle
import pytest
from .sola import PokerHand
__SCREAMING_SNAKE_CASE : List[str] = (
'4S 3H 2C 7S 5H',
'9D 8H 2C 6S 7H',
'2D 6D 9D TH 7D',
'TC 8C 2S JH 6C',
'JH 8S TH AH QH',
'TS KS 5S 9S AC',
'KD 6S 9D TH AD',
'KS 8D 4D 9S 4S', # pair
'8C 4S KH JS 4D', # pair
'QH 8H KD JH 8S', # pair
'KC 4H KS 2H 8D', # pair
'KD 4S KC 3H 8S', # pair
'AH 8S AS KC JH', # pair
'3H 4C 4H 3S 2H', # 2 pairs
'5S 5D 2C KH KH', # 2 pairs
'3C KH 5D 5S KH', # 2 pairs
'AS 3C KH AD KH', # 2 pairs
'7C 7S 3S 7H 5S', # 3 of a kind
'7C 7S KH 2H 7H', # 3 of a kind
'AC KH QH AH AS', # 3 of a kind
'2H 4D 3C AS 5S', # straight (low ace)
'3C 5C 4C 2C 6H', # straight
'6S 8S 7S 5H 9H', # straight
'JS QS 9H TS KH', # straight
'QC KH TS JS AH', # straight (high ace)
'8C 9C 5C 3C TC', # flush
'3S 8S 9S 5S KS', # flush
'4C 5C 9C 8C KC', # flush
'JH 8H AH KH QH', # flush
'3D 2H 3H 2C 2D', # full house
'2H 2C 3S 3H 3D', # full house
'KH KC 3S 3H 3D', # full house
'JC 6H JS JD JH', # 4 of a kind
'JC 7H JS JD JH', # 4 of a kind
'JC KH JS JD JH', # 4 of a kind
'2S AS 4S 5S 3S', # straight flush (low ace)
'2D 6D 3D 4D 5D', # straight flush
'5C 6C 3C 7C 4C', # straight flush
'JH 9H TH KH QH', # straight flush
'JH AH TH KH QH', # royal flush (high ace straight flush)
)
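# SORTED_HANDS is ordered from weakest to strongest; the sorting tests below rely on this order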
__SCREAMING_SNAKE_CASE : Optional[Any] = (
('2H 3H 4H 5H 6H', 'KS AS TS QS JS', 'Loss'),
('2H 3H 4H 5H 6H', 'AS AD AC AH JD', 'Win'),
('AS AH 2H AD AC', 'JS JD JC JH 3D', 'Win'),
('2S AH 2H AS AC', 'JS JD JC JH AD', 'Loss'),
('2S AH 2H AS AC', '2H 3H 5H 6H 7H', 'Win'),
('AS 3S 4S 8S 2S', '2H 3H 5H 6H 7H', 'Win'),
('2H 3H 5H 6H 7H', '2S 3H 4H 5S 6C', 'Win'),
('2S 3H 4H 5S 6C', '3D 4C 5H 6H 2S', 'Tie'),
('2S 3H 4H 5S 6C', 'AH AC 5H 6H AS', 'Win'),
('2S 2H 4H 5S 4C', 'AH AC 5H 6H AS', 'Loss'),
('2S 2H 4H 5S 4C', 'AH AC 5H 6H 7S', 'Win'),
('6S AD 7H 4S AS', 'AH AC 5H 6H 7S', 'Loss'),
('2S AH 4H 5S KC', 'AH AC 5H 6H 7S', 'Loss'),
('2S 3H 6H 7S 9C', '7H 3C TH 6H 9S', 'Loss'),
('4S 5H 6H TS AC', '3S 5H 6H TS AC', 'Win'),
('2S AH 4H 5S 6C', 'AD 4C 5H 6H 2C', 'Tie'),
('AS AH 3H AD AC', 'AS AH 2H AD AC', 'Win'),
('AH AC 5H 5C QS', 'AH AC 5H 5C KS', 'Loss'),
('AH AC 5H 5C QS', 'KH KC 5H 5C QS', 'Win'),
('7C 7S KH 2H 7H', '3C 3S AH 2H 3H', 'Win'),
('3C 3S AH 2H 3H', '7C 7S KH 2H 7H', 'Loss'),
('6H 5H 4H 3H 2H', '5H 4H 3H 2H AH', 'Win'),
('5H 4H 3H 2H AH', '5H 4H 3H 2H AH', 'Tie'),
('5H 4H 3H 2H AH', '6H 5H 4H 3H 2H', 'Loss'),
('AH AD KS KC AC', 'AH KD KH AC KC', 'Win'),
('2H 4D 3C AS 5S', '2H 4D 3C 6S 5S', 'Loss'),
('2H 3S 3C 3H 2S', '3S 3C 2S 2H 2D', 'Win'),
('4D 6D 5D 2D JH', '3S 8S 3H TC KH', 'Loss'),
('4S 6C 8S 3S 7S', 'AD KS 2D 7D 7C', 'Loss'),
('6S 4C 7H 8C 3H', '5H JC AH 9D 9C', 'Loss'),
('9D 9H JH TC QH', '3C 2S JS 5C 7H', 'Win'),
('2H TC 8S AD 9S', '4H TS 7H 2C 5C', 'Win'),
('9D 3S 2C 7S 7C', 'JC TD 3C TC 9H', 'Loss'),
)
__SCREAMING_SNAKE_CASE : Tuple = (
('2H 3H 4H 5H 6H', True),
('AS AH 2H AD AC', False),
('2H 3H 5H 6H 7H', True),
('KS AS TS QS JS', True),
('8H 9H QS JS TH', False),
('AS 3S 4S 8S 2S', True),
)
__SCREAMING_SNAKE_CASE : Dict = (
('2H 3H 4H 5H 6H', True),
('AS AH 2H AD AC', False),
('2H 3H 5H 6H 7H', False),
('KS AS TS QS JS', True),
('8H 9H QS JS TH', True),
)
__SCREAMING_SNAKE_CASE : Optional[int] = (
('2H 4D 3C AS 5S', True, [5, 4, 3, 2, 14]),
('2H 5D 3C AS 5S', False, [14, 5, 5, 3, 2]),
('JH QD KC AS TS', False, [14, 13, 12, 11, 10]),
('9D 3S 2C 7S 7C', False, [9, 7, 7, 3, 2]),
)
__SCREAMING_SNAKE_CASE : int = (
('JH AH TH KH QH', 0),
('JH 9H TH KH QH', 0),
('JC KH JS JD JH', 7),
('KH KC 3S 3H 3D', 6),
('8C 9C 5C 3C TC', 0),
('JS QS 9H TS KH', 0),
('7C 7S KH 2H 7H', 3),
('3C KH 5D 5S KH', 2),
('QH 8H KD JH 8S', 1),
('2D 6D 9D TH 7D', 0),
)
__SCREAMING_SNAKE_CASE : int = (
('JH AH TH KH QH', 23),
('JH 9H TH KH QH', 22),
('JC KH JS JD JH', 21),
('KH KC 3S 3H 3D', 20),
('8C 9C 5C 3C TC', 19),
('JS QS 9H TS KH', 18),
('7C 7S KH 2H 7H', 17),
('3C KH 5D 5S KH', 16),
('QH 8H KD JH 8S', 15),
('2D 6D 9D TH 7D', 14),
)
def _snake_case ( ) -> List[str]:
    play , oppo = randrange(len(SORTED_HANDS ) ), randrange(len(SORTED_HANDS ) )
    expected = ["""Loss""", """Tie""", """Win"""][(play >= oppo) + (play > oppo)]
    hand , other = SORTED_HANDS[play], SORTED_HANDS[oppo]
return hand, other, expected
def _snake_case ( lowercase = 1_0_0 ) -> Any:
return (generate_random_hand() for _ in range(lowercase ))
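# yields 100 random (hand, other, expected) triples by default; consumed by the randomised comparison test below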
@pytest.mark.parametrize("""hand, expected""" , lowercase )
def _snake_case ( lowercase , lowercase ) -> int:
assert PokerHand(lowercase )._is_flush() == expected
@pytest.mark.parametrize("""hand, expected""" , lowercase )
def _snake_case ( lowercase , lowercase ) -> Any:
assert PokerHand(lowercase )._is_straight() == expected
@pytest.mark.parametrize("""hand, expected, card_values""" , lowercase )
def _snake_case ( lowercase , lowercase , lowercase ) -> List[str]:
__a : Union[str, Any] = PokerHand(lowercase )
assert player._is_five_high_straight() == expected
assert player._card_values == card_values
@pytest.mark.parametrize("""hand, expected""" , lowercase )
def _snake_case ( lowercase , lowercase ) -> Optional[int]:
assert PokerHand(lowercase )._is_same_kind() == expected
@pytest.mark.parametrize("""hand, expected""" , lowercase )
def _snake_case ( lowercase , lowercase ) -> Union[str, Any]:
assert PokerHand(lowercase )._hand_type == expected
@pytest.mark.parametrize("""hand, other, expected""" , lowercase )
def _snake_case ( lowercase , lowercase , lowercase ) -> Optional[int]:
assert PokerHand(lowercase ).compare_with(PokerHand(lowercase ) ) == expected
@pytest.mark.parametrize("""hand, other, expected""" , generate_random_hands() )
def _snake_case ( lowercase , lowercase , lowercase ) -> int:
assert PokerHand(lowercase ).compare_with(PokerHand(lowercase ) ) == expected
def _snake_case ( ) -> Union[str, Any]:
    poker_hands = [PokerHand(hand ) for hand in SORTED_HANDS]
    list_copy = poker_hands.copy()
    shuffle(list_copy )
    user_sorted = chain(sorted(list_copy ) )
    for index, hand in enumerate(user_sorted ):
        assert hand == poker_hands[index]
def _snake_case ( ) -> List[str]:
# Test that five high straights are compared correctly.
    pokerhands = [PokerHand("""2D AC 3H 4H 5S""" ), PokerHand("""2S 3H 4H 5S 6C""" )]
    pokerhands.sort(reverse=True )
    assert pokerhands[0].__str__() == "2S 3H 4H 5S 6C"
def _snake_case ( ) -> List[str]:
# Multiple calls to five_high_straight function should still return True
# and shouldn't mutate the list in every call other than the first.
    pokerhand = PokerHand("""2C 4S AS 3D 5C""" )
    expected = True
    expected_card_values = [5, 4, 3, 2, 1_4]
for _ in range(1_0 ):
assert pokerhand._is_five_high_straight() == expected
assert pokerhand._card_values == expected_card_values
def _snake_case ( ) -> Dict:
# Problem number 54 from Project Euler
# Testing from poker_hands.txt file
    answer = 0
    script_dir = os.path.abspath(os.path.dirname(__file__ ) )
    file_path = os.path.join(script_dir , """poker_hands.txt""" )
    with open(file_path ) as file_hand:
        for line in file_hand:
            player_hand = line[:1_4].strip()
            opponent_hand = line[1_5:].strip()
            player , opponent = PokerHand(player_hand ), PokerHand(opponent_hand )
            output = player.compare_with(opponent )
if output == "Win":
answer += 1
assert answer == 3_7_6 | 697 | 1 |
'''simple docstring'''
import io
import json
import fsspec
import pytest
from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.json import JsonDatasetReader, JsonDatasetWriter
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def _snake_case ( lowercase , lowercase ) -> Union[str, Any]:
assert isinstance(lowercase , lowercase )
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
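# shared helper: every reader variant must yield 4 rows with the three expected typed columns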
@pytest.mark.parametrize("""keep_in_memory""" , [False, True] )
def _snake_case ( lowercase , lowercase , lowercase ) -> Dict:
__a : Optional[int] = tmp_path / """cache"""
__a : Optional[int] = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
__a : List[str] = JsonDatasetReader(lowercase , cache_dir=lowercase , keep_in_memory=lowercase ).read()
_check_json_dataset(lowercase , lowercase )
@pytest.mark.parametrize(
"""features""" , [
None,
{"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""},
{"""col_1""": """string""", """col_2""": """string""", """col_3""": """string"""},
{"""col_1""": """int32""", """col_2""": """int32""", """col_3""": """int32"""},
{"""col_1""": """float32""", """col_2""": """float32""", """col_3""": """float32"""},
] , )
def _snake_case ( lowercase , lowercase , lowercase ) -> Any:
__a : int = tmp_path / """cache"""
__a : Dict = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
__a : Union[str, Any] = features.copy() if features else default_expected_features
__a : Optional[Any] = (
Features({feature: Value(lowercase ) for feature, dtype in features.items()} ) if features is not None else None
)
__a : List[Any] = JsonDatasetReader(lowercase , features=lowercase , cache_dir=lowercase ).read()
_check_json_dataset(lowercase , lowercase )
@pytest.mark.parametrize(
"""features""" , [
None,
{"""col_3""": """float64""", """col_1""": """string""", """col_2""": """int64"""},
] , )
def _snake_case ( lowercase , lowercase , lowercase ) -> Any:
__a : Tuple = tmp_path / """cache"""
__a : Union[str, Any] = {"""col_3""": """float64""", """col_1""": """string""", """col_2""": """int64"""}
__a : Tuple = features.copy() if features else default_expected_features
__a : List[Any] = (
Features({feature: Value(lowercase ) for feature, dtype in features.items()} ) if features is not None else None
)
__a : str = JsonDatasetReader(lowercase , features=lowercase , cache_dir=lowercase ).read()
assert isinstance(lowercase , lowercase )
assert dataset.num_rows == 2
assert dataset.num_columns == 3
assert dataset.column_names == ["col_3", "col_1", "col_2"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
def _snake_case ( lowercase , lowercase ) -> str:
# jsonl_312_path features are {"col_3": "float64", "col_1": "string", "col_2": "int64"}
__a : int = {"""col_2""": """int64""", """col_3""": """float64""", """col_1""": """string"""}
__a : List[str] = features.copy()
__a : str = (
Features({feature: Value(lowercase ) for feature, dtype in features.items()} ) if features is not None else None
)
__a : Tuple = tmp_path / """cache"""
__a : Any = JsonDatasetReader(lowercase , features=lowercase , cache_dir=lowercase ).read()
assert isinstance(lowercase , lowercase )
assert dataset.num_rows == 2
assert dataset.num_columns == 3
assert dataset.column_names == ["col_2", "col_3", "col_1"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("""split""" , [None, NamedSplit("""train""" ), """train""", """test"""] )
def _snake_case ( lowercase , lowercase , lowercase ) -> str:
__a : List[Any] = tmp_path / """cache"""
__a : List[str] = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
__a : List[str] = JsonDatasetReader(lowercase , cache_dir=lowercase , split=lowercase ).read()
_check_json_dataset(lowercase , lowercase )
assert dataset.split == split if split else "train"
@pytest.mark.parametrize("""path_type""" , [str, list] )
def _snake_case ( lowercase , lowercase , lowercase ) -> List[Any]:
if issubclass(lowercase , lowercase ):
__a : str = jsonl_path
elif issubclass(lowercase , lowercase ):
__a : Tuple = [jsonl_path]
__a : Any = tmp_path / """cache"""
__a : Tuple = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
__a : Optional[int] = JsonDatasetReader(lowercase , cache_dir=lowercase ).read()
_check_json_dataset(lowercase , lowercase )
def _snake_case ( lowercase , lowercase , lowercase=("train",) ) -> Optional[int]:
assert isinstance(lowercase , lowercase )
for split in splits:
__a : Optional[int] = dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("""keep_in_memory""" , [False, True] )
def _snake_case ( lowercase , lowercase , lowercase ) -> List[Any]:
__a : Optional[int] = tmp_path / """cache"""
__a : List[str] = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
__a : int = JsonDatasetReader({"""train""": jsonl_path} , cache_dir=lowercase , keep_in_memory=lowercase ).read()
_check_json_datasetdict(lowercase , lowercase )
@pytest.mark.parametrize(
"""features""" , [
None,
{"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""},
{"""col_1""": """string""", """col_2""": """string""", """col_3""": """string"""},
{"""col_1""": """int32""", """col_2""": """int32""", """col_3""": """int32"""},
{"""col_1""": """float32""", """col_2""": """float32""", """col_3""": """float32"""},
] , )
def _snake_case ( lowercase , lowercase , lowercase ) -> Any:
__a : str = tmp_path / """cache"""
__a : int = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
__a : str = features.copy() if features else default_expected_features
__a : List[str] = (
Features({feature: Value(lowercase ) for feature, dtype in features.items()} ) if features is not None else None
)
__a : List[str] = JsonDatasetReader({"""train""": jsonl_path} , features=lowercase , cache_dir=lowercase ).read()
_check_json_datasetdict(lowercase , lowercase )
@pytest.mark.parametrize("""split""" , [None, NamedSplit("""train""" ), """train""", """test"""] )
def _snake_case ( lowercase , lowercase , lowercase ) -> Dict:
if split:
__a : str = {split: jsonl_path}
else:
__a : Dict = """train"""
__a : Optional[int] = {"""train""": jsonl_path, """test""": jsonl_path}
__a : int = tmp_path / """cache"""
__a : Dict = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
__a : Optional[int] = JsonDatasetReader(lowercase , cache_dir=lowercase ).read()
_check_json_datasetdict(lowercase , lowercase , splits=list(path.keys() ) )
assert all(dataset[split].split == split for split in path.keys() )
def _snake_case ( lowercase ) -> Any:
return json.load(lowercase )
def _snake_case ( lowercase ) -> Union[str, Any]:
    return [json.loads(line ) for line in buffer]
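# load_json parses one whole JSON document; load_json_lines parses one object per line (JSON Lines)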
class SCREAMING_SNAKE_CASE__ :
@pytest.mark.parametrize("""lines, load_json_function""" , [(True, load_json_lines), (False, load_json)] )
def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ):
'''simple docstring'''
with io.BytesIO() as buffer:
JsonDatasetWriter(__UpperCamelCase , __UpperCamelCase , lines=__UpperCamelCase ).write()
buffer.seek(0 )
__a : List[str] = load_json_function(__UpperCamelCase )
assert isinstance(__UpperCamelCase , __UpperCamelCase )
assert isinstance(exported_content[0] , __UpperCamelCase )
assert len(__UpperCamelCase ) == 10
@pytest.mark.parametrize(
"""orient, container, keys, len_at""" , [
("""records""", list, {"""tokens""", """labels""", """answers""", """id"""}, None),
("""split""", dict, {"""columns""", """data"""}, """data"""),
("""index""", dict, set("""0123456789""" ), None),
("""columns""", dict, {"""tokens""", """labels""", """answers""", """id"""}, """tokens"""),
("""values""", list, None, None),
("""table""", dict, {"""schema""", """data"""}, """data"""),
] , )
def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ):
'''simple docstring'''
with io.BytesIO() as buffer:
JsonDatasetWriter(__UpperCamelCase , __UpperCamelCase , lines=__UpperCamelCase , orient=__UpperCamelCase ).write()
buffer.seek(0 )
__a : List[str] = load_json(__UpperCamelCase )
assert isinstance(__UpperCamelCase , __UpperCamelCase )
if keys:
if container is dict:
assert exported_content.keys() == keys
else:
assert exported_content[0].keys() == keys
else:
assert not hasattr(__UpperCamelCase , """keys""" ) and not hasattr(exported_content[0] , """keys""" )
if len_at:
assert len(exported_content[len_at] ) == 10
else:
assert len(__UpperCamelCase ) == 10
@pytest.mark.parametrize("""lines, load_json_function""" , [(True, load_json_lines), (False, load_json)] )
def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ):
'''simple docstring'''
with io.BytesIO() as buffer:
JsonDatasetWriter(__UpperCamelCase , __UpperCamelCase , lines=__UpperCamelCase , num_proc=2 ).write()
buffer.seek(0 )
__a : List[str] = load_json_function(__UpperCamelCase )
assert isinstance(__UpperCamelCase , __UpperCamelCase )
assert isinstance(exported_content[0] , __UpperCamelCase )
assert len(__UpperCamelCase ) == 10
@pytest.mark.parametrize(
"""orient, container, keys, len_at""" , [
("""records""", list, {"""tokens""", """labels""", """answers""", """id"""}, None),
("""split""", dict, {"""columns""", """data"""}, """data"""),
("""index""", dict, set("""0123456789""" ), None),
("""columns""", dict, {"""tokens""", """labels""", """answers""", """id"""}, """tokens"""),
("""values""", list, None, None),
("""table""", dict, {"""schema""", """data"""}, """data"""),
] , )
def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ):
'''simple docstring'''
with io.BytesIO() as buffer:
JsonDatasetWriter(__UpperCamelCase , __UpperCamelCase , lines=__UpperCamelCase , orient=__UpperCamelCase , num_proc=2 ).write()
buffer.seek(0 )
__a : Any = load_json(__UpperCamelCase )
assert isinstance(__UpperCamelCase , __UpperCamelCase )
if keys:
if container is dict:
assert exported_content.keys() == keys
else:
assert exported_content[0].keys() == keys
else:
assert not hasattr(__UpperCamelCase , """keys""" ) and not hasattr(exported_content[0] , """keys""" )
if len_at:
assert len(exported_content[len_at] ) == 10
else:
assert len(__UpperCamelCase ) == 10
def __lowerCamelCase ( self , __UpperCamelCase ):
'''simple docstring'''
with pytest.raises(__UpperCamelCase ):
with io.BytesIO() as buffer:
JsonDatasetWriter(__UpperCamelCase , __UpperCamelCase , num_proc=0 )
@pytest.mark.parametrize("""compression, extension""" , [("""gzip""", """gz"""), ("""bz2""", """bz2"""), ("""xz""", """xz""")] )
def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ):
'''simple docstring'''
__a : Tuple = tmp_path_factory.mktemp("""data""" ) / f"""test.json.{extension}"""
__a : Optional[Any] = str(shared_datadir / f"""test_file.json.{extension}""" )
JsonDatasetWriter(__UpperCamelCase , __UpperCamelCase , compression=__UpperCamelCase ).write()
with fsspec.open(__UpperCamelCase , """rb""" , compression="""infer""" ) as f:
__a : Optional[int] = f.read()
with fsspec.open(__UpperCamelCase , """rb""" , compression="""infer""" ) as f:
__a : Dict = f.read()
assert exported_content == original_content | 697 |
'''simple docstring'''
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__SCREAMING_SNAKE_CASE : Optional[Any] = {'configuration_focalnet': ['FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP', 'FocalNetConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE : List[Any] = [
'FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST',
'FocalNetForImageClassification',
'FocalNetForMaskedImageModeling',
'FocalNetBackbone',
'FocalNetModel',
'FocalNetPreTrainedModel',
]
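# the TYPE_CHECKING branch gives static type checkers the real symbols while runtime imports stay lazy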
if TYPE_CHECKING:
from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_focalnet import (
FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
FocalNetPreTrainedModel,
)
else:
import sys
__SCREAMING_SNAKE_CASE : Optional[int] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) | 697 | 1 |
'''simple docstring'''
import qiskit
def _snake_case ( lowercase , lowercase ) -> qiskit.result.counts.Counts:
__a : Any = qiskit.Aer.get_backend("""aer_simulator""" )
# Create a Quantum Circuit acting on the q register
__a : str = qiskit.QuantumCircuit(lowercase , lowercase )
# Map the quantum measurement to the classical bits
    __a.measure([0] , [0] )
# Execute the circuit on the simulator
__a : Any = qiskit.execute(lowercase , lowercase , shots=1_0_0_0 )
# Return the histogram data of the results of the experiment.
    return __a.result().get_counts(lowercase )
if __name__ == "__main__":
print(f'''Total count for various states are: {single_qubit_measure(1, 1)}''') | 697 |
'''simple docstring'''
from __future__ import annotations
import bisect
def _snake_case ( lowercase , lowercase , lowercase = 0 , lowercase = -1 ) -> int:
if hi < 0:
__a : Union[str, Any] = len(lowercase )
while lo < hi:
__a : List[str] = lo + (hi - lo) // 2
if sorted_collection[mid] < item:
__a : int = mid + 1
else:
__a : int = mid
return lo
def _snake_case ( lowercase , lowercase , lowercase = 0 , lowercase = -1 ) -> int:
if hi < 0:
__a : Any = len(lowercase )
while lo < hi:
__a : Any = lo + (hi - lo) // 2
if sorted_collection[mid] <= item:
__a : List[str] = mid + 1
else:
__a : Any = mid
return lo
def _snake_case ( lowercase , lowercase , lowercase = 0 , lowercase = -1 ) -> None:
sorted_collection.insert(bisect_left(lowercase , lowercase , lowercase , lowercase ) , lowercase )
def _snake_case ( lowercase , lowercase , lowercase = 0 , lowercase = -1 ) -> None:
sorted_collection.insert(bisect_right(lowercase , lowercase , lowercase , lowercase ) , lowercase )
def _snake_case ( lowercase , lowercase ) -> int | None:
__a : Dict = 0
__a : Any = len(lowercase ) - 1
while left <= right:
__a : str = left + (right - left) // 2
__a : List[Any] = sorted_collection[midpoint]
if current_item == item:
return midpoint
elif item < current_item:
__a : Optional[Any] = midpoint - 1
else:
__a : Optional[int] = midpoint + 1
return None
def _snake_case ( lowercase , lowercase ) -> int | None:
__a : Optional[int] = bisect.bisect_left(lowercase , lowercase )
if index != len(lowercase ) and sorted_collection[index] == item:
return index
return None
def _snake_case ( lowercase , lowercase , lowercase , lowercase ) -> int | None:
if right < left:
return None
__a : Any = left + (right - left) // 2
if sorted_collection[midpoint] == item:
return midpoint
elif sorted_collection[midpoint] > item:
return binary_search_by_recursion(lowercase , lowercase , lowercase , midpoint - 1 )
else:
return binary_search_by_recursion(lowercase , lowercase , midpoint + 1 , lowercase )
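# e.g. binary_search_by_recursion([1, 3, 5, 7], 5, 0, 3) returns index 2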
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE : List[Any] = input('Enter numbers separated by comma:\n').strip()
__SCREAMING_SNAKE_CASE : Optional[Any] = sorted(int(item) for item in user_input.split(','))
__SCREAMING_SNAKE_CASE : List[str] = int(input('Enter a single number to be found in the list:\n'))
__SCREAMING_SNAKE_CASE : Optional[int] = binary_search(collection, target)
if result is None:
print(f'''{target} was not found in {collection}.''')
else:
print(f'''{target} was found at position {result} in {collection}.''') | 697 | 1 |
'''simple docstring'''
import argparse
import torch
from ...utils import logging
from . import AlbertConfig, AlbertForPreTraining, load_tf_weights_in_albert
logging.set_verbosity_info()
def _snake_case ( lowercase , lowercase , lowercase ) -> List[Any]:
# Initialise PyTorch model
__a : int = AlbertConfig.from_json_file(lowercase )
print(F"""Building PyTorch model from configuration: {config}""" )
__a : Optional[Any] = AlbertForPreTraining(lowercase )
# Load weights from tf checkpoint
load_tf_weights_in_albert(lowercase , lowercase , lowercase )
# Save pytorch-model
print(F"""Save PyTorch model to {pytorch_dump_path}""" )
torch.save(model.state_dict() , lowercase )
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--albert_config_file',
default=None,
type=str,
required=True,
help=(
'The config json file corresponding to the pre-trained ALBERT model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
__SCREAMING_SNAKE_CASE : List[str] = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.albert_config_file, args.pytorch_dump_path) | 697 |
'''simple docstring'''
from itertools import product
def _snake_case ( lowercase , lowercase ) -> list[int]:
__a : Optional[int] = sides_number
__a : Union[str, Any] = max_face_number * dice_number
__a : Optional[Any] = [0] * (max_total + 1)
__a : Dict = 1
__a : str = range(lowercase , max_face_number + 1 )
for dice_numbers in product(lowercase , repeat=lowercase ):
__a : int = sum(lowercase )
totals_frequencies[total] += 1
return totals_frequencies
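# Project Euler 205: Peter rolls nine 4-sided dice, Colin six 6-sided dice; return P(Peter wins) rounded to 7 places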
def _snake_case ( ) -> float:
    peter_totals_frequencies = total_frequency_distribution(
        sides_number=4 , dice_number=9 )
    colin_totals_frequencies = total_frequency_distribution(
        sides_number=6 , dice_number=6 )
    peter_wins_count = 0
    min_peter_total = 9
    max_peter_total = 4 * 9
    min_colin_total = 6
    for peter_total in range(min_peter_total , max_peter_total + 1 ):
        peter_wins_count += peter_totals_frequencies[peter_total] * sum(
            colin_totals_frequencies[min_colin_total:peter_total] )
    total_games_number = (4**9) * (6**6)
    peter_win_probability = peter_wins_count / total_games_number
    rounded_peter_win_probability = round(peter_win_probability , ndigits=7 )
    return rounded_peter_win_probability
if __name__ == "__main__":
print(f'''{solution() = }''') | 697 | 1 |
'''simple docstring'''
import argparse
from tax import checkpoints
from transformers import AutoConfig, FlaxAutoModelForSeqaSeqLM
def _snake_case ( lowercase , lowercase , lowercase ) -> Optional[int]:
__a : int = AutoConfig.from_pretrained(lowercase )
__a : List[Any] = FlaxAutoModelForSeqaSeqLM.from_config(config=lowercase )
__a : Dict = checkpoints.load_tax_checkpoint(lowercase )
__a : Tuple = """wi_0""" in tax_model["""target"""]["""encoder"""]["""layers_0"""]["""mlp"""]
if config.model_type == "t5":
__a : Optional[Any] = """SelfAttention"""
if config.model_type == "longt5" and config.encoder_attention_type == "local":
__a : List[Any] = """LocalSelfAttention"""
elif config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
__a : Optional[int] = """TransientGlobalSelfAttention"""
else:
raise ValueError(
"""Given config is expected to have `model_type='t5'`, or `model_type='longt5` with `encoder_attention_type`"""
""" attribute with a value from ['local', 'transient-global].""" )
# Encoder
for layer_index in range(config.num_layers ):
__a : Optional[Any] = F"""layers_{str(lowercase )}"""
# Self-Attention
__a : Dict = tax_model["""target"""]["""encoder"""][layer_name]["""attention"""]["""key"""]["""kernel"""]
__a : str = tax_model["""target"""]["""encoder"""][layer_name]["""attention"""]["""out"""]["""kernel"""]
__a : int = tax_model["""target"""]["""encoder"""][layer_name]["""attention"""]["""query"""]["""kernel"""]
__a : Dict = tax_model["""target"""]["""encoder"""][layer_name]["""attention"""]["""value"""]["""kernel"""]
# Global input layer norm
if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
__a : Optional[int] = tax_model["""target"""]["""encoder"""][layer_name]["""attention"""]["""T5LayerNorm_0"""]["""scale"""]
# Layer Normalization
__a : Union[str, Any] = tax_model["""target"""]["""encoder"""][layer_name]["""pre_attention_layer_norm"""]["""scale"""]
if split_mlp_wi:
__a : Optional[int] = tax_model["""target"""]["""encoder"""][layer_name]["""mlp"""]["""wi_0"""]["""kernel"""]
__a : Optional[Any] = tax_model["""target"""]["""encoder"""][layer_name]["""mlp"""]["""wi_1"""]["""kernel"""]
else:
__a : int = tax_model["""target"""]["""encoder"""][layer_name]["""mlp"""]["""wi"""]["""kernel"""]
__a : Optional[Any] = tax_model["""target"""]["""encoder"""][layer_name]["""mlp"""]["""wo"""]["""kernel"""]
# Layer Normalization
__a : Optional[Any] = tax_model["""target"""]["""encoder"""][layer_name]["""pre_mlp_layer_norm"""]["""scale"""]
# Assigning
__a : int = flax_model.params["""encoder"""]["""block"""][str(lowercase )]["""layer"""]
__a : Union[str, Any] = tax_attention_key
__a : List[Any] = tax_attention_out
__a : List[str] = tax_attention_query
__a : List[str] = tax_attention_value
__a : str = tax_attention_layer_norm
# Global input layer norm
if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
__a : Tuple = tax_global_layer_norm
if split_mlp_wi:
            __a : Any = tax_mlp_wi_0
            __a : Optional[Any] = tax_mlp_wi_1
else:
__a : List[Any] = tax_mlp_wi
__a : int = tax_mlp_wo
__a : Optional[int] = tax_mlp_layer_norm
__a : str = flax_model_encoder_layer_block
# Only for layer 0:
__a : Union[str, Any] = tax_model["""target"""]["""encoder"""]["""relpos_bias"""]["""rel_embedding"""].T
__a : List[str] = tax_encoder_rel_embedding
# Side/global relative position_bias + layer norm
if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
__a : Tuple = tax_model["""target"""]["""encoder"""]["""side_relpos_bias"""]["""rel_embedding"""].T
__a : Tuple = tax_encoder_global_rel_embedding
# Assigning
__a : List[str] = tax_model["""target"""]["""encoder"""]["""encoder_norm"""]["""scale"""]
__a : Dict = tax_encoder_norm
# Decoder
for layer_index in range(config.num_layers ):
__a : Dict = F"""layers_{str(lowercase )}"""
# Self-Attention
__a : Any = tax_model["""target"""]["""decoder"""][layer_name]["""self_attention"""]["""key"""]["""kernel"""]
__a : List[str] = tax_model["""target"""]["""decoder"""][layer_name]["""self_attention"""]["""out"""]["""kernel"""]
__a : List[str] = tax_model["""target"""]["""decoder"""][layer_name]["""self_attention"""]["""query"""]["""kernel"""]
__a : Optional[int] = tax_model["""target"""]["""decoder"""][layer_name]["""self_attention"""]["""value"""]["""kernel"""]
# Layer Normalization
__a : int = tax_model["""target"""]["""decoder"""][layer_name]["""pre_self_attention_layer_norm"""][
"""scale"""
]
# Encoder-Decoder-Attention
__a : List[str] = tax_model["""target"""]["""decoder"""][layer_name]["""encoder_decoder_attention"""]
__a : Any = tax_enc_dec_attention_module["""key"""]["""kernel"""]
__a : Union[str, Any] = tax_enc_dec_attention_module["""out"""]["""kernel"""]
__a : Union[str, Any] = tax_enc_dec_attention_module["""query"""]["""kernel"""]
__a : str = tax_enc_dec_attention_module["""value"""]["""kernel"""]
# Layer Normalization
__a : Union[str, Any] = tax_model["""target"""]["""decoder"""][layer_name]["""pre_cross_attention_layer_norm"""]["""scale"""]
# MLP
if split_mlp_wi:
__a : Optional[Any] = tax_model["""target"""]["""decoder"""][layer_name]["""mlp"""]["""wi_0"""]["""kernel"""]
__a : Union[str, Any] = tax_model["""target"""]["""decoder"""][layer_name]["""mlp"""]["""wi_1"""]["""kernel"""]
else:
__a : Any = tax_model["""target"""]["""decoder"""][layer_name]["""mlp"""]["""wi"""]["""kernel"""]
__a : str = tax_model["""target"""]["""decoder"""][layer_name]["""mlp"""]["""wo"""]["""kernel"""]
# Layer Normalization
__a : Optional[int] = tax_model["""target"""]["""decoder"""][layer_name]["""pre_mlp_layer_norm"""]["""scale"""]
# Assigning
__a : Optional[Any] = flax_model.params["""decoder"""]["""block"""][str(lowercase )]["""layer"""]
__a : List[str] = tax_attention_key
__a : int = tax_attention_out
__a : Any = tax_attention_query
__a : Optional[int] = tax_attention_value
__a : Dict = tax_pre_attention_layer_norm
__a : List[str] = tax_enc_dec_attention_key
__a : Union[str, Any] = tax_enc_dec_attention_out
__a : Any = tax_enc_dec_attention_query
__a : Optional[int] = tax_enc_dec_attention_value
__a : Dict = tax_cross_layer_norm
if split_mlp_wi:
            __a : Dict = tax_mlp_wi_0
            __a : Optional[int] = tax_mlp_wi_1
else:
__a : int = tax_mlp_wi
__a : Dict = tax_mlp_wo
        __a : List[str] = tax_mlp_layer_norm
__a : List[Any] = flax_model_decoder_layer_block
# Decoder Normalization
__a : List[Any] = tax_model["""target"""]["""decoder"""]["""decoder_norm"""]["""scale"""]
    __a : Tuple = tax_decoder_norm
# Only for layer 0:
__a : int = tax_model["""target"""]["""decoder"""]["""relpos_bias"""]["""rel_embedding"""].T
__a : Dict = tax_decoder_rel_embedding
# Token Embeddings
__a : List[str] = tax_model["""target"""]["""token_embedder"""]["""embedding"""]
    __a : Optional[Any] = tax_token_embeddings
# LM Head (only in v1.1 and LongT5 checkpoints)
if "logits_dense" in tax_model["target"]["decoder"]:
__a : Union[str, Any] = tax_model["""target"""]["""decoder"""]["""logits_dense"""]["""kernel"""]
flax_model.save_pretrained(lowercase )
print("""T5X Model was sucessfully converted!""" )
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
        '--t5x_checkpoint_path', default=None, type=str, required=True, help='Path to the T5X checkpoint.'
)
parser.add_argument('--config_name', default=None, type=str, required=True, help='Config name of LongT5/T5 model.')
parser.add_argument(
'--flax_dump_folder_path', default=None, type=str, required=True, help='Path to the output FLAX model.'
)
__SCREAMING_SNAKE_CASE : Tuple = parser.parse_args()
    convert_tax_checkpoint_to_flax(args.t5x_checkpoint_path, args.config_name, args.flax_dump_folder_path) | 697 |
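A hedged direct invocation of the converter, bypassing argparse; the function name and argument order mirror the __main__ call above, while the paths and config id are illustrative assumptions (a LongT5 config exercises the encoder_attention_type branches):

# Hypothetical paths and config id, for illustration only.
convert_tax_checkpoint_to_flax(
    "/tmp/t5x/longt5_local_base/checkpoint_1000000",  # t5x_checkpoint_path
    "google/long-t5-local-base",                      # config_name
    "./longt5_flax",                                  # flax_dump_folder_path
)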
'''simple docstring'''
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import DiffusionPipeline
from diffusers.models import AutoencoderKL, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import logging
__SCREAMING_SNAKE_CASE : Union[str, Any] = logging.get_logger(__name__) # pylint: disable=invalid-name
class SCREAMING_SNAKE_CASE__ ( __UpperCamelCase ):
def __init__( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , ):
'''simple docstring'''
super().__init__()
self.register_modules(
vae=__UpperCamelCase , text_encoder=__UpperCamelCase , tokenizer=__UpperCamelCase , unet=__UpperCamelCase , scheduler=__UpperCamelCase , safety_checker=__UpperCamelCase , feature_extractor=__UpperCamelCase , )
def __lowerCamelCase ( self , __UpperCamelCase = "auto" ):
'''simple docstring'''
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
__a : Union[str, Any] = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(__UpperCamelCase )
def __lowerCamelCase ( self ):
'''simple docstring'''
self.enable_attention_slicing(__UpperCamelCase )
@torch.no_grad()
def __call__( self , __UpperCamelCase , __UpperCamelCase = 512 , __UpperCamelCase = 512 , __UpperCamelCase = 50 , __UpperCamelCase = 7.5 , __UpperCamelCase = None , __UpperCamelCase = 1 , __UpperCamelCase = 0.0 , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = "pil" , __UpperCamelCase = True , __UpperCamelCase = None , __UpperCamelCase = 1 , __UpperCamelCase = None , **__UpperCamelCase , ):
'''simple docstring'''
if isinstance(__UpperCamelCase , __UpperCamelCase ):
__a : Union[str, Any] = 1
elif isinstance(__UpperCamelCase , __UpperCamelCase ):
__a : Tuple = len(__UpperCamelCase )
else:
raise ValueError(f"""`prompt` has to be of type `str` or `list` but is {type(__UpperCamelCase )}""" )
if height % 8 != 0 or width % 8 != 0:
raise ValueError(f"""`height` and `width` have to be divisible by 8 but are {height} and {width}.""" )
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(__UpperCamelCase , __UpperCamelCase ) or callback_steps <= 0)
):
raise ValueError(
f"""`callback_steps` has to be a positive integer but is {callback_steps} of type"""
f""" {type(__UpperCamelCase )}.""" )
# get prompt text embeddings
__a : Tuple = self.tokenizer(
__UpperCamelCase , padding="""max_length""" , max_length=self.tokenizer.model_max_length , return_tensors="""pt""" , )
__a : Union[str, Any] = text_inputs.input_ids
if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
__a : str = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
logger.warning(
"""The following part of your input was truncated because CLIP can only handle sequences up to"""
f""" {self.tokenizer.model_max_length} tokens: {removed_text}""" )
__a : Optional[int] = text_input_ids[:, : self.tokenizer.model_max_length]
if text_embeddings is None:
__a : int = self.text_encoder(text_input_ids.to(self.device ) )[0]
# duplicate text embeddings for each generation per prompt, using mps friendly method
__a , __a , __a : Union[str, Any] = text_embeddings.shape
__a : Optional[Any] = text_embeddings.repeat(1 , __UpperCamelCase , 1 )
__a : Union[str, Any] = text_embeddings.view(bs_embed * num_images_per_prompt , __UpperCamelCase , -1 )
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
__a : Any = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
__a : List[str]
if negative_prompt is None:
__a : Optional[Any] = [""""""]
elif type(__UpperCamelCase ) is not type(__UpperCamelCase ):
raise TypeError(
f"""`negative_prompt` should be the same type to `prompt`, but got {type(__UpperCamelCase )} !="""
f""" {type(__UpperCamelCase )}.""" )
elif isinstance(__UpperCamelCase , __UpperCamelCase ):
__a : Any = [negative_prompt]
elif batch_size != len(__UpperCamelCase ):
raise ValueError(
f"""`negative_prompt`: {negative_prompt} has batch size {len(__UpperCamelCase )}, but `prompt`:"""
f""" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"""
""" the batch size of `prompt`.""" )
else:
__a : Tuple = negative_prompt
__a : Any = text_input_ids.shape[-1]
__a : List[str] = self.tokenizer(
__UpperCamelCase , padding="""max_length""" , max_length=__UpperCamelCase , truncation=__UpperCamelCase , return_tensors="""pt""" , )
__a : str = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
__a : List[str] = uncond_embeddings.shape[1]
__a : List[Any] = uncond_embeddings.repeat(__UpperCamelCase , __UpperCamelCase , 1 )
__a : Tuple = uncond_embeddings.view(batch_size * num_images_per_prompt , __UpperCamelCase , -1 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
__a : List[Any] = torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
__a : Tuple = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
__a : List[Any] = (batch_size * num_images_per_prompt, self.unet.config.in_channels, 64, 64)
__a : int = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not exist on mps
__a : Any = torch.randn(
__UpperCamelCase , generator=__UpperCamelCase , device="""cpu""" , dtype=__UpperCamelCase ).to(self.device )
__a : Optional[Any] = torch.randn(__UpperCamelCase , generator=__UpperCamelCase , device="""cpu""" , dtype=__UpperCamelCase ).to(
self.device )
else:
__a : Optional[int] = torch.randn(
__UpperCamelCase , generator=__UpperCamelCase , device=self.device , dtype=__UpperCamelCase )
__a : str = torch.randn(__UpperCamelCase , generator=__UpperCamelCase , device=self.device , dtype=__UpperCamelCase )
else:
if latents_reference.shape != latents_shape:
raise ValueError(f"""Unexpected latents shape, got {latents.shape}, expected {latents_shape}""" )
__a : Optional[Any] = latents_reference.to(self.device )
__a : str = latents.to(self.device )
# This is the key part of the pipeline where we
# try to ensure that the generated images w/ the same seed
# but different sizes actually result in similar images
__a : List[str] = (latents_shape[3] - latents_shape_reference[3]) // 2
__a : int = (latents_shape[2] - latents_shape_reference[2]) // 2
__a : int = latents_shape_reference[3] if dx >= 0 else latents_shape_reference[3] + 2 * dx
__a : Tuple = latents_shape_reference[2] if dy >= 0 else latents_shape_reference[2] + 2 * dy
__a : Optional[Any] = 0 if dx < 0 else dx
__a : Optional[Any] = 0 if dy < 0 else dy
__a : Optional[int] = max(-dx , 0 )
__a : Optional[Any] = max(-dy , 0 )
__a : Optional[int] = latents_reference[:, :, dy : dy + h, dx : dx + w]
# set timesteps
self.scheduler.set_timesteps(__UpperCamelCase )
# Some schedulers like PNDM have timesteps as arrays
# It's more optimized to move all timesteps to correct device beforehand
__a : Dict = self.scheduler.timesteps.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
__a : Any = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
__a : List[Any] = """eta""" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
__a : Optional[Any] = {}
if accepts_eta:
__a : Union[str, Any] = eta
for i, t in enumerate(self.progress_bar(__UpperCamelCase ) ):
# expand the latents if we are doing classifier free guidance
__a : List[str] = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
__a : Tuple = self.scheduler.scale_model_input(__UpperCamelCase , __UpperCamelCase )
# predict the noise residual
__a : Union[str, Any] = self.unet(__UpperCamelCase , __UpperCamelCase , encoder_hidden_states=__UpperCamelCase ).sample
# perform guidance
if do_classifier_free_guidance:
__a , __a : List[str] = noise_pred.chunk(2 )
__a : Optional[int] = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# compute the previous noisy sample x_t -> x_t-1
__a : List[Any] = self.scheduler.step(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , **__UpperCamelCase ).prev_sample
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
__a : Optional[Any] = 1 / 0.1_8_2_1_5 * latents
__a : Optional[int] = self.vae.decode(__UpperCamelCase ).sample
__a : List[str] = (image / 2 + 0.5).clamp(0 , 1 )
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
__a : int = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if self.safety_checker is not None:
__a : List[str] = self.feature_extractor(self.numpy_to_pil(__UpperCamelCase ) , return_tensors="""pt""" ).to(
self.device )
__a , __a : int = self.safety_checker(
images=__UpperCamelCase , clip_input=safety_checker_input.pixel_values.to(text_embeddings.dtype ) )
else:
__a : Optional[int] = None
if output_type == "pil":
__a : str = self.numpy_to_pil(__UpperCamelCase )
if not return_dict:
return (image, has_nsfw_concept)
return StableDiffusionPipelineOutput(images=__UpperCamelCase , nsfw_content_detected=__UpperCamelCase ) | 697 | 1 |
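A minimal usage sketch for the pipeline above. The class name, the checkpoint id, and the latents_reference keyword are assumptions for illustration; per the comments in __call__, the goal is to keep renders of the same seed visually similar across output sizes by reusing reference-size latents.

import torch

# Hypothetical wiring; assumes the class above is importable as SeedResizePipeline.
pipe = SeedResizePipeline.from_pretrained("runwayml/stable-diffusion-v1-5").to("cuda")

# Fixed-seed noise at the pipeline's 64x64 reference latent size.
gen = torch.Generator(device="cuda").manual_seed(42)
ref_latents = torch.randn((1, pipe.unet.config.in_channels, 64, 64), generator=gen, device="cuda")

# Rendering other sizes against the same reference keeps the composition close.
image = pipe("a watercolor fox", height=768, width=512, latents_reference=ref_latents).images[0]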
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
__SCREAMING_SNAKE_CASE : Optional[Any] = {
'configuration_m2m_100': ['M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP', 'M2M100Config', 'M2M100OnnxConfig'],
'tokenization_m2m_100': ['M2M100Tokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE : List[Any] = [
'M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST',
'M2M100ForConditionalGeneration',
'M2M100Model',
'M2M100PreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_mam_aaa import M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP, MaMaaaConfig, MaMaaaOnnxConfig
from .tokenization_mam_aaa import MaMaaaTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mam_aaa import (
M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST,
MaMaaaForConditionalGeneration,
MaMaaaModel,
MaMaaaPreTrainedModel,
)
else:
import sys
__SCREAMING_SNAKE_CASE : List[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) | 697 |
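The row above follows the standard lazy-import layout: a submodule-to-names map, optional-dependency guards, eager imports only under TYPE_CHECKING, and a _LazyModule swapped into sys.modules at runtime. A minimal sketch of the same pattern for a hypothetical submodule (all names illustrative):

from typing import TYPE_CHECKING
from ...utils import _LazyModule

_import_structure = {"configuration_foo": ["FooConfig"]}

if TYPE_CHECKING:
    # Type checkers resolve the real symbols eagerly.
    from .configuration_foo import FooConfig
else:
    import sys

    # At runtime, attribute access on the module triggers the actual import.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)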
'''simple docstring'''
import numpy as np
from PIL import Image
def _snake_case ( lowercase , lowercase , lowercase ) -> np.ndarray:
__a : Any = np.array(lowercase )
if arr.shape[0] != arr.shape[1]:
raise ValueError("""The input array is not a square matrix""" )
__a : Union[str, Any] = 0
__a : Dict = 0
__a : Optional[Any] = 0
__a : Tuple = 0
# compute the shape of the output matrix
__a : Optional[int] = (arr.shape[0] - size) // stride + 1
# initialize the output matrix with zeros of shape maxpool_shape
__a : int = np.zeros((maxpool_shape, maxpool_shape) )
while i < arr.shape[0]:
if i + size > arr.shape[0]:
# if the end of the matrix is reached, break
break
while j < arr.shape[1]:
# if the end of the matrix is reached, break
if j + size > arr.shape[1]:
break
# compute the maximum of the pooling matrix
__a : Optional[Any] = np.max(arr[i : i + size, j : j + size] )
# shift the pooling matrix by stride of column pixels
j += stride
mat_j += 1
# shift the pooling matrix by stride of row pixels
i += stride
mat_i += 1
# reset the column index to 0
__a : Optional[Any] = 0
__a : str = 0
return updated_arr
def _snake_case ( lowercase , lowercase , lowercase ) -> np.ndarray:
__a : int = np.array(lowercase )
if arr.shape[0] != arr.shape[1]:
raise ValueError("""The input array is not a square matrix""" )
__a : int = 0
__a : Optional[Any] = 0
__a : str = 0
__a : List[Any] = 0
# compute the shape of the output matrix
__a : int = (arr.shape[0] - size) // stride + 1
# initialize the output matrix with zeros of shape avgpool_shape
__a : Optional[int] = np.zeros((avgpool_shape, avgpool_shape) )
while i < arr.shape[0]:
# if the end of the matrix is reached, break
if i + size > arr.shape[0]:
break
while j < arr.shape[1]:
# if the end of the matrix is reached, break
if j + size > arr.shape[1]:
break
# compute the average of the pooling matrix
__a : Any = int(np.average(arr[i : i + size, j : j + size] ) )
# shift the pooling matrix by stride of column pixels
j += stride
mat_j += 1
# shift the pooling matrix by stride of row pixels
i += stride
mat_i += 1
# reset the column index to 0
__a : str = 0
__a : List[Any] = 0
return updated_arr
# Main Function
if __name__ == "__main__":
from doctest import testmod
testmod(name='avgpooling', verbose=True)
# Loading the image
__SCREAMING_SNAKE_CASE : str = Image.open('path_to_image')
# Converting the image to numpy array and maxpooling, displaying the result
# Ensure that the image is a square matrix
Image.fromarray(maxpooling(np.array(image), size=3, stride=2)).show()
# Converting the image to numpy array and averagepooling, displaying the result
# Ensure that the image is a square matrix
Image.fromarray(avgpooling(np.array(image), size=3, stride=2)).show() | 697 | 1 |
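A worked example, assuming the defs above carry the signatures the __main__ block already uses (maxpooling(arr, size, stride) and avgpooling(arr, size, stride)); the expected outputs follow directly from the window arithmetic:

import numpy as np

arr = np.arange(1, 17).reshape(4, 4)      # 4x4 matrix containing 1..16
print(maxpooling(arr, size=2, stride=2))  # [[ 6.  8.] [14. 16.]]
print(avgpooling(arr, size=2, stride=2))  # [[ 3.  5.] [11. 13.]] (averages truncated by int())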
'''simple docstring'''
import argparse
import torch
from transformers import LxmertConfig, LxmertForPreTraining, load_tf_weights_in_lxmert
from transformers.utils import logging
logging.set_verbosity_info()
def _snake_case ( lowercase , lowercase , lowercase ) -> Optional[Any]:
# Initialise PyTorch model
__a : str = LxmertConfig.from_json_file(lowercase )
print(F"""Building PyTorch model from configuration: {config}""" )
__a : int = LxmertForPreTraining(lowercase )
# Load weights from tf checkpoint
load_tf_weights_in_lxmert(lowercase , lowercase , lowercase )
# Save pytorch-model
print(F"""Save PyTorch model to {pytorch_dump_path}""" )
torch.save(model.state_dict() , lowercase )
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE : int = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
        '--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint.'
)
parser.add_argument(
'--config_file',
default=None,
type=str,
required=True,
help='The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.',
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
__SCREAMING_SNAKE_CASE : str = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path) | 697 |
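A hedged direct call mirroring the __main__ invocation above; all paths are illustrative assumptions:

convert_tf_checkpoint_to_pytorch(
    "./lxmert_tf/model.ckpt",          # --tf_checkpoint_path
    "./lxmert_tf/lxmert_config.json",  # --config_file
    "./lxmert_pt/pytorch_model.bin",   # --pytorch_dump_path
)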
'''simple docstring'''
import qiskit
def _snake_case ( lowercase , lowercase ) -> qiskit.result.counts.Counts:
__a : Any = qiskit.Aer.get_backend("""aer_simulator""" )
# Create a Quantum Circuit acting on the q register
__a : str = qiskit.QuantumCircuit(lowercase , lowercase )
# Map the quantum measurement to the classical bits
circuit.measure([0] , [0] )
# Execute the circuit on the simulator
__a : Any = qiskit.execute(lowercase , lowercase , shots=1_0_0_0 )
# Return the histogram data of the results of the experiment.
return job.result().get_counts(lowercase )
if __name__ == "__main__":
print(f'''Total count for various states are: {single_qubit_measure(1, 1)}''') | 697 | 1 |
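For context, the circuit built above contains no gates, so on a noiseless simulator every shot should collapse to the ground state; a hedged check using the helper's own name:

counts = single_qubit_measure(1, 1)
print(counts)  # expected: {'0': 1000} -- |0> is never flipped before measurement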
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
__SCREAMING_SNAKE_CASE : List[str] = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE : Tuple = ['GPTSw3Tokenizer']
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_gpt_swa import GPTSwaTokenizer
else:
import sys
__SCREAMING_SNAKE_CASE : str = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) | 697 |
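Because of the guard above, the tokenizer is only importable when sentencepiece is installed. A hedged usage sketch; the checkpoint id is an illustrative assumption:

# Requires: pip install transformers sentencepiece
from transformers import GPTSw3Tokenizer

tok = GPTSw3Tokenizer.from_pretrained("AI-Sweden-Models/gpt-sw3-126m")  # id assumed
print(tok("Träd är fina", return_tensors="pt")["input_ids"])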
'''simple docstring'''
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
WavaVecaConformerConfig,
WavaVecaConformerForCTC,
WavaVecaConformerForPreTraining,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
__SCREAMING_SNAKE_CASE : str = logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE : Any = {
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.linear_k': 'encoder.layers.*.self_attn.linear_k',
'self_attn.linear_v': 'encoder.layers.*.self_attn.linear_v',
'self_attn.linear_q': 'encoder.layers.*.self_attn.linear_q',
'self_attn.pos_bias_u': 'encoder.layers.*.self_attn.pos_bias_u',
'self_attn.pos_bias_v': 'encoder.layers.*.self_attn.pos_bias_v',
'self_attn.linear_out': 'encoder.layers.*.self_attn.linear_out',
'self_attn.linear_pos': 'encoder.layers.*.self_attn.linear_pos',
'self_attn.rotary_emb': 'encoder.embed_positions',
'self_attn_layer_norm': 'encoder.layers.*.self_attn_layer_norm',
'conv_module.pointwise_conv1': 'encoder.layers.*.conv_module.pointwise_conv1',
'conv_module.pointwise_conv2': 'encoder.layers.*.conv_module.pointwise_conv2',
'conv_module.depthwise_conv': 'encoder.layers.*.conv_module.depthwise_conv',
'conv_module.batch_norm': 'encoder.layers.*.conv_module.batch_norm',
'conv_module.layer_norm': 'encoder.layers.*.conv_module.layer_norm',
'ffn1.w_1': 'encoder.layers.*.ffn1.intermediate_dense',
'ffn1.w_2': 'encoder.layers.*.ffn1.output_dense',
'ffn1.layer_norm': 'encoder.layers.*.ffn1_layer_norm',
'ffn2.w_1': 'encoder.layers.*.ffn2.intermediate_dense',
'ffn2.w_2': 'encoder.layers.*.ffn2.output_dense',
'ffn2.layer_norm': 'encoder.layers.*.ffn2_layer_norm',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'quantizer.weight_proj': 'quantizer.weight_proj',
'quantizer.vars': 'quantizer.codevectors',
'project_q': 'project_q',
'final_proj': 'project_hid',
'w2v_encoder.proj': 'lm_head',
'mask_emb': 'masked_spec_embed',
}
__SCREAMING_SNAKE_CASE : Optional[Any] = [
'lm_head',
'quantizer.weight_proj',
'quantizer.codevectors',
'project_q',
'project_hid',
]
def _snake_case ( lowercase , lowercase , lowercase , lowercase , lowercase ) -> List[Any]:
for attribute in key.split(""".""" ):
__a : str = getattr(lowercase , lowercase )
if weight_type is not None:
__a : Dict = getattr(lowercase , lowercase ).shape
else:
__a : Dict = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
F"""Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be"""
F""" {value.shape} for {full_name}""" )
if weight_type == "weight":
__a : Any = value
elif weight_type == "weight_g":
__a : int = value
elif weight_type == "weight_v":
__a : int = value
elif weight_type == "bias":
__a : List[Any] = value
elif weight_type == "running_mean":
__a : Union[str, Any] = value
elif weight_type == "running_var":
__a : Tuple = value
elif weight_type == "num_batches_tracked":
__a : Optional[int] = value
elif weight_type == "inv_freq":
__a : List[str] = value
else:
__a : List[str] = value
logger.info(F"""{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.""" )
def _snake_case ( lowercase , lowercase , lowercase ) -> Dict:
__a : Dict = []
__a : Dict = fairseq_model.state_dict()
__a : Tuple = hf_model.wavaveca_conformer.feature_extractor
for name, value in fairseq_dict.items():
__a : int = False
if "conv_layers" in name:
load_conv_layer(
lowercase , lowercase , lowercase , lowercase , hf_model.config.feat_extract_norm == """group""" , )
__a : List[Any] = True
else:
for key, mapped_key in MAPPING.items():
__a : Optional[int] = """wav2vec2_conformer.""" + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split("""w2v_model.""" )[-1] == name.split(""".""" )[0]:
__a : str = True
if "*" in mapped_key:
__a : Optional[int] = name.split(lowercase )[0].split(""".""" )[-2]
__a : List[Any] = mapped_key.replace("""*""" , lowercase )
if "pos_bias_u" in name:
__a : Union[str, Any] = None
elif "pos_bias_v" in name:
__a : List[Any] = None
elif "weight_g" in name:
__a : List[Any] = """weight_g"""
elif "weight_v" in name:
__a : List[Any] = """weight_v"""
elif "bias" in name:
__a : Optional[int] = """bias"""
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
__a : str = """weight"""
elif "running_mean" in name:
__a : List[str] = """running_mean"""
elif "inv_freq" in name:
__a : Dict = """inv_freq"""
elif "running_var" in name:
__a : Union[str, Any] = """running_var"""
elif "num_batches_tracked" in name:
__a : int = """num_batches_tracked"""
else:
__a : Optional[int] = None
set_recursively(lowercase , lowercase , lowercase , lowercase , lowercase )
continue
if not is_used:
unused_weights.append(lowercase )
logger.warning(F"""Unused weights: {unused_weights}""" )
def _snake_case ( lowercase , lowercase , lowercase , lowercase , lowercase ) -> List[str]:
__a : Optional[Any] = full_name.split("""conv_layers.""" )[-1]
__a : Union[str, Any] = name.split(""".""" )
__a : Optional[Any] = int(items[0] )
__a : int = int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" )
__a : Dict = value
logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" )
__a : str = value
logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.""" )
__a : Dict = value
logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.""" )
__a : Union[str, Any] = value
logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
else:
unused_weights.append(lowercase )
@torch.no_grad()
def _snake_case ( lowercase , lowercase , lowercase=None , lowercase=None , lowercase=True ) -> Optional[Any]:
if config_path is not None:
__a : Any = WavaVecaConformerConfig.from_pretrained(lowercase , hidden_act="""swish""" )
else:
__a : Optional[int] = WavaVecaConformerConfig()
if "rope" in checkpoint_path:
__a : Optional[Any] = """rotary"""
if is_finetuned:
if dict_path:
__a : List[Any] = Dictionary.load(lowercase )
# important change bos & pad token id since CTC symbol is <pad> and
# not <s> as in fairseq
__a : int = target_dict.pad_index
__a : List[str] = target_dict.bos_index
__a : str = target_dict.eos_index
__a : Dict = len(target_dict.symbols )
__a : Any = os.path.join(lowercase , """vocab.json""" )
if not os.path.isdir(lowercase ):
logger.error("""--pytorch_dump_folder_path ({}) should be a directory""".format(lowercase ) )
return
os.makedirs(lowercase , exist_ok=lowercase )
__a : Dict = target_dict.indices
# fairseq has the <pad> and <s> switched
__a : Optional[Any] = 0
__a : List[Any] = 1
with open(lowercase , """w""" , encoding="""utf-8""" ) as vocab_handle:
json.dump(lowercase , lowercase )
__a : int = WavaVecaCTCTokenizer(
lowercase , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token="""|""" , do_lower_case=lowercase , )
__a : Optional[int] = True if config.feat_extract_norm == """layer""" else False
__a : Dict = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=1_6_0_0_0 , padding_value=0 , do_normalize=lowercase , return_attention_mask=lowercase , )
__a : str = WavaVecaProcessor(feature_extractor=lowercase , tokenizer=lowercase )
processor.save_pretrained(lowercase )
__a : List[str] = WavaVecaConformerForCTC(lowercase )
else:
__a : Optional[int] = WavaVecaConformerForPreTraining(lowercase )
if is_finetuned:
__a , __a , __a : Dict = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={"""data""": """/""".join(dict_path.split("""/""" )[:-1] )} )
else:
__a : Optional[int] = argparse.Namespace(task="""audio_pretraining""" )
__a : Tuple = fairseq.tasks.setup_task(lowercase )
__a , __a , __a : int = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] , task=lowercase )
__a : Any = model[0].eval()
recursively_load_weights(lowercase , lowercase , not is_finetuned )
hf_wavavec.save_pretrained(lowercase )
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE : Dict = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--not_finetuned', action='store_true', help='Whether the model to convert is a fine-tuned model or not'
)
__SCREAMING_SNAKE_CASE : int = parser.parse_args()
convert_wavaveca_conformer_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
) | 697 | 1 |
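A hedged direct call mirroring the positional __main__ invocation above; the paths are illustrative assumptions:

convert_wavaveca_conformer_checkpoint(
    "./fairseq/wav2vec2_conformer_large.pt",  # args.checkpoint_path
    "./hf_conformer_dump",                    # args.pytorch_dump_folder_path
    None,                                     # args.config_path -> falls back to the default config
    "./fairseq/dict.ltr.txt",                 # args.dict_path, used when converting a fine-tuned model
    True,                                     # not args.not_finetuned -> fine-tuned CTC conversion
)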
'''simple docstring'''
from math import sqrt
def _snake_case ( lowercase ) -> bool:
assert isinstance(lowercase , lowercase ) and (
number >= 0
), "'number' must been an int and positive"
__a : Union[str, Any] = True
# 0 and 1 are none primes.
if number <= 1:
__a : Union[str, Any] = False
for divisor in range(2 , int(round(sqrt(lowercase ) ) ) + 1 ):
# if 'number' divisible by 'divisor' then sets 'status'
# of false and break up the loop.
if number % divisor == 0:
__a : str = False
break
# precondition
assert isinstance(lowercase , lowercase ), "'status' must been from type bool"
return status
def _snake_case ( lowercase ) -> int:
assert isinstance(lowercase , lowercase ) and (n > 2), "'N' must been an int and > 2"
# beginList: contains all natural numbers from 2 up to N
__a : List[str] = list(range(2 , n + 1 ) )
__a : int = [] # this list will be returns.
# actual sieve of erathostenes
for i in range(len(lowercase ) ):
for j in range(i + 1 , len(lowercase ) ):
if (begin_list[i] != 0) and (begin_list[j] % begin_list[i] == 0):
__a : Optional[int] = 0
# filters actual prime numbers.
__a : Tuple = [x for x in begin_list if x != 0]
# precondition
assert isinstance(lowercase , lowercase ), "'ans' must been from type list"
return ans
def _snake_case ( lowercase ) -> Optional[Any]:
assert isinstance(lowercase , lowercase ) and (n > 2), "'N' must been an int and > 2"
__a : Optional[Any] = []
# iterates over all numbers between 2 up to N+1
# if a number is prime then appends to list 'ans'
for number in range(2 , n + 1 ):
if is_prime(lowercase ):
ans.append(lowercase )
# precondition
assert isinstance(lowercase , lowercase ), "'ans' must been from type list"
return ans
def _snake_case ( lowercase ) -> Optional[Any]:
assert isinstance(lowercase , lowercase ) and number >= 0, "'number' must been an int and >= 0"
__a : List[Any] = [] # this list will be returns of the function.
# potential prime number factors.
__a : str = 2
__a : int = number
if number == 0 or number == 1:
ans.append(lowercase )
# if 'number' not prime then builds the prime factorization of 'number'
elif not is_prime(lowercase ):
while quotient != 1:
if is_prime(lowercase ) and (quotient % factor == 0):
ans.append(lowercase )
quotient /= factor
else:
factor += 1
else:
ans.append(lowercase )
# precondition
assert isinstance(lowercase , lowercase ), "'ans' must been from type list"
return ans
def _snake_case ( lowercase ) -> int:
assert isinstance(lowercase , lowercase ) and (
number >= 0
), "'number' bust been an int and >= 0"
__a : Tuple = 0
# prime factorization of 'number'
__a : int = prime_factorization(lowercase )
__a : List[Any] = max(lowercase )
# precondition
assert isinstance(lowercase , lowercase ), "'ans' must been from type int"
return ans
def _snake_case ( lowercase ) -> Optional[int]:
assert isinstance(lowercase , lowercase ) and (
number >= 0
), "'number' bust been an int and >= 0"
__a : Optional[Any] = 0
# prime factorization of 'number'
__a : List[Any] = prime_factorization(lowercase )
__a : Optional[int] = min(lowercase )
# precondition
assert isinstance(lowercase , lowercase ), "'ans' must been from type int"
return ans
def _snake_case ( lowercase ) -> List[str]:
assert isinstance(lowercase , lowercase ), "'number' must been an int"
    assert isinstance(number % 2 == 0 , lowercase ), "compare must been from type bool"
return number % 2 == 0
def _snake_case ( lowercase ) -> List[Any]:
assert isinstance(lowercase , lowercase ), "'number' must been an int"
    assert isinstance(number % 2 != 0 , lowercase ), "compare must been from type bool"
return number % 2 != 0
def _snake_case ( lowercase ) -> int:
assert (
isinstance(lowercase , lowercase ) and (number > 2) and is_even(lowercase )
), "'number' must been an int, even and > 2"
__a : List[str] = [] # this list will returned
# creates a list of prime numbers between 2 up to 'number'
__a : List[Any] = get_prime_numbers(lowercase )
__a : List[Any] = len(lowercase )
# run variable for while-loops.
__a : Union[str, Any] = 0
__a : Optional[Any] = None
# exit variable. for break up the loops
__a : Optional[Any] = True
while i < len_pn and loop:
__a : Optional[int] = i + 1
while j < len_pn and loop:
if prime_numbers[i] + prime_numbers[j] == number:
__a : int = False
ans.append(prime_numbers[i] )
ans.append(prime_numbers[j] )
j += 1
i += 1
# precondition
assert (
isinstance(lowercase , lowercase )
and (len(lowercase ) == 2)
and (ans[0] + ans[1] == number)
and is_prime(ans[0] )
and is_prime(ans[1] )
), "'ans' must contains two primes. And sum of elements must been eq 'number'"
return ans
def _snake_case ( lowercase , lowercase ) -> str:
assert (
isinstance(lowercase , lowercase )
and isinstance(lowercase , lowercase )
and (numbera >= 0)
and (numbera >= 0)
), "'number1' and 'number2' must been positive integer."
__a : Optional[int] = 0
while numbera != 0:
__a : Union[str, Any] = numbera % numbera
__a : List[str] = numbera
__a : List[Any] = rest
# precondition
assert isinstance(lowercase , lowercase ) and (
numbera >= 0
), "'number' must been from type int and positive"
return numbera
def _snake_case ( lowercase , lowercase ) -> List[Any]:
assert (
isinstance(lowercase , lowercase )
and isinstance(lowercase , lowercase )
and (numbera >= 1)
and (numbera >= 1)
), "'number1' and 'number2' must been positive integer."
__a : Dict = 1 # actual answer that will be return.
# for kgV (x,1)
if numbera > 1 and numbera > 1:
# builds the prime factorization of 'number1' and 'number2'
__a : Optional[Any] = prime_factorization(lowercase )
__a : List[str] = prime_factorization(lowercase )
elif numbera == 1 or numbera == 1:
__a : Optional[int] = []
__a : List[str] = []
__a : Union[str, Any] = max(lowercase , lowercase )
__a : str = 0
__a : int = 0
__a : List[str] = [] # captured numbers int both 'primeFac1' and 'primeFac2'
# iterates through primeFac1
for n in prime_fac_a:
if n not in done:
if n in prime_fac_a:
__a : List[Any] = prime_fac_a.count(lowercase )
__a : List[str] = prime_fac_a.count(lowercase )
for _ in range(max(lowercase , lowercase ) ):
ans *= n
else:
__a : int = prime_fac_a.count(lowercase )
for _ in range(lowercase ):
ans *= n
done.append(lowercase )
# iterates through primeFac2
for n in prime_fac_a:
if n not in done:
__a : Tuple = prime_fac_a.count(lowercase )
for _ in range(lowercase ):
ans *= n
done.append(lowercase )
# precondition
assert isinstance(lowercase , lowercase ) and (
ans >= 0
), "'ans' must been from type int and positive"
return ans
def _snake_case ( lowercase ) -> int:
assert isinstance(lowercase , lowercase ) and (n >= 0), "'number' must been a positive int"
__a : Optional[Any] = 0
__a : Union[str, Any] = 2 # this variable holds the answer
while index < n:
index += 1
ans += 1 # counts to the next number
# if ans not prime then
# runs to the next prime number.
while not is_prime(lowercase ):
ans += 1
# precondition
assert isinstance(lowercase , lowercase ) and is_prime(
lowercase ), "'ans' must been a prime number and from type int"
return ans
def _snake_case ( lowercase , lowercase ) -> List[str]:
assert (
is_prime(lowercase ) and is_prime(lowercase ) and (p_number_a < p_number_a)
), "The arguments must been prime numbers and 'pNumber1' < 'pNumber2'"
__a : List[Any] = p_number_a + 1 # jump to the next number
__a : Dict = [] # this list will be returns.
# if number is not prime then
# fetch the next prime number.
while not is_prime(lowercase ):
number += 1
while number < p_number_a:
ans.append(lowercase )
number += 1
# fetch the next prime number.
while not is_prime(lowercase ):
number += 1
# precondition
assert (
isinstance(lowercase , lowercase )
and ans[0] != p_number_a
and ans[len(lowercase ) - 1] != p_number_a
), "'ans' must been a list without the arguments"
# 'ans' contains not 'pNumber1' and 'pNumber2' !
return ans
def _snake_case ( lowercase ) -> Any:
assert isinstance(lowercase , lowercase ) and (n >= 1), "'n' must been int and >= 1"
__a : Any = [] # will be returned.
for divisor in range(1 , n + 1 ):
if n % divisor == 0:
ans.append(lowercase )
# precondition
    assert ans[0] == 1 and ans[len(lowercase ) - 1] == n, "Error in function getDivisors(...)"
return ans
def _snake_case ( lowercase ) -> str:
assert isinstance(lowercase , lowercase ) and (
number > 1
), "'number' must been an int and >= 1"
__a : Optional[Any] = get_divisors(lowercase )
# precondition
assert (
isinstance(lowercase , lowercase )
and (divisors[0] == 1)
and (divisors[len(lowercase ) - 1] == number)
), "Error in help-function getDivisiors(...)"
# summed all divisors up to 'number' (exclusive), hence [:-1]
return sum(divisors[:-1] ) == number
def _snake_case ( lowercase , lowercase ) -> str:
assert (
isinstance(lowercase , lowercase )
and isinstance(lowercase , lowercase )
and (denominator != 0)
), "The arguments must been from type int and 'denominator' != 0"
# build the greatest common divisor of numerator and denominator.
__a : List[str] = gcd(abs(lowercase ) , abs(lowercase ) )
# precondition
assert (
isinstance(lowercase , lowercase )
and (numerator % gcd_of_fraction == 0)
and (denominator % gcd_of_fraction == 0)
), "Error in function gcd(...,...)"
return (numerator // gcd_of_fraction, denominator // gcd_of_fraction)
def _snake_case ( lowercase ) -> Any:
assert isinstance(lowercase , lowercase ) and (n >= 0), "'n' must been a int and >= 0"
__a : Dict = 1 # this will be return.
for factor in range(1 , n + 1 ):
ans *= factor
return ans
def _snake_case ( lowercase ) -> Dict:
assert isinstance(lowercase , lowercase ) and (n >= 0), "'n' must been an int and >= 0"
__a : Any = 0
__a : Union[str, Any] = 1
__a : Optional[int] = 1 # this will be return
for _ in range(n - 1 ):
__a : int = ans
ans += fiba
__a : Tuple = tmp
return ans | 697 |
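A few spot checks of the helpers above, assuming primelib-style names (is_prime, sieve_er, prime_factorization, goldbach, kg_v, fib) since the defs in this row are all renamed; the expected values follow from the algorithms as written:

assert is_prime(97) and not is_prime(1)              # 0 and 1 are non-primes by the early exit
assert sieve_er(20) == [2, 3, 5, 7, 11, 13, 17, 19]  # sieve over the list 2..N inclusive
assert prime_factorization(360) == [2, 2, 2, 3, 3, 5]
assert goldbach(28) == [5, 23]                       # first prime pair found by the double loop
assert kg_v(24, 36) == 72                            # LCM built from max prime exponents
assert fib(5) == 8                                   # this variant starts 1, 2, 3, 5, 8, ...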
'''simple docstring'''
import warnings
from functools import wraps
from typing import Callable
def _snake_case ( lowercase ) -> Callable:
@wraps(lowercase )
def _inner_fn(*lowercase , **lowercase ):
warnings.warn(
(F"""'{fn.__name__}' is experimental and might be subject to breaking changes in the future.""") , lowercase , )
return fn(*lowercase , **lowercase )
return _inner_fn | 697 | 1 |
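A usage sketch for the decorator above, assuming it is exposed under a name like experimental (the row's own def is renamed); the warning text comes from the f-string in the wrapper:

@experimental
def resolve_chat_template(name):  # hypothetical function for illustration
    return name.lower()

# Calling it emits: "'resolve_chat_template' is experimental and might be
# subject to breaking changes in the future."
resolve_chat_template("Default")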
'''simple docstring'''
from typing import List
import jiwer
import jiwer.transforms as tr
from packaging import version
import datasets
from datasets.config import PY_VERSION
if PY_VERSION < version.parse('3.8'):
import importlib_metadata
else:
import importlib.metadata as importlib_metadata
__SCREAMING_SNAKE_CASE : List[str] = ''
if version.parse(importlib_metadata.version('jiwer')) < version.parse('2.3.0'):
class SCREAMING_SNAKE_CASE__ ( tr.AbstractTransform ):
def __init__( self , __UpperCamelCase = " " ):
'''simple docstring'''
__a : Optional[int] = sentence_delimiter
def __lowerCamelCase ( self , __UpperCamelCase ):
'''simple docstring'''
return list(__UpperCamelCase )
def __lowerCamelCase ( self , __UpperCamelCase ):
'''simple docstring'''
__a : Union[str, Any] = []
for sent_idx, sentence in enumerate(__UpperCamelCase ):
chars.extend(self.process_string(__UpperCamelCase ) )
if self.sentence_delimiter is not None and self.sentence_delimiter != "" and sent_idx < len(__UpperCamelCase ) - 1:
chars.append(self.sentence_delimiter )
return chars
__SCREAMING_SNAKE_CASE : Tuple = tr.Compose(
[tr.RemoveMultipleSpaces(), tr.Strip(), SentencesToListOfCharacters(SENTENCE_DELIMITER)]
)
else:
__SCREAMING_SNAKE_CASE : Dict = tr.Compose(
[
tr.RemoveMultipleSpaces(),
tr.Strip(),
tr.ReduceToSingleSentence(SENTENCE_DELIMITER),
tr.ReduceToListOfListOfChars(),
]
)
__SCREAMING_SNAKE_CASE : List[str] = '\\n@inproceedings{inproceedings,\n author = {Morris, Andrew and Maier, Viktoria and Green, Phil},\n year = {2004},\n month = {01},\n pages = {},\n title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}\n}\n'
__SCREAMING_SNAKE_CASE : str = '\\nCharacter error rate (CER) is a common metric of the performance of an automatic speech recognition system.\n\nCER is similar to Word Error Rate (WER), but operates on character instead of word. Please refer to docs of WER for further information.\n\nCharacter error rate can be computed as:\n\nCER = (S + D + I) / N = (S + D + I) / (S + D + C)\n\nwhere\n\nS is the number of substitutions,\nD is the number of deletions,\nI is the number of insertions,\nC is the number of correct characters,\nN is the number of characters in the reference (N=S+D+C).\n\nCER\'s output is not always a number between 0 and 1, in particular when there is a high number of insertions. This value is often associated to the percentage of characters that were incorrectly predicted. The lower the value, the better the\nperformance of the ASR system with a CER of 0 being a perfect score.\n'
__SCREAMING_SNAKE_CASE : Tuple = '\nComputes CER score of transcribed segments against references.\nArgs:\n references: list of references for each speech input.\n predictions: list of transcribtions to score.\n concatenate_texts: Whether or not to concatenate sentences before evaluation, set to True for more accurate result.\nReturns:\n (float): the character error rate\n\nExamples:\n\n >>> predictions = ["this is the prediction", "there is an other sample"]\n >>> references = ["this is the reference", "there is another one"]\n >>> cer = datasets.load_metric("cer")\n >>> cer_score = cer.compute(predictions=predictions, references=references)\n >>> print(cer_score)\n 0.34146341463414637\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class SCREAMING_SNAKE_CASE__ ( datasets.Metric ):
def __lowerCamelCase ( self ):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""string""" , id="""sequence""" ),
"""references""": datasets.Value("""string""" , id="""sequence""" ),
} ) , codebase_urls=["""https://github.com/jitsi/jiwer/"""] , reference_urls=[
"""https://en.wikipedia.org/wiki/Word_error_rate""",
"""https://sites.google.com/site/textdigitisation/qualitymeasures/computingerrorrates""",
] , )
def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase=False ):
'''simple docstring'''
if concatenate_texts:
return jiwer.compute_measures(
__UpperCamelCase , __UpperCamelCase , truth_transform=__UpperCamelCase , hypothesis_transform=__UpperCamelCase , )["wer"]
__a : Any = 0
__a : Tuple = 0
for prediction, reference in zip(__UpperCamelCase , __UpperCamelCase ):
__a : str = jiwer.compute_measures(
__UpperCamelCase , __UpperCamelCase , truth_transform=__UpperCamelCase , hypothesis_transform=__UpperCamelCase , )
incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
total += measures["substitutions"] + measures["deletions"] + measures["hits"]
return incorrect / total | 697 |
'''simple docstring'''
from typing import List, Optional, Union
import numpy as np
from ....audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function
from ....feature_extraction_sequence_utils import SequenceFeatureExtractor
from ....feature_extraction_utils import BatchFeature
from ....file_utils import PaddingStrategy, TensorType
from ....utils import logging
__SCREAMING_SNAKE_CASE : Tuple = logging.get_logger(__name__)
class SCREAMING_SNAKE_CASE__ ( __UpperCamelCase ):
lowercase__ = ["input_features", "attention_mask"]
def __init__( self , __UpperCamelCase=80 , __UpperCamelCase=1_6000 , __UpperCamelCase=0.0 , __UpperCamelCase=10 , __UpperCamelCase=25 , __UpperCamelCase="hamming_window" , __UpperCamelCase=3_2_7_6_8.0 , __UpperCamelCase=0.9_7 , __UpperCamelCase=1.0 , __UpperCamelCase=True , __UpperCamelCase=True , __UpperCamelCase=False , **__UpperCamelCase , ):
'''simple docstring'''
super().__init__(feature_size=__UpperCamelCase , sampling_rate=__UpperCamelCase , padding_value=__UpperCamelCase , **__UpperCamelCase )
__a : List[str] = feature_size
__a : List[str] = sampling_rate
__a : int = padding_value
__a : Any = hop_length
__a : int = win_length
__a : Tuple = frame_signal_scale
__a : Union[str, Any] = preemphasis_coeff
__a : List[str] = mel_floor
__a : Union[str, Any] = normalize_means
__a : Optional[Any] = normalize_vars
__a : Optional[Any] = win_function
__a : Union[str, Any] = return_attention_mask
__a : List[Any] = win_length * sampling_rate // 1000
__a : List[Any] = hop_length * sampling_rate // 1000
__a : Optional[Any] = optimal_fft_length(self.sample_size )
__a : Any = (self.n_fft // 2) + 1
def __lowerCamelCase ( self , __UpperCamelCase ):
'''simple docstring'''
if self.win_function == "hamming_window":
__a : str = window_function(window_length=self.sample_size , name=self.win_function , periodic=__UpperCamelCase )
else:
__a : Dict = window_function(window_length=self.sample_size , name=self.win_function )
__a : Optional[Any] = mel_filter_bank(
num_frequency_bins=self.n_freqs , num_mel_filters=self.feature_size , min_frequency=0.0 , max_frequency=self.sampling_rate / 2.0 , sampling_rate=self.sampling_rate , )
__a : Any = spectrogram(
one_waveform * self.frame_signal_scale , window=__UpperCamelCase , frame_length=self.sample_size , hop_length=self.sample_stride , fft_length=self.n_fft , center=__UpperCamelCase , preemphasis=self.preemphasis_coeff , mel_filters=__UpperCamelCase , mel_floor=self.mel_floor , log_mel="""log""" , )
return msfc_features.T
def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ):
'''simple docstring'''
if self.normalize_means:
__a : int = x[:input_length].mean(axis=0 )
__a : str = np.subtract(__UpperCamelCase , __UpperCamelCase )
if self.normalize_vars:
__a : Dict = x[:input_length].std(axis=0 )
__a : Dict = np.divide(__UpperCamelCase , __UpperCamelCase )
if input_length < x.shape[0]:
__a : Union[str, Any] = padding_value
# make sure array is in float32
__a : Any = x.astype(np.floataa )
return x
def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase = None ):
'''simple docstring'''
__a : Tuple = attention_mask.sum(-1 ) if attention_mask is not None else [x.shape[0] for x in input_features]
return [self._normalize_one(__UpperCamelCase , __UpperCamelCase , self.padding_value ) for x, n in zip(__UpperCamelCase , __UpperCamelCase )]
def __call__( self , __UpperCamelCase , __UpperCamelCase = False , __UpperCamelCase = None , __UpperCamelCase = False , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , **__UpperCamelCase , ):
'''simple docstring'''
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
f"""The model corresponding to this feature extractor: {self} was trained using a sampling rate of"""
f""" {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with"""
f""" {self.sampling_rate} and not {sampling_rate}.""" )
else:
logger.warning(
"""It is strongly recommended to pass the ``sampling_rate`` argument to this function. """
"""Failing to do so can result in silent errors that might be hard to debug.""" )
__a : Tuple = isinstance(__UpperCamelCase , np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(f"""Only mono-channel audio is supported for input to {self}""" )
__a : Tuple = is_batched_numpy or (
isinstance(__UpperCamelCase , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
__a : Tuple = [np.asarray(__UpperCamelCase , dtype=np.floataa ) for speech in raw_speech]
elif not is_batched and not isinstance(__UpperCamelCase , np.ndarray ):
__a : List[str] = np.asarray(__UpperCamelCase , dtype=np.floataa )
elif isinstance(__UpperCamelCase , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
__a : str = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
__a : Any = [raw_speech]
# extract fbank features
__a : str = [self._extract_mfsc_features(__UpperCamelCase ) for one_waveform in raw_speech]
# convert into correct format for padding
__a : Optional[Any] = BatchFeature({"""input_features""": features} )
__a : Any = self.pad(
__UpperCamelCase , padding=__UpperCamelCase , max_length=__UpperCamelCase , truncation=__UpperCamelCase , pad_to_multiple_of=__UpperCamelCase , return_attention_mask=__UpperCamelCase , **__UpperCamelCase , )
# make sure list is in array format
__a : int = padded_inputs.get("""input_features""" )
if isinstance(input_features[0] , __UpperCamelCase ):
__a : Union[str, Any] = [np.asarray(__UpperCamelCase , dtype=np.floataa ) for feature in input_features]
__a : List[str] = padded_inputs.get("""attention_mask""" )
if attention_mask is not None:
__a : Optional[int] = [np.asarray(__UpperCamelCase , dtype=np.intaa ) for array in attention_mask]
if self.normalize_means or self.normalize_vars:
__a : Optional[Any] = (
np.array(__UpperCamelCase , dtype=np.intaa )
if self._get_padding_strategies(__UpperCamelCase , max_length=__UpperCamelCase ) is not PaddingStrategy.DO_NOT_PAD
and padding
else None
)
__a : int = self.normalize(
padded_inputs["""input_features"""] , attention_mask=__UpperCamelCase )
if return_tensors is not None:
__a : List[Any] = padded_inputs.convert_to_tensors(__UpperCamelCase )
return padded_inputs | 697 | 1 |
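A hedged sketch of running the extractor above on raw audio; the class name is assumed for illustration, while the defaults (80 mel bins at 16 kHz, 25 ms windows, 10 ms hops) come from __init__:

import numpy as np

fe = MCTCTFeatureExtractor()  # name assumed for illustration
speech = np.zeros(16000, dtype=np.float32)  # 1 s of silent mono audio
batch = fe(speech, sampling_rate=16000, padding=True, return_tensors="np")
print(batch["input_features"].shape)  # (1, num_frames, 80)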
'''simple docstring'''
import json
import os
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union
import sentencepiece
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
__SCREAMING_SNAKE_CASE : int = logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE : Union[str, Any] = '▁'
__SCREAMING_SNAKE_CASE : Tuple = {
'vocab_file': 'vocab.json',
'spm_file': 'sentencepiece.bpe.model',
}
__SCREAMING_SNAKE_CASE : Optional[int] = {
'vocab_file': {
'facebook/s2t-small-librispeech-asr': (
'https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/vocab.json'
),
},
'spm_file': {
'facebook/s2t-small-librispeech-asr': (
'https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/sentencepiece.bpe.model'
)
},
}
__SCREAMING_SNAKE_CASE : Union[str, Any] = {
'facebook/s2t-small-librispeech-asr': 1_024,
}
__SCREAMING_SNAKE_CASE : List[str] = ['pt', 'fr', 'ru', 'nl', 'ro', 'it', 'es', 'de']
__SCREAMING_SNAKE_CASE : Any = {'mustc': MUSTC_LANGS}
class SCREAMING_SNAKE_CASE__ ( __UpperCamelCase ):
lowercase__ = VOCAB_FILES_NAMES
lowercase__ = PRETRAINED_VOCAB_FILES_MAP
lowercase__ = MAX_MODEL_INPUT_SIZES
lowercase__ = ["input_ids", "attention_mask"]
lowercase__ = []
def __init__( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase="<s>" , __UpperCamelCase="</s>" , __UpperCamelCase="<pad>" , __UpperCamelCase="<unk>" , __UpperCamelCase=False , __UpperCamelCase=False , __UpperCamelCase=None , __UpperCamelCase=None , __UpperCamelCase = None , **__UpperCamelCase , ):
'''simple docstring'''
__a : Tuple = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=__UpperCamelCase , eos_token=__UpperCamelCase , unk_token=__UpperCamelCase , pad_token=__UpperCamelCase , do_upper_case=__UpperCamelCase , do_lower_case=__UpperCamelCase , tgt_lang=__UpperCamelCase , lang_codes=__UpperCamelCase , sp_model_kwargs=self.sp_model_kwargs , **__UpperCamelCase , )
__a : int = do_upper_case
__a : int = do_lower_case
__a : Union[str, Any] = load_json(__UpperCamelCase )
__a : Optional[Any] = {v: k for k, v in self.encoder.items()}
__a : Union[str, Any] = spm_file
__a : int = load_spm(__UpperCamelCase , self.sp_model_kwargs )
if lang_codes is not None:
__a : List[Any] = lang_codes
__a : List[str] = LANGUAGES[lang_codes]
__a : int = [f"""<lang:{lang}>""" for lang in self.langs]
__a : Optional[Any] = {lang: self.sp_model.PieceToId(f"""<lang:{lang}>""" ) for lang in self.langs}
__a : str = self.lang_tokens
__a : int = tgt_lang if tgt_lang is not None else self.langs[0]
self.set_tgt_lang_special_tokens(self._tgt_lang )
else:
__a : str = {}
@property
def __lowerCamelCase ( self ):
'''simple docstring'''
return len(self.encoder )
@property
def __lowerCamelCase ( self ):
'''simple docstring'''
return self._tgt_lang
@tgt_lang.setter
def __lowerCamelCase ( self , __UpperCamelCase ):
'''simple docstring'''
__a : Union[str, Any] = new_tgt_lang
self.set_tgt_lang_special_tokens(__UpperCamelCase )
def __lowerCamelCase ( self , __UpperCamelCase ):
'''simple docstring'''
__a : Optional[Any] = self.lang_code_to_id[tgt_lang]
__a : Dict = [lang_code_id]
def __lowerCamelCase ( self , __UpperCamelCase ):
'''simple docstring'''
return self.sp_model.encode(__UpperCamelCase , out_type=__UpperCamelCase )
def __lowerCamelCase ( self , __UpperCamelCase ):
'''simple docstring'''
return self.encoder.get(__UpperCamelCase , self.encoder[self.unk_token] )
def __lowerCamelCase ( self , __UpperCamelCase ):
'''simple docstring'''
return self.decoder.get(__UpperCamelCase , self.unk_token )
def __lowerCamelCase ( self , __UpperCamelCase ):
'''simple docstring'''
__a : str = []
__a : Any = """"""
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
__a : str = self.sp_model.decode(__UpperCamelCase )
out_string += (decoded.upper() if self.do_upper_case else decoded) + token + " "
__a : Dict = []
else:
current_sub_tokens.append(__UpperCamelCase )
__a : Dict = self.sp_model.decode(__UpperCamelCase )
out_string += decoded.upper() if self.do_upper_case else decoded
return out_string.strip()
def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase=None ):
'''simple docstring'''
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + [self.eos_token_id]
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + [self.eos_token_id]
def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase = None , __UpperCamelCase = False ):
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__UpperCamelCase , token_ids_a=__UpperCamelCase , already_has_special_tokens=__UpperCamelCase )
__a : Union[str, Any] = [1] * len(self.prefix_tokens )
__a : int = [1]
if token_ids_a is None:
return prefix_ones + ([0] * len(__UpperCamelCase )) + suffix_ones
return prefix_ones + ([0] * len(__UpperCamelCase )) + ([0] * len(__UpperCamelCase )) + suffix_ones
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Any = self.encoder.copy()
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ):
'''simple docstring'''
__a : int = self.__dict__.copy()
__a : Tuple = None
return state
def __setstate__( self , __UpperCamelCase ):
'''simple docstring'''
__a : Optional[Any] = d
# for backward compatibility
if not hasattr(self , """sp_model_kwargs""" ):
__a : Any = {}
__a : Any = load_spm(self.spm_file , self.sp_model_kwargs )
def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase = None ):
'''simple docstring'''
__a : Dict = Path(__UpperCamelCase )
assert save_dir.is_dir(), f"""{save_directory} should be a directory"""
__a : Union[str, Any] = save_dir / (
(filename_prefix + """-""" if filename_prefix else """""") + self.vocab_files_names["""vocab_file"""]
)
__a : Tuple = save_dir / (
(filename_prefix + """-""" if filename_prefix else """""") + self.vocab_files_names["""spm_file"""]
)
save_json(self.encoder , __UpperCamelCase )
if os.path.abspath(self.spm_file ) != os.path.abspath(__UpperCamelCase ) and os.path.isfile(self.spm_file ):
copyfile(self.spm_file , __UpperCamelCase )
elif not os.path.isfile(self.spm_file ):
with open(__UpperCamelCase , """wb""" ) as fi:
__a : int = self.sp_model.serialized_model_proto()
fi.write(__UpperCamelCase )
return (str(__UpperCamelCase ), str(__UpperCamelCase ))
def _snake_case ( lowercase , lowercase ) -> sentencepiece.SentencePieceProcessor:
__a : Union[str, Any] = sentencepiece.SentencePieceProcessor(**lowercase )
spm.Load(str(lowercase ) )
return spm
def _snake_case ( lowercase ) -> Union[Dict, List]:
with open(lowercase , """r""" ) as f:
return json.load(lowercase )
def _snake_case ( lowercase , lowercase ) -> None:
with open(lowercase , """w""" ) as f:
json.dump(lowercase , lowercase , indent=2 )
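# Hedged usage sketch of the tokenizer above via the public API (downloads the
# checkpoint on first use; exact ids depend on the sentencepiece model):
#
#   from transformers import Speech2TextTokenizer
#   tok = Speech2TextTokenizer.from_pretrained("facebook/s2t-small-librispeech-asr")
#   ids = tok("hello world").input_ids    # pieces -> ids, EOS appended
#   print(tok.decode(ids, skip_special_tokens=True))
#
# And the prefix/EOS attachment in isolation (toy re-statement, not the real class):
def build_inputs_with_lang_prefix(lang_code_id, token_ids, eos_token_id=2):
    return [lang_code_id] + token_ids + [eos_token_id]

print(build_inputs_with_lang_prefix(10, [5, 6, 7]))  # [10, 5, 6, 7, 2]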
'''simple docstring'''
__SCREAMING_SNAKE_CASE : int = 9.80_665
def _snake_case ( lowercase , lowercase , lowercase = g ) -> float:
if fluid_density <= 0:
raise ValueError("""Impossible fluid density""" )
if volume < 0:
raise ValueError("""Impossible Object volume""" )
if gravity <= 0:
raise ValueError("""Impossible Gravity""" )
return fluid_density * gravity * volume
if __name__ == "__main__":
import doctest
# run doctest
doctest.testmod()
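# Worked example of the formula above (F = rho * g * V), standalone so it runs
# on its own; values assumed for illustration: fresh water rho = 997 kg/m^3,
# fully submerged volume = 0.5 m^3.
GRAVITY = 9.80_665

def buoyant_force(fluid_density: float, volume: float, gravity: float = GRAVITY) -> float:
    return fluid_density * gravity * volume

print(f"{buoyant_force(997, 0.5):.1f} N")  # ~4888.6 N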
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
__SCREAMING_SNAKE_CASE : Union[str, Any] = {
'configuration_transfo_xl': ['TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP', 'TransfoXLConfig'],
'tokenization_transfo_xl': ['TransfoXLCorpus', 'TransfoXLTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE : Optional[int] = [
'TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST',
'AdaptiveEmbedding',
'TransfoXLForSequenceClassification',
'TransfoXLLMHeadModel',
'TransfoXLModel',
'TransfoXLPreTrainedModel',
'load_tf_weights_in_transfo_xl',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE : List[Any] = [
'TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFAdaptiveEmbedding',
'TFTransfoXLForSequenceClassification',
'TFTransfoXLLMHeadModel',
'TFTransfoXLMainLayer',
'TFTransfoXLModel',
'TFTransfoXLPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_transfo_xl import TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP, TransfoXLConfig
from .tokenization_transfo_xl import TransfoXLCorpus, TransfoXLTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_transfo_xl import (
TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
AdaptiveEmbedding,
TransfoXLForSequenceClassification,
TransfoXLLMHeadModel,
TransfoXLModel,
TransfoXLPreTrainedModel,
load_tf_weights_in_transfo_xl,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_transfo_xl import (
TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFAdaptiveEmbedding,
TFTransfoXLForSequenceClassification,
TFTransfoXLLMHeadModel,
TFTransfoXLMainLayer,
TFTransfoXLModel,
TFTransfoXLPreTrainedModel,
)
else:
import sys
__SCREAMING_SNAKE_CASE : Optional[int] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
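# A minimal sketch of the lazy-import pattern used above (assumption: greatly
# simplified relative to transformers' _LazyModule): attribute access triggers
# the actual submodule import, so heavy backends load only when first needed.
import importlib
import types

class TinyLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # map each exported symbol back to the submodule that defines it
        self._attr_to_submodule = {
            attr: submodule for submodule, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr):
        if attr not in self._attr_to_submodule:
            raise AttributeError(attr)
        module = importlib.import_module("." + self._attr_to_submodule[attr], self.__name__)
        return getattr(module, attr)

# Typical wiring (as in the module above):
#   sys.modules[__name__] = TinyLazyModule(__name__, _import_structure)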
'''simple docstring'''
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DetrImageProcessor
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
def __init__( self , __UpperCamelCase , __UpperCamelCase=7 , __UpperCamelCase=3 , __UpperCamelCase=30 , __UpperCamelCase=400 , __UpperCamelCase=True , __UpperCamelCase=None , __UpperCamelCase=True , __UpperCamelCase=1 / 255 , __UpperCamelCase=True , __UpperCamelCase=[0.5, 0.5, 0.5] , __UpperCamelCase=[0.5, 0.5, 0.5] , __UpperCamelCase=True , ):
'''simple docstring'''
__a : List[Any] = size if size is not None else {"""shortest_edge""": 18, """longest_edge""": 1333}
__a : Dict = parent
__a : Union[str, Any] = batch_size
__a : Optional[int] = num_channels
__a : Dict = min_resolution
__a : List[Any] = max_resolution
__a : int = do_resize
__a : str = size
__a : Optional[Any] = do_rescale
__a : Optional[Any] = rescale_factor
__a : str = do_normalize
__a : Any = image_mean
__a : Optional[Any] = image_std
__a : Dict = do_pad
def __lowerCamelCase ( self ):
'''simple docstring'''
return {
"do_resize": self.do_resize,
"size": self.size,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_pad": self.do_pad,
}
def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase=False ):
'''simple docstring'''
if not batched:
__a : Union[str, Any] = image_inputs[0]
if isinstance(__UpperCamelCase , Image.Image ):
__a , __a : Tuple = image.size
else:
__a , __a : Tuple = image.shape[1], image.shape[2]
if w < h:
__a : Optional[int] = int(self.size["""shortest_edge"""] * h / w )
__a : Tuple = self.size["""shortest_edge"""]
elif w > h:
__a : Optional[Any] = self.size["""shortest_edge"""]
__a : Any = int(self.size["""shortest_edge"""] * w / h )
else:
__a : Any = self.size["""shortest_edge"""]
__a : Optional[int] = self.size["""shortest_edge"""]
else:
__a : Any = []
for image in image_inputs:
__a , __a : Any = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
__a : List[Any] = max(__UpperCamelCase , key=lambda __UpperCamelCase : item[0] )[0]
__a : Optional[Any] = max(__UpperCamelCase , key=lambda __UpperCamelCase : item[1] )[1]
return expected_height, expected_width
@require_torch
@require_vision
class SCREAMING_SNAKE_CASE__ ( __UpperCamelCase , unittest.TestCase ):
lowercase__ = DetrImageProcessor if is_vision_available() else None
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : str = DetrImageProcessingTester(self )
@property
def __lowerCamelCase ( self ):
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Optional[int] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__UpperCamelCase , """image_mean""" ) )
self.assertTrue(hasattr(__UpperCamelCase , """image_std""" ) )
self.assertTrue(hasattr(__UpperCamelCase , """do_normalize""" ) )
self.assertTrue(hasattr(__UpperCamelCase , """do_rescale""" ) )
self.assertTrue(hasattr(__UpperCamelCase , """rescale_factor""" ) )
self.assertTrue(hasattr(__UpperCamelCase , """do_resize""" ) )
self.assertTrue(hasattr(__UpperCamelCase , """size""" ) )
self.assertTrue(hasattr(__UpperCamelCase , """do_pad""" ) )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Optional[Any] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"""shortest_edge""": 18, """longest_edge""": 1333} )
self.assertEqual(image_processor.do_pad , __UpperCamelCase )
__a : List[Any] = self.image_processing_class.from_dict(
self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=__UpperCamelCase )
self.assertEqual(image_processor.size , {"""shortest_edge""": 42, """longest_edge""": 84} )
self.assertEqual(image_processor.do_pad , __UpperCamelCase )
def __lowerCamelCase ( self ):
'''simple docstring'''
pass
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__a : Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__UpperCamelCase )
for image in image_inputs:
self.assertIsInstance(__UpperCamelCase , Image.Image )
# Test not batched input
__a : Optional[Any] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
__a , __a : Any = self.image_processor_tester.get_expected_values(__UpperCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
__a , __a : Optional[int] = self.image_processor_tester.get_expected_values(__UpperCamelCase , batched=__UpperCamelCase )
__a : Any = image_processing(__UpperCamelCase , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Dict = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__a : List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__UpperCamelCase , numpify=__UpperCamelCase )
for image in image_inputs:
self.assertIsInstance(__UpperCamelCase , np.ndarray )
# Test not batched input
__a : Dict = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
__a , __a : Any = self.image_processor_tester.get_expected_values(__UpperCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
__a : List[str] = image_processing(__UpperCamelCase , return_tensors="""pt""" ).pixel_values
__a , __a : str = self.image_processor_tester.get_expected_values(__UpperCamelCase , batched=__UpperCamelCase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Any = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__a : Union[str, Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__UpperCamelCase , torchify=__UpperCamelCase )
for image in image_inputs:
self.assertIsInstance(__UpperCamelCase , torch.Tensor )
# Test not batched input
__a : Any = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
__a , __a : Any = self.image_processor_tester.get_expected_values(__UpperCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
__a : List[str] = image_processing(__UpperCamelCase , return_tensors="""pt""" ).pixel_values
__a , __a : Any = self.image_processor_tester.get_expected_values(__UpperCamelCase , batched=__UpperCamelCase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : List[Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
with open("""./tests/fixtures/tests_samples/COCO/coco_annotations.txt""" , """r""" ) as f:
__a : Dict = json.loads(f.read() )
__a : Optional[int] = {"""image_id""": 3_9769, """annotations""": target}
# encode them
__a : List[str] = DetrImageProcessor.from_pretrained("""facebook/detr-resnet-50""" )
__a : Tuple = image_processing(images=__UpperCamelCase , annotations=__UpperCamelCase , return_tensors="""pt""" )
# verify pixel values
__a : Union[str, Any] = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding["""pixel_values"""].shape , __UpperCamelCase )
__a : List[str] = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] )
self.assertTrue(torch.allclose(encoding["""pixel_values"""][0, 0, 0, :3] , __UpperCamelCase , atol=1E-4 ) )
# verify area
__a : List[Any] = torch.tensor([5_8_8_7.9_6_0_0, 1_1_2_5_0.2_0_6_1, 4_8_9_3_5_3.8_4_3_8, 8_3_7_1_2_2.7_5_0_0, 1_4_7_9_6_7.5_1_5_6, 1_6_5_7_3_2.3_4_3_8] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""area"""] , __UpperCamelCase ) )
# verify boxes
__a : Optional[int] = torch.Size([6, 4] )
self.assertEqual(encoding["""labels"""][0]["""boxes"""].shape , __UpperCamelCase )
__a : Any = torch.tensor([0.5_5_0_3, 0.2_7_6_5, 0.0_6_0_4, 0.2_2_1_5] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""boxes"""][0] , __UpperCamelCase , atol=1E-3 ) )
# verify image_id
__a : Union[str, Any] = torch.tensor([3_9769] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""image_id"""] , __UpperCamelCase ) )
# verify is_crowd
__a : List[Any] = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""iscrowd"""] , __UpperCamelCase ) )
# verify class_labels
__a : Any = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""class_labels"""] , __UpperCamelCase ) )
# verify orig_size
__a : Any = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""orig_size"""] , __UpperCamelCase ) )
# verify size
__a : str = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""size"""] , __UpperCamelCase ) )
@slow
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : List[Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
with open("""./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt""" , """r""" ) as f:
__a : Tuple = json.loads(f.read() )
__a : str = {"""file_name""": """000000039769.png""", """image_id""": 3_9769, """segments_info""": target}
__a : int = pathlib.Path("""./tests/fixtures/tests_samples/COCO/coco_panoptic""" )
# encode them
__a : List[str] = DetrImageProcessor.from_pretrained("""facebook/detr-resnet-50-panoptic""" )
__a : Tuple = image_processing(images=__UpperCamelCase , annotations=__UpperCamelCase , masks_path=__UpperCamelCase , return_tensors="""pt""" )
# verify pixel values
__a : List[str] = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding["""pixel_values"""].shape , __UpperCamelCase )
__a : Any = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] )
self.assertTrue(torch.allclose(encoding["""pixel_values"""][0, 0, 0, :3] , __UpperCamelCase , atol=1E-4 ) )
# verify area
__a : Optional[Any] = torch.tensor([1_4_7_9_7_9.6_8_7_5, 1_6_5_5_2_7.0_4_6_9, 4_8_4_6_3_8.5_9_3_8, 1_1_2_9_2.9_3_7_5, 5_8_7_9.6_5_6_2, 7_6_3_4.1_1_4_7] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""area"""] , __UpperCamelCase ) )
# verify boxes
__a : Optional[Any] = torch.Size([6, 4] )
self.assertEqual(encoding["""labels"""][0]["""boxes"""].shape , __UpperCamelCase )
__a : List[str] = torch.tensor([0.2_6_2_5, 0.5_4_3_7, 0.4_6_8_8, 0.8_6_2_5] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""boxes"""][0] , __UpperCamelCase , atol=1E-3 ) )
# verify image_id
__a : List[str] = torch.tensor([3_9769] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""image_id"""] , __UpperCamelCase ) )
# verify is_crowd
__a : Optional[int] = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""iscrowd"""] , __UpperCamelCase ) )
# verify class_labels
__a : Optional[int] = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""class_labels"""] , __UpperCamelCase ) )
# verify masks
__a : Union[str, Any] = 82_2873
self.assertEqual(encoding["""labels"""][0]["""masks"""].sum().item() , __UpperCamelCase )
# verify orig_size
__a : str = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""orig_size"""] , __UpperCamelCase ) )
# verify size
__a : List[Any] = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""size"""] , __UpperCamelCase ) )
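# The aspect-ratio-preserving resize rule these tests exercise, in isolation
# (assumption: simplified truncation semantics mirroring the helper class
# above): scale so the short side hits `shortest_edge`, but never let the
# long side exceed `longest_edge`.
def get_resize_size(h, w, shortest_edge=800, longest_edge=1333):
    scale = shortest_edge / min(h, w)
    if max(h, w) * scale > longest_edge:
        scale = longest_edge / max(h, w)
    return int(h * scale), int(w * scale)

print(get_resize_size(480, 640))  # (800, 1066) -> the COCO image shape checked above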
'''simple docstring'''
from typing import List, Optional, Union
import numpy as np
import PIL.Image
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
PILImageResampling,
get_image_size,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
__SCREAMING_SNAKE_CASE : Optional[int] = logging.get_logger(__name__)
class SCREAMING_SNAKE_CASE__ ( __UpperCamelCase ):
lowercase__ = ["pixel_values"]
def __init__( self , __UpperCamelCase = True , __UpperCamelCase = 32 , __UpperCamelCase=PILImageResampling.BILINEAR , __UpperCamelCase = True , **__UpperCamelCase , ):
'''simple docstring'''
__a : Union[str, Any] = do_resize
__a : Any = do_rescale
__a : Optional[Any] = size_divisor
__a : Optional[Any] = resample
super().__init__(**__UpperCamelCase )
def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = None , **__UpperCamelCase ):
'''simple docstring'''
__a , __a : Optional[Any] = get_image_size(__UpperCamelCase )
# Rounds the height and width down to the closest multiple of size_divisor
__a : int = height // size_divisor * size_divisor
__a : Any = width // size_divisor * size_divisor
__a : Any = resize(__UpperCamelCase , (new_h, new_w) , resample=__UpperCamelCase , data_format=__UpperCamelCase , **__UpperCamelCase )
return image
def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = None , **__UpperCamelCase ):
'''simple docstring'''
return rescale(image=__UpperCamelCase , scale=__UpperCamelCase , data_format=__UpperCamelCase , **__UpperCamelCase )
def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase=None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = ChannelDimension.FIRST , **__UpperCamelCase , ):
'''simple docstring'''
__a : List[Any] = do_resize if do_resize is not None else self.do_resize
__a : Union[str, Any] = do_rescale if do_rescale is not None else self.do_rescale
__a : List[Any] = size_divisor if size_divisor is not None else self.size_divisor
__a : str = resample if resample is not None else self.resample
if do_resize and size_divisor is None:
raise ValueError("""size_divisor is required for resizing""" )
__a : Any = make_list_of_images(__UpperCamelCase )
if not valid_images(__UpperCamelCase ):
raise ValueError("""Invalid image(s)""" )
# All transformations expect numpy arrays.
__a : Tuple = [to_numpy_array(__UpperCamelCase ) for img in images]
if do_resize:
__a : List[str] = [self.resize(__UpperCamelCase , size_divisor=__UpperCamelCase , resample=__UpperCamelCase ) for image in images]
if do_rescale:
__a : Union[str, Any] = [self.rescale(__UpperCamelCase , scale=1 / 255 ) for image in images]
__a : Any = [to_channel_dimension_format(__UpperCamelCase , __UpperCamelCase ) for image in images]
__a : Any = {"""pixel_values""": images}
return BatchFeature(data=__UpperCamelCase , tensor_type=__UpperCamelCase )
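# The size_divisor rule above in isolation: both sides are floored to the
# nearest multiple of `size_divisor` (default 32), which stride-32 backbones
# typically require.
def floor_to_multiple(height: int, width: int, size_divisor: int = 32):
    return height // size_divisor * size_divisor, width // size_divisor * size_divisor

print(floor_to_multiple(481, 640))  # (480, 640)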
'''simple docstring'''
import argparse
import logging
import os
import time
import timeit
import datasets
import numpy as np
import pycuda.autoinit # noqa: F401
import pycuda.driver as cuda
import tensorrt as trt
import torch
from absl import logging as absl_logging
from accelerate import Accelerator
from datasets import load_dataset, load_metric
from torch.utils.data import DataLoader
from utils_qa import postprocess_qa_predictions
import transformers
from transformers import AutoTokenizer, EvalPrediction, default_data_collator, set_seed
from transformers.trainer_pt_utils import nested_concat, nested_truncate
__SCREAMING_SNAKE_CASE : Optional[int] = trt.Logger(trt.Logger.WARNING)
__SCREAMING_SNAKE_CASE : Tuple = absl_logging.get_absl_logger()
absl_logger.setLevel(logging.WARNING)
__SCREAMING_SNAKE_CASE : Any = logging.getLogger(__name__)
__SCREAMING_SNAKE_CASE : int = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--onnx_model_path',
default=None,
type=str,
required=True,
help='Path to the ONNX model.',
)
parser.add_argument(
'--output_dir',
default=None,
type=str,
required=True,
help='The output directory where the model checkpoints and predictions will be written.',
)
# Other parameters
parser.add_argument(
'--tokenizer_name',
default='',
type=str,
required=True,
help='Pretrained tokenizer name or path if not the same as model_name',
)
parser.add_argument(
'--version_2_with_negative',
action='store_true',
help='If true, the SQuAD examples contain some that do not have an answer.',
)
parser.add_argument(
'--null_score_diff_threshold',
type=float,
default=0.0,
help='If null_score - best_non_null is greater than the threshold predict null.',
)
parser.add_argument(
'--max_seq_length',
default=384,
type=int,
help=(
'The maximum total input sequence length after WordPiece tokenization. Sequences '
'longer than this will be truncated, and sequences shorter than this will be padded.'
),
)
parser.add_argument(
'--doc_stride',
default=128,
type=int,
help='When splitting up a long document into chunks, how much stride to take between chunks.',
)
parser.add_argument('--per_device_eval_batch_size', default=8, type=int, help='Batch size per GPU/CPU for evaluation.')
parser.add_argument(
'--n_best_size',
default=20,
type=int,
help='The total number of n-best predictions to generate in the nbest_predictions.json output file.',
)
parser.add_argument(
'--max_answer_length',
default=30,
type=int,
help=(
'The maximum length of an answer that can be generated. This is needed because the start '
'and end predictions are not conditioned on one another.'
),
)
parser.add_argument('--seed', type=int, default=42, help='random seed for initialization')
parser.add_argument(
'--dataset_name',
type=str,
default=None,
required=True,
help='The name of the dataset to use (via the datasets library).',
)
parser.add_argument(
'--dataset_config_name',
type=str,
default=None,
help='The configuration name of the dataset to use (via the datasets library).',
)
parser.add_argument(
'--preprocessing_num_workers', type=int, default=4, help='The number of processes to use for preprocessing.'
)
parser.add_argument('--overwrite_cache', action='store_true', help='Overwrite the cached training and evaluation sets')
parser.add_argument(
'--fp16',
action='store_true',
help='Whether to use 16-bit (mixed) precision instead of 32-bit',
)
parser.add_argument(
'--int8',
action='store_true',
help='Whether to use INT8',
)
__SCREAMING_SNAKE_CASE : Optional[int] = parser.parse_args()
if args.tokenizer_name:
__SCREAMING_SNAKE_CASE : str = AutoTokenizer.from_pretrained(args.tokenizer_name, use_fast=True)
else:
raise ValueError(
'You are instantiating a new tokenizer from scratch. This is not supported by this script.'
'You can do it from another script, save it, and load it from here, using --tokenizer_name.'
)
logger.info('Training/evaluation parameters %s', args)
__SCREAMING_SNAKE_CASE : List[Any] = args.per_device_eval_batch_size
__SCREAMING_SNAKE_CASE : int = (args.eval_batch_size, args.max_seq_length)
# TRT Engine properties
__SCREAMING_SNAKE_CASE : Optional[Any] = True
__SCREAMING_SNAKE_CASE : Tuple = 'temp_engine/bert-fp32.engine'
if args.fpaa:
__SCREAMING_SNAKE_CASE : Dict = 'temp_engine/bert-fp16.engine'
if args.inta:
__SCREAMING_SNAKE_CASE : Tuple = 'temp_engine/bert-int8.engine'
# import ONNX file
if not os.path.exists('temp_engine'):
os.makedirs('temp_engine')
__SCREAMING_SNAKE_CASE : Optional[Any] = 1 << (int)(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)
with trt.Builder(TRT_LOGGER) as builder, builder.create_network(EXPLICIT_BATCH) as network, trt.OnnxParser(
network, TRT_LOGGER
) as parser:
with open(args.onnx_model_path, 'rb') as model:
if not parser.parse(model.read()):
for error in range(parser.num_errors):
print(parser.get_error(error))
# Query input names and shapes from parsed TensorRT network
__SCREAMING_SNAKE_CASE : List[Any] = [network.get_input(i) for i in range(network.num_inputs)]
__SCREAMING_SNAKE_CASE : List[Any] = [_input.name for _input in network_inputs] # ex: ["actual_input1"]
with builder.create_builder_config() as config:
__SCREAMING_SNAKE_CASE : Tuple = 1 << 50
if STRICT_TYPES:
config.set_flag(trt.BuilderFlag.STRICT_TYPES)
if args.fpaa:
config.set_flag(trt.BuilderFlag.FPaa)
if args.inta:
config.set_flag(trt.BuilderFlag.INTa)
__SCREAMING_SNAKE_CASE : Dict = builder.create_optimization_profile()
config.add_optimization_profile(profile)
for i in range(len(input_names)):
profile.set_shape(input_names[i], INPUT_SHAPE, INPUT_SHAPE, INPUT_SHAPE)
__SCREAMING_SNAKE_CASE : Union[str, Any] = builder.build_engine(network, config)
# serialize_engine and store in file (can be directly loaded and deserialized):
with open(engine_name, 'wb') as f:
f.write(engine.serialize())
def _snake_case ( lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase ) -> List[Any]:
__a : Dict = np.asarray(inputs["""input_ids"""] , dtype=np.intaa )
__a : List[Any] = np.asarray(inputs["""attention_mask"""] , dtype=np.intaa )
__a : str = np.asarray(inputs["""token_type_ids"""] , dtype=np.intaa )
# Copy inputs
cuda.memcpy_htod_async(d_inputs[0] , input_ids.ravel() , lowercase )
cuda.memcpy_htod_async(d_inputs[1] , attention_mask.ravel() , lowercase )
cuda.memcpy_htod_async(d_inputs[2] , token_type_ids.ravel() , lowercase )
# start time
__a : Optional[Any] = time.time()
# Run inference
context.execute_async(
bindings=[int(lowercase ) for d_inp in d_inputs] + [int(lowercase ), int(lowercase )] , stream_handle=stream.handle )
# Transfer predictions back from GPU
cuda.memcpy_dtoh_async(lowercase , lowercase , lowercase )
cuda.memcpy_dtoh_async(lowercase , lowercase , lowercase )
# Synchronize the stream and take time
stream.synchronize()
# end time
__a : str = time.time()
__a : Any = end_time - start_time
__a : Optional[int] = (h_outputa, h_outputa)
# print(outputs)
return outputs, infer_time
# Initialize the accelerator. We will let the accelerator handle device placement for us in this example.
__SCREAMING_SNAKE_CASE : Optional[Any] = Accelerator()
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
datefmt='%m/%d/%Y %H:%M:%S',
level=logging.INFO,
)
# Setup logging, we only want one process per machine to log things on the screen.
# accelerator.is_local_main_process is only True for one process per machine.
logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR)
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_info()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
# If passed along, set the training seed now.
if args.seed is not None:
set_seed(args.seed)
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
if args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
__SCREAMING_SNAKE_CASE : List[str] = load_dataset(args.dataset_name, args.dataset_config_name)
else:
raise ValueError('Evaluation requires a dataset name')
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Preprocessing the datasets.
# Preprocessing is slightly different for training and evaluation.
__SCREAMING_SNAKE_CASE : int = raw_datasets['validation'].column_names
__SCREAMING_SNAKE_CASE : Tuple = 'question' if 'question' in column_names else column_names[0]
__SCREAMING_SNAKE_CASE : List[Any] = 'context' if 'context' in column_names else column_names[1]
__SCREAMING_SNAKE_CASE : Tuple = 'answers' if 'answers' in column_names else column_names[2]
# Padding side determines if we do (question|context) or (context|question).
__SCREAMING_SNAKE_CASE : Tuple = tokenizer.padding_side == 'right'
if args.max_seq_length > tokenizer.model_max_length:
logger.warning(
f'''The max_seq_length passed ({args.max_seq_length}) is larger than the maximum length for the'''
f'''model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.'''
)
__SCREAMING_SNAKE_CASE : Dict = min(args.max_seq_length, tokenizer.model_max_length)
def _snake_case ( lowercase ) -> Tuple:
# Some of the questions have lots of whitespace on the left, which is not useful and will make the
# truncation of the context fail (the tokenized question will take a lot of space). So we remove that
# left whitespace
__a : Optional[Any] = [q.lstrip() for q in examples[question_column_name]]
# Tokenize our examples with truncation and maybe padding, but keep the overflows using a stride. This results
# in one example possibly giving several features when a context is long, each of those features having a
# context that overlaps a bit the context of the previous feature.
__a : Optional[int] = tokenizer(
examples[question_column_name if pad_on_right else context_column_name] , examples[context_column_name if pad_on_right else question_column_name] , truncation="""only_second""" if pad_on_right else """only_first""" , max_length=lowercase , stride=args.doc_stride , return_overflowing_tokens=lowercase , return_offsets_mapping=lowercase , padding="""max_length""" , )
# Since one example might give us several features if it has a long context, we need a map from a feature to
# its corresponding example. This key gives us just that.
__a : Optional[Any] = tokenized_examples.pop("""overflow_to_sample_mapping""" )
# For evaluation, we will need to convert our predictions to substrings of the context, so we keep the
# corresponding example_id and we will store the offset mappings.
__a : Optional[Any] = []
for i in range(len(tokenized_examples["""input_ids"""] ) ):
# Grab the sequence corresponding to that example (to know what is the context and what is the question).
__a : Dict = tokenized_examples.sequence_ids(lowercase )
__a : Optional[Any] = 1 if pad_on_right else 0
# One example can give several spans, this is the index of the example containing this span of text.
__a : Union[str, Any] = sample_mapping[i]
tokenized_examples["example_id"].append(examples["""id"""][sample_index] )
# Set to None the offset_mapping that are not part of the context so it's easy to determine if a token
# position is part of the context or not.
__a : int = [
(o if sequence_ids[k] == context_index else None)
for k, o in enumerate(tokenized_examples["""offset_mapping"""][i] )
]
return tokenized_examples
__SCREAMING_SNAKE_CASE : int = raw_datasets['validation']
# Validation Feature Creation
__SCREAMING_SNAKE_CASE : Union[str, Any] = eval_examples.map(
prepare_validation_features,
batched=True,
num_proc=args.preprocessing_num_workers,
remove_columns=column_names,
load_from_cache_file=not args.overwrite_cache,
desc='Running tokenizer on validation dataset',
)
__SCREAMING_SNAKE_CASE : List[Any] = default_data_collator
__SCREAMING_SNAKE_CASE : Union[str, Any] = eval_dataset.remove_columns(['example_id', 'offset_mapping'])
__SCREAMING_SNAKE_CASE : List[str] = DataLoader(
eval_dataset_for_model, collate_fn=data_collator, batch_size=args.per_device_eval_batch_size
)
def _snake_case ( lowercase , lowercase , lowercase , lowercase="eval" ) -> Any:
# Post-processing: we match the start logits and end logits to answers in the original context.
__a : List[str] = postprocess_qa_predictions(
examples=lowercase , features=lowercase , predictions=lowercase , version_2_with_negative=args.version_2_with_negative , n_best_size=args.n_best_size , max_answer_length=args.max_answer_length , null_score_diff_threshold=args.null_score_diff_threshold , output_dir=args.output_dir , prefix=lowercase , )
# Format the result to the format the metric expects.
if args.version_2_with_negative:
__a : List[str] = [
{"""id""": k, """prediction_text""": v, """no_answer_probability""": 0.0} for k, v in predictions.items()
]
else:
__a : List[str] = [{"""id""": k, """prediction_text""": v} for k, v in predictions.items()]
__a : Optional[Any] = [{"""id""": ex["""id"""], """answers""": ex[answer_column_name]} for ex in examples]
return EvalPrediction(predictions=lowercase , label_ids=lowercase )
__SCREAMING_SNAKE_CASE : List[Any] = load_metric('squad_v2' if args.version_2_with_negative else 'squad')
# Evaluation!
logger.info('Loading ONNX model %s for evaluation', args.onnx_model_path)
with open(engine_name, 'rb') as f, trt.Runtime(TRT_LOGGER) as runtime, runtime.deserialize_cuda_engine(
f.read()
) as engine, engine.create_execution_context() as context:
# setup for TRT inference
for i in range(len(input_names)):
context.set_binding_shape(i, INPUT_SHAPE)
assert context.all_binding_shapes_specified
def _snake_case ( lowercase ) -> Optional[int]:
return trt.volume(engine.get_binding_shape(lowercase ) ) * engine.get_binding_dtype(lowercase ).itemsize
# Allocate device memory for inputs and outputs.
__SCREAMING_SNAKE_CASE : List[str] = [cuda.mem_alloc(binding_nbytes(binding)) for binding in engine if engine.binding_is_input(binding)]
# Allocate output buffer
__SCREAMING_SNAKE_CASE : str = cuda.pagelocked_empty(tuple(context.get_binding_shape(3)), dtype=np.floataa)
__SCREAMING_SNAKE_CASE : Union[str, Any] = cuda.pagelocked_empty(tuple(context.get_binding_shape(4)), dtype=np.floataa)
__SCREAMING_SNAKE_CASE : str = cuda.mem_alloc(h_outputa.nbytes)
__SCREAMING_SNAKE_CASE : Tuple = cuda.mem_alloc(h_outputa.nbytes)
# Create a stream in which to copy inputs/outputs and run inference.
__SCREAMING_SNAKE_CASE : Tuple = cuda.Stream()
# Evaluation
logger.info('***** Running Evaluation *****')
logger.info(f''' Num examples = {len(eval_dataset)}''')
logger.info(f''' Batch size = {args.per_device_eval_batch_size}''')
__SCREAMING_SNAKE_CASE : Union[str, Any] = 0.0
__SCREAMING_SNAKE_CASE : str = 0
__SCREAMING_SNAKE_CASE : str = timeit.default_timer()
__SCREAMING_SNAKE_CASE : Dict = None
for step, batch in enumerate(eval_dataloader):
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : Union[str, Any] = model_infer(batch, context, d_inputs, h_outputa, h_outputa, d_outputa, d_outputa, stream)
total_time += infer_time
niter += 1
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : Optional[Any] = outputs
__SCREAMING_SNAKE_CASE : Union[str, Any] = torch.tensor(start_logits)
__SCREAMING_SNAKE_CASE : Tuple = torch.tensor(end_logits)
# pad predictions and labels so they can be gathered across processes
__SCREAMING_SNAKE_CASE : Optional[int] = accelerator.pad_across_processes(start_logits, dim=1, pad_index=-100)
__SCREAMING_SNAKE_CASE : Dict = accelerator.pad_across_processes(end_logits, dim=1, pad_index=-100)
__SCREAMING_SNAKE_CASE : List[str] = (accelerator.gather(start_logits).cpu().numpy(), accelerator.gather(end_logits).cpu().numpy())
__SCREAMING_SNAKE_CASE : List[str] = logits if all_preds is None else nested_concat(all_preds, logits, padding_index=-100)
if all_preds is not None:
__SCREAMING_SNAKE_CASE : Tuple = nested_truncate(all_preds, len(eval_dataset))
__SCREAMING_SNAKE_CASE : str = timeit.default_timer() - start_time
logger.info(' Evaluation done in total %f secs (%f sec per example)', evalTime, evalTime / len(eval_dataset))
# Inference time from TRT
logger.info('Average Inference Time = {:.3f} ms'.format(total_time * 1_000 / niter))
logger.info('Total Inference Time = {:.3f} ms'.format(total_time * 1_000))
logger.info('Total Number of Inference = %d', niter)
__SCREAMING_SNAKE_CASE : Optional[int] = post_processing_function(eval_examples, eval_dataset, all_preds)
__SCREAMING_SNAKE_CASE : List[Any] = metric.compute(predictions=prediction.predictions, references=prediction.label_ids)
logger.info(f'''Evaluation metrics: {eval_metric}''')
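# A toy version of the span selection inside postprocess_qa_predictions
# (assumption: heavily simplified — no offset mapping, no null answers):
# pick the highest-scoring (start, end) pair with start <= end and a length cap.
import numpy as np

def best_span(start_logits, end_logits, n_best_size=20, max_answer_length=30):
    best, best_score = (0, 0), -np.inf
    for s in np.argsort(start_logits)[-n_best_size:]:
        for e in np.argsort(end_logits)[-n_best_size:]:
            if s <= e < s + max_answer_length:
                score = start_logits[s] + end_logits[e]
                if score > best_score:
                    best, best_score = (int(s), int(e)), score
    return best

rng = np.random.default_rng(0)
print(best_span(rng.normal(size=50), rng.normal(size=50)))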
'''simple docstring'''
import unittest
from queue import Empty
from threading import Thread
from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available
from transformers.testing_utils import CaptureStdout, require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers import AutoModelForCausalLM
@require_torch
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Dict = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" )
__a : Optional[int] = AutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" ).to(__UpperCamelCase )
__a : Optional[Any] = -1
__a : Optional[int] = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(__UpperCamelCase )
__a : Tuple = model.generate(__UpperCamelCase , max_new_tokens=10 , do_sample=__UpperCamelCase )
__a : Any = tokenizer.decode(greedy_ids[0] )
with CaptureStdout() as cs:
__a : Optional[Any] = TextStreamer(__UpperCamelCase )
model.generate(__UpperCamelCase , max_new_tokens=10 , do_sample=__UpperCamelCase , streamer=__UpperCamelCase )
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
__a : List[Any] = cs.out[:-1]
self.assertEqual(__UpperCamelCase , __UpperCamelCase )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Any = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" )
__a : int = AutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" ).to(__UpperCamelCase )
__a : Tuple = -1
__a : int = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(__UpperCamelCase )
__a : Any = model.generate(__UpperCamelCase , max_new_tokens=10 , do_sample=__UpperCamelCase )
__a : Union[str, Any] = tokenizer.decode(greedy_ids[0] )
__a : List[Any] = TextIteratorStreamer(__UpperCamelCase )
__a : Any = {"""input_ids""": input_ids, """max_new_tokens""": 10, """do_sample""": False, """streamer""": streamer}
__a : Tuple = Thread(target=model.generate , kwargs=__UpperCamelCase )
thread.start()
__a : Any = """"""
for new_text in streamer:
streamer_text += new_text
self.assertEqual(__UpperCamelCase , __UpperCamelCase )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : int = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" )
__a : Union[str, Any] = AutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" ).to(__UpperCamelCase )
__a : List[str] = -1
__a : Tuple = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(__UpperCamelCase )
__a : str = model.generate(__UpperCamelCase , max_new_tokens=10 , do_sample=__UpperCamelCase )
__a : List[str] = greedy_ids[:, input_ids.shape[1] :]
__a : Optional[int] = tokenizer.decode(new_greedy_ids[0] )
with CaptureStdout() as cs:
__a : int = TextStreamer(__UpperCamelCase , skip_prompt=__UpperCamelCase )
model.generate(__UpperCamelCase , max_new_tokens=10 , do_sample=__UpperCamelCase , streamer=__UpperCamelCase )
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
__a : Any = cs.out[:-1]
self.assertEqual(__UpperCamelCase , __UpperCamelCase )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Union[str, Any] = AutoTokenizer.from_pretrained("""distilgpt2""" )
__a : List[str] = AutoModelForCausalLM.from_pretrained("""distilgpt2""" ).to(__UpperCamelCase )
__a : int = -1
__a : Tuple = torch.ones((1, 5) , device=__UpperCamelCase ).long() * model.config.bos_token_id
with CaptureStdout() as cs:
__a : List[str] = TextStreamer(__UpperCamelCase , skip_special_tokens=__UpperCamelCase )
model.generate(__UpperCamelCase , max_new_tokens=1 , do_sample=__UpperCamelCase , streamer=__UpperCamelCase )
# The prompt contains a special token, so the streamer should not print it. As such, the output text, when
# re-tokenized, must only contain one token
__a : Optional[Any] = cs.out[:-1] # Remove the final "\n"
__a : Tuple = tokenizer(__UpperCamelCase , return_tensors="""pt""" )
self.assertEqual(streamer_text_tokenized.input_ids.shape , (1, 1) )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Optional[Any] = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" )
__a : Optional[int] = AutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" ).to(__UpperCamelCase )
__a : int = -1
__a : int = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(__UpperCamelCase )
__a : Union[str, Any] = TextIteratorStreamer(__UpperCamelCase , timeout=0.0_0_1 )
__a : Tuple = {"""input_ids""": input_ids, """max_new_tokens""": 10, """do_sample""": False, """streamer""": streamer}
__a : Dict = Thread(target=model.generate , kwargs=__UpperCamelCase )
thread.start()
# The streamer will timeout after 0.001 seconds, so an exception will be raised
with self.assertRaises(__UpperCamelCase ):
__a : Tuple = """"""
for new_text in streamer:
streamer_text += new_text
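# Hedged usage sketch of the threaded streaming pattern these tests exercise
# (downloads the tiny test checkpoint; outputs are random-weight gibberish):
from threading import Thread
from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer

tok = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2")
streamer = TextIteratorStreamer(tok)
inputs = tok("Hello", return_tensors="pt")
Thread(
    target=model.generate,
    kwargs=dict(**inputs, max_new_tokens=10, do_sample=False, streamer=streamer),
).start()
print("".join(piece for piece in streamer))  # text arrives incrementally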
'''simple docstring'''
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNetaDModel
from ...schedulers import KarrasVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class SCREAMING_SNAKE_CASE__ ( __UpperCamelCase ):
lowercase__ = 42
lowercase__ = 42
def __init__( self , __UpperCamelCase , __UpperCamelCase ):
'''simple docstring'''
super().__init__()
self.register_modules(unet=__UpperCamelCase , scheduler=__UpperCamelCase )
@torch.no_grad()
def __call__( self , __UpperCamelCase = 1 , __UpperCamelCase = 50 , __UpperCamelCase = None , __UpperCamelCase = "pil" , __UpperCamelCase = True , **__UpperCamelCase , ):
'''simple docstring'''
__a : int = self.unet.config.sample_size
__a : Optional[int] = (batch_size, 3, img_size, img_size)
__a : Union[str, Any] = self.unet
# sample x_0 ~ N(0, sigma_0^2 * I)
__a : Dict = randn_tensor(__UpperCamelCase , generator=__UpperCamelCase , device=self.device ) * self.scheduler.init_noise_sigma
self.scheduler.set_timesteps(__UpperCamelCase )
for t in self.progress_bar(self.scheduler.timesteps ):
# here sigma_t == t_i from the paper
__a : Dict = self.scheduler.schedule[t]
__a : Any = self.scheduler.schedule[t - 1] if t > 0 else 0
# 1. Select temporarily increased noise level sigma_hat
# 2. Add new noise to move from sample_i to sample_hat
__a , __a : Tuple = self.scheduler.add_noise_to_input(__UpperCamelCase , __UpperCamelCase , generator=__UpperCamelCase )
# 3. Predict the noise residual given the noise magnitude `sigma_hat`
# The model inputs and output are adjusted by following eq. (213) in [1].
__a : List[Any] = (sigma_hat / 2) * model((sample_hat + 1) / 2 , sigma_hat / 2 ).sample
# 4. Evaluate dx/dt at sigma_hat
# 5. Take Euler step from sigma to sigma_prev
__a : str = self.scheduler.step(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
if sigma_prev != 0:
# 6. Apply 2nd order correction
# The model inputs and output are adjusted by following eq. (213) in [1].
__a : Union[str, Any] = (sigma_prev / 2) * model((step_output.prev_sample + 1) / 2 , sigma_prev / 2 ).sample
__a : Tuple = self.scheduler.step_correct(
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , step_output.prev_sample , step_output["""derivative"""] , )
__a : Tuple = step_output.prev_sample
__a : Optional[Any] = (sample / 2 + 0.5).clamp(0 , 1 )
__a : Dict = sample.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
__a : List[Any] = self.numpy_to_pil(__UpperCamelCase )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=__UpperCamelCase )
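# The two numerical updates in the loop above, stripped to scalars (assumption:
# toy values, no model): a first-order Euler step from sigma_hat to sigma_prev,
# then the Heun-style second-order correction applied when sigma_prev != 0.
def euler_step(sample, derivative, sigma_hat, sigma_prev):
    return sample + (sigma_prev - sigma_hat) * derivative

def heun_correct(sample_hat, derivative_hat, derivative_prev, sigma_hat, sigma_prev):
    return sample_hat + (sigma_prev - sigma_hat) * 0.5 * (derivative_hat + derivative_prev)

x = euler_step(1.0, 0.5, 10.0, 8.0)                # 1.0 + (-2.0) * 0.5 = 0.0
print(x, heun_correct(1.0, 0.5, 0.3, 10.0, 8.0))   # 0.0 ~0.2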
'''simple docstring'''
from sympy import diff, lambdify, symbols
from sympy.functions import * # noqa: F403
def _snake_case ( lowercase , lowercase , lowercase = "x" , lowercase = 1_0**-1_0 , lowercase = 1 , ) -> complex:
__a : int = symbols(lowercase )
__a : Union[str, Any] = lambdify(lowercase , lowercase )
__a : int = lambdify(lowercase , diff(lowercase , lowercase ) )
__a : Optional[int] = starting_point
while True:
if diff_function(lowercase ) != 0:
__a : Optional[Any] = prev_guess - multiplicity * func(lowercase ) / diff_function(
lowercase )
else:
raise ZeroDivisionError("""Could not find root""" ) from None
# Precision is checked by comparing the difference of consecutive guesses
if abs(next_guess - prev_guess ) < precision:
return next_guess
__a : Union[str, Any] = next_guess
# Let's Execute
if __name__ == "__main__":
# Find root of trigonometric function
# Find value of pi
print(f'''The root of sin(x) = 0 is {newton_raphson('sin(x)', 2)}''')
# Find root of polynomial
# Find fourth Root of 5
print(f'''The root of x**4 - 5 = 0 is {newton_raphson('x**4 -5', 0.4 +5J)}''')
# Find value of e
print(
'The root of log(y) - 1 = 0 is ',
f'''{newton_raphson('log(y) - 1', 2, variable='y')}''',
)
# Exponential Roots
print(
'The root of exp(x) - 1 = 0 is',
f'''{newton_raphson('exp(x) - 1', 10, precision=0.005)}''',
)
# Find root of cos(x)
print(f'''The root of cos(x) = 0 is {newton_raphson('cos(x)', 0)}''')
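# A purely numeric variant of the same idea (assumption: no sympy, derivative
# approximated by a central finite difference; f must be smooth near the root):
import math

def newton_numeric(f, x0, precision=1e-10, h=1e-7, max_iterations=100):
    x = x0
    for _ in range(max_iterations):
        dfdx = (f(x + h) - f(x - h)) / (2 * h)   # central difference
        if dfdx == 0:
            raise ZeroDivisionError("Could not find root")
        x_next = x - f(x) / dfdx
        if abs(x_next - x) < precision:
            return x_next
        x = x_next
    raise ArithmeticError("did not converge")

print(newton_numeric(math.cos, 1.0))  # ~1.5707963... (pi / 2)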
'''simple docstring'''
def _snake_case ( lowercase ) -> bool:
if not isinstance(lowercase , lowercase ):
raise ValueError("""check_bouncy() accepts only integer arguments""" )
__a : str = str(lowercase )
__a : Any = """""".join(sorted(lowercase ) )
return sorted_str_n != str_n and sorted_str_n[::-1] != str_n
def _snake_case ( lowercase = 9_9 ) -> int:
if not 0 < percent < 1_0_0:
raise ValueError("""solution() only accepts values from 0 to 100""" )
__a : List[str] = 0
__a : Union[str, Any] = 1
while True:
if check_bouncy(lowercase ):
bouncy_num += 1
if (bouncy_num / num) * 1_0_0 >= percent:
return num
num += 1
if __name__ == "__main__":
from doctest import testmod
testmod()
print(f'''{solution(99)}''')
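# Concrete sanity checks of the bouncy definition (standalone re-statement of
# check_bouncy above): 134468 is increasing, 66420 is decreasing, and 155349
# is neither — i.e. bouncy.
def is_bouncy(n: int) -> bool:
    s = str(n)
    return sorted(s) != list(s) and sorted(s, reverse=True) != list(s)

print(is_bouncy(134468), is_bouncy(66420), is_bouncy(155349))  # False False True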
'''simple docstring'''
def _snake_case ( lowercase ) -> str:
return "".join(chr(ord(lowercase ) - 3_2 ) if """a""" <= char <= """z""" else char for char in word )
if __name__ == "__main__":
from doctest import testmod
testmod()
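# Example: ASCII lowercase letters are shifted down by 32 code points; every
# other character passes through unchanged.
print(_snake_case("hello, World 123"))  # HELLO, WORLD 123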
'''simple docstring'''
import argparse
import torch
from transformers import GPTaConfig, GPTaModel, load_tf_weights_in_gpta
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def _snake_case ( lowercase , lowercase , lowercase ) -> Any:
# Construct model
if gpta_config_file == "":
__a : Dict = GPTaConfig()
else:
__a : Optional[Any] = GPTaConfig.from_json_file(lowercase )
__a : Union[str, Any] = GPTaModel(lowercase )
# Load weights from numpy
load_tf_weights_in_gpta(lowercase , lowercase , lowercase )
# Save pytorch-model
__a : Optional[int] = pytorch_dump_folder_path + """/""" + WEIGHTS_NAME
__a : Dict = pytorch_dump_folder_path + """/""" + CONFIG_NAME
print(F"""Save PyTorch model to {pytorch_weights_dump_path}""" )
torch.save(model.state_dict() , lowercase )
print(F"""Save configuration file to {pytorch_config_dump_path}""" )
with open(lowercase , """w""" , encoding="""utf-8""" ) as f:
f.write(config.to_json_string() )
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE : Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--gpt2_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--gpt2_config_file',
default='',
type=str,
help=(
'An optional config json file corresponding to the pre-trained OpenAI model. \n'
'This specifies the model architecture.'
),
)
__SCREAMING_SNAKE_CASE : Dict = parser.parse_args()
convert_gpta_checkpoint_to_pytorch(args.gpta_checkpoint_path, args.gpta_config_file, args.pytorch_dump_folder_path)
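# Hedged invocation sketch (all paths below are placeholders, not real files):
#
#   python convert_gpt2_checkpoint.py \
#       --gpt2_checkpoint_path /path/to/tf_checkpoint \
#       --gpt2_config_file /path/to/config.json \
#       --pytorch_dump_folder_path /path/to/output_dir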
'''simple docstring'''
import inspect
import tempfile
import unittest
from huggingface_hub import hf_hub_download
from transformers import is_torch_available
from transformers.testing_utils import is_flaky, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
__SCREAMING_SNAKE_CASE : str = 1E-4
if is_torch_available():
import torch
from transformers import AutoformerConfig, AutoformerForPrediction, AutoformerModel
from transformers.models.autoformer.modeling_autoformer import AutoformerDecoder, AutoformerEncoder
@require_torch
class SCREAMING_SNAKE_CASE__ :
def __init__( self , __UpperCamelCase , __UpperCamelCase=16 , __UpperCamelCase=13 , __UpperCamelCase=7 , __UpperCamelCase=14 , __UpperCamelCase=10 , __UpperCamelCase=19 , __UpperCamelCase=5 , __UpperCamelCase=4 , __UpperCamelCase=True , __UpperCamelCase=16 , __UpperCamelCase=2 , __UpperCamelCase=4 , __UpperCamelCase=4 , __UpperCamelCase="gelu" , __UpperCamelCase=0.1 , __UpperCamelCase=0.1 , __UpperCamelCase=[1, 2, 3, 4, 5] , __UpperCamelCase=25 , __UpperCamelCase=5 , ):
'''simple docstring'''
__a : Union[str, Any] = d_model
__a : int = parent
__a : Tuple = batch_size
__a : Tuple = prediction_length
__a : Optional[Any] = context_length
__a : str = cardinality
__a : int = num_time_features
__a : Any = lags_sequence
__a : List[Any] = embedding_dimension
__a : str = is_training
__a : Optional[Any] = hidden_size
__a : List[Any] = num_hidden_layers
__a : Dict = num_attention_heads
__a : int = intermediate_size
__a : Any = hidden_act
__a : str = hidden_dropout_prob
__a : Union[str, Any] = attention_probs_dropout_prob
__a : Dict = context_length
__a : Union[str, Any] = prediction_length + label_length
__a : Union[str, Any] = label_length
__a : Tuple = moving_average
__a : Tuple = autocorrelation_factor
def __lowerCamelCase ( self ):
'''simple docstring'''
return AutoformerConfig(
d_model=self.d_model , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , prediction_length=self.prediction_length , context_length=self.context_length , label_length=self.label_length , lags_sequence=self.lags_sequence , num_time_features=self.num_time_features , num_static_categorical_features=1 , cardinality=[self.cardinality] , embedding_dimension=[self.embedding_dimension] , moving_average=self.moving_average , )
def __lowerCamelCase ( self , __UpperCamelCase ):
'''simple docstring'''
__a : List[Any] = config.context_length + max(config.lags_sequence )
__a : int = ids_tensor([self.batch_size, 1] , config.cardinality[0] )
__a : Optional[Any] = floats_tensor([self.batch_size, _past_length, config.num_time_features] )
__a : List[Any] = floats_tensor([self.batch_size, _past_length] )
__a : Dict = floats_tensor([self.batch_size, _past_length] ) > 0.5
# decoder inputs
__a : int = floats_tensor([self.batch_size, config.prediction_length, config.num_time_features] )
__a : Optional[Any] = floats_tensor([self.batch_size, config.prediction_length] )
__a : Any = {
"""past_values""": past_values,
"""static_categorical_features""": static_categorical_features,
"""past_time_features""": past_time_features,
"""past_observed_mask""": past_observed_mask,
"""future_time_features""": future_time_features,
"""future_values""": future_values,
}
return inputs_dict
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Tuple = self.get_config()
__a : Tuple = self.prepare_autoformer_inputs_dict(__UpperCamelCase )
return config, inputs_dict
def __lowerCamelCase ( self ):
'''simple docstring'''
__a , __a : List[str] = self.prepare_config_and_inputs()
return config, inputs_dict
def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase ):
'''simple docstring'''
__a : Optional[Any] = AutoformerModel(config=__UpperCamelCase ).to(__UpperCamelCase ).eval()
__a : Union[str, Any] = model(**__UpperCamelCase )
__a : int = outputs.encoder_last_hidden_state
__a : Optional[int] = outputs.last_hidden_state
with tempfile.TemporaryDirectory() as tmpdirname:
__a : List[str] = model.get_encoder()
encoder.save_pretrained(__UpperCamelCase )
__a : Dict = AutoformerEncoder.from_pretrained(__UpperCamelCase ).to(__UpperCamelCase )
__a , __a , __a , __a , __a : Optional[int] = model.create_network_inputs(**__UpperCamelCase )
__a , __a : str = model.decomposition_layer(transformer_inputs[:, : config.context_length, ...] )
__a : Optional[int] = torch.cat(
(transformer_inputs[:, : config.context_length, ...], feature[:, : config.context_length, ...]) , dim=-1 , )
__a : Optional[int] = encoder(inputs_embeds=__UpperCamelCase )[0]
self.parent.assertTrue((encoder_last_hidden_state_a - encoder_last_hidden_state).abs().max().item() < 1E-3 )
__a : Optional[int] = (
torch.mean(transformer_inputs[:, : config.context_length, ...] , dim=1 )
.unsqueeze(1 )
.repeat(1 , config.prediction_length , 1 )
)
__a : Optional[Any] = torch.zeros(
[transformer_inputs.shape[0], config.prediction_length, transformer_inputs.shape[2]] , device=enc_input.device , )
__a : Tuple = torch.cat(
(
torch.cat((seasonal_input[:, -config.label_length :, ...], zeros) , dim=1 ),
feature[:, config.context_length - config.label_length :, ...],
) , dim=-1 , )
__a : Dict = torch.cat(
(
torch.cat((trend_input[:, -config.label_length :, ...], mean) , dim=1 ),
feature[:, config.context_length - config.label_length :, ...],
) , dim=-1 , )
with tempfile.TemporaryDirectory() as tmpdirname:
__a : List[Any] = model.get_decoder()
decoder.save_pretrained(__UpperCamelCase )
__a : List[str] = AutoformerDecoder.from_pretrained(__UpperCamelCase ).to(__UpperCamelCase )
__a : Dict = decoder(
trend=__UpperCamelCase , inputs_embeds=__UpperCamelCase , encoder_hidden_states=__UpperCamelCase , )[0]
self.parent.assertTrue((last_hidden_state_a - last_hidden_state).abs().max().item() < 1E-3 )
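# --- Added for illustration (not part of the original test file). ---
# A minimal sketch of the moving-average series decomposition the standalone
# encoder/decoder check above depends on: Autoformer splits an input series
# into a trend component (a moving average) and a seasonal residual. This is
# an assumed, simplified stand-in for the model's own decomposition layer,
# not its actual implementation.
def _decompose_sketch(x, kernel_size=25):
    # x: (batch, time, features); pad both ends so the average keeps the length
    pad_front = (kernel_size - 1) // 2
    pad_back = kernel_size - 1 - pad_front
    front = x[:, :1, :].repeat(1, pad_front, 1)
    back = x[:, -1:, :].repeat(1, pad_back, 1)
    padded = torch.cat([front, x, back], dim=1)
    trend = padded.unfold(1, kernel_size, 1).mean(dim=-1)  # (batch, time, features)
    seasonal = x - trend                                   # residual after removing the trend
    return seasonal, trend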
@require_torch
class SCREAMING_SNAKE_CASE__ ( __UpperCamelCase , __UpperCamelCase , unittest.TestCase ):
lowercase__ = (AutoformerModel, AutoformerForPrediction) if is_torch_available() else ()
lowercase__ = (AutoformerForPrediction,) if is_torch_available() else ()
lowercase__ = {"feature-extraction": AutoformerModel} if is_torch_available() else {}
lowercase__ = False
lowercase__ = False
lowercase__ = False
lowercase__ = False
lowercase__ = False
lowercase__ = False
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : List[Any] = AutoformerModelTester(self )
__a : Optional[int] = ConfigTester(self , config_class=__UpperCamelCase , has_text_modality=__UpperCamelCase )
def __lowerCamelCase ( self ):
'''simple docstring'''
self.config_tester.run_common_tests()
def __lowerCamelCase ( self ):
'''simple docstring'''
__a , __a : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
__a : Any = model_class(__UpperCamelCase )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(__UpperCamelCase )
__a , __a : str = model_class.from_pretrained(__UpperCamelCase , output_loading_info=__UpperCamelCase )
self.assertEqual(info["""missing_keys"""] , [] )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_encoder_decoder_model_standalone(*__UpperCamelCase )
@unittest.skip(reason="""Model has no token embeddings""" )
def __lowerCamelCase ( self ):
'''simple docstring'''
pass
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Union[str, Any] = inspect.signature(getattr(__UpperCamelCase , """forward""" ) )
# The main input is the name of the argument after `self`
__a : Any = list(model_signature.parameters.keys() )[1]
self.assertEqual(AutoformerModel.main_input_name , __UpperCamelCase )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a , __a : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__a : Tuple = model_class(__UpperCamelCase )
__a : Optional[Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__a : Union[str, Any] = [*signature.parameters.keys()]
__a : Dict = [
"""past_values""",
"""past_time_features""",
"""past_observed_mask""",
"""static_categorical_features""",
"""static_real_features""",
"""future_values""",
"""future_time_features""",
]
if model.__class__.__name__ in ["AutoformerForPrediction"]:
expected_arg_names.append("""future_observed_mask""" )
expected_arg_names.extend(
[
"""decoder_attention_mask""",
"""head_mask""",
"""decoder_head_mask""",
"""cross_attn_head_mask""",
"""encoder_outputs""",
"""past_key_values""",
"""output_hidden_states""",
"""output_attentions""",
"""use_cache""",
"""return_dict""",
] )
self.assertListEqual(arg_names[: len(__UpperCamelCase )] , __UpperCamelCase )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a , __a : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
__a : Any = True
__a : List[str] = getattr(self.model_tester , """seq_length""" , __UpperCamelCase )
__a : Optional[int] = getattr(self.model_tester , """decoder_seq_length""" , __UpperCamelCase )
__a : Tuple = getattr(self.model_tester , """encoder_seq_length""" , __UpperCamelCase )
__a : Dict = getattr(self.model_tester , """d_model""" , __UpperCamelCase )
__a : List[str] = getattr(self.model_tester , """num_attention_heads""" , __UpperCamelCase )
__a : Any = d_model // num_attention_heads
for model_class in self.all_model_classes:
__a : List[str] = True
__a : str = False
__a : Tuple = True
__a : Dict = model_class(__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
with torch.no_grad():
__a : int = model(**self._prepare_for_class(__UpperCamelCase , __UpperCamelCase ) )
__a : Any = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(__UpperCamelCase ) , self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
__a : Optional[int] = True
__a : Union[str, Any] = model_class(__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
with torch.no_grad():
__a : List[str] = model(**self._prepare_for_class(__UpperCamelCase , __UpperCamelCase ) )
__a : Any = outputs.encoder_attentions
self.assertEqual(len(__UpperCamelCase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, dim] , )
__a : List[Any] = len(__UpperCamelCase )
__a : str = 7
if "last_hidden_state" in outputs:
correct_outlen += 1
if "trend" in outputs:
correct_outlen += 1
if "past_key_values" in outputs:
correct_outlen += 1 # past_key_values have been returned
if "loss" in outputs:
correct_outlen += 1
if "params" in outputs:
correct_outlen += 1
self.assertEqual(__UpperCamelCase , __UpperCamelCase )
# decoder attentions
__a : int = outputs.decoder_attentions
self.assertIsInstance(__UpperCamelCase , (list, tuple) )
self.assertEqual(len(__UpperCamelCase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, decoder_seq_length, dim] , )
# cross attentions
__a : Union[str, Any] = outputs.cross_attentions
self.assertIsInstance(__UpperCamelCase , (list, tuple) )
self.assertEqual(len(__UpperCamelCase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(cross_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, decoder_seq_length, dim] , )
# Check attention is always last and order is fine
__a : Union[str, Any] = True
__a : List[str] = True
__a : Union[str, Any] = model_class(__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
with torch.no_grad():
__a : str = model(**self._prepare_for_class(__UpperCamelCase , __UpperCamelCase ) )
self.assertEqual(out_len + 2 , len(__UpperCamelCase ) )
__a : List[str] = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(__UpperCamelCase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, dim] , )
@is_flaky()
def __lowerCamelCase ( self ):
'''simple docstring'''
super().test_retain_grad_hidden_states_attentions()
def _snake_case ( lowercase="train-batch.pt" ) -> List[str]:
__a : Union[str, Any] = hf_hub_download(repo_id="""hf-internal-testing/tourism-monthly-batch""" , filename=lowercase , repo_type="""dataset""" )
__a : str = torch.load(lowercase , map_location=lowercase )
return batch
@require_torch
@slow
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Any = AutoformerModel.from_pretrained("""huggingface/autoformer-tourism-monthly""" ).to(__UpperCamelCase )
__a : Dict = prepare_batch()
with torch.no_grad():
__a : List[str] = model(
past_values=batch["""past_values"""] , past_time_features=batch["""past_time_features"""] , past_observed_mask=batch["""past_observed_mask"""] , static_categorical_features=batch["""static_categorical_features"""] , future_values=batch["""future_values"""] , future_time_features=batch["""future_time_features"""] , )[0]
__a : Dict = torch.Size(
(64, model.config.prediction_length + model.config.label_length, model.config.feature_size) )
self.assertEqual(output.shape , __UpperCamelCase )
__a : List[Any] = torch.tensor(
[[0.3_5_9_3, -1.3_3_9_8, 0.6_3_3_0], [0.2_2_7_9, 1.5_3_9_6, -0.1_7_9_2], [0.0_4_5_0, 1.3_2_2_5, -0.2_3_3_5]] , device=__UpperCamelCase )
self.assertTrue(torch.allclose(output[0, :3, :3] , __UpperCamelCase , atol=__UpperCamelCase ) )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : int = AutoformerForPrediction.from_pretrained("""huggingface/autoformer-tourism-monthly""" ).to(__UpperCamelCase )
__a : List[str] = prepare_batch("""val-batch.pt""" )
with torch.no_grad():
__a : int = model(
past_values=batch["""past_values"""] , past_time_features=batch["""past_time_features"""] , past_observed_mask=batch["""past_observed_mask"""] , static_categorical_features=batch["""static_categorical_features"""] , ).encoder_last_hidden_state
__a : Any = torch.Size((64, model.config.context_length, model.config.d_model) )
self.assertEqual(output.shape , __UpperCamelCase )
__a : Optional[Any] = torch.tensor(
[[-0.0_7_3_4, -0.9_0_3_6, 0.8_3_5_8], [4.7_1_8_6, 2.4_1_1_3, 1.9_5_8_1], [1.7_9_5_3, 2.3_5_5_8, 1.2_9_7_0]] , device=__UpperCamelCase )
self.assertTrue(torch.allclose(output[0, :3, :3] , __UpperCamelCase , atol=__UpperCamelCase ) )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Optional[Any] = AutoformerForPrediction.from_pretrained("""huggingface/autoformer-tourism-monthly""" ).to(__UpperCamelCase )
__a : Dict = prepare_batch("""val-batch.pt""" )
with torch.no_grad():
__a : int = model.generate(
static_categorical_features=batch["""static_categorical_features"""] , past_time_features=batch["""past_time_features"""] , past_values=batch["""past_values"""] , future_time_features=batch["""future_time_features"""] , past_observed_mask=batch["""past_observed_mask"""] , )
__a : List[str] = torch.Size((64, model.config.num_parallel_samples, model.config.prediction_length) )
self.assertEqual(outputs.sequences.shape , __UpperCamelCase )
__a : Union[str, Any] = torch.tensor([3_1_3_0.6_7_6_3, 4_0_5_6.5_2_9_3, 7_0_5_3.0_7_8_6] , device=__UpperCamelCase )
__a : Union[str, Any] = outputs.sequences.mean(dim=1 )
self.assertTrue(torch.allclose(mean_prediction[0, -3:] , __UpperCamelCase , rtol=1E-1 ) )
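# --- Added for illustration (not part of the original test file). ---
# A small sketch of how the sampled forecasts checked above are typically
# reduced to point and interval estimates. Assumes `sequences` of shape
# (batch, num_parallel_samples, prediction_length), as asserted in the test.
def _summarize_forecasts_sketch(sequences):
    point = sequences.mean(dim=1)                  # point forecast per horizon step
    lower = torch.quantile(sequences, 0.1, dim=1)  # lower bound of an 80% interval
    upper = torch.quantile(sequences, 0.9, dim=1)  # upper bound
    return point, lower, upper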
'''simple docstring'''
import unittest
from transformers import (
MODEL_FOR_OBJECT_DETECTION_MAPPING,
AutoFeatureExtractor,
AutoModelForObjectDetection,
ObjectDetectionPipeline,
is_vision_available,
pipeline,
)
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_pytesseract,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class SCREAMING_SNAKE_CASE__ :
@staticmethod
def __lowerCamelCase ( *__UpperCamelCase , **__UpperCamelCase ):
'''simple docstring'''
pass
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
lowercase__ = MODEL_FOR_OBJECT_DETECTION_MAPPING
def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ):
'''simple docstring'''
__a : Optional[Any] = ObjectDetectionPipeline(model=__UpperCamelCase , image_processor=__UpperCamelCase )
return object_detector, ["./tests/fixtures/tests_samples/COCO/000000039769.png"]
def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase ):
'''simple docstring'''
__a : List[str] = object_detector("""./tests/fixtures/tests_samples/COCO/000000039769.png""" , threshold=0.0 )
self.assertGreater(len(__UpperCamelCase ) , 0 )
for detected_object in outputs:
self.assertEqual(
__UpperCamelCase , {
"""score""": ANY(__UpperCamelCase ),
"""label""": ANY(__UpperCamelCase ),
"""box""": {"""xmin""": ANY(__UpperCamelCase ), """ymin""": ANY(__UpperCamelCase ), """xmax""": ANY(__UpperCamelCase ), """ymax""": ANY(__UpperCamelCase )},
} , )
import datasets
__a : Optional[int] = datasets.load_dataset("""hf-internal-testing/fixtures_image_utils""" , """image""" , split="""test""" )
__a : Tuple = [
Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ),
"""http://images.cocodataset.org/val2017/000000039769.jpg""",
# RGBA
dataset[0]["""file"""],
# LA
dataset[1]["""file"""],
# L
dataset[2]["""file"""],
]
__a : Any = object_detector(__UpperCamelCase , threshold=0.0 )
self.assertEqual(len(__UpperCamelCase ) , len(__UpperCamelCase ) )
for outputs in batch_outputs:
self.assertGreater(len(__UpperCamelCase ) , 0 )
for detected_object in outputs:
self.assertEqual(
__UpperCamelCase , {
"""score""": ANY(__UpperCamelCase ),
"""label""": ANY(__UpperCamelCase ),
"""box""": {"""xmin""": ANY(__UpperCamelCase ), """ymin""": ANY(__UpperCamelCase ), """xmax""": ANY(__UpperCamelCase ), """ymax""": ANY(__UpperCamelCase )},
} , )
@require_tf
@unittest.skip("""Object detection not implemented in TF""" )
def __lowerCamelCase ( self ):
'''simple docstring'''
pass
@require_torch
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Optional[Any] = """hf-internal-testing/tiny-detr-mobilenetsv3"""
__a : Dict = AutoModelForObjectDetection.from_pretrained(__UpperCamelCase )
__a : Optional[Any] = AutoFeatureExtractor.from_pretrained(__UpperCamelCase )
__a : str = ObjectDetectionPipeline(model=__UpperCamelCase , feature_extractor=__UpperCamelCase )
__a : Optional[int] = object_detector("""http://images.cocodataset.org/val2017/000000039769.jpg""" , threshold=0.0 )
self.assertEqual(
nested_simplify(__UpperCamelCase , decimals=4 ) , [
{"""score""": 0.3_3_7_6, """label""": """LABEL_0""", """box""": {"""xmin""": 159, """ymin""": 120, """xmax""": 480, """ymax""": 359}},
{"""score""": 0.3_3_7_6, """label""": """LABEL_0""", """box""": {"""xmin""": 159, """ymin""": 120, """xmax""": 480, """ymax""": 359}},
] , )
__a : Union[str, Any] = object_detector(
[
"""http://images.cocodataset.org/val2017/000000039769.jpg""",
"""http://images.cocodataset.org/val2017/000000039769.jpg""",
] , threshold=0.0 , )
self.assertEqual(
nested_simplify(__UpperCamelCase , decimals=4 ) , [
[
{"""score""": 0.3_3_7_6, """label""": """LABEL_0""", """box""": {"""xmin""": 159, """ymin""": 120, """xmax""": 480, """ymax""": 359}},
{"""score""": 0.3_3_7_6, """label""": """LABEL_0""", """box""": {"""xmin""": 159, """ymin""": 120, """xmax""": 480, """ymax""": 359}},
],
[
{"""score""": 0.3_3_7_6, """label""": """LABEL_0""", """box""": {"""xmin""": 159, """ymin""": 120, """xmax""": 480, """ymax""": 359}},
{"""score""": 0.3_3_7_6, """label""": """LABEL_0""", """box""": {"""xmin""": 159, """ymin""": 120, """xmax""": 480, """ymax""": 359}},
],
] , )
@require_torch
@slow
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : str = """facebook/detr-resnet-50"""
__a : Dict = AutoModelForObjectDetection.from_pretrained(__UpperCamelCase )
__a : int = AutoFeatureExtractor.from_pretrained(__UpperCamelCase )
__a : int = ObjectDetectionPipeline(model=__UpperCamelCase , feature_extractor=__UpperCamelCase )
__a : Any = object_detector("""http://images.cocodataset.org/val2017/000000039769.jpg""" )
self.assertEqual(
nested_simplify(__UpperCamelCase , decimals=4 ) , [
{"""score""": 0.9_9_8_2, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 70, """xmax""": 175, """ymax""": 117}},
{"""score""": 0.9_9_6_0, """label""": """remote""", """box""": {"""xmin""": 333, """ymin""": 72, """xmax""": 368, """ymax""": 187}},
{"""score""": 0.9_9_5_5, """label""": """couch""", """box""": {"""xmin""": 0, """ymin""": 1, """xmax""": 639, """ymax""": 473}},
{"""score""": 0.9_9_8_8, """label""": """cat""", """box""": {"""xmin""": 13, """ymin""": 52, """xmax""": 314, """ymax""": 470}},
{"""score""": 0.9_9_8_7, """label""": """cat""", """box""": {"""xmin""": 345, """ymin""": 23, """xmax""": 640, """ymax""": 368}},
] , )
__a : Optional[Any] = object_detector(
[
"""http://images.cocodataset.org/val2017/000000039769.jpg""",
"""http://images.cocodataset.org/val2017/000000039769.jpg""",
] )
self.assertEqual(
nested_simplify(__UpperCamelCase , decimals=4 ) , [
[
{"""score""": 0.9_9_8_2, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 70, """xmax""": 175, """ymax""": 117}},
{"""score""": 0.9_9_6_0, """label""": """remote""", """box""": {"""xmin""": 333, """ymin""": 72, """xmax""": 368, """ymax""": 187}},
{"""score""": 0.9_9_5_5, """label""": """couch""", """box""": {"""xmin""": 0, """ymin""": 1, """xmax""": 639, """ymax""": 473}},
{"""score""": 0.9_9_8_8, """label""": """cat""", """box""": {"""xmin""": 13, """ymin""": 52, """xmax""": 314, """ymax""": 470}},
{"""score""": 0.9_9_8_7, """label""": """cat""", """box""": {"""xmin""": 345, """ymin""": 23, """xmax""": 640, """ymax""": 368}},
],
[
{"""score""": 0.9_9_8_2, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 70, """xmax""": 175, """ymax""": 117}},
{"""score""": 0.9_9_6_0, """label""": """remote""", """box""": {"""xmin""": 333, """ymin""": 72, """xmax""": 368, """ymax""": 187}},
{"""score""": 0.9_9_5_5, """label""": """couch""", """box""": {"""xmin""": 0, """ymin""": 1, """xmax""": 639, """ymax""": 473}},
{"""score""": 0.9_9_8_8, """label""": """cat""", """box""": {"""xmin""": 13, """ymin""": 52, """xmax""": 314, """ymax""": 470}},
{"""score""": 0.9_9_8_7, """label""": """cat""", """box""": {"""xmin""": 345, """ymin""": 23, """xmax""": 640, """ymax""": 368}},
],
] , )
@require_torch
@slow
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : int = """facebook/detr-resnet-50"""
__a : Optional[int] = pipeline("""object-detection""" , model=__UpperCamelCase )
__a : Optional[int] = object_detector("""http://images.cocodataset.org/val2017/000000039769.jpg""" )
self.assertEqual(
nested_simplify(__UpperCamelCase , decimals=4 ) , [
{"""score""": 0.9_9_8_2, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 70, """xmax""": 175, """ymax""": 117}},
{"""score""": 0.9_9_6_0, """label""": """remote""", """box""": {"""xmin""": 333, """ymin""": 72, """xmax""": 368, """ymax""": 187}},
{"""score""": 0.9_9_5_5, """label""": """couch""", """box""": {"""xmin""": 0, """ymin""": 1, """xmax""": 639, """ymax""": 473}},
{"""score""": 0.9_9_8_8, """label""": """cat""", """box""": {"""xmin""": 13, """ymin""": 52, """xmax""": 314, """ymax""": 470}},
{"""score""": 0.9_9_8_7, """label""": """cat""", """box""": {"""xmin""": 345, """ymin""": 23, """xmax""": 640, """ymax""": 368}},
] , )
__a : List[str] = object_detector(
[
"""http://images.cocodataset.org/val2017/000000039769.jpg""",
"""http://images.cocodataset.org/val2017/000000039769.jpg""",
] )
self.assertEqual(
nested_simplify(__UpperCamelCase , decimals=4 ) , [
[
{"""score""": 0.9_9_8_2, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 70, """xmax""": 175, """ymax""": 117}},
{"""score""": 0.9_9_6_0, """label""": """remote""", """box""": {"""xmin""": 333, """ymin""": 72, """xmax""": 368, """ymax""": 187}},
{"""score""": 0.9_9_5_5, """label""": """couch""", """box""": {"""xmin""": 0, """ymin""": 1, """xmax""": 639, """ymax""": 473}},
{"""score""": 0.9_9_8_8, """label""": """cat""", """box""": {"""xmin""": 13, """ymin""": 52, """xmax""": 314, """ymax""": 470}},
{"""score""": 0.9_9_8_7, """label""": """cat""", """box""": {"""xmin""": 345, """ymin""": 23, """xmax""": 640, """ymax""": 368}},
],
[
{"""score""": 0.9_9_8_2, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 70, """xmax""": 175, """ymax""": 117}},
{"""score""": 0.9_9_6_0, """label""": """remote""", """box""": {"""xmin""": 333, """ymin""": 72, """xmax""": 368, """ymax""": 187}},
{"""score""": 0.9_9_5_5, """label""": """couch""", """box""": {"""xmin""": 0, """ymin""": 1, """xmax""": 639, """ymax""": 473}},
{"""score""": 0.9_9_8_8, """label""": """cat""", """box""": {"""xmin""": 13, """ymin""": 52, """xmax""": 314, """ymax""": 470}},
{"""score""": 0.9_9_8_7, """label""": """cat""", """box""": {"""xmin""": 345, """ymin""": 23, """xmax""": 640, """ymax""": 368}},
],
] , )
@require_torch
@slow
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Union[str, Any] = 0.9_9_8_5
__a : Union[str, Any] = """facebook/detr-resnet-50"""
__a : Optional[int] = pipeline("""object-detection""" , model=__UpperCamelCase )
__a : Union[str, Any] = object_detector("""http://images.cocodataset.org/val2017/000000039769.jpg""" , threshold=__UpperCamelCase )
self.assertEqual(
nested_simplify(__UpperCamelCase , decimals=4 ) , [
{"""score""": 0.9_9_8_8, """label""": """cat""", """box""": {"""xmin""": 13, """ymin""": 52, """xmax""": 314, """ymax""": 470}},
{"""score""": 0.9_9_8_7, """label""": """cat""", """box""": {"""xmin""": 345, """ymin""": 23, """xmax""": 640, """ymax""": 368}},
] , )
@require_torch
@require_pytesseract
@slow
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : str = """Narsil/layoutlmv3-finetuned-funsd"""
__a : List[Any] = 0.9_9_9_3
__a : Dict = pipeline("""object-detection""" , model=__UpperCamelCase , threshold=__UpperCamelCase )
__a : List[str] = object_detector(
"""https://huggingface.co/spaces/impira/docquery/resolve/2359223c1837a7587402bda0f2643382a6eefeab/invoice.png""" )
self.assertEqual(
nested_simplify(__UpperCamelCase , decimals=4 ) , [
{"""score""": 0.9_9_9_3, """label""": """I-ANSWER""", """box""": {"""xmin""": 294, """ymin""": 254, """xmax""": 343, """ymax""": 264}},
{"""score""": 0.9_9_9_3, """label""": """I-ANSWER""", """box""": {"""xmin""": 294, """ymin""": 254, """xmax""": 343, """ymax""": 264}},
] , )
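# --- Added for illustration (hypothetical helper, not from the test suite). ---
# The threshold tests above exercise the pipeline's post-filtering;
# conceptually it keeps only detections whose score clears the threshold,
# in the same {"score", "label", "box"} format asserted throughout this file.
def _filter_detections_sketch(detections, threshold=0.9):
    return [d for d in detections if d["score"] >= threshold]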
'''simple docstring'''
import json
import os
import unittest
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES, BioGptTokenizer
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class SCREAMING_SNAKE_CASE__ ( __UpperCamelCase , unittest.TestCase ):
lowercase__ = BioGptTokenizer
lowercase__ = False
def __lowerCamelCase ( self ):
'''simple docstring'''
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
__a : Optional[Any] = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""w</w>""",
"""r</w>""",
"""t</w>""",
"""lo""",
"""low""",
"""er</w>""",
"""low</w>""",
"""lowest</w>""",
"""newer</w>""",
"""wider</w>""",
"""<unk>""",
]
__a : Optional[Any] = dict(zip(__UpperCamelCase , range(len(__UpperCamelCase ) ) ) )
__a : Union[str, Any] = ["""l o 123""", """lo w 1456""", """e r</w> 1789""", """"""]
__a : List[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
__a : str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] )
with open(self.vocab_file , """w""" ) as fp:
fp.write(json.dumps(__UpperCamelCase ) )
with open(self.merges_file , """w""" ) as fp:
fp.write("""\n""".join(__UpperCamelCase ) )
def __lowerCamelCase ( self , __UpperCamelCase ):
'''simple docstring'''
__a : List[str] = """lower newer"""
__a : int = """lower newer"""
return input_text, output_text
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Any = BioGptTokenizer(self.vocab_file , self.merges_file )
__a : Optional[int] = """lower"""
__a : Dict = ["""low""", """er</w>"""]
__a : str = tokenizer.tokenize(__UpperCamelCase )
self.assertListEqual(__UpperCamelCase , __UpperCamelCase )
__a : str = tokens + ["""<unk>"""]
__a : Optional[Any] = [14, 15, 20]
self.assertListEqual(tokenizer.convert_tokens_to_ids(__UpperCamelCase ) , __UpperCamelCase )
@slow
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Dict = BioGptTokenizer.from_pretrained("""microsoft/biogpt""" )
__a : Optional[int] = tokenizer.encode("""sequence builders""" , add_special_tokens=__UpperCamelCase )
__a : List[Any] = tokenizer.encode("""multi-sequence build""" , add_special_tokens=__UpperCamelCase )
__a : Dict = tokenizer.build_inputs_with_special_tokens(__UpperCamelCase )
__a : List[Any] = tokenizer.build_inputs_with_special_tokens(__UpperCamelCase , __UpperCamelCase )
self.assertTrue(encoded_sentence == [2] + text )
self.assertTrue(encoded_pair == [2] + text + [2] + text_a )
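# --- Added for illustration (not part of the original test file). ---
# A toy sketch of one greedy BPE merge step, the mechanism behind the
# vocab/merges fixture above. With the merges "l o", "lo w", "e r</w>",
# "lower" tokenizes as l o w e r</w> -> lo w e r</w> -> low e r</w> -> low er</w>,
# matching the ["low", "er</w>"] expectation in the test.
def _apply_merge_sketch(symbols, merge):
    out, i = [], 0
    while i < len(symbols):
        if i + 1 < len(symbols) and (symbols[i], symbols[i + 1]) == merge:
            out.append(symbols[i] + symbols[i + 1])  # join the merged pair
            i += 2
        else:
            out.append(symbols[i])
            i += 1
    return out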
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
__SCREAMING_SNAKE_CASE : List[str] = {
'configuration_blenderbot_small': [
'BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP',
'BlenderbotSmallConfig',
'BlenderbotSmallOnnxConfig',
],
'tokenization_blenderbot_small': ['BlenderbotSmallTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE : Union[str, Any] = ['BlenderbotSmallTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE : List[str] = [
'BLENDERBOT_SMALL_PRETRAINED_MODEL_ARCHIVE_LIST',
'BlenderbotSmallForCausalLM',
'BlenderbotSmallForConditionalGeneration',
'BlenderbotSmallModel',
'BlenderbotSmallPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE : Optional[int] = [
'TFBlenderbotSmallForConditionalGeneration',
'TFBlenderbotSmallModel',
'TFBlenderbotSmallPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE : Optional[Any] = [
'FlaxBlenderbotSmallForConditionalGeneration',
'FlaxBlenderbotSmallModel',
'FlaxBlenderbotSmallPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_blenderbot_small import (
BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlenderbotSmallConfig,
BlenderbotSmallOnnxConfig,
)
from .tokenization_blenderbot_small import BlenderbotSmallTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_blenderbot_small_fast import BlenderbotSmallTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blenderbot_small import (
BLENDERBOT_SMALL_PRETRAINED_MODEL_ARCHIVE_LIST,
BlenderbotSmallForCausalLM,
BlenderbotSmallForConditionalGeneration,
BlenderbotSmallModel,
BlenderbotSmallPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blenderbot_small import (
TFBlenderbotSmallForConditionalGeneration,
TFBlenderbotSmallModel,
TFBlenderbotSmallPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_blenderbot_small import (
FlaxBlenderbotSmallForConditionalGeneration,
FlaxBlenderbotSmallModel,
FlaxBlenderbotSmallPreTrainedModel,
)
else:
import sys
__SCREAMING_SNAKE_CASE : List[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
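# --- Added for illustration (a stripped-down sketch, not the real _LazyModule). ---
# The pattern above defers every submodule import until an attribute is first
# accessed: the module object maps each exported name to the submodule that
# defines it and imports that submodule on demand.
import importlib
import types

class _LazyModuleSketch(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._name_to_submodule = {
            exported: submodule
            for submodule, exported_names in import_structure.items()
            for exported in exported_names
        }

    def __getattr__(self, attr):
        if attr not in self._name_to_submodule:
            raise AttributeError(attr)
        module = importlib.import_module("." + self._name_to_submodule[attr], self.__name__)
        return getattr(module, attr)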
'''simple docstring'''
from scipy.stats import pearsonr
import datasets
__SCREAMING_SNAKE_CASE : Optional[int] = '\nPearson correlation coefficient and p-value for testing non-correlation.\nThe Pearson correlation coefficient measures the linear relationship between two datasets. The calculation of the p-value relies on the assumption that each dataset is normally distributed. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Correlations of -1 or +1 imply an exact linear relationship. Positive correlations imply that as x increases, so does y. Negative correlations imply that as x increases, y decreases.\nThe p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets.\n'
__SCREAMING_SNAKE_CASE : List[Any] = '\nArgs:\n predictions (`list` of `float`): Predicted values, as returned by a model.\n references (`list` of `float`): Ground truth values.\n return_pvalue (`boolean`): If `True`, returns the p-value, along with the correlation coefficient. If `False`, returns only the correlation coefficient. Defaults to `False`.\n\nReturns:\n pearsonr (`float`): Pearson correlation coefficient. Minimum possible value is -1. Maximum possible value is 1. Values of 1 and -1 indicate exact linear positive and negative relationships, respectively. A value of 0 implies no correlation.\n p-value (`float`): P-value, which roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets. Minimum possible value is 0. Maximum possible value is 1. Higher values indicate higher probabilities.\n\nExamples:\n\n Example 1-A simple example using only predictions and references.\n >>> pearsonr_metric = datasets.load_metric("pearsonr")\n >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5])\n >>> print(round(results[\'pearsonr\'], 2))\n -0.74\n\n Example 2-The same as Example 1, but it also returns the `p-value`.\n >>> pearsonr_metric = datasets.load_metric("pearsonr")\n >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5], return_pvalue=True)\n >>> print(sorted(list(results.keys())))\n [\'p-value\', \'pearsonr\']\n >>> print(round(results[\'pearsonr\'], 2))\n -0.74\n >>> print(round(results[\'p-value\'], 2))\n 0.15\n'
__SCREAMING_SNAKE_CASE : int = '\n@article{2020SciPy-NMeth,\nauthor = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and\n Haberland, Matt and Reddy, Tyler and Cournapeau, David and\n Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and\n Bright, Jonathan and {van der Walt}, St{\'e}fan J. and\n Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and\n Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and\n Kern, Robert and Larson, Eric and Carey, C J and\n Polat, Ilhan and Feng, Yu and Moore, Eric W. and\n {VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and\n Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and\n Harris, Charles R. and Archibald, Anne M. and\n Ribeiro, Antonio H. and Pedregosa, Fabian and\n {van Mulbregt}, Paul and {SciPy 1.0 Contributors}},\ntitle = {{{SciPy} 1.0: Fundamental Algorithms for Scientific\n Computing in Python}},\njournal = {Nature Methods},\nyear = {2020},\nvolume = {17},\npages = {261--272},\nadsurl = {https://rdcu.be/b08Wh},\ndoi = {10.1038/s41592-019-0686-2},\n}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class SCREAMING_SNAKE_CASE__ ( datasets.Metric ):
def __lowerCamelCase ( self ):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""float""" ),
"""references""": datasets.Value("""float""" ),
} ) , reference_urls=["""https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.pearsonr.html"""] , )
def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase=False ):
'''simple docstring'''
if return_pvalue:
__a : Union[str, Any] = pearsonr(__UpperCamelCase , __UpperCamelCase )
return {"pearsonr": results[0], "p-value": results[1]}
else:
return {"pearsonr": float(pearsonr(__UpperCamelCase , __UpperCamelCase )[0] )} | 697 |
'''simple docstring'''
import numpy as np
import torch
from torch.utils.data import Dataset
from utils import logger
class SCREAMING_SNAKE_CASE__ ( __UpperCamelCase ):
def __init__( self , __UpperCamelCase , __UpperCamelCase ):
'''simple docstring'''
__a : Any = params
__a : Optional[Any] = np.array(__UpperCamelCase )
__a : Union[str, Any] = np.array([len(__UpperCamelCase ) for t in data] )
self.check()
self.remove_long_sequences()
self.remove_empty_sequences()
self.remove_unknown_sequences()
self.check()
self.print_statistics()
def __getitem__( self , __UpperCamelCase ):
'''simple docstring'''
return (self.token_ids[index], self.lengths[index])
def __len__( self ):
'''simple docstring'''
return len(self.lengths )
def __lowerCamelCase ( self ):
'''simple docstring'''
assert len(self.token_ids ) == len(self.lengths )
assert all(self.lengths[i] == len(self.token_ids[i] ) for i in range(len(self.lengths ) ) )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Tuple = self.params.max_model_input_size
__a : Union[str, Any] = self.lengths > max_len
logger.info(f"""Splitting {sum(__UpperCamelCase )} too long sequences.""" )
def divide_chunks(__UpperCamelCase , __UpperCamelCase ):
return [l[i : i + n] for i in range(0 , len(__UpperCamelCase ) , __UpperCamelCase )]
__a : int = []
__a : Union[str, Any] = []
if self.params.mlm:
__a , __a : Any = self.params.special_tok_ids["""cls_token"""], self.params.special_tok_ids["""sep_token"""]
else:
__a , __a : str = self.params.special_tok_ids["""bos_token"""], self.params.special_tok_ids["""eos_token"""]
for seq_, len_ in zip(self.token_ids , self.lengths ):
assert (seq_[0] == cls_id) and (seq_[-1] == sep_id), seq_
if len_ <= max_len:
new_tok_ids.append(seq_ )
new_lengths.append(len_ )
else:
__a : Any = []
for sub_s in divide_chunks(seq_ , max_len - 2 ):
if sub_s[0] != cls_id:
__a : int = np.insert(__UpperCamelCase , 0 , __UpperCamelCase )
if sub_s[-1] != sep_id:
__a : str = np.insert(__UpperCamelCase , len(__UpperCamelCase ) , __UpperCamelCase )
assert len(__UpperCamelCase ) <= max_len
assert (sub_s[0] == cls_id) and (sub_s[-1] == sep_id), sub_s
sub_seqs.append(__UpperCamelCase )
new_tok_ids.extend(__UpperCamelCase )
new_lengths.extend([len(__UpperCamelCase ) for l in sub_seqs] )
__a : Dict = np.array(__UpperCamelCase )
__a : Tuple = np.array(__UpperCamelCase )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : List[str] = len(self )
__a : List[str] = self.lengths > 11
__a : int = self.token_ids[indices]
__a : Union[str, Any] = self.lengths[indices]
__a : Any = len(self )
logger.info(f"""Remove {init_size - new_size} too short (<=11 tokens) sequences.""" )
def __lowerCamelCase ( self ):
'''simple docstring'''
if "unk_token" not in self.params.special_tok_ids:
return
else:
__a : List[str] = self.params.special_tok_ids["""unk_token"""]
__a : str = len(self )
__a : str = np.array([np.count_nonzero(a == unk_token_id ) for a in self.token_ids] )
__a : Optional[Any] = (unk_occs / self.lengths) < 0.5
__a : List[str] = self.token_ids[indices]
__a : Optional[int] = self.lengths[indices]
__a : Any = len(self )
logger.info(f"""Remove {init_size - new_size} sequences with a high level of unknown tokens (50%).""" )
def __lowerCamelCase ( self ):
'''simple docstring'''
if not self.params.is_master:
return
logger.info(f"""{len(self )} sequences""" )
# data_len = sum(self.lengths)
# nb_unique_tokens = len(Counter(list(chain(*self.token_ids))))
# logger.info(f'{data_len} tokens ({nb_unique_tokens} unique)')
# unk_idx = self.params.special_tok_ids['unk_token']
# nb_unknown = sum([(t==unk_idx).sum() for t in self.token_ids])
# logger.info(f'{nb_unknown} unknown tokens (covering {100*nb_unknown/data_len:.2f}% of the data)')
def __lowerCamelCase ( self , __UpperCamelCase ):
'''simple docstring'''
__a : List[str] = [t[0] for t in batch]
__a : str = [t[1] for t in batch]
assert len(__UpperCamelCase ) == len(__UpperCamelCase )
# Max for paddings
__a : Optional[int] = max(__UpperCamelCase )
# Pad token ids
if self.params.mlm:
__a : int = self.params.special_tok_ids["""pad_token"""]
else:
__a : Tuple = self.params.special_tok_ids["""unk_token"""]
__a : Any = [list(t.astype(__UpperCamelCase ) ) + [pad_idx] * (max_seq_len_ - len(__UpperCamelCase )) for t in token_ids]
assert len(tk_ ) == len(__UpperCamelCase )
assert all(len(__UpperCamelCase ) == max_seq_len_ for t in tk_ )
__a : Any = torch.tensor(tk_ ) # (bs, max_seq_len_)
__a : Optional[Any] = torch.tensor(__UpperCamelCase ) # (bs)
return tk_t, lg_t
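# --- Added for illustration (not part of the original dataset class). ---
# The essence of the collator above in isolation: right-pad every sequence in
# a batch to the batch maximum with a pad id, and return the true lengths.
def _pad_batch_sketch(token_ids, pad_idx):
    max_len = max(len(t) for t in token_ids)
    padded = [list(t) + [pad_idx] * (max_len - len(t)) for t in token_ids]
    lengths = [len(t) for t in token_ids]
    return torch.tensor(padded), torch.tensor(lengths)  # (bs, max_len), (bs,)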
'''simple docstring'''
import os
from huggingface_hub.constants import HUGGINGFACE_HUB_CACHE, hf_cache_home
__SCREAMING_SNAKE_CASE : Tuple = HUGGINGFACE_HUB_CACHE
__SCREAMING_SNAKE_CASE : Dict = 'config.json'
__SCREAMING_SNAKE_CASE : str = 'diffusion_pytorch_model.bin'
__SCREAMING_SNAKE_CASE : Optional[Any] = 'diffusion_flax_model.msgpack'
__SCREAMING_SNAKE_CASE : int = 'model.onnx'
__SCREAMING_SNAKE_CASE : Tuple = 'diffusion_pytorch_model.safetensors'
__SCREAMING_SNAKE_CASE : Dict = 'weights.pb'
__SCREAMING_SNAKE_CASE : List[str] = 'https://huggingface.co'
__SCREAMING_SNAKE_CASE : str = default_cache_path
__SCREAMING_SNAKE_CASE : List[str] = 'diffusers_modules'
__SCREAMING_SNAKE_CASE : Tuple = os.getenv('HF_MODULES_CACHE', os.path.join(hf_cache_home, 'modules'))
__SCREAMING_SNAKE_CASE : List[Any] = ['fp16', 'non-ema']
__SCREAMING_SNAKE_CASE : Union[str, Any] = '.self_attn'
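# For context (an assumption based on huggingface_hub's documented defaults,
# not defined in this file): `hf_cache_home` resolves from the environment as
#   os.path.expanduser(os.getenv("HF_HOME",
#       os.path.join(os.getenv("XDG_CACHE_HOME", "~/.cache"), "huggingface")))
# and HUGGINGFACE_HUB_CACHE, reused as `default_cache_path` above, is the
# "hub" directory beneath it.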
'''simple docstring'''
from pathlib import PurePosixPath
from typing import Optional
import fsspec
from fsspec import AbstractFileSystem
from huggingface_hub.hf_api import DatasetInfo
from ..utils.file_utils import get_authentication_headers_for_url
from ..utils.hub import hf_hub_url
class SCREAMING_SNAKE_CASE__ ( __UpperCamelCase ):
lowercase__ = ""
lowercase__ = "hf-legacy" # "hf://"" is reserved for hffs
def __init__( self , __UpperCamelCase = None , __UpperCamelCase = None , **__UpperCamelCase , ):
'''simple docstring'''
super().__init__(self , **__UpperCamelCase )
__a : int = repo_info
__a : int = token
__a : Any = None
def __lowerCamelCase ( self ):
'''simple docstring'''
if self.dir_cache is None:
__a : Union[str, Any] = {}
for hf_file in self.repo_info.siblings:
# TODO(QL): add sizes
__a : List[str] = {
"""name""": hf_file.rfilename,
"""size""": None,
"""type""": """file""",
}
self.dir_cache.update(
{
str(__UpperCamelCase ): {"""name""": str(__UpperCamelCase ), """size""": None, """type""": """directory"""}
for d in list(PurePosixPath(hf_file.rfilename ).parents )[:-1]
} )
def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase = "rb" , **__UpperCamelCase , ):
'''simple docstring'''
if not isinstance(self.repo_info , __UpperCamelCase ):
raise NotImplementedError(f"""Open is only implemented for dataset repositories, but got {self.repo_info}""" )
__a : Any = hf_hub_url(self.repo_info.id , __UpperCamelCase , revision=self.repo_info.sha )
return fsspec.open(
__UpperCamelCase , mode=__UpperCamelCase , headers=get_authentication_headers_for_url(__UpperCamelCase , use_auth_token=self.token ) , client_kwargs={"""trust_env""": True} , ).open()
def __lowerCamelCase ( self , __UpperCamelCase , **__UpperCamelCase ):
'''simple docstring'''
self._get_dirs()
__a : str = self._strip_protocol(__UpperCamelCase )
if path in self.dir_cache:
return self.dir_cache[path]
else:
raise FileNotFoundError(__UpperCamelCase )
def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase=False , **__UpperCamelCase ):
'''simple docstring'''
self._get_dirs()
__a : int = PurePosixPath(path.strip("""/""" ) )
__a : List[str] = {}
for p, f in self.dir_cache.items():
__a : str = PurePosixPath(p.strip("""/""" ) )
__a : Optional[int] = p.parent
if root == path:
__a : List[str] = f
__a : str = list(paths.values() )
if detail:
return out
else:
return sorted(f["""name"""] for f in out )
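# --- Added for illustration (usage sketch with hypothetical names). ---
# Once instantiated with a dataset's repo info, the class above exposes the
# repo like a read-only file tree through the standard fsspec surface:
#   fs = HfFileSystem(repo_info=dataset_info, token=token)
#   fs.ls("data")                      # -> entries with "name", "size", "type"
#   with fs.open("data/train.csv") as f:
#       first_line = f.readline()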
'''simple docstring'''
import inspect
import unittest
import torch
import torch.nn as nn
from accelerate.hooks import (
AlignDevicesHook,
ModelHook,
SequentialHook,
add_hook_to_module,
attach_align_device_hook,
remove_hook_from_module,
remove_hook_from_submodules,
)
from accelerate.test_utils import require_multi_gpu
class SCREAMING_SNAKE_CASE__ ( nn.Module ):
def __init__( self ):
'''simple docstring'''
super().__init__()
__a : Union[str, Any] = nn.Linear(3 , 4 )
__a : Optional[int] = nn.BatchNormad(4 )
__a : List[Any] = nn.Linear(4 , 5 )
def __lowerCamelCase ( self , __UpperCamelCase ):
'''simple docstring'''
return self.lineara(self.batchnorm(self.lineara(__UpperCamelCase ) ) )
class SCREAMING_SNAKE_CASE__ ( __UpperCamelCase ):
def __lowerCamelCase ( self , __UpperCamelCase , *__UpperCamelCase , **__UpperCamelCase ):
'''simple docstring'''
return (args[0] + 1,) + args[1:], kwargs
class SCREAMING_SNAKE_CASE__ ( __UpperCamelCase ):
def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase ):
'''simple docstring'''
return output + 1
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : List[Any] = ModelForTest()
__a : Any = ModelHook()
add_hook_to_module(__UpperCamelCase , __UpperCamelCase )
self.assertEqual(test_model._hf_hook , __UpperCamelCase )
self.assertTrue(hasattr(__UpperCamelCase , """_old_forward""" ) )
# Check adding the hook did not change the name or the signature
self.assertEqual(test_model.forward.__name__ , """forward""" )
self.assertListEqual(list(inspect.signature(test_model.forward ).parameters ) , ["""x"""] )
remove_hook_from_module(__UpperCamelCase )
self.assertFalse(hasattr(__UpperCamelCase , """_hf_hook""" ) )
self.assertFalse(hasattr(__UpperCamelCase , """_old_forward""" ) )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Tuple = ModelForTest()
__a : Any = ModelHook()
add_hook_to_module(__UpperCamelCase , __UpperCamelCase )
add_hook_to_module(__UpperCamelCase , __UpperCamelCase , append=__UpperCamelCase )
self.assertEqual(isinstance(test_model._hf_hook , __UpperCamelCase ) , __UpperCamelCase )
self.assertEqual(len(test_model._hf_hook.hooks ) , 2 )
self.assertTrue(hasattr(__UpperCamelCase , """_old_forward""" ) )
# Check adding the hook did not change the name or the signature
self.assertEqual(test_model.forward.__name__ , """forward""" )
self.assertListEqual(list(inspect.signature(test_model.forward ).parameters ) , ["""x"""] )
remove_hook_from_module(__UpperCamelCase )
self.assertFalse(hasattr(__UpperCamelCase , """_hf_hook""" ) )
self.assertFalse(hasattr(__UpperCamelCase , """_old_forward""" ) )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Optional[int] = ModelForTest()
__a : Optional[Any] = torch.randn(2 , 3 )
__a : Tuple = test_model(x + 1 )
__a : Optional[int] = test_model(x + 2 )
__a : Tuple = PreForwardHook()
add_hook_to_module(__UpperCamelCase , __UpperCamelCase )
__a : Optional[Any] = test_model(__UpperCamelCase )
self.assertTrue(torch.allclose(__UpperCamelCase , __UpperCamelCase , atol=1E-5 ) )
# Attaching a hook to a model when it already has one replaces, does not chain
__a : str = PreForwardHook()
add_hook_to_module(__UpperCamelCase , __UpperCamelCase )
__a : Optional[int] = test_model(__UpperCamelCase )
self.assertTrue(torch.allclose(__UpperCamelCase , __UpperCamelCase , atol=1E-5 ) )
# You need to use the sequential hook to chain two or more hooks
__a : Dict = SequentialHook(PreForwardHook() , PreForwardHook() )
add_hook_to_module(__UpperCamelCase , __UpperCamelCase )
__a : Optional[int] = test_model(__UpperCamelCase )
assert torch.allclose(__UpperCamelCase , __UpperCamelCase , atol=1E-5 )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Union[str, Any] = ModelForTest()
__a : int = torch.randn(2 , 3 )
__a : Tuple = test_model(__UpperCamelCase )
__a : Union[str, Any] = PostForwardHook()
add_hook_to_module(__UpperCamelCase , __UpperCamelCase )
__a : str = test_model(__UpperCamelCase )
self.assertTrue(torch.allclose(__UpperCamelCase , output + 1 , atol=1E-5 ) )
# Attaching a hook to a model when it already has one replaces, does not chain
__a : Any = PostForwardHook()
add_hook_to_module(__UpperCamelCase , __UpperCamelCase )
__a : Union[str, Any] = test_model(__UpperCamelCase )
self.assertTrue(torch.allclose(__UpperCamelCase , output + 1 , atol=1E-5 ) )
# You need to use the sequential hook to chain two or more hooks
__a : Any = SequentialHook(PostForwardHook() , PostForwardHook() )
add_hook_to_module(__UpperCamelCase , __UpperCamelCase )
__a : int = test_model(__UpperCamelCase )
assert torch.allclose(__UpperCamelCase , output + 2 , atol=1E-5 )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : List[str] = ModelForTest()
__a : List[str] = torch.randn(2 , 3 )
__a : Union[str, Any] = test_model(__UpperCamelCase )
__a : Any = PostForwardHook()
add_hook_to_module(__UpperCamelCase , __UpperCamelCase )
__a : str = test_model(__UpperCamelCase )
self.assertTrue(torch.allclose(__UpperCamelCase , output + 1 ) )
self.assertTrue(outputa.requires_grad )
__a : List[Any] = True
__a : Union[str, Any] = test_model(__UpperCamelCase )
self.assertFalse(outputa.requires_grad )
@require_multi_gpu
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Optional[Any] = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
# This will move each submodule on different devices
add_hook_to_module(model.lineara , AlignDevicesHook(execution_device=0 ) )
add_hook_to_module(model.batchnorm , AlignDevicesHook(execution_device=0 ) )
add_hook_to_module(model.lineara , AlignDevicesHook(execution_device=1 ) )
self.assertEqual(model.lineara.weight.device , torch.device(0 ) )
self.assertEqual(model.batchnorm.weight.device , torch.device(0 ) )
self.assertEqual(model.batchnorm.running_mean.device , torch.device(0 ) )
self.assertEqual(model.lineara.weight.device , torch.device(1 ) )
# We can still make a forward pass. The input does not need to be on any particular device
__a : str = torch.randn(2 , 3 )
__a : str = model(__UpperCamelCase )
self.assertEqual(output.device , torch.device(1 ) )
# We can add a general hook to put back output on same device as input.
add_hook_to_module(__UpperCamelCase , AlignDevicesHook(io_same_device=__UpperCamelCase ) )
__a : List[Any] = torch.randn(2 , 3 ).to(0 )
__a : Optional[Any] = model(__UpperCamelCase )
self.assertEqual(output.device , torch.device(0 ) )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Dict = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
# This will move each submodule on different devices
__a : Optional[Any] = {"""execution_device""": 0 if torch.cuda.is_available() else """cpu""", """offload""": True}
add_hook_to_module(model.lineara , AlignDevicesHook(**__UpperCamelCase ) )
add_hook_to_module(model.batchnorm , AlignDevicesHook(**__UpperCamelCase ) )
add_hook_to_module(model.lineara , AlignDevicesHook(**__UpperCamelCase ) )
# Parameters have been offloaded, so on the meta device
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
# Buffers are not included in the offload by default, so are on the execution device
__a : Union[str, Any] = torch.device(hook_kwargs["""execution_device"""] )
self.assertEqual(model.batchnorm.running_mean.device , __UpperCamelCase )
__a : List[str] = torch.randn(2 , 3 )
__a : List[Any] = model(__UpperCamelCase )
self.assertEqual(output.device , __UpperCamelCase )
# Removing hooks loads back the weights in the model.
remove_hook_from_module(model.lineara )
remove_hook_from_module(model.batchnorm )
remove_hook_from_module(model.lineara )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
# Now test with buffers included in the offload
__a : Any = {
"""execution_device""": 0 if torch.cuda.is_available() else """cpu""",
"""offload""": True,
"""offload_buffers""": True,
}
add_hook_to_module(model.lineara , AlignDevicesHook(**__UpperCamelCase ) )
add_hook_to_module(model.batchnorm , AlignDevicesHook(**__UpperCamelCase ) )
add_hook_to_module(model.lineara , AlignDevicesHook(**__UpperCamelCase ) )
# Parameters have been offloaded, so on the meta device, buffers included
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.running_mean.device , torch.device("""meta""" ) )
__a : Optional[Any] = torch.randn(2 , 3 )
__a : Optional[int] = model(__UpperCamelCase )
self.assertEqual(output.device , __UpperCamelCase )
# Removing hooks loads back the weights in the model.
remove_hook_from_module(model.lineara )
remove_hook_from_module(model.batchnorm )
remove_hook_from_module(model.lineara )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Tuple = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
# This will move each submodule on different devices
__a : Union[str, Any] = 0 if torch.cuda.is_available() else """cpu"""
attach_align_device_hook(__UpperCamelCase , execution_device=__UpperCamelCase , offload=__UpperCamelCase )
# Parameters have been offloaded, so on the meta device
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
# Buffers are not included in the offload by default, so are on the execution device
__a : str = torch.device(__UpperCamelCase )
self.assertEqual(model.batchnorm.running_mean.device , __UpperCamelCase )
__a : Any = torch.randn(2 , 3 )
__a : List[Any] = model(__UpperCamelCase )
self.assertEqual(output.device , __UpperCamelCase )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(__UpperCamelCase )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
# Now test with buffers included in the offload
attach_align_device_hook(__UpperCamelCase , execution_device=__UpperCamelCase , offload=__UpperCamelCase , offload_buffers=__UpperCamelCase )
# Parameters have been offloaded, so on the meta device, buffers included
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.running_mean.device , torch.device("""meta""" ) )
__a : Tuple = torch.randn(2 , 3 )
__a : Union[str, Any] = model(__UpperCamelCase )
self.assertEqual(output.device , __UpperCamelCase )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(__UpperCamelCase )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Tuple = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
# This will move each submodule on different devices
__a : List[Any] = 0 if torch.cuda.is_available() else """cpu"""
attach_align_device_hook(
__UpperCamelCase , execution_device=__UpperCamelCase , offload=__UpperCamelCase , weights_map=model.state_dict() )
# Parameters have been offloaded, so on the meta device
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
# Buffers are not included in the offload by default, so are on the execution device
__a : str = torch.device(__UpperCamelCase )
self.assertEqual(model.batchnorm.running_mean.device , __UpperCamelCase )
__a : List[str] = torch.randn(2 , 3 )
__a : Tuple = model(__UpperCamelCase )
self.assertEqual(output.device , __UpperCamelCase )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(__UpperCamelCase )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
# Now test with buffers included in the offload
attach_align_device_hook(
__UpperCamelCase , execution_device=__UpperCamelCase , offload=__UpperCamelCase , weights_map=model.state_dict() , offload_buffers=__UpperCamelCase , )
# Parameters have been offloaded, so on the meta device, buffers included
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.running_mean.device , torch.device("""meta""" ) )
__a : Union[str, Any] = torch.randn(2 , 3 )
__a : Tuple = model(__UpperCamelCase )
self.assertEqual(output.device , __UpperCamelCase )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(__UpperCamelCase )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
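# --- Added for illustration (a condensed sketch, not Accelerate's implementation). ---
# The mechanism these tests exercise: attaching a hook swaps the module's
# forward for a wrapper that runs the hook's pre-/post-forward around the
# saved original, which is why removal must restore `_old_forward`.
def _add_simple_hook_sketch(module, hook):
    module._old_forward = module.forward
    def wrapped_forward(*args, **kwargs):
        args, kwargs = hook.pre_forward(module, *args, **kwargs)
        output = module._old_forward(*args, **kwargs)
        return hook.post_forward(module, output)
    module.forward = wrapped_forward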
'''simple docstring'''
import inspect
import unittest
from transformers import DPTConfig
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MODEL_MAPPING, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTModel
from transformers.models.dpt.modeling_dpt import DPT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class SCREAMING_SNAKE_CASE__ :
def __init__( self , __UpperCamelCase , __UpperCamelCase=2 , __UpperCamelCase=32 , __UpperCamelCase=16 , __UpperCamelCase=3 , __UpperCamelCase=True , __UpperCamelCase=True , __UpperCamelCase=32 , __UpperCamelCase=4 , __UpperCamelCase=[0, 1, 2, 3] , __UpperCamelCase=4 , __UpperCamelCase=37 , __UpperCamelCase="gelu" , __UpperCamelCase=0.1 , __UpperCamelCase=0.1 , __UpperCamelCase=0.0_2 , __UpperCamelCase=3 , __UpperCamelCase=[1, 384, 24, 24] , __UpperCamelCase=True , __UpperCamelCase=None , ):
'''simple docstring'''
__a : List[str] = parent
__a : Tuple = batch_size
__a : str = image_size
__a : int = patch_size
__a : Dict = num_channels
__a : int = is_training
__a : Dict = use_labels
__a : Union[str, Any] = hidden_size
__a : Dict = num_hidden_layers
__a : Dict = backbone_out_indices
__a : Optional[int] = num_attention_heads
__a : List[str] = intermediate_size
__a : Optional[Any] = hidden_act
__a : Dict = hidden_dropout_prob
__a : Tuple = attention_probs_dropout_prob
__a : Any = initializer_range
__a : Any = num_labels
__a : Optional[Any] = backbone_featmap_shape
__a : List[Any] = scope
__a : List[str] = is_hybrid
# sequence length of DPT = num_patches + 1 (we add 1 for the [CLS] token)
__a : Union[str, Any] = (image_size // patch_size) ** 2
__a : List[str] = num_patches + 1
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Dict = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__a : Union[str, Any] = None
if self.use_labels:
__a : str = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
__a : Tuple = self.get_config()
return config, pixel_values, labels
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : List[str] = {
"""global_padding""": """same""",
"""layer_type""": """bottleneck""",
"""depths""": [3, 4, 9],
"""out_features""": ["""stage1""", """stage2""", """stage3"""],
"""embedding_dynamic_padding""": True,
"""hidden_sizes""": [96, 192, 384, 768],
"""num_groups""": 2,
}
return DPTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , backbone_out_indices=self.backbone_out_indices , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__UpperCamelCase , initializer_range=self.initializer_range , is_hybrid=self.is_hybrid , backbone_config=__UpperCamelCase , backbone_featmap_shape=self.backbone_featmap_shape , )
def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ):
'''simple docstring'''
__a : Optional[Any] = DPTModel(config=__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
__a : List[str] = model(__UpperCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ):
'''simple docstring'''
__a : List[str] = self.num_labels
__a : Union[str, Any] = DPTForDepthEstimation(__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
__a : Tuple = model(__UpperCamelCase )
self.parent.assertEqual(result.predicted_depth.shape , (self.batch_size, self.image_size, self.image_size) )
def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ):
'''simple docstring'''
__a : Dict = self.num_labels
__a : Tuple = DPTForSemanticSegmentation(__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
__a : str = model(__UpperCamelCase , labels=__UpperCamelCase )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size, self.image_size) )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Optional[int] = self.prepare_config_and_inputs()
__a , __a , __a : Tuple = config_and_inputs
__a : List[str] = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class SCREAMING_SNAKE_CASE__ ( __UpperCamelCase , __UpperCamelCase , unittest.TestCase ):
lowercase__ = (DPTModel, DPTForDepthEstimation, DPTForSemanticSegmentation) if is_torch_available() else ()
lowercase__ = (
{
"depth-estimation": DPTForDepthEstimation,
"feature-extraction": DPTModel,
"image-segmentation": DPTForSemanticSegmentation,
}
if is_torch_available()
else {}
)
lowercase__ = False
lowercase__ = False
lowercase__ = False
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Optional[int] = DPTModelTester(self )
__a : List[Any] = ConfigTester(self , config_class=__UpperCamelCase , has_text_modality=__UpperCamelCase , hidden_size=37 )
def __lowerCamelCase ( self ):
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason="""DPT does not use inputs_embeds""" )
def __lowerCamelCase ( self ):
'''simple docstring'''
pass
def __lowerCamelCase ( self ):
'''simple docstring'''
__a , __a : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__a : str = model_class(__UpperCamelCase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
__a : Any = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__UpperCamelCase , nn.Linear ) )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a , __a : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__a : Any = model_class(__UpperCamelCase )
__a : List[str] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__a : int = [*signature.parameters.keys()]
__a : List[str] = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , __UpperCamelCase )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__UpperCamelCase )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_depth_estimation(*__UpperCamelCase )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*__UpperCamelCase )
def __lowerCamelCase ( self ):
'''simple docstring'''
for model_class in self.all_model_classes:
if model_class.__name__ == "DPTForDepthEstimation":
continue
__a , __a : Dict = self.model_tester.prepare_config_and_inputs_for_common()
__a : List[Any] = True
if model_class in get_values(__UpperCamelCase ):
continue
__a : str = model_class(__UpperCamelCase )
model.to(__UpperCamelCase )
model.train()
__a : Union[str, Any] = self._prepare_for_class(__UpperCamelCase , __UpperCamelCase , return_labels=__UpperCamelCase )
__a : List[Any] = model(**__UpperCamelCase ).loss
loss.backward()
def __lowerCamelCase ( self ):
'''simple docstring'''
for model_class in self.all_model_classes:
if model_class.__name__ == "DPTForDepthEstimation":
continue
__a , __a : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
__a : Any = False
__a : Dict = True
if model_class in get_values(__UpperCamelCase ) or not model_class.supports_gradient_checkpointing:
continue
__a : Any = model_class(__UpperCamelCase )
model.to(__UpperCamelCase )
model.gradient_checkpointing_enable()
model.train()
__a : List[str] = self._prepare_for_class(__UpperCamelCase , __UpperCamelCase , return_labels=__UpperCamelCase )
__a : Dict = model(**__UpperCamelCase ).loss
loss.backward()
def __lowerCamelCase ( self ):
'''simple docstring'''
__a , __a : Any = self.model_tester.prepare_config_and_inputs_for_common()
__a : Any = _config_zero_init(__UpperCamelCase )
for model_class in self.all_model_classes:
__a : Any = model_class(config=__UpperCamelCase )
# Skip the check for the backbone
__a : Optional[Any] = []
for name, module in model.named_modules():
if module.__class__.__name__ == "DPTViTHybridEmbeddings":
__a : Optional[int] = [f"""{name}.{key}""" for key in module.state_dict().keys()]
break
for name, param in model.named_parameters():
if param.requires_grad:
if name in backbone_params:
continue
self.assertIn(
((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=f"""Parameter {name} of model {model_class} seems not properly initialized""" , )
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def __lowerCamelCase ( self ):
'''simple docstring'''
pass
@slow
def __lowerCamelCase ( self ):
'''simple docstring'''
for model_name in DPT_PRETRAINED_MODEL_ARCHIVE_LIST[1:]:
__a : int = DPTModel.from_pretrained(__UpperCamelCase )
self.assertIsNotNone(__UpperCamelCase )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a , __a : int = self.model_tester.prepare_config_and_inputs_for_common()
__a : Optional[int] = """add"""
with self.assertRaises(__UpperCamelCase ):
__a : int = DPTForDepthEstimation(__UpperCamelCase )
def _snake_case ( ) -> Any:
__a : Dict = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
@slow
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : int = DPTImageProcessor.from_pretrained("""Intel/dpt-hybrid-midas""" )
__a : int = DPTForDepthEstimation.from_pretrained("""Intel/dpt-hybrid-midas""" ).to(__UpperCamelCase )
__a : Union[str, Any] = prepare_img()
__a : Any = image_processor(images=__UpperCamelCase , return_tensors="""pt""" ).to(__UpperCamelCase )
# forward pass
with torch.no_grad():
__a : Optional[Any] = model(**__UpperCamelCase )
__a : int = outputs.predicted_depth
# verify the predicted depth
__a : Any = torch.Size((1, 384, 384) )
self.assertEqual(predicted_depth.shape , __UpperCamelCase )
__a : int = torch.tensor(
[[[5.6_4_3_7, 5.6_1_4_6, 5.6_5_1_1], [5.4_3_7_1, 5.5_6_4_9, 5.5_9_5_8], [5.5_2_1_5, 5.5_1_8_4, 5.5_2_9_3]]] ).to(__UpperCamelCase )
self.assertTrue(torch.allclose(outputs.predicted_depth[:3, :3, :3] / 100 , __UpperCamelCase , atol=1E-4 ) ) | 697 | 1 |
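# A short, hedged inference sketch mirroring the slow test above (checkpoint
# name and expected output shape come from the test itself; requires Hub
# access and the local COCO fixture image):
import torch
from PIL import Image
from transformers import DPTForDepthEstimation, DPTImageProcessor

processor = DPTImageProcessor.from_pretrained("Intel/dpt-hybrid-midas")
depth_model = DPTForDepthEstimation.from_pretrained("Intel/dpt-hybrid-midas")
image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
with torch.no_grad():
    predicted_depth = depth_model(**processor(images=image, return_tensors="pt")).predicted_depth
print(predicted_depth.shape)  # torch.Size([1, 384, 384]) per the assertion above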
'''simple docstring'''
import math
def _snake_case ( lowercase ) -> bool:
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
    # All primes greater than 3 are of the form 6k +/- 1
for i in range(5 , int(math.sqrt(lowercase ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
def _snake_case ( lowercase = 1_0_0_0_1 ) -> int:
try:
__a : Union[str, Any] = int(lowercase )
except (TypeError, ValueError):
raise TypeError("""Parameter nth must be int or castable to int.""" ) from None
if nth <= 0:
raise ValueError("""Parameter nth must be greater than or equal to one.""" )
__a : list[int] = []
__a : List[str] = 2
while len(lowercase ) < nth:
if is_prime(lowercase ):
primes.append(lowercase )
num += 1
else:
num += 1
return primes[len(lowercase ) - 1]
if __name__ == "__main__":
print(f'''{solution() = }''') | 697 |
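# Known-value checks for the solver above, reusing the module's own `solution`
# name from the __main__ guard (the 10001st-prime value is the widely published
# Project Euler 7 answer):
assert solution(6) == 13  # primes: 2, 3, 5, 7, 11, 13
assert solution(25) == 97
assert solution() == 104_743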
'''simple docstring'''
import unittest
from .lib import (
Matrix,
Vector,
axpy,
square_zero_matrix,
unit_basis_vector,
zero_vector,
)
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Dict = Vector([1, 2, 3] )
self.assertEqual(x.component(0 ) , 1 )
self.assertEqual(x.component(2 ) , 3 )
__a : Optional[int] = Vector()
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Any = Vector([0, 0, 0, 0, 0, 1] )
self.assertEqual(str(__UpperCamelCase ) , """(0,0,0,0,0,1)""" )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Tuple = Vector([1, 2, 3, 4] )
self.assertEqual(len(__UpperCamelCase ) , 4 )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Optional[Any] = Vector([1, 2] )
__a : List[str] = Vector([1, 2, 3, 4, 5] )
__a : Optional[int] = Vector([0, 0, 0, 0, 0, 0, 0, 0, 0, 0] )
__a : Dict = Vector([1, -1, 1, -1, 2, -3, 4, -5] )
self.assertAlmostEqual(x.euclidean_length() , 2.2_3_6 , 3 )
self.assertAlmostEqual(y.euclidean_length() , 7.4_1_6 , 3 )
self.assertEqual(z.euclidean_length() , 0 )
self.assertAlmostEqual(w.euclidean_length() , 7.6_1_6 , 3 )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Dict = Vector([1, 2, 3] )
__a : Union[str, Any] = Vector([1, 1, 1] )
self.assertEqual((x + y).component(0 ) , 2 )
self.assertEqual((x + y).component(1 ) , 3 )
self.assertEqual((x + y).component(2 ) , 4 )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : List[str] = Vector([1, 2, 3] )
__a : Any = Vector([1, 1, 1] )
self.assertEqual((x - y).component(0 ) , 0 )
self.assertEqual((x - y).component(1 ) , 1 )
self.assertEqual((x - y).component(2 ) , 2 )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Tuple = Vector([1, 2, 3] )
__a : Optional[Any] = Vector([2, -1, 4] ) # for test of dot product
__a : Union[str, Any] = Vector([1, -2, -1] )
self.assertEqual(str(x * 3.0 ) , """(3.0,6.0,9.0)""" )
self.assertEqual((a * b) , 0 )
def __lowerCamelCase ( self ):
'''simple docstring'''
self.assertEqual(str(zero_vector(10 ) ).count("""0""" ) , 10 )
def __lowerCamelCase ( self ):
'''simple docstring'''
self.assertEqual(str(unit_basis_vector(3 , 1 ) ) , """(0,1,0)""" )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Dict = Vector([1, 2, 3] )
__a : Optional[int] = Vector([1, 0, 1] )
self.assertEqual(str(axpy(2 , __UpperCamelCase , __UpperCamelCase ) ) , """(3,4,7)""" )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : int = Vector([1, 0, 0, 0, 0, 0] )
__a : Any = x.copy()
self.assertEqual(str(__UpperCamelCase ) , str(__UpperCamelCase ) )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Union[str, Any] = Vector([1, 0, 0] )
x.change_component(0 , 0 )
x.change_component(1 , 1 )
self.assertEqual(str(__UpperCamelCase ) , """(0,1,0)""" )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Dict = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
self.assertEqual("""|1,2,3|\n|2,4,5|\n|6,7,8|\n""" , str(__UpperCamelCase ) )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : List[Any] = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
__a : List[Any] = [[-3, -14, -10], [-5, -10, -5], [-2, -1, 0]]
for x in range(a.height() ):
for y in range(a.width() ):
self.assertEqual(minors[x][y] , a.minor(__UpperCamelCase , __UpperCamelCase ) )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : List[Any] = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
__a : Any = [[-3, 14, -10], [5, -10, 5], [-2, 1, 0]]
for x in range(a.height() ):
for y in range(a.width() ):
self.assertEqual(cofactors[x][y] , a.cofactor(__UpperCamelCase , __UpperCamelCase ) )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : List[Any] = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
self.assertEqual(-5 , a.determinant() )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Any = Matrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]] , 3 , 3 )
__a : List[Any] = Vector([1, 2, 3] )
self.assertEqual("""(14,32,50)""" , str(a * x ) )
self.assertEqual("""|2,4,6|\n|8,10,12|\n|14,16,18|\n""" , str(a * 2 ) )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : List[str] = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
a.change_component(0 , 2 , 5 )
self.assertEqual("""|1,2,5|\n|2,4,5|\n|6,7,8|\n""" , str(__UpperCamelCase ) )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Tuple = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
        self.assertAlmostEqual(7 , a.component(2 , 1 ) , delta=0.0_1 )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Dict = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
__a : Union[str, Any] = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]] , 3 , 3 )
self.assertEqual("""|2,4,10|\n|4,8,10|\n|12,14,18|\n""" , str(a + b ) )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : List[Any] = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
__a : List[str] = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]] , 3 , 3 )
self.assertEqual("""|0,0,-4|\n|0,0,0|\n|0,0,-2|\n""" , str(a - b ) )
def __lowerCamelCase ( self ):
'''simple docstring'''
self.assertEqual(
"""|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n""" , str(square_zero_matrix(5 ) ) , )
if __name__ == "__main__":
unittest.main() | 697 | 1 |
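# A doctest-style illustration of the API under test, assuming the same local
# `lib` module is importable (values taken from the assertions above):
# >>> from .lib import Matrix, Vector
# >>> v, w = Vector([1, 2, 3]), Vector([1, 0, 1])
# >>> str(v + w), v * w            # component-wise sum, dot product
# ('(2,2,4)', 4)
# >>> a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
# >>> a.determinant()
# -5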
'''simple docstring'''
import argparse
import json
from typing import List
from ltp import LTP
from transformers.models.bert.tokenization_bert import BertTokenizer
def _snake_case ( lowercase ) -> Tuple:
# This defines a "chinese character" as anything in the CJK Unicode block:
# https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
#
# Note that the CJK Unicode block is NOT all Japanese and Korean characters,
# despite its name. The modern Korean Hangul alphabet is a different block,
    # as are Japanese Hiragana and Katakana. Those alphabets are used to write
# space-separated words, so they are not treated specially and handled
    # like all of the other languages.
if (
(cp >= 0x4_E_0_0 and cp <= 0x9_F_F_F)
or (cp >= 0x3_4_0_0 and cp <= 0x4_D_B_F) #
or (cp >= 0x2_0_0_0_0 and cp <= 0x2_A_6_D_F) #
or (cp >= 0x2_A_7_0_0 and cp <= 0x2_B_7_3_F) #
or (cp >= 0x2_B_7_4_0 and cp <= 0x2_B_8_1_F) #
or (cp >= 0x2_B_8_2_0 and cp <= 0x2_C_E_A_F) #
or (cp >= 0xF_9_0_0 and cp <= 0xF_A_F_F)
or (cp >= 0x2_F_8_0_0 and cp <= 0x2_F_A_1_F) #
): #
return True
return False
def _snake_case ( lowercase ) -> List[Any]:
# word like '180' or '身高' or '神'
for char in word:
__a : str = ord(lowercase )
if not _is_chinese_char(lowercase ):
return 0
return 1
def _snake_case ( lowercase ) -> Dict:
__a : Optional[Any] = set()
for token in tokens:
__a : Any = len(lowercase ) > 1 and is_chinese(lowercase )
if chinese_word:
word_set.add(lowercase )
__a : Optional[int] = list(lowercase )
return word_list
def _snake_case ( lowercase , lowercase ) -> Optional[int]:
if not chinese_word_set:
return bert_tokens
__a : int = max([len(lowercase ) for w in chinese_word_set] )
__a : Any = bert_tokens
__a , __a : List[str] = 0, len(lowercase )
while start < end:
__a : List[Any] = True
if is_chinese(bert_word[start] ):
__a : Dict = min(end - start , lowercase )
for i in range(lowercase , 1 , -1 ):
__a : Tuple = """""".join(bert_word[start : start + i] )
if whole_word in chinese_word_set:
for j in range(start + 1 , start + i ):
__a : str = """##""" + bert_word[j]
__a : Optional[Any] = start + i
__a : List[str] = False
break
if single_word:
start += 1
return bert_word
def _snake_case ( lowercase , lowercase , lowercase ) -> Dict:
__a : int = []
for i in range(0 , len(lowercase ) , 1_0_0 ):
__a : int = ltp_tokenizer.pipeline(lines[i : i + 1_0_0] , tasks=["""cws"""] ).cws
__a : str = [get_chinese_word(lowercase ) for r in res]
ltp_res.extend(lowercase )
assert len(lowercase ) == len(lowercase )
__a : List[str] = []
for i in range(0 , len(lowercase ) , 1_0_0 ):
__a : Any = bert_tokenizer(lines[i : i + 1_0_0] , add_special_tokens=lowercase , truncation=lowercase , max_length=5_1_2 )
bert_res.extend(res["""input_ids"""] )
assert len(lowercase ) == len(lowercase )
__a : List[str] = []
for input_ids, chinese_word in zip(lowercase , lowercase ):
__a : Tuple = []
for id in input_ids:
__a : int = bert_tokenizer._convert_id_to_token(lowercase )
input_tokens.append(lowercase )
__a : Optional[Any] = add_sub_symbol(lowercase , lowercase )
__a : str = []
# We only save pos of chinese subwords start with ##, which mean is part of a whole word.
for i, token in enumerate(lowercase ):
if token[:2] == "##":
__a : Any = token[2:]
# save chinese tokens' pos
if len(lowercase ) == 1 and _is_chinese_char(ord(lowercase ) ):
ref_id.append(lowercase )
ref_ids.append(lowercase )
assert len(lowercase ) == len(lowercase )
return ref_ids
def _snake_case ( lowercase ) -> Tuple:
    # For Chinese (Ro)Bert, the best results come from RoBERTa-wwm-ext (https://github.com/ymcui/Chinese-BERT-wwm)
    # If we want to fine-tune these models, we have to use the same tokenizer: LTP (https://github.com/HIT-SCIR/ltp)
with open(args.file_name , """r""" , encoding="""utf-8""" ) as f:
__a : int = f.readlines()
__a : int = [line.strip() for line in data if len(lowercase ) > 0 and not line.isspace()] # avoid delimiter like '\u2029'
__a : str = LTP(args.ltp ) # faster in GPU device
__a : str = BertTokenizer.from_pretrained(args.bert )
__a : Optional[int] = prepare_ref(lowercase , lowercase , lowercase )
with open(args.save_path , """w""" , encoding="""utf-8""" ) as f:
__a : int = [json.dumps(lowercase ) + """\n""" for ref in ref_ids]
f.writelines(lowercase )
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE : List[str] = argparse.ArgumentParser(description='prepare_chinese_ref')
parser.add_argument(
'--file_name',
required=False,
type=str,
default='./resources/chinese-demo.txt',
help='file need process, same as training data in lm',
)
parser.add_argument(
'--ltp',
required=False,
type=str,
default='./resources/ltp',
help='resources for LTP tokenizer, usually a path',
)
parser.add_argument(
'--bert',
required=False,
type=str,
default='./resources/robert',
help='resources for Bert tokenizer',
)
parser.add_argument(
'--save_path',
required=False,
type=str,
default='./resources/ref.txt',
help='path to save res',
)
__SCREAMING_SNAKE_CASE : Tuple = parser.parse_args()
main(args) | 697 |
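# A tiny self-contained illustration of the "##" sub-symbol rewriting that the
# add_sub_symbol step performs (toy data, no LTP or BERT download needed):
# given BERT tokens that split the word 身高 into single characters, only the
# characters *inside* the matched whole word gain the "##" prefix, so a
# whole-word-masking collator can later mask 身 and ##高 together.
toy_bert_tokens = ["我", "身", "高", "1", "8", "0"]
toy_chinese_word_set = {"身高"}
expected_after_rewrite = ["我", "身", "##高", "1", "8", "0"]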
'''simple docstring'''
import os
from itertools import chain
from random import randrange, shuffle
import pytest
from .sola import PokerHand
__SCREAMING_SNAKE_CASE : List[str] = (
'4S 3H 2C 7S 5H',
'9D 8H 2C 6S 7H',
'2D 6D 9D TH 7D',
'TC 8C 2S JH 6C',
'JH 8S TH AH QH',
'TS KS 5S 9S AC',
'KD 6S 9D TH AD',
'KS 8D 4D 9S 4S', # pair
'8C 4S KH JS 4D', # pair
'QH 8H KD JH 8S', # pair
'KC 4H KS 2H 8D', # pair
'KD 4S KC 3H 8S', # pair
'AH 8S AS KC JH', # pair
'3H 4C 4H 3S 2H', # 2 pairs
'5S 5D 2C KH KH', # 2 pairs
'3C KH 5D 5S KH', # 2 pairs
'AS 3C KH AD KH', # 2 pairs
'7C 7S 3S 7H 5S', # 3 of a kind
'7C 7S KH 2H 7H', # 3 of a kind
'AC KH QH AH AS', # 3 of a kind
'2H 4D 3C AS 5S', # straight (low ace)
'3C 5C 4C 2C 6H', # straight
'6S 8S 7S 5H 9H', # straight
'JS QS 9H TS KH', # straight
'QC KH TS JS AH', # straight (high ace)
'8C 9C 5C 3C TC', # flush
'3S 8S 9S 5S KS', # flush
'4C 5C 9C 8C KC', # flush
'JH 8H AH KH QH', # flush
'3D 2H 3H 2C 2D', # full house
'2H 2C 3S 3H 3D', # full house
'KH KC 3S 3H 3D', # full house
'JC 6H JS JD JH', # 4 of a kind
'JC 7H JS JD JH', # 4 of a kind
'JC KH JS JD JH', # 4 of a kind
'2S AS 4S 5S 3S', # straight flush (low ace)
'2D 6D 3D 4D 5D', # straight flush
'5C 6C 3C 7C 4C', # straight flush
'JH 9H TH KH QH', # straight flush
'JH AH TH KH QH', # royal flush (high ace straight flush)
)
__SCREAMING_SNAKE_CASE : Optional[Any] = (
('2H 3H 4H 5H 6H', 'KS AS TS QS JS', 'Loss'),
('2H 3H 4H 5H 6H', 'AS AD AC AH JD', 'Win'),
('AS AH 2H AD AC', 'JS JD JC JH 3D', 'Win'),
('2S AH 2H AS AC', 'JS JD JC JH AD', 'Loss'),
('2S AH 2H AS AC', '2H 3H 5H 6H 7H', 'Win'),
('AS 3S 4S 8S 2S', '2H 3H 5H 6H 7H', 'Win'),
('2H 3H 5H 6H 7H', '2S 3H 4H 5S 6C', 'Win'),
('2S 3H 4H 5S 6C', '3D 4C 5H 6H 2S', 'Tie'),
('2S 3H 4H 5S 6C', 'AH AC 5H 6H AS', 'Win'),
('2S 2H 4H 5S 4C', 'AH AC 5H 6H AS', 'Loss'),
('2S 2H 4H 5S 4C', 'AH AC 5H 6H 7S', 'Win'),
('6S AD 7H 4S AS', 'AH AC 5H 6H 7S', 'Loss'),
('2S AH 4H 5S KC', 'AH AC 5H 6H 7S', 'Loss'),
('2S 3H 6H 7S 9C', '7H 3C TH 6H 9S', 'Loss'),
('4S 5H 6H TS AC', '3S 5H 6H TS AC', 'Win'),
('2S AH 4H 5S 6C', 'AD 4C 5H 6H 2C', 'Tie'),
('AS AH 3H AD AC', 'AS AH 2H AD AC', 'Win'),
('AH AC 5H 5C QS', 'AH AC 5H 5C KS', 'Loss'),
('AH AC 5H 5C QS', 'KH KC 5H 5C QS', 'Win'),
('7C 7S KH 2H 7H', '3C 3S AH 2H 3H', 'Win'),
('3C 3S AH 2H 3H', '7C 7S KH 2H 7H', 'Loss'),
('6H 5H 4H 3H 2H', '5H 4H 3H 2H AH', 'Win'),
('5H 4H 3H 2H AH', '5H 4H 3H 2H AH', 'Tie'),
('5H 4H 3H 2H AH', '6H 5H 4H 3H 2H', 'Loss'),
('AH AD KS KC AC', 'AH KD KH AC KC', 'Win'),
('2H 4D 3C AS 5S', '2H 4D 3C 6S 5S', 'Loss'),
('2H 3S 3C 3H 2S', '3S 3C 2S 2H 2D', 'Win'),
('4D 6D 5D 2D JH', '3S 8S 3H TC KH', 'Loss'),
('4S 6C 8S 3S 7S', 'AD KS 2D 7D 7C', 'Loss'),
('6S 4C 7H 8C 3H', '5H JC AH 9D 9C', 'Loss'),
('9D 9H JH TC QH', '3C 2S JS 5C 7H', 'Win'),
('2H TC 8S AD 9S', '4H TS 7H 2C 5C', 'Win'),
('9D 3S 2C 7S 7C', 'JC TD 3C TC 9H', 'Loss'),
)
__SCREAMING_SNAKE_CASE : Tuple = (
('2H 3H 4H 5H 6H', True),
('AS AH 2H AD AC', False),
('2H 3H 5H 6H 7H', True),
('KS AS TS QS JS', True),
('8H 9H QS JS TH', False),
('AS 3S 4S 8S 2S', True),
)
__SCREAMING_SNAKE_CASE : Dict = (
('2H 3H 4H 5H 6H', True),
('AS AH 2H AD AC', False),
('2H 3H 5H 6H 7H', False),
('KS AS TS QS JS', True),
('8H 9H QS JS TH', True),
)
__SCREAMING_SNAKE_CASE : Optional[int] = (
('2H 4D 3C AS 5S', True, [5, 4, 3, 2, 14]),
('2H 5D 3C AS 5S', False, [14, 5, 5, 3, 2]),
('JH QD KC AS TS', False, [14, 13, 12, 11, 10]),
('9D 3S 2C 7S 7C', False, [9, 7, 7, 3, 2]),
)
__SCREAMING_SNAKE_CASE : int = (
('JH AH TH KH QH', 0),
('JH 9H TH KH QH', 0),
('JC KH JS JD JH', 7),
('KH KC 3S 3H 3D', 6),
('8C 9C 5C 3C TC', 0),
('JS QS 9H TS KH', 0),
('7C 7S KH 2H 7H', 3),
('3C KH 5D 5S KH', 2),
('QH 8H KD JH 8S', 1),
('2D 6D 9D TH 7D', 0),
)
__SCREAMING_SNAKE_CASE : int = (
('JH AH TH KH QH', 23),
('JH 9H TH KH QH', 22),
('JC KH JS JD JH', 21),
('KH KC 3S 3H 3D', 20),
('8C 9C 5C 3C TC', 19),
('JS QS 9H TS KH', 18),
('7C 7S KH 2H 7H', 17),
('3C KH 5D 5S KH', 16),
('QH 8H KD JH 8S', 15),
('2D 6D 9D TH 7D', 14),
)
def _snake_case ( ) -> List[str]:
__a , __a : List[Any] = randrange(len(lowercase ) ), randrange(len(lowercase ) )
__a : int = ["""Loss""", """Tie""", """Win"""][(play >= oppo) + (play > oppo)]
__a , __a : int = SORTED_HANDS[play], SORTED_HANDS[oppo]
return hand, other, expected
def _snake_case ( lowercase = 1_0_0 ) -> Any:
return (generate_random_hand() for _ in range(lowercase ))
@pytest.mark.parametrize("""hand, expected""" , lowercase )
def _snake_case ( lowercase , lowercase ) -> int:
assert PokerHand(lowercase )._is_flush() == expected
@pytest.mark.parametrize("""hand, expected""" , lowercase )
def _snake_case ( lowercase , lowercase ) -> Any:
assert PokerHand(lowercase )._is_straight() == expected
@pytest.mark.parametrize("""hand, expected, card_values""" , lowercase )
def _snake_case ( lowercase , lowercase , lowercase ) -> List[str]:
__a : Union[str, Any] = PokerHand(lowercase )
assert player._is_five_high_straight() == expected
assert player._card_values == card_values
@pytest.mark.parametrize("""hand, expected""" , lowercase )
def _snake_case ( lowercase , lowercase ) -> Optional[int]:
assert PokerHand(lowercase )._is_same_kind() == expected
@pytest.mark.parametrize("""hand, expected""" , lowercase )
def _snake_case ( lowercase , lowercase ) -> Union[str, Any]:
assert PokerHand(lowercase )._hand_type == expected
@pytest.mark.parametrize("""hand, other, expected""" , lowercase )
def _snake_case ( lowercase , lowercase , lowercase ) -> Optional[int]:
assert PokerHand(lowercase ).compare_with(PokerHand(lowercase ) ) == expected
@pytest.mark.parametrize("""hand, other, expected""" , generate_random_hands() )
def _snake_case ( lowercase , lowercase , lowercase ) -> int:
assert PokerHand(lowercase ).compare_with(PokerHand(lowercase ) ) == expected
def _snake_case ( ) -> Union[str, Any]:
__a : Tuple = [PokerHand(lowercase ) for hand in SORTED_HANDS]
__a : Optional[int] = poker_hands.copy()
shuffle(lowercase )
__a : List[str] = chain(sorted(lowercase ) )
for index, hand in enumerate(lowercase ):
assert hand == poker_hands[index]
def _snake_case ( ) -> List[str]:
# Test that five high straights are compared correctly.
__a : Optional[int] = [PokerHand("""2D AC 3H 4H 5S""" ), PokerHand("""2S 3H 4H 5S 6C""" )]
pokerhands.sort(reverse=lowercase )
assert pokerhands[0].__str__() == "2S 3H 4H 5S 6C"
def _snake_case ( ) -> List[str]:
    # Repeated calls to the five-high-straight check should keep returning True
    # and must not mutate the card-value list on any call after the first.
__a : Dict = PokerHand("""2C 4S AS 3D 5C""" )
__a : Dict = True
__a : Optional[int] = [5, 4, 3, 2, 1_4]
for _ in range(1_0 ):
assert pokerhand._is_five_high_straight() == expected
assert pokerhand._card_values == expected_card_values
def _snake_case ( ) -> Dict:
# Problem number 54 from Project Euler
# Testing from poker_hands.txt file
__a : Tuple = 0
__a : int = os.path.abspath(os.path.dirname(lowercase ) )
__a : Union[str, Any] = os.path.join(lowercase , """poker_hands.txt""" )
with open(lowercase ) as file_hand:
for line in file_hand:
__a : Union[str, Any] = line[:1_4].strip()
__a : Optional[Any] = line[1_5:].strip()
__a , __a : List[str] = PokerHand(lowercase ), PokerHand(lowercase )
__a : str = player.compare_with(lowercase )
if output == "Win":
answer += 1
assert answer == 3_7_6 | 697 | 1 |
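# The kind-detection logic exercised above reduces to counting rank
# multiplicities; a self-contained sketch of that core idea, independent of
# the .sola module:
from collections import Counter

def rank_multiplicities(hand: str) -> list[int]:
    """'KH KC 3S 3H 3D' -> [3, 2], i.e. a full house."""
    counts = Counter(card[0] for card in hand.split())
    return sorted(counts.values(), reverse=True)

assert rank_multiplicities("KH KC 3S 3H 3D") == [3, 2]  # full house
assert rank_multiplicities("JC 6H JS JD JH") == [4, 1]  # four of a kind
assert rank_multiplicities("2H 4D 3C AS 5S") == [1, 1, 1, 1, 1]  # no pair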
'''simple docstring'''
from __future__ import annotations
import bisect
def _snake_case ( lowercase , lowercase , lowercase = 0 , lowercase = -1 ) -> int:
if hi < 0:
__a : Union[str, Any] = len(lowercase )
while lo < hi:
__a : List[str] = lo + (hi - lo) // 2
if sorted_collection[mid] < item:
__a : int = mid + 1
else:
__a : int = mid
return lo
def _snake_case ( lowercase , lowercase , lowercase = 0 , lowercase = -1 ) -> int:
if hi < 0:
__a : Any = len(lowercase )
while lo < hi:
__a : Any = lo + (hi - lo) // 2
if sorted_collection[mid] <= item:
__a : List[str] = mid + 1
else:
__a : Any = mid
return lo
def _snake_case ( lowercase , lowercase , lowercase = 0 , lowercase = -1 ) -> None:
sorted_collection.insert(bisect_left(lowercase , lowercase , lowercase , lowercase ) , lowercase )
def _snake_case ( lowercase , lowercase , lowercase = 0 , lowercase = -1 ) -> None:
sorted_collection.insert(bisect_right(lowercase , lowercase , lowercase , lowercase ) , lowercase )
def _snake_case ( lowercase , lowercase ) -> int | None:
__a : Dict = 0
__a : Any = len(lowercase ) - 1
while left <= right:
__a : str = left + (right - left) // 2
__a : List[Any] = sorted_collection[midpoint]
if current_item == item:
return midpoint
elif item < current_item:
__a : Optional[Any] = midpoint - 1
else:
__a : Optional[int] = midpoint + 1
return None
def _snake_case ( lowercase , lowercase ) -> int | None:
__a : Optional[int] = bisect.bisect_left(lowercase , lowercase )
if index != len(lowercase ) and sorted_collection[index] == item:
return index
return None
def _snake_case ( lowercase , lowercase , lowercase , lowercase ) -> int | None:
if right < left:
return None
__a : Any = left + (right - left) // 2
if sorted_collection[midpoint] == item:
return midpoint
elif sorted_collection[midpoint] > item:
return binary_search_by_recursion(lowercase , lowercase , lowercase , midpoint - 1 )
else:
return binary_search_by_recursion(lowercase , lowercase , midpoint + 1 , lowercase )
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE : List[Any] = input('Enter numbers separated by comma:\n').strip()
__SCREAMING_SNAKE_CASE : Optional[Any] = sorted(int(item) for item in user_input.split(','))
__SCREAMING_SNAKE_CASE : List[str] = int(input('Enter a single number to be found in the list:\n'))
__SCREAMING_SNAKE_CASE : Optional[int] = binary_search(collection, target)
if result is None:
print(f'''{target} was not found in {collection}.''')
else:
print(f'''{target} was found at position {result} in {collection}.''') | 697 |
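# Example usage following the module's own __main__ naming (inputs must be
# pre-sorted, as the prompt above implies):
sorted_data = [0, 5, 7, 10, 15]
assert binary_search(sorted_data, 5) == 1
assert binary_search(sorted_data, 6) is None
assert binary_search_by_recursion(sorted_data, 15, 0, len(sorted_data) - 1) == 4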
'''simple docstring'''
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__SCREAMING_SNAKE_CASE : Optional[Any] = {'configuration_focalnet': ['FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP', 'FocalNetConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE : List[Any] = [
'FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST',
'FocalNetForImageClassification',
'FocalNetForMaskedImageModeling',
'FocalNetBackbone',
'FocalNetModel',
'FocalNetPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_focalnet import (
FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
FocalNetPreTrainedModel,
)
else:
import sys
__SCREAMING_SNAKE_CASE : Optional[int] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) | 697 | 1 |
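# The file above is the standard transformers lazy-import wiring. Its core
# mechanism is PEP 562 module-level __getattr__; a minimal standalone sketch
# of the same idea (illustrative mapping, not the transformers internals):
import importlib

_LAZY_ATTRS = {"FocalNetConfig": ".configuration_focalnet"}

def __getattr__(name):
    if name in _LAZY_ATTRS:
        module = importlib.import_module(_LAZY_ATTRS[name], __package__)
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")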
'''simple docstring'''
import importlib
import shutil
import threading
import warnings
from typing import List
import fsspec
import fsspec.asyn
from . import compression
from .hffilesystem import HfFileSystem
__SCREAMING_SNAKE_CASE : str = importlib.util.find_spec('s3fs') is not None
if _has_safs:
from .safilesystem import SaFileSystem # noqa: F401
__SCREAMING_SNAKE_CASE : List[compression.BaseCompressedFileFileSystem] = [
compression.BzaFileSystem,
compression.GzipFileSystem,
compression.LzaFileSystem,
compression.XzFileSystem,
compression.ZstdFileSystem,
]
# Register custom filesystems
for fs_class in COMPRESSION_FILESYSTEMS + [HfFileSystem]:
if fs_class.protocol in fsspec.registry and fsspec.registry[fs_class.protocol] is not fs_class:
warnings.warn(f'''A filesystem protocol was already set for {fs_class.protocol} and will be overwritten.''')
fsspec.register_implementation(fs_class.protocol, fs_class, clobber=True)
def _snake_case ( lowercase ) -> str:
if "://" in dataset_path:
__a : List[str] = dataset_path.split("""://""" )[1]
return dataset_path
def _snake_case ( lowercase ) -> bool:
if fs is not None and fs.protocol != "file":
return True
else:
return False
def _snake_case ( lowercase , lowercase , lowercase ) -> List[Any]:
__a : Optional[Any] = not is_remote_filesystem(lowercase )
if is_local:
# LocalFileSystem.mv does copy + rm, it is more efficient to simply move a local directory
shutil.move(fs._strip_protocol(lowercase ) , fs._strip_protocol(lowercase ) )
else:
fs.mv(lowercase , lowercase , recursive=lowercase )
def _snake_case ( ) -> None:
if hasattr(fsspec.asyn , """reset_lock""" ):
# for future fsspec>2022.05.0
fsspec.asyn.reset_lock()
else:
__a : Union[str, Any] = None
__a : Optional[int] = None
__a : List[Any] = threading.Lock() | 697 |
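# With compression filesystems registered, fsspec can open compressed files
# transparently; a small hedged example using core fsspec's `compression`
# argument (assumes a local gzip file at the illustrative path below):
import fsspec

with fsspec.open("data/train.jsonl.gz", "rt", compression="gzip") as f:
    first_line = f.readline()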
'''simple docstring'''
from __future__ import annotations
import bisect
def _snake_case ( lowercase , lowercase , lowercase = 0 , lowercase = -1 ) -> int:
if hi < 0:
__a : Union[str, Any] = len(lowercase )
while lo < hi:
__a : List[str] = lo + (hi - lo) // 2
if sorted_collection[mid] < item:
__a : int = mid + 1
else:
__a : int = mid
return lo
def _snake_case ( lowercase , lowercase , lowercase = 0 , lowercase = -1 ) -> int:
if hi < 0:
__a : Any = len(lowercase )
while lo < hi:
__a : Any = lo + (hi - lo) // 2
if sorted_collection[mid] <= item:
__a : List[str] = mid + 1
else:
__a : Any = mid
return lo
def _snake_case ( lowercase , lowercase , lowercase = 0 , lowercase = -1 ) -> None:
sorted_collection.insert(bisect_left(lowercase , lowercase , lowercase , lowercase ) , lowercase )
def _snake_case ( lowercase , lowercase , lowercase = 0 , lowercase = -1 ) -> None:
sorted_collection.insert(bisect_right(lowercase , lowercase , lowercase , lowercase ) , lowercase )
def _snake_case ( lowercase , lowercase ) -> int | None:
__a : Dict = 0
__a : Any = len(lowercase ) - 1
while left <= right:
__a : str = left + (right - left) // 2
__a : List[Any] = sorted_collection[midpoint]
if current_item == item:
return midpoint
elif item < current_item:
__a : Optional[Any] = midpoint - 1
else:
__a : Optional[int] = midpoint + 1
return None
def _snake_case ( lowercase , lowercase ) -> int | None:
__a : Optional[int] = bisect.bisect_left(lowercase , lowercase )
if index != len(lowercase ) and sorted_collection[index] == item:
return index
return None
def _snake_case ( lowercase , lowercase , lowercase , lowercase ) -> int | None:
if right < left:
return None
__a : Any = left + (right - left) // 2
if sorted_collection[midpoint] == item:
return midpoint
elif sorted_collection[midpoint] > item:
return binary_search_by_recursion(lowercase , lowercase , lowercase , midpoint - 1 )
else:
return binary_search_by_recursion(lowercase , lowercase , midpoint + 1 , lowercase )
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE : List[Any] = input('Enter numbers separated by comma:\n').strip()
__SCREAMING_SNAKE_CASE : Optional[Any] = sorted(int(item) for item in user_input.split(','))
__SCREAMING_SNAKE_CASE : List[str] = int(input('Enter a single number to be found in the list:\n'))
__SCREAMING_SNAKE_CASE : Optional[int] = binary_search(collection, target)
if result is None:
print(f'''{target} was not found in {collection}.''')
else:
print(f'''{target} was found at position {result} in {collection}.''') | 697 | 1 |
'''simple docstring'''
def _snake_case ( lowercase ) -> float:
__a : int = 0
while len(lowercase ) > 1:
__a : Tuple = 0
# Consider two files with minimum cost to be merged
for _ in range(2 ):
__a : Dict = files.index(min(lowercase ) )
temp += files[min_index]
files.pop(lowercase )
files.append(lowercase )
optimal_merge_cost += temp
return optimal_merge_cost
if __name__ == "__main__":
import doctest
doctest.testmod() | 697 |
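# The function above re-scans the list on every merge (O(n^2) overall); the
# same greedy strategy is usually written with a min-heap, which brings it to
# O(n log n). A self-contained sketch:
import heapq

def optimal_merge_cost(files: list[int]) -> int:
    heapq.heapify(files)
    total = 0
    while len(files) > 1:
        merged = heapq.heappop(files) + heapq.heappop(files)
        total += merged
        heapq.heappush(files, merged)
    return total

assert optimal_merge_cost([2, 3, 4, 6]) == 29  # (2+3) + (4+5) + (6+9) = 29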
'''simple docstring'''
from itertools import product
def _snake_case ( lowercase , lowercase ) -> list[int]:
__a : Optional[int] = sides_number
__a : Union[str, Any] = max_face_number * dice_number
__a : Optional[Any] = [0] * (max_total + 1)
__a : Dict = 1
__a : str = range(lowercase , max_face_number + 1 )
for dice_numbers in product(lowercase , repeat=lowercase ):
__a : int = sum(lowercase )
totals_frequencies[total] += 1
return totals_frequencies
def _snake_case ( ) -> float:
__a : Tuple = total_frequency_distribution(
sides_number=4 , dice_number=9 )
__a : Union[str, Any] = total_frequency_distribution(
sides_number=6 , dice_number=6 )
__a : str = 0
__a : Dict = 9
__a : str = 4 * 9
__a : Any = 6
for peter_total in range(lowercase , max_peter_total + 1 ):
peter_wins_count += peter_totals_frequencies[peter_total] * sum(
colin_totals_frequencies[min_colin_total:peter_total] )
__a : str = (4**9) * (6**6)
__a : List[Any] = peter_wins_count / total_games_number
__a : List[Any] = round(lowercase , ndigits=7 )
return rounded_peter_win_probability
if __name__ == "__main__":
print(f'''{solution() = }''') | 697 | 1 |
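# A quick stochastic cross-check of the exact enumeration above (hedged: this
# only approximates the probability; the precise 7-digit value comes from the
# product enumeration, not from sampling):
import random

def approximate_peter_win_probability(trials: int = 200_000) -> float:
    wins = 0
    for _ in range(trials):
        peter = sum(random.randint(1, 4) for _ in range(9))
        colin = sum(random.randint(1, 6) for _ in range(6))
        wins += peter > colin
    return wins / trials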
'''simple docstring'''
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DetrImageProcessor
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
def __init__( self , __UpperCamelCase , __UpperCamelCase=7 , __UpperCamelCase=3 , __UpperCamelCase=30 , __UpperCamelCase=400 , __UpperCamelCase=True , __UpperCamelCase=None , __UpperCamelCase=True , __UpperCamelCase=1 / 255 , __UpperCamelCase=True , __UpperCamelCase=[0.5, 0.5, 0.5] , __UpperCamelCase=[0.5, 0.5, 0.5] , __UpperCamelCase=True , ):
'''simple docstring'''
__a : List[Any] = size if size is not None else {"""shortest_edge""": 18, """longest_edge""": 1333}
__a : Dict = parent
__a : Union[str, Any] = batch_size
__a : Optional[int] = num_channels
__a : Dict = min_resolution
__a : List[Any] = max_resolution
__a : int = do_resize
__a : str = size
__a : Optional[Any] = do_rescale
__a : Optional[Any] = rescale_factor
__a : str = do_normalize
__a : Any = image_mean
__a : Optional[Any] = image_std
__a : Dict = do_pad
def __lowerCamelCase ( self ):
'''simple docstring'''
return {
"do_resize": self.do_resize,
"size": self.size,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_pad": self.do_pad,
}
def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase=False ):
'''simple docstring'''
if not batched:
__a : Union[str, Any] = image_inputs[0]
if isinstance(__UpperCamelCase , Image.Image ):
__a , __a : Tuple = image.size
else:
__a , __a : Tuple = image.shape[1], image.shape[2]
if w < h:
__a : Optional[int] = int(self.size["""shortest_edge"""] * h / w )
__a : Tuple = self.size["""shortest_edge"""]
elif w > h:
__a : Optional[Any] = self.size["""shortest_edge"""]
__a : Any = int(self.size["""shortest_edge"""] * w / h )
else:
__a : Any = self.size["""shortest_edge"""]
__a : Optional[int] = self.size["""shortest_edge"""]
else:
__a : Any = []
for image in image_inputs:
__a , __a : Any = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
__a : List[Any] = max(__UpperCamelCase , key=lambda __UpperCamelCase : item[0] )[0]
__a : Optional[Any] = max(__UpperCamelCase , key=lambda __UpperCamelCase : item[1] )[1]
return expected_height, expected_width
@require_torch
@require_vision
class SCREAMING_SNAKE_CASE__ ( __UpperCamelCase , unittest.TestCase ):
lowercase__ = DetrImageProcessor if is_vision_available() else None
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : str = DetrImageProcessingTester(self )
@property
def __lowerCamelCase ( self ):
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Optional[int] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__UpperCamelCase , """image_mean""" ) )
self.assertTrue(hasattr(__UpperCamelCase , """image_std""" ) )
self.assertTrue(hasattr(__UpperCamelCase , """do_normalize""" ) )
self.assertTrue(hasattr(__UpperCamelCase , """do_rescale""" ) )
self.assertTrue(hasattr(__UpperCamelCase , """rescale_factor""" ) )
self.assertTrue(hasattr(__UpperCamelCase , """do_resize""" ) )
self.assertTrue(hasattr(__UpperCamelCase , """size""" ) )
self.assertTrue(hasattr(__UpperCamelCase , """do_pad""" ) )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Optional[Any] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"""shortest_edge""": 18, """longest_edge""": 1333} )
self.assertEqual(image_processor.do_pad , __UpperCamelCase )
__a : List[Any] = self.image_processing_class.from_dict(
self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=__UpperCamelCase )
self.assertEqual(image_processor.size , {"""shortest_edge""": 42, """longest_edge""": 84} )
self.assertEqual(image_processor.do_pad , __UpperCamelCase )
def __lowerCamelCase ( self ):
'''simple docstring'''
pass
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__a : Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__UpperCamelCase )
for image in image_inputs:
self.assertIsInstance(__UpperCamelCase , Image.Image )
# Test not batched input
__a : Optional[Any] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
__a , __a : Any = self.image_processor_tester.get_expected_values(__UpperCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
__a , __a : Optional[int] = self.image_processor_tester.get_expected_values(__UpperCamelCase , batched=__UpperCamelCase )
__a : Any = image_processing(__UpperCamelCase , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Dict = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__a : List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__UpperCamelCase , numpify=__UpperCamelCase )
for image in image_inputs:
self.assertIsInstance(__UpperCamelCase , np.ndarray )
# Test not batched input
__a : Dict = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
__a , __a : Any = self.image_processor_tester.get_expected_values(__UpperCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
__a : List[str] = image_processing(__UpperCamelCase , return_tensors="""pt""" ).pixel_values
__a , __a : str = self.image_processor_tester.get_expected_values(__UpperCamelCase , batched=__UpperCamelCase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Any = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__a : Union[str, Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__UpperCamelCase , torchify=__UpperCamelCase )
for image in image_inputs:
self.assertIsInstance(__UpperCamelCase , torch.Tensor )
# Test not batched input
__a : Any = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
__a , __a : Any = self.image_processor_tester.get_expected_values(__UpperCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
__a : List[str] = image_processing(__UpperCamelCase , return_tensors="""pt""" ).pixel_values
__a , __a : Any = self.image_processor_tester.get_expected_values(__UpperCamelCase , batched=__UpperCamelCase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : List[Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
with open("""./tests/fixtures/tests_samples/COCO/coco_annotations.txt""" , """r""" ) as f:
__a : Dict = json.loads(f.read() )
__a : Optional[int] = {"""image_id""": 3_9769, """annotations""": target}
# encode them
__a : List[str] = DetrImageProcessor.from_pretrained("""facebook/detr-resnet-50""" )
__a : Tuple = image_processing(images=__UpperCamelCase , annotations=__UpperCamelCase , return_tensors="""pt""" )
# verify pixel values
__a : Union[str, Any] = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding["""pixel_values"""].shape , __UpperCamelCase )
__a : List[str] = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] )
self.assertTrue(torch.allclose(encoding["""pixel_values"""][0, 0, 0, :3] , __UpperCamelCase , atol=1E-4 ) )
# verify area
__a : List[Any] = torch.tensor([5_8_8_7.9_6_0_0, 1_1_2_5_0.2_0_6_1, 4_8_9_3_5_3.8_4_3_8, 8_3_7_1_2_2.7_5_0_0, 1_4_7_9_6_7.5_1_5_6, 1_6_5_7_3_2.3_4_3_8] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""area"""] , __UpperCamelCase ) )
# verify boxes
__a : Optional[int] = torch.Size([6, 4] )
self.assertEqual(encoding["""labels"""][0]["""boxes"""].shape , __UpperCamelCase )
__a : Any = torch.tensor([0.5_5_0_3, 0.2_7_6_5, 0.0_6_0_4, 0.2_2_1_5] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""boxes"""][0] , __UpperCamelCase , atol=1E-3 ) )
# verify image_id
__a : Union[str, Any] = torch.tensor([3_9769] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""image_id"""] , __UpperCamelCase ) )
# verify is_crowd
__a : List[Any] = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""iscrowd"""] , __UpperCamelCase ) )
# verify class_labels
__a : Any = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""class_labels"""] , __UpperCamelCase ) )
# verify orig_size
__a : Any = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""orig_size"""] , __UpperCamelCase ) )
# verify size
__a : str = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""size"""] , __UpperCamelCase ) )
@slow
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : List[Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
with open("""./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt""" , """r""" ) as f:
__a : Tuple = json.loads(f.read() )
__a : str = {"""file_name""": """000000039769.png""", """image_id""": 3_9769, """segments_info""": target}
__a : int = pathlib.Path("""./tests/fixtures/tests_samples/COCO/coco_panoptic""" )
# encode them
__a : List[str] = DetrImageProcessor.from_pretrained("""facebook/detr-resnet-50-panoptic""" )
__a : Tuple = image_processing(images=__UpperCamelCase , annotations=__UpperCamelCase , masks_path=__UpperCamelCase , return_tensors="""pt""" )
# verify pixel values
__a : List[str] = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding["""pixel_values"""].shape , __UpperCamelCase )
__a : Any = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] )
self.assertTrue(torch.allclose(encoding["""pixel_values"""][0, 0, 0, :3] , __UpperCamelCase , atol=1E-4 ) )
# verify area
__a : Optional[Any] = torch.tensor([1_4_7_9_7_9.6_8_7_5, 1_6_5_5_2_7.0_4_6_9, 4_8_4_6_3_8.5_9_3_8, 1_1_2_9_2.9_3_7_5, 5_8_7_9.6_5_6_2, 7_6_3_4.1_1_4_7] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""area"""] , __UpperCamelCase ) )
# verify boxes
__a : Optional[Any] = torch.Size([6, 4] )
self.assertEqual(encoding["""labels"""][0]["""boxes"""].shape , __UpperCamelCase )
__a : List[str] = torch.tensor([0.2_6_2_5, 0.5_4_3_7, 0.4_6_8_8, 0.8_6_2_5] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""boxes"""][0] , __UpperCamelCase , atol=1E-3 ) )
# verify image_id
__a : List[str] = torch.tensor([3_9769] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""image_id"""] , __UpperCamelCase ) )
# verify is_crowd
__a : Optional[int] = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""iscrowd"""] , __UpperCamelCase ) )
# verify class_labels
__a : Optional[int] = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""class_labels"""] , __UpperCamelCase ) )
# verify masks
__a : Union[str, Any] = 82_2873
self.assertEqual(encoding["""labels"""][0]["""masks"""].sum().item() , __UpperCamelCase )
# verify orig_size
__a : str = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""orig_size"""] , __UpperCamelCase ) )
# verify size
__a : List[Any] = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""size"""] , __UpperCamelCase ) ) | 697 |
'''simple docstring'''
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import DiffusionPipeline
from diffusers.models import AutoencoderKL, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import logging
__SCREAMING_SNAKE_CASE : Union[str, Any] = logging.get_logger(__name__) # pylint: disable=invalid-name
class SCREAMING_SNAKE_CASE__ ( __UpperCamelCase ):
def __init__( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , ):
'''simple docstring'''
super().__init__()
self.register_modules(
vae=__UpperCamelCase , text_encoder=__UpperCamelCase , tokenizer=__UpperCamelCase , unet=__UpperCamelCase , scheduler=__UpperCamelCase , safety_checker=__UpperCamelCase , feature_extractor=__UpperCamelCase , )
def __lowerCamelCase ( self , __UpperCamelCase = "auto" ):
'''simple docstring'''
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
__a : Union[str, Any] = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(__UpperCamelCase )
def __lowerCamelCase ( self ):
'''simple docstring'''
self.enable_attention_slicing(__UpperCamelCase )
@torch.no_grad()
def __call__( self , __UpperCamelCase , __UpperCamelCase = 512 , __UpperCamelCase = 512 , __UpperCamelCase = 50 , __UpperCamelCase = 7.5 , __UpperCamelCase = None , __UpperCamelCase = 1 , __UpperCamelCase = 0.0 , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = "pil" , __UpperCamelCase = True , __UpperCamelCase = None , __UpperCamelCase = 1 , __UpperCamelCase = None , **__UpperCamelCase , ):
'''simple docstring'''
if isinstance(__UpperCamelCase , __UpperCamelCase ):
__a : Union[str, Any] = 1
elif isinstance(__UpperCamelCase , __UpperCamelCase ):
__a : Tuple = len(__UpperCamelCase )
else:
raise ValueError(f"""`prompt` has to be of type `str` or `list` but is {type(__UpperCamelCase )}""" )
if height % 8 != 0 or width % 8 != 0:
raise ValueError(f"""`height` and `width` have to be divisible by 8 but are {height} and {width}.""" )
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(__UpperCamelCase , __UpperCamelCase ) or callback_steps <= 0)
):
raise ValueError(
f"""`callback_steps` has to be a positive integer but is {callback_steps} of type"""
f""" {type(__UpperCamelCase )}.""" )
# get prompt text embeddings
__a : Tuple = self.tokenizer(
__UpperCamelCase , padding="""max_length""" , max_length=self.tokenizer.model_max_length , return_tensors="""pt""" , )
__a : Union[str, Any] = text_inputs.input_ids
if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
__a : str = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
logger.warning(
"""The following part of your input was truncated because CLIP can only handle sequences up to"""
f""" {self.tokenizer.model_max_length} tokens: {removed_text}""" )
__a : Optional[int] = text_input_ids[:, : self.tokenizer.model_max_length]
if text_embeddings is None:
__a : int = self.text_encoder(text_input_ids.to(self.device ) )[0]
# duplicate text embeddings for each generation per prompt, using mps friendly method
__a , __a , __a : Union[str, Any] = text_embeddings.shape
__a : Optional[Any] = text_embeddings.repeat(1 , __UpperCamelCase , 1 )
__a : Union[str, Any] = text_embeddings.view(bs_embed * num_images_per_prompt , __UpperCamelCase , -1 )
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
__a : Any = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
__a : List[str]
if negative_prompt is None:
__a : Optional[Any] = [""""""]
elif type(__UpperCamelCase ) is not type(__UpperCamelCase ):
raise TypeError(
f"""`negative_prompt` should be the same type to `prompt`, but got {type(__UpperCamelCase )} !="""
f""" {type(__UpperCamelCase )}.""" )
elif isinstance(__UpperCamelCase , __UpperCamelCase ):
__a : Any = [negative_prompt]
elif batch_size != len(__UpperCamelCase ):
raise ValueError(
f"""`negative_prompt`: {negative_prompt} has batch size {len(__UpperCamelCase )}, but `prompt`:"""
f""" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"""
""" the batch size of `prompt`.""" )
else:
__a : Tuple = negative_prompt
__a : Any = text_input_ids.shape[-1]
__a : List[str] = self.tokenizer(
__UpperCamelCase , padding="""max_length""" , max_length=__UpperCamelCase , truncation=__UpperCamelCase , return_tensors="""pt""" , )
__a : str = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
__a : List[str] = uncond_embeddings.shape[1]
__a : List[Any] = uncond_embeddings.repeat(__UpperCamelCase , __UpperCamelCase , 1 )
__a : Tuple = uncond_embeddings.view(batch_size * num_images_per_prompt , __UpperCamelCase , -1 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
__a : List[Any] = torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
__a : Tuple = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
__a : List[Any] = (batch_size * num_images_per_prompt, self.unet.config.in_channels, 64, 64)
__a : int = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not exist on mps
__a : Any = torch.randn(
__UpperCamelCase , generator=__UpperCamelCase , device="""cpu""" , dtype=__UpperCamelCase ).to(self.device )
__a : Optional[Any] = torch.randn(__UpperCamelCase , generator=__UpperCamelCase , device="""cpu""" , dtype=__UpperCamelCase ).to(
self.device )
else:
__a : Optional[int] = torch.randn(
__UpperCamelCase , generator=__UpperCamelCase , device=self.device , dtype=__UpperCamelCase )
__a : str = torch.randn(__UpperCamelCase , generator=__UpperCamelCase , device=self.device , dtype=__UpperCamelCase )
else:
if latents_reference.shape != latents_shape:
raise ValueError(f"""Unexpected latents shape, got {latents.shape}, expected {latents_shape}""" )
__a : Optional[Any] = latents_reference.to(self.device )
__a : str = latents.to(self.device )
# This is the key part of the pipeline where we
# try to ensure that the generated images w/ the same seed
# but different sizes actually result in similar images
__a : List[str] = (latents_shape[3] - latents_shape_reference[3]) // 2
__a : int = (latents_shape[2] - latents_shape_reference[2]) // 2
__a : int = latents_shape_reference[3] if dx >= 0 else latents_shape_reference[3] + 2 * dx
__a : Tuple = latents_shape_reference[2] if dy >= 0 else latents_shape_reference[2] + 2 * dy
__a : Optional[Any] = 0 if dx < 0 else dx
__a : Optional[Any] = 0 if dy < 0 else dy
__a : Optional[int] = max(-dx , 0 )
__a : Optional[Any] = max(-dy , 0 )
# import pdb
# pdb.set_trace()
__a : Optional[int] = latents_reference[:, :, dy : dy + h, dx : dx + w]
# set timesteps
self.scheduler.set_timesteps(__UpperCamelCase )
# Some schedulers like PNDM have timesteps as arrays
# It's more optimized to move all timesteps to correct device beforehand
__a : Dict = self.scheduler.timesteps.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
__a : Any = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
__a : List[Any] = """eta""" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
__a : Optional[Any] = {}
if accepts_eta:
__a : Union[str, Any] = eta
for i, t in enumerate(self.progress_bar(__UpperCamelCase ) ):
# expand the latents if we are doing classifier free guidance
__a : List[str] = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
__a : Tuple = self.scheduler.scale_model_input(__UpperCamelCase , __UpperCamelCase )
# predict the noise residual
__a : Union[str, Any] = self.unet(__UpperCamelCase , __UpperCamelCase , encoder_hidden_states=__UpperCamelCase ).sample
# perform guidance
if do_classifier_free_guidance:
__a , __a : List[str] = noise_pred.chunk(2 )
__a : Optional[int] = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# compute the previous noisy sample x_t -> x_t-1
__a : List[Any] = self.scheduler.step(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , **__UpperCamelCase ).prev_sample
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
__a : Optional[Any] = 1 / 0.1_8_2_1_5 * latents
__a : Optional[int] = self.vae.decode(__UpperCamelCase ).sample
__a : List[str] = (image / 2 + 0.5).clamp(0 , 1 )
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
__a : int = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if self.safety_checker is not None:
__a : List[str] = self.feature_extractor(self.numpy_to_pil(__UpperCamelCase ) , return_tensors="""pt""" ).to(
self.device )
__a , __a : int = self.safety_checker(
images=__UpperCamelCase , clip_input=safety_checker_input.pixel_values.to(text_embeddings.dtype ) )
else:
__a : Optional[int] = None
if output_type == "pil":
__a : str = self.numpy_to_pil(__UpperCamelCase )
if not return_dict:
return (image, has_nsfw_concept)
return StableDiffusionPipelineOutput(images=__UpperCamelCase , nsfw_content_detected=__UpperCamelCase ) | 697 | 1 |
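# --- illustrative sketch (editorial addition, not part of the pipeline above) ---
# The guidance step in the denoising loop above combines the unconditional and
# text-conditioned noise predictions. A minimal standalone demonstration with
# dummy tensors; the shapes and the guidance_scale value are assumptions chosen
# only for illustration.
import torch

noise_pred_uncond = torch.zeros(1, 4, 64, 64)
noise_pred_text = torch.ones(1, 4, 64, 64)
guidance_scale = 7.5
# classifier-free guidance: push the prediction away from the unconditional
# direction and toward the text-conditioned direction
noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
assert noise_pred.shape == noise_pred_uncond.shape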
'''simple docstring'''
import tempfile
import unittest
import numpy as np
import transformers
from transformers import GPTaTokenizer, GPTJConfig, is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax, tooslow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
from transformers.models.gptj.modeling_flax_gptj import FlaxGPTJForCausalLM, FlaxGPTJModel
if is_torch_available():
import torch
class SCREAMING_SNAKE_CASE__ :
def __init__( self , __UpperCamelCase , __UpperCamelCase=14 , __UpperCamelCase=7 , __UpperCamelCase=True , __UpperCamelCase=True , __UpperCamelCase=False , __UpperCamelCase=True , __UpperCamelCase=99 , __UpperCamelCase=32 , __UpperCamelCase=4 , __UpperCamelCase=4 , __UpperCamelCase=4 , __UpperCamelCase=37 , __UpperCamelCase="gelu" , __UpperCamelCase=0.1 , __UpperCamelCase=0.1 , __UpperCamelCase=512 , __UpperCamelCase=0.0_2 , ):
'''simple docstring'''
__a : str = parent
__a : int = batch_size
__a : Tuple = seq_length
__a : int = is_training
__a : Union[str, Any] = use_input_mask
__a : Union[str, Any] = use_token_type_ids
__a : Union[str, Any] = use_labels
__a : Any = vocab_size
__a : Optional[int] = hidden_size
__a : List[str] = rotary_dim
__a : Optional[int] = num_hidden_layers
__a : str = num_attention_heads
__a : Optional[Any] = intermediate_size
__a : Any = hidden_act
__a : List[Any] = hidden_dropout_prob
__a : str = attention_probs_dropout_prob
__a : int = max_position_embeddings
__a : Optional[int] = initializer_range
__a : List[str] = None
__a : Tuple = vocab_size - 1
__a : Dict = vocab_size - 1
__a : Any = vocab_size - 1
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__a : str = None
if self.use_input_mask:
__a : str = random_attention_mask([self.batch_size, self.seq_length] )
__a : str = GPTJConfig(
vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , use_cache=__UpperCamelCase , bos_token_id=self.bos_token_id , eos_token_id=self.eos_token_id , pad_token_id=self.pad_token_id , rotary_dim=self.rotary_dim , )
return (config, input_ids, input_mask)
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Optional[Any] = self.prepare_config_and_inputs()
__a , __a , __a : Union[str, Any] = config_and_inputs
__a : str = {"""input_ids""": input_ids, """attention_mask""": attention_mask}
return config, inputs_dict
def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ):
'''simple docstring'''
__a : Optional[int] = 20
__a : Tuple = model_class_name(__UpperCamelCase )
__a : Dict = model.init_cache(input_ids.shape[0] , __UpperCamelCase )
__a : List[str] = jnp.ones((input_ids.shape[0], max_decoder_length) , dtype="""i4""" )
__a : List[Any] = jnp.broadcast_to(
jnp.arange(input_ids.shape[-1] - 1 )[None, :] , (input_ids.shape[0], input_ids.shape[-1] - 1) )
__a : Union[str, Any] = model(
input_ids[:, :-1] , attention_mask=__UpperCamelCase , past_key_values=__UpperCamelCase , position_ids=__UpperCamelCase , )
__a : List[str] = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] , dtype="""i4""" )
__a : Union[str, Any] = model(
input_ids[:, -1:] , attention_mask=__UpperCamelCase , past_key_values=outputs_cache.past_key_values , position_ids=__UpperCamelCase , )
__a : List[Any] = model(__UpperCamelCase )
__a : List[str] = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1E-3 , msg=f"""Max diff is {diff}""" )
def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ):
'''simple docstring'''
__a : Dict = 20
__a : List[Any] = model_class_name(__UpperCamelCase )
__a : Optional[Any] = jnp.concatenate(
[attention_mask, jnp.zeros((attention_mask.shape[0], max_decoder_length - attention_mask.shape[1]) )] , axis=-1 , )
__a : List[Any] = model.init_cache(input_ids.shape[0] , __UpperCamelCase )
__a : Optional[int] = jnp.broadcast_to(
jnp.arange(input_ids.shape[-1] - 1 )[None, :] , (input_ids.shape[0], input_ids.shape[-1] - 1) )
__a : Tuple = model(
input_ids[:, :-1] , attention_mask=__UpperCamelCase , past_key_values=__UpperCamelCase , position_ids=__UpperCamelCase , )
__a : Optional[int] = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] , dtype="""i4""" )
__a : str = model(
input_ids[:, -1:] , past_key_values=outputs_cache.past_key_values , attention_mask=__UpperCamelCase , position_ids=__UpperCamelCase , )
__a : Any = model(__UpperCamelCase , attention_mask=__UpperCamelCase )
__a : Optional[Any] = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1E-3 , msg=f"""Max diff is {diff}""" )
@require_flax
class SCREAMING_SNAKE_CASE__ ( __UpperCamelCase , __UpperCamelCase , unittest.TestCase ):
lowercase__ = (FlaxGPTJModel, FlaxGPTJForCausalLM) if is_flax_available() else ()
lowercase__ = (FlaxGPTJForCausalLM,) if is_flax_available() else ()
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : str = FlaxGPTJModelTester(self )
def __lowerCamelCase ( self ):
'''simple docstring'''
for model_class_name in self.all_model_classes:
__a , __a , __a : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.check_use_cache_forward(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
def __lowerCamelCase ( self ):
'''simple docstring'''
for model_class_name in self.all_model_classes:
__a , __a , __a : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.check_use_cache_forward_with_attn_mask(
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
@tooslow
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Any = GPTaTokenizer.from_pretrained("""gpt2""" , pad_token="""<|endoftext|>""" , padding_side="""left""" )
__a : Optional[int] = tokenizer(["""Hello this is a long string""", """Hey"""] , return_tensors="""np""" , padding=__UpperCamelCase , truncation=__UpperCamelCase )
__a : Tuple = FlaxGPTJForCausalLM.from_pretrained("""EleutherAI/gpt-j-6B""" )
__a : Optional[Any] = False
__a : Optional[Any] = model.config.eos_token_id
__a : int = jax.jit(model.generate )
__a : Optional[Any] = jit_generate(
inputs["""input_ids"""] , attention_mask=inputs["""attention_mask"""] , pad_token_id=tokenizer.pad_token_id ).sequences
__a : Union[str, Any] = tokenizer.batch_decode(__UpperCamelCase , skip_special_tokens=__UpperCamelCase )
__a : Tuple = [
"""Hello this is a long string of text.\n\nI'm trying to get the text of the""",
"""Hey, I'm a little late to the party. I'm going to""",
]
self.assertListEqual(__UpperCamelCase , __UpperCamelCase )
@is_pt_flax_cross_test
def __lowerCamelCase ( self ):
'''simple docstring'''
__a , __a : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
# prepare inputs
__a : List[str] = self._prepare_for_class(__UpperCamelCase , __UpperCamelCase )
__a : Any = {k: torch.tensor(v.tolist() ) for k, v in prepared_inputs_dict.items()}
# load corresponding PyTorch class
__a : Dict = model_class.__name__[4:] # Skip the "Flax" at the beginning
__a : Optional[Any] = getattr(__UpperCamelCase , __UpperCamelCase )
__a , __a : List[str] = pt_inputs["""input_ids"""].shape
__a : List[str] = np.random.randint(0 , seq_length - 1 , size=(batch_size,) )
for batch_idx, start_index in enumerate(__UpperCamelCase ):
__a : Optional[int] = 0
__a : Union[str, Any] = 1
__a : Tuple = 0
__a : Union[str, Any] = 1
__a : Any = pt_model_class(__UpperCamelCase ).eval()
__a : str = model_class(__UpperCamelCase , dtype=jnp.floataa )
__a : Optional[int] = convert_pytorch_state_dict_to_flax(pt_model.state_dict() , __UpperCamelCase )
__a : Any = fx_state
with torch.no_grad():
__a : Dict = pt_model(**__UpperCamelCase ).to_tuple()
__a : str = fx_model(**__UpperCamelCase ).to_tuple()
self.assertEqual(len(__UpperCamelCase ) , len(__UpperCamelCase ) , """Output lengths differ between Flax and PyTorch""" )
for fx_output, pt_output in zip(__UpperCamelCase , __UpperCamelCase ):
self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4E-2 )
with tempfile.TemporaryDirectory() as tmpdirname:
pt_model.save_pretrained(__UpperCamelCase )
__a : Union[str, Any] = model_class.from_pretrained(__UpperCamelCase , from_pt=__UpperCamelCase )
__a : Union[str, Any] = fx_model_loaded(**__UpperCamelCase ).to_tuple()
self.assertEqual(
len(__UpperCamelCase ) , len(__UpperCamelCase ) , """Output lengths differ between Flax and PyTorch""" )
for fx_output_loaded, pt_output in zip(__UpperCamelCase , __UpperCamelCase ):
self.assert_almost_equals(fx_output_loaded[:, -1] , pt_output[:, -1].numpy() , 4E-2 )
@is_pt_flax_cross_test
def __lowerCamelCase ( self ):
'''simple docstring'''
__a , __a : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
# prepare inputs
__a : Optional[int] = self._prepare_for_class(__UpperCamelCase , __UpperCamelCase )
__a : Optional[Any] = {k: torch.tensor(v.tolist() ) for k, v in prepared_inputs_dict.items()}
# load corresponding PyTorch class
__a : str = model_class.__name__[4:] # Skip the "Flax" at the beginning
__a : Union[str, Any] = getattr(__UpperCamelCase , __UpperCamelCase )
__a : Tuple = pt_model_class(__UpperCamelCase ).eval()
__a : str = model_class(__UpperCamelCase , dtype=jnp.floataa )
__a : str = load_flax_weights_in_pytorch_model(__UpperCamelCase , fx_model.params )
__a , __a : Dict = pt_inputs["""input_ids"""].shape
__a : Any = np.random.randint(0 , seq_length - 1 , size=(batch_size,) )
for batch_idx, start_index in enumerate(__UpperCamelCase ):
__a : List[Any] = 0
__a : Optional[Any] = 1
__a : List[Any] = 0
__a : int = 1
# make sure weights are tied in PyTorch
pt_model.tie_weights()
with torch.no_grad():
__a : str = pt_model(**__UpperCamelCase ).to_tuple()
__a : Dict = fx_model(**__UpperCamelCase ).to_tuple()
self.assertEqual(len(__UpperCamelCase ) , len(__UpperCamelCase ) , """Output lengths differ between Flax and PyTorch""" )
for fx_output, pt_output in zip(__UpperCamelCase , __UpperCamelCase ):
self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4E-2 )
with tempfile.TemporaryDirectory() as tmpdirname:
fx_model.save_pretrained(__UpperCamelCase )
__a : Tuple = pt_model_class.from_pretrained(__UpperCamelCase , from_flax=__UpperCamelCase )
with torch.no_grad():
__a : List[str] = pt_model_loaded(**__UpperCamelCase ).to_tuple()
self.assertEqual(
len(__UpperCamelCase ) , len(__UpperCamelCase ) , """Output lengths differ between Flax and PyTorch""" )
for fx_output, pt_output in zip(__UpperCamelCase , __UpperCamelCase ):
self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4E-2 )
@tooslow
def __lowerCamelCase ( self ):
'''simple docstring'''
for model_class_name in self.all_model_classes:
__a : Optional[Any] = model_class_name.from_pretrained("""EleutherAI/gpt-j-6B""" )
__a : Optional[Any] = model(np.ones((1, 1) ) )
self.assertIsNotNone(__UpperCamelCase ) | 697 |
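# --- illustrative sketch (editorial addition) ---
# The cache checks above feed the prompt in two chunks (all tokens but the
# last, then the last token alone) and compare against one full forward pass.
# A shape-level illustration of the position_ids they construct; batch_size
# and seq_length are arbitrary values assumed for the example (requires jax).
import jax.numpy as jnp

batch_size, seq_length = 2, 7
position_ids = jnp.broadcast_to(
    jnp.arange(seq_length - 1)[None, :], (batch_size, seq_length - 1)
)
# position of the final token appended in the second (cached) call
last_position = jnp.array(batch_size * [[seq_length - 1]], dtype="i4")
assert position_ids.shape == (batch_size, seq_length - 1)
assert last_position.shape == (batch_size, 1)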
'''simple docstring'''
import numpy as np
from PIL import Image
def _snake_case ( lowercase , lowercase , lowercase ) -> np.ndarray:
__a : Any = np.array(lowercase )
if arr.shape[0] != arr.shape[1]:
raise ValueError("""The input array is not a square matrix""" )
__a : Union[str, Any] = 0
__a : Dict = 0
__a : Optional[Any] = 0
__a : Tuple = 0
# compute the shape of the output matrix
__a : Optional[int] = (arr.shape[0] - size) // stride + 1
# initialize the output matrix with zeros of shape maxpool_shape
__a : int = np.zeros((maxpool_shape, maxpool_shape) )
while i < arr.shape[0]:
if i + size > arr.shape[0]:
# if the end of the matrix is reached, break
break
while j < arr.shape[1]:
# if the end of the matrix is reached, break
if j + size > arr.shape[1]:
break
# compute the maximum of the pooling matrix
__a : Optional[Any] = np.max(arr[i : i + size, j : j + size] )
# shift the pooling matrix by stride of column pixels
j += stride
mat_j += 1
# shift the pooling matrix by stride of row pixels
i += stride
mat_i += 1
# reset the column index to 0
__a : Optional[Any] = 0
__a : str = 0
return updated_arr
def _snake_case ( lowercase , lowercase , lowercase ) -> np.ndarray:
__a : int = np.array(lowercase )
if arr.shape[0] != arr.shape[1]:
raise ValueError("""The input array is not a square matrix""" )
__a : int = 0
__a : Optional[Any] = 0
__a : str = 0
__a : List[Any] = 0
# compute the shape of the output matrix
__a : int = (arr.shape[0] - size) // stride + 1
# initialize the output matrix with zeros of shape avgpool_shape
__a : Optional[int] = np.zeros((avgpool_shape, avgpool_shape) )
while i < arr.shape[0]:
# if the end of the matrix is reached, break
if i + size > arr.shape[0]:
break
while j < arr.shape[1]:
# if the end of the matrix is reached, break
if j + size > arr.shape[1]:
break
# compute the average of the pooling matrix
__a : Any = int(np.average(arr[i : i + size, j : j + size] ) )
# shift the pooling matrix by stride of column pixels
j += stride
mat_j += 1
# shift the pooling matrix by stride of row pixels
i += stride
mat_i += 1
# reset the column index to 0
__a : str = 0
__a : List[Any] = 0
return updated_arr
# Main Function
if __name__ == "__main__":
from doctest import testmod
testmod(name='avgpooling', verbose=True)
# Loading the image
__SCREAMING_SNAKE_CASE : str = Image.open('path_to_image')
# Converting the image to numpy array and maxpooling, displaying the result
# Ensure that the image is a square matrix
Image.fromarray(maxpooling(np.array(image), size=3, stride=2)).show()
# Converting the image to numpy array and averagepooling, displaying the result
# Ensure that the image is a square matrix
Image.fromarray(avgpooling(np.array(image), size=3, stride=2)).show() | 697 | 1 |
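# --- illustrative sketch (editorial addition) ---
# Hand-checkable example for the pooling helpers above, referred to here by the
# maxpooling/avgpooling names used in the __main__ block. The input values are
# arbitrary and chosen so each 2x2 window is easy to verify by hand.
import numpy as np

example = np.arange(1, 17).reshape(4, 4)
# maxpooling(example, size=2, stride=2) -> [[ 6.,  8.], [14., 16.]]
# avgpooling(example, size=2, stride=2) -> [[ 3.,  5.], [11., 13.]]
#   (note: int() truncates each window average, e.g. 3.5 -> 3)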
'''simple docstring'''
import os
import re
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
__SCREAMING_SNAKE_CASE : Optional[Any] = logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE : str = {'vocab_file': 'spiece.model'}
__SCREAMING_SNAKE_CASE : Dict = {
'vocab_file': {
'google/bigbird-roberta-base': 'https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model',
'google/bigbird-roberta-large': (
'https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model'
),
'google/bigbird-base-trivia-itc': (
'https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model'
),
}
}
__SCREAMING_SNAKE_CASE : Tuple = {
'google/bigbird-roberta-base': 4_096,
'google/bigbird-roberta-large': 4_096,
'google/bigbird-base-trivia-itc': 4_096,
}
class SCREAMING_SNAKE_CASE__ ( __UpperCamelCase ):
lowercase__ = VOCAB_FILES_NAMES
lowercase__ = PRETRAINED_VOCAB_FILES_MAP
lowercase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase__ = ["input_ids", "attention_mask"]
lowercase__ = []
def __init__( self , __UpperCamelCase , __UpperCamelCase="<unk>" , __UpperCamelCase="<s>" , __UpperCamelCase="</s>" , __UpperCamelCase="<pad>" , __UpperCamelCase="[SEP]" , __UpperCamelCase="[MASK]" , __UpperCamelCase="[CLS]" , __UpperCamelCase = None , **__UpperCamelCase , ):
'''simple docstring'''
__a : List[str] = AddedToken(__UpperCamelCase , lstrip=__UpperCamelCase , rstrip=__UpperCamelCase ) if isinstance(__UpperCamelCase , __UpperCamelCase ) else bos_token
__a : Dict = AddedToken(__UpperCamelCase , lstrip=__UpperCamelCase , rstrip=__UpperCamelCase ) if isinstance(__UpperCamelCase , __UpperCamelCase ) else eos_token
__a : int = AddedToken(__UpperCamelCase , lstrip=__UpperCamelCase , rstrip=__UpperCamelCase ) if isinstance(__UpperCamelCase , __UpperCamelCase ) else unk_token
__a : Tuple = AddedToken(__UpperCamelCase , lstrip=__UpperCamelCase , rstrip=__UpperCamelCase ) if isinstance(__UpperCamelCase , __UpperCamelCase ) else pad_token
__a : Optional[int] = AddedToken(__UpperCamelCase , lstrip=__UpperCamelCase , rstrip=__UpperCamelCase ) if isinstance(__UpperCamelCase , __UpperCamelCase ) else cls_token
__a : Optional[int] = AddedToken(__UpperCamelCase , lstrip=__UpperCamelCase , rstrip=__UpperCamelCase ) if isinstance(__UpperCamelCase , __UpperCamelCase ) else sep_token
# Mask token behave like a normal word, i.e. include the space before it
__a : str = AddedToken(__UpperCamelCase , lstrip=__UpperCamelCase , rstrip=__UpperCamelCase ) if isinstance(__UpperCamelCase , __UpperCamelCase ) else mask_token
__a : int = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=__UpperCamelCase , eos_token=__UpperCamelCase , unk_token=__UpperCamelCase , pad_token=__UpperCamelCase , sep_token=__UpperCamelCase , mask_token=__UpperCamelCase , cls_token=__UpperCamelCase , sp_model_kwargs=self.sp_model_kwargs , **__UpperCamelCase , )
__a : Union[str, Any] = vocab_file
__a : List[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(__UpperCamelCase )
@property
def __lowerCamelCase ( self ):
'''simple docstring'''
return self.sp_model.get_piece_size()
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : str = {self.convert_ids_to_tokens(__UpperCamelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ):
'''simple docstring'''
__a : Tuple = self.__dict__.copy()
__a : Any = None
return state
def __setstate__( self , __UpperCamelCase ):
'''simple docstring'''
__a : Optional[int] = d
# for backward compatibility
if not hasattr(self , """sp_model_kwargs""" ):
__a : List[str] = {}
__a : Optional[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def __lowerCamelCase ( self , __UpperCamelCase ):
'''simple docstring'''
return self.sp_model.encode(__UpperCamelCase , out_type=__UpperCamelCase )
def __lowerCamelCase ( self , __UpperCamelCase ):
'''simple docstring'''
return self.sp_model.piece_to_id(__UpperCamelCase )
def __lowerCamelCase ( self , __UpperCamelCase ):
'''simple docstring'''
__a : List[str] = self.sp_model.IdToPiece(__UpperCamelCase )
return token
def __lowerCamelCase ( self , __UpperCamelCase ):
'''simple docstring'''
__a : List[Any] = []
__a : Union[str, Any] = """"""
__a : Any = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(__UpperCamelCase ) + token
__a : Tuple = True
__a : str = []
else:
current_sub_tokens.append(__UpperCamelCase )
__a : Tuple = False
out_string += self.sp_model.decode(__UpperCamelCase )
return out_string.strip()
def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase = False , __UpperCamelCase = None , __UpperCamelCase = True , **__UpperCamelCase , ):
'''simple docstring'''
__a : Dict = kwargs.pop("""use_source_tokenizer""" , __UpperCamelCase )
__a : Tuple = self.convert_ids_to_tokens(__UpperCamelCase , skip_special_tokens=__UpperCamelCase )
# To avoid mixing byte-level and unicode for byte-level BPT
# we need to build string separately for added tokens and byte-level tokens
# cf. https://github.com/huggingface/transformers/issues/1133
__a : str = []
__a : Optional[Any] = []
for token in filtered_tokens:
if skip_special_tokens and token in self.all_special_ids:
continue
if token in self.added_tokens_encoder:
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(__UpperCamelCase ) )
__a : Optional[Any] = []
sub_texts.append(__UpperCamelCase )
else:
current_sub_text.append(__UpperCamelCase )
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(__UpperCamelCase ) )
# Mimic the behavior of the Rust tokenizer:
# No space before [MASK] and [SEP]
if spaces_between_special_tokens:
__a : List[str] = re.sub(r""" (\[(MASK|SEP)\])""" , r"""\1""" , """ """.join(__UpperCamelCase ) )
else:
__a : List[Any] = """""".join(__UpperCamelCase )
__a : List[Any] = (
clean_up_tokenization_spaces
if clean_up_tokenization_spaces is not None
else self.clean_up_tokenization_spaces
)
if clean_up_tokenization_spaces:
__a : Optional[int] = self.clean_up_tokenization(__UpperCamelCase )
return clean_text
else:
return text
def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase = None ):
'''simple docstring'''
if not os.path.isdir(__UpperCamelCase ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
__a : Dict = os.path.join(
__UpperCamelCase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__UpperCamelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , __UpperCamelCase )
elif not os.path.isfile(self.vocab_file ):
with open(__UpperCamelCase , """wb""" ) as fi:
__a : List[Any] = self.sp_model.serialized_model_proto()
fi.write(__UpperCamelCase )
return (out_vocab_file,)
def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase = None ):
'''simple docstring'''
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
__a : int = [self.cls_token_id]
__a : Any = [self.sep_token_id]
return cls + token_ids_a + sep + token_ids_a + sep
def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase = None , __UpperCamelCase = False ):
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__UpperCamelCase , token_ids_a=__UpperCamelCase , already_has_special_tokens=__UpperCamelCase )
if token_ids_a is None:
return [1] + ([0] * len(__UpperCamelCase )) + [1]
return [1] + ([0] * len(__UpperCamelCase )) + [1] + ([0] * len(__UpperCamelCase )) + [1]
def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase = None ):
'''simple docstring'''
__a : Optional[Any] = [self.sep_token_id]
__a : str = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] | 697 |
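# --- illustrative sketch (editorial addition) ---
# The three helpers above implement the BigBird sequence layouts:
#   single sequence: [CLS] A [SEP]          -> token_type_ids all 0
#   sentence pair:   [CLS] A [SEP] B [SEP]  -> 0s for segment A, 1s for segment B
# Shape check with plain lists; the ids below are placeholders, not real
# vocabulary ids.
cls_id, sep_id = 65, 66
tokens_a, tokens_b = [10, 10], [11]
pair_ids = [cls_id] + tokens_a + [sep_id] + tokens_b + [sep_id]
token_type_ids = [0] * (len(tokens_a) + 2) + [1] * (len(tokens_b) + 1)
assert len(pair_ids) == len(token_type_ids) == 6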
'''simple docstring'''
import qiskit
def _snake_case ( lowercase , lowercase ) -> qiskit.result.counts.Counts:
__a : Any = qiskit.Aer.get_backend("""aer_simulator""" )
# Create a Quantum Circuit acting on the q register
__a : str = qiskit.QuantumCircuit(lowercase , lowercase )
# Map the quantum measurement to the classical bits
circuit.measure([0] , [0] )
# Execute the circuit on the simulator
__a : Any = qiskit.execute(lowercase , lowercase , shots=1_0_0_0 )
# Return the histogram data of the results of the experiment.
return job.result().get_counts(lowercase )
if __name__ == "__main__":
    print(f'''Total counts for various states are: {single_qubit_measure(1, 1)}''')
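# --- illustrative usage note (editorial addition) ---
# The function builds an n-qubit circuit, measures only qubit 0 into classical
# bit 0, and runs 1000 shots on the Aer simulator. With no gates applied the
# qubit stays in |0>, so the expected histogram is all-zero outcomes:
#   counts = single_qubit_measure(1, 1)
#   print(counts)  # -> {'0': 1000}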
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__SCREAMING_SNAKE_CASE : int = logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE : List[str] = {
'google/vit-base-patch16-224': 'https://huggingface.co/vit-base-patch16-224/resolve/main/config.json',
# See all ViT models at https://huggingface.co/models?filter=vit
}
class SCREAMING_SNAKE_CASE__ ( __UpperCamelCase ):
lowercase__ = "vit"
def __init__( self , __UpperCamelCase=768 , __UpperCamelCase=12 , __UpperCamelCase=12 , __UpperCamelCase=3072 , __UpperCamelCase="gelu" , __UpperCamelCase=0.0 , __UpperCamelCase=0.0 , __UpperCamelCase=0.0_2 , __UpperCamelCase=1E-12 , __UpperCamelCase=224 , __UpperCamelCase=16 , __UpperCamelCase=3 , __UpperCamelCase=True , __UpperCamelCase=16 , **__UpperCamelCase , ):
'''simple docstring'''
super().__init__(**__UpperCamelCase )
__a : Optional[int] = hidden_size
__a : Dict = num_hidden_layers
__a : Optional[int] = num_attention_heads
__a : Any = intermediate_size
__a : Optional[Any] = hidden_act
__a : int = hidden_dropout_prob
__a : List[str] = attention_probs_dropout_prob
__a : Tuple = initializer_range
__a : List[str] = layer_norm_eps
__a : Tuple = image_size
__a : str = patch_size
__a : Any = num_channels
__a : Optional[int] = qkv_bias
__a : Dict = encoder_stride
class SCREAMING_SNAKE_CASE__ ( __UpperCamelCase ):
lowercase__ = version.parse("1.11" )
@property
def __lowerCamelCase ( self ):
'''simple docstring'''
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
@property
def __lowerCamelCase ( self ):
'''simple docstring'''
return 1E-4 | 697 |
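# --- illustrative sketch (editorial addition) ---
# For the default configuration above (image_size=224, patch_size=16), the
# encoder sequence length is the number of patches plus one [CLS] token.
# Pure arithmetic; no transformers import needed.
image_size, patch_size = 224, 16
num_patches = (image_size // patch_size) ** 2  # 14 * 14 = 196
seq_len = num_patches + 1                      # + [CLS] -> 197
assert seq_len == 197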
'''simple docstring'''
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
WavaVecaConformerConfig,
WavaVecaConformerForCTC,
WavaVecaConformerForPreTraining,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
__SCREAMING_SNAKE_CASE : str = logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE : Any = {
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.linear_k': 'encoder.layers.*.self_attn.linear_k',
'self_attn.linear_v': 'encoder.layers.*.self_attn.linear_v',
'self_attn.linear_q': 'encoder.layers.*.self_attn.linear_q',
'self_attn.pos_bias_u': 'encoder.layers.*.self_attn.pos_bias_u',
'self_attn.pos_bias_v': 'encoder.layers.*.self_attn.pos_bias_v',
'self_attn.linear_out': 'encoder.layers.*.self_attn.linear_out',
'self_attn.linear_pos': 'encoder.layers.*.self_attn.linear_pos',
'self_attn.rotary_emb': 'encoder.embed_positions',
'self_attn_layer_norm': 'encoder.layers.*.self_attn_layer_norm',
'conv_module.pointwise_conv1': 'encoder.layers.*.conv_module.pointwise_conv1',
'conv_module.pointwise_conv2': 'encoder.layers.*.conv_module.pointwise_conv2',
'conv_module.depthwise_conv': 'encoder.layers.*.conv_module.depthwise_conv',
'conv_module.batch_norm': 'encoder.layers.*.conv_module.batch_norm',
'conv_module.layer_norm': 'encoder.layers.*.conv_module.layer_norm',
'ffn1.w_1': 'encoder.layers.*.ffn1.intermediate_dense',
'ffn1.w_2': 'encoder.layers.*.ffn1.output_dense',
'ffn1.layer_norm': 'encoder.layers.*.ffn1_layer_norm',
'ffn2.w_1': 'encoder.layers.*.ffn2.intermediate_dense',
'ffn2.w_2': 'encoder.layers.*.ffn2.output_dense',
'ffn2.layer_norm': 'encoder.layers.*.ffn2_layer_norm',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'quantizer.weight_proj': 'quantizer.weight_proj',
'quantizer.vars': 'quantizer.codevectors',
'project_q': 'project_q',
'final_proj': 'project_hid',
'w2v_encoder.proj': 'lm_head',
'mask_emb': 'masked_spec_embed',
}
__SCREAMING_SNAKE_CASE : Optional[Any] = [
'lm_head',
'quantizer.weight_proj',
'quantizer.codevectors',
'project_q',
'project_hid',
]
def _snake_case ( lowercase , lowercase , lowercase , lowercase , lowercase ) -> List[Any]:
for attribute in key.split(""".""" ):
__a : str = getattr(lowercase , lowercase )
if weight_type is not None:
__a : Dict = getattr(lowercase , lowercase ).shape
else:
__a : Dict = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
F"""Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be"""
F""" {value.shape} for {full_name}""" )
if weight_type == "weight":
__a : Any = value
elif weight_type == "weight_g":
__a : int = value
elif weight_type == "weight_v":
__a : int = value
elif weight_type == "bias":
__a : List[Any] = value
elif weight_type == "running_mean":
__a : Union[str, Any] = value
elif weight_type == "running_var":
__a : Tuple = value
elif weight_type == "num_batches_tracked":
__a : Optional[int] = value
elif weight_type == "inv_freq":
__a : List[str] = value
else:
__a : List[str] = value
logger.info(F"""{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.""" )
def _snake_case ( lowercase , lowercase , lowercase ) -> Dict:
__a : Dict = []
__a : Dict = fairseq_model.state_dict()
__a : Tuple = hf_model.wavaveca_conformer.feature_extractor
for name, value in fairseq_dict.items():
__a : int = False
if "conv_layers" in name:
load_conv_layer(
lowercase , lowercase , lowercase , lowercase , hf_model.config.feat_extract_norm == """group""" , )
__a : List[Any] = True
else:
for key, mapped_key in MAPPING.items():
__a : Optional[int] = """wav2vec2_conformer.""" + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split("""w2v_model.""" )[-1] == name.split(""".""" )[0]:
__a : str = True
if "*" in mapped_key:
__a : Optional[int] = name.split(lowercase )[0].split(""".""" )[-2]
__a : List[Any] = mapped_key.replace("""*""" , lowercase )
if "pos_bias_u" in name:
__a : Union[str, Any] = None
elif "pos_bias_v" in name:
__a : List[Any] = None
elif "weight_g" in name:
__a : List[Any] = """weight_g"""
elif "weight_v" in name:
__a : List[Any] = """weight_v"""
elif "bias" in name:
__a : Optional[int] = """bias"""
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
__a : str = """weight"""
elif "running_mean" in name:
__a : List[str] = """running_mean"""
elif "inv_freq" in name:
__a : Dict = """inv_freq"""
elif "running_var" in name:
__a : Union[str, Any] = """running_var"""
elif "num_batches_tracked" in name:
__a : int = """num_batches_tracked"""
else:
__a : Optional[int] = None
set_recursively(lowercase , lowercase , lowercase , lowercase , lowercase )
continue
if not is_used:
unused_weights.append(lowercase )
logger.warning(F"""Unused weights: {unused_weights}""" )
def _snake_case ( lowercase , lowercase , lowercase , lowercase , lowercase ) -> List[str]:
__a : Optional[Any] = full_name.split("""conv_layers.""" )[-1]
__a : Union[str, Any] = name.split(""".""" )
__a : Optional[Any] = int(items[0] )
__a : int = int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" )
__a : Dict = value
logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" )
__a : str = value
logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.""" )
__a : Dict = value
logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.""" )
__a : Union[str, Any] = value
logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
else:
unused_weights.append(lowercase )
@torch.no_grad()
def _snake_case ( lowercase , lowercase , lowercase=None , lowercase=None , lowercase=True ) -> Optional[Any]:
if config_path is not None:
__a : Any = WavaVecaConformerConfig.from_pretrained(lowercase , hidden_act="""swish""" )
else:
__a : Optional[int] = WavaVecaConformerConfig()
if "rope" in checkpoint_path:
__a : Optional[Any] = """rotary"""
if is_finetuned:
if dict_path:
__a : List[Any] = Dictionary.load(lowercase )
# important change bos & pad token id since CTC symbol is <pad> and
# not <s> as in fairseq
__a : int = target_dict.pad_index
__a : List[str] = target_dict.bos_index
__a : str = target_dict.eos_index
__a : Dict = len(target_dict.symbols )
__a : Any = os.path.join(lowercase , """vocab.json""" )
if not os.path.isdir(lowercase ):
logger.error("""--pytorch_dump_folder_path ({}) should be a directory""".format(lowercase ) )
return
os.makedirs(lowercase , exist_ok=lowercase )
__a : Dict = target_dict.indices
# fairseq has the <pad> and <s> switched
__a : Optional[Any] = 0
__a : List[Any] = 1
with open(lowercase , """w""" , encoding="""utf-8""" ) as vocab_handle:
json.dump(lowercase , lowercase )
__a : int = WavaVecaCTCTokenizer(
lowercase , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token="""|""" , do_lower_case=lowercase , )
__a : Optional[int] = True if config.feat_extract_norm == """layer""" else False
__a : Dict = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=1_6_0_0_0 , padding_value=0 , do_normalize=lowercase , return_attention_mask=lowercase , )
__a : str = WavaVecaProcessor(feature_extractor=lowercase , tokenizer=lowercase )
processor.save_pretrained(lowercase )
__a : List[str] = WavaVecaConformerForCTC(lowercase )
else:
__a : Optional[int] = WavaVecaConformerForPreTraining(lowercase )
if is_finetuned:
__a , __a , __a : Dict = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={"""data""": """/""".join(dict_path.split("""/""" )[:-1] )} )
else:
__a : Optional[int] = argparse.Namespace(task="""audio_pretraining""" )
__a : Tuple = fairseq.tasks.setup_task(lowercase )
__a , __a , __a : int = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] , task=lowercase )
__a : Any = model[0].eval()
recursively_load_weights(lowercase , lowercase , not is_finetuned )
hf_wavavec.save_pretrained(lowercase )
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE : Dict = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--not_finetuned', action='store_true', help='Whether the model to convert is a fine-tuned model or not'
)
__SCREAMING_SNAKE_CASE : int = parser.parse_args()
convert_wavaveca_conformer_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
) | 697 | 1 |
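# --- illustrative usage note (editorial addition; paths are placeholders) ---
# Typical invocation of the conversion script above, assuming it is saved as
# convert_wav2vec2_conformer.py:
#   python convert_wav2vec2_conformer.py \
#       --checkpoint_path /path/to/fairseq_checkpoint.pt \
#       --pytorch_dump_folder_path /path/to/output_dir \
#       --config_path /path/to/config.json \
#       --not_finetuned
# Drop --not_finetuned (and pass --dict_path) when converting a fine-tuned CTC
# model so the tokenizer and processor are written out as well.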
'''simple docstring'''
import argparse
import fairseq
import torch
from torch import nn
from transformers import (
MBartaaTokenizer,
MBartConfig,
MBartForCausalLM,
SpeechEncoderDecoderConfig,
SpeechEncoderDecoderModel,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaModel,
logging,
)
logging.set_verbosity_info()
__SCREAMING_SNAKE_CASE : Tuple = logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE : Any = {
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
'fc2': 'encoder.layers.*.feed_forward.output_dense',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'quantizer.weight_proj': 'quantizer.weight_proj',
'quantizer.vars': 'quantizer.codevectors',
'project_q': 'project_q',
'final_proj': 'project_hid',
'w2v_encoder.proj': 'lm_head',
'mask_emb': 'masked_spec_embed',
}
__SCREAMING_SNAKE_CASE : List[Any] = [
'lm_head',
'quantizer.weight_proj',
'quantizer.codevectors',
'project_q',
'project_hid',
]
def _snake_case ( lowercase , lowercase , lowercase , lowercase , lowercase ) -> Union[str, Any]:
for attribute in key.split(""".""" ):
__a : Union[str, Any] = getattr(lowercase , lowercase )
if weight_type is not None:
__a : Union[str, Any] = getattr(lowercase , lowercase ).shape
else:
__a : Dict = hf_pointer.shape
assert hf_shape == value.shape, (
F"""Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be"""
F""" {value.shape} for {full_name}"""
)
if weight_type == "weight":
__a : List[str] = value
elif weight_type == "weight_g":
__a : Optional[Any] = value
elif weight_type == "weight_v":
__a : Optional[Any] = value
elif weight_type == "bias":
__a : Optional[int] = value
else:
__a : int = value
logger.info(F"""{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.""" )
def _snake_case ( lowercase , lowercase ) -> Union[str, Any]:
__a : Tuple = []
__a : List[Any] = fairseq_model.state_dict()
__a : Optional[Any] = hf_model.feature_extractor
__a : Optional[Any] = hf_model.adapter
for name, value in fairseq_dict.items():
__a : Union[str, Any] = False
if "conv_layers" in name:
load_conv_layer(
lowercase , lowercase , lowercase , lowercase , hf_model.config.feat_extract_norm == """group""" , )
__a : Optional[Any] = True
elif any(x in name for x in ["""adaptor""", """w2v_encoder.proj.""", """w2v_proj_ln."""] ):
load_adapter(lowercase , lowercase , lowercase , lowercase )
__a : Optional[int] = True
else:
for key, mapped_key in MAPPING.items():
if key in name or key.split("""w2v_model.""" )[-1] == name.split(""".""" )[0]:
__a : List[Any] = True
if "*" in mapped_key:
__a : Union[str, Any] = name.split(lowercase )[0].split(""".""" )[-2]
__a : Tuple = mapped_key.replace("""*""" , lowercase )
if "weight_g" in name:
__a : List[Any] = """weight_g"""
elif "weight_v" in name:
__a : Tuple = """weight_v"""
elif "bias" in name:
__a : Union[str, Any] = """bias"""
elif "weight" in name:
__a : Optional[Any] = """weight"""
else:
__a : Dict = None
set_recursively(lowercase , lowercase , lowercase , lowercase , lowercase )
continue
if not is_used:
unused_weights.append(lowercase )
logger.warning(F"""Unused weights: {unused_weights}""" )
def _snake_case ( lowercase , lowercase , lowercase , lowercase , lowercase ) -> Dict:
__a : str = full_name.split("""conv_layers.""" )[-1]
__a : Dict = name.split(""".""" )
__a : List[Any] = int(items[0] )
__a : str = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."""
)
__a : List[Any] = value
logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."""
)
__a : Any = value
logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
F"""{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"""
" found."
)
__a : Dict = value
logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."""
)
__a : List[Any] = value
logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
else:
unused_weights.append(lowercase )
def _snake_case ( lowercase , lowercase , lowercase , lowercase ) -> Optional[Any]:
__a : Tuple = full_name.split("""adaptor.""" )[-1]
__a : Tuple = name.split(""".""" )
if items[1].isdigit():
__a : Union[str, Any] = int(items[1] )
else:
__a : Optional[Any] = None
if "adaptor" not in full_name:
if "proj_ln" in full_name:
# has to be layer norm
if "bias" in name:
assert (
value.shape == adapter.proj_layer_norm.bias.data.shape
), F"""{full_name} has size {value.shape}, but {adapter.proj_layer_norm.bias.data.shape} was found."""
__a : Optional[int] = value
logger.info(F"""Adapter proj layer norm bias was initialized from {full_name}.""" )
if "weight" in name:
assert (
value.shape == adapter.proj_layer_norm.weight.data.shape
), F"""{full_name} has size {value.shape}, but {adapter.proj_layer_norm.weight.data.shape} was found."""
__a : str = value
else:
# has to be projection layer
if "bias" in name:
assert (
value.shape == adapter.proj.bias.data.shape
), F"""{full_name} has size {value.shape}, but {adapter.proj.bias.data.shape} was found."""
__a : int = value
logger.info(F"""Adapter proj layer bias was initialized from {full_name}.""" )
if "weight" in name:
assert (
value.shape == adapter.proj.weight.data.shape
), F"""{full_name} has size {value.shape}, but {adapter.proj.weight.data.shape} was found."""
__a : List[Any] = value
logger.info(F"""Adapter proj layer weight was initialized from {full_name}.""" )
elif isinstance(lowercase , lowercase ):
if "bias" in name:
assert (
value.shape == adapter.layers[layer_id].conv.bias.data.shape
), F"""{full_name} has size {value.shape}, but {adapter.layers[layer_id].conv.bias.data.shape} was found."""
__a : int = value
logger.info(F"""Adapter layer {layer_id} bias was initialized from {full_name}.""" )
elif "weight" in name:
assert (
value.shape == adapter.layers[layer_id].conv.weight.data.shape
), F"""{full_name} has size {value.shape}, but {adapter.layers[layer_id].conv.weight.data.shape} was found."""
__a : List[Any] = value
logger.info(F"""Adapter layer {layer_id} bias was initialized from {full_name}.""" )
else:
unused_weights.append(lowercase )
def _snake_case ( lowercase ) -> Union[str, Any]:
__a , __a : List[Any] = emb.weight.shape
__a : Optional[Any] = nn.Linear(lowercase , lowercase , bias=lowercase )
__a : Tuple = emb.weight.data
return lin_layer
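# --- illustrative sketch (editorial addition) ---
# The helper above turns an embedding matrix into an output projection with
# tied weights. A cleaner standalone equivalent (dimensions are arbitrary):
import torch
from torch import nn

emb = nn.Embedding(100, 8)
vocab_size, emb_size = emb.weight.shape
lm_head = nn.Linear(emb_size, vocab_size, bias=False)
lm_head.weight.data = emb.weight.data  # reuse the embedding's weight tensor
assert torch.equal(lm_head.weight, emb.weight)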
@torch.no_grad()
def _snake_case ( lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , ) -> Any:
__a : Optional[Any] = WavaVecaConfig.from_pretrained(
lowercase , add_adapter=lowercase , adapter_stride=lowercase , adapter_kernel_size=lowercase , use_auth_token=lowercase , output_hidden_size=lowercase , )
__a : Tuple = MBartConfig.from_pretrained(lowercase )
# load model
__a , __a , __a : Tuple = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={
"""config_yaml""": config_yaml_path,
"""data""": """/""".join(dict_path.split("""/""" )[:-1] ),
"""w2v_path""": checkpoint_path,
"""load_pretrained_decoder_from""": None,
} , )
__a : Optional[Any] = model[0].eval()
# load feature extractor
__a : Optional[Any] = WavaVecaFeatureExtractor.from_pretrained(lowercase , use_auth_token=lowercase )
# set weights for wav2vec2 encoder
__a : Union[str, Any] = WavaVecaModel(lowercase )
recursively_load_weights_wavaveca(model.encoder , lowercase )
# load decoder weights
__a : Tuple = MBartForCausalLM(lowercase )
__a , __a : Union[str, Any] = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict() , strict=lowercase )
logger.warning(F"""The following keys are missing when loading the decoder weights: {missing_keys}""" )
logger.warning(F"""The following keys are unexpected when loading the decoder weights: {unexpected_keys}""" )
__a : str = SpeechEncoderDecoderModel(encoder=lowercase , decoder=lowercase )
__a : Optional[int] = False
__a : Optional[int] = MBartaaTokenizer(lowercase )
tokenizer.save_pretrained(lowercase )
__a : str = hf_wavavec.config.to_dict()
__a : Optional[int] = tokenizer.pad_token_id
__a : Any = tokenizer.bos_token_id
__a : List[Any] = tokenizer.eos_token_id
__a : List[str] = """mbart50"""
__a : Tuple = """wav2vec2"""
__a : Tuple = tokenizer.eos_token_id
__a : int = 2_5_0_0_0_4
__a : Any = tokenizer.eos_token_id
__a : int = SpeechEncoderDecoderConfig.from_dict(lowercase )
hf_wavavec.save_pretrained(lowercase )
feature_extractor.save_pretrained(lowercase )
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE : str = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
parser.add_argument('--config_yaml_path', default=None, type=str, help='Path to yaml file of fine-tuned model')
parser.add_argument(
'--encoder_config_path',
default='facebook/wav2vec2-xls-r-1b',
type=str,
help='Path to hf encoder wav2vec2 checkpoint config',
)
parser.add_argument(
'--decoder_config_path',
default='facebook/mbart-large-50-one-to-many-mmt',
type=str,
help='Path to hf decoder checkpoint config',
)
    parser.add_argument('--add_adapter', default=True, type=bool, help='whether to add model adapter layers')
parser.add_argument('--adapter_stride', default=2, type=int, help='stride of adapter layers')
parser.add_argument('--adapter_kernel_size', default=3, type=int, help='kernel size of adapter layers')
parser.add_argument('--encoder_output_dim', default=1_024, type=int, help='encoder output dim')
parser.add_argument('--start_token_id', default=250_004, type=int, help='`decoder_start_token_id` of model config')
__SCREAMING_SNAKE_CASE : str = parser.parse_args()
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.dict_path,
args.config_yaml_path,
encoder_config_path=args.encoder_config_path,
decoder_config_path=args.decoder_config_path,
add_adapter=args.add_adapter,
adapter_kernel_size=args.adapter_kernel_size,
adapter_stride=args.adapter_stride,
decoder_start_token_id=args.start_token_id,
encoder_output_dim=args.encoder_output_dim,
) | 697 |
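# --- illustrative usage note (editorial addition; paths are placeholders) ---
# Example invocation, assuming the script above is saved as
# convert_wav2vec2_mbart.py:
#   python convert_wav2vec2_mbart.py \
#       --checkpoint_path /path/to/fairseq_s2t_checkpoint.pt \
#       --pytorch_dump_folder_path /path/to/output_dir \
#       --dict_path /path/to/dict.txt \
#       --config_yaml_path /path/to/config.yaml
# The remaining flags fall back to the defaults declared in the parser, e.g.
# facebook/wav2vec2-xls-r-1b for the encoder config.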
'''simple docstring'''
import warnings
from functools import wraps
from typing import Callable
def _snake_case ( lowercase ) -> Callable:
@wraps(lowercase )
def _inner_fn(*lowercase , **lowercase ):
warnings.warn(
(F"""'{fn.__name__}' is experimental and might be subject to breaking changes in the future.""") , lowercase , )
return fn(*lowercase , **lowercase )
return _inner_fn | 697 | 1 |
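# --- illustrative usage note (editorial addition) ---
# The wrapper above warns on every call and then delegates to the wrapped
# function. Sketch of its behaviour, with `experimental` standing in for
# whatever public name the decorator is exported under:
#
#   @experimental
#   def new_feature(x):
#       return x * 2
#
#   with warnings.catch_warnings(record=True) as caught:
#       warnings.simplefilter("always")
#       assert new_feature(3) == 6
#       assert "experimental" in str(caught[0].message)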
'''simple docstring'''
import re
from filelock import FileLock
try:
import nltk
__SCREAMING_SNAKE_CASE : Optional[int] = True
except (ImportError, ModuleNotFoundError):
__SCREAMING_SNAKE_CASE : str = False
if NLTK_AVAILABLE:
with FileLock('.lock') as lock:
nltk.download('punkt', quiet=True)
def _snake_case ( lowercase ) -> str:
re.sub("""<n>""" , """""" , lowercase ) # remove pegasus newline char
assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
return "\n".join(nltk.sent_tokenize(lowercase ) ) | 697 |
'''simple docstring'''
from typing import List, Optional, Union
import numpy as np
from ....audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function
from ....feature_extraction_sequence_utils import SequenceFeatureExtractor
from ....feature_extraction_utils import BatchFeature
from ....file_utils import PaddingStrategy, TensorType
from ....utils import logging
__SCREAMING_SNAKE_CASE : Tuple = logging.get_logger(__name__)
class SCREAMING_SNAKE_CASE__ ( __UpperCamelCase ):
lowercase__ = ["input_features", "attention_mask"]
def __init__( self , __UpperCamelCase=80 , __UpperCamelCase=1_6000 , __UpperCamelCase=0.0 , __UpperCamelCase=10 , __UpperCamelCase=25 , __UpperCamelCase="hamming_window" , __UpperCamelCase=3_2_7_6_8.0 , __UpperCamelCase=0.9_7 , __UpperCamelCase=1.0 , __UpperCamelCase=True , __UpperCamelCase=True , __UpperCamelCase=False , **__UpperCamelCase , ):
'''simple docstring'''
super().__init__(feature_size=__UpperCamelCase , sampling_rate=__UpperCamelCase , padding_value=__UpperCamelCase , **__UpperCamelCase )
__a : List[str] = feature_size
__a : List[str] = sampling_rate
__a : int = padding_value
__a : Any = hop_length
__a : int = win_length
__a : Tuple = frame_signal_scale
__a : Union[str, Any] = preemphasis_coeff
__a : List[str] = mel_floor
__a : Union[str, Any] = normalize_means
__a : Optional[Any] = normalize_vars
__a : Optional[Any] = win_function
__a : Union[str, Any] = return_attention_mask
__a : List[Any] = win_length * sampling_rate // 1000
__a : List[Any] = hop_length * sampling_rate // 1000
__a : Optional[Any] = optimal_fft_length(self.sample_size )
__a : Any = (self.n_fft // 2) + 1
def __lowerCamelCase ( self , __UpperCamelCase ):
'''simple docstring'''
if self.win_function == "hamming_window":
__a : str = window_function(window_length=self.sample_size , name=self.win_function , periodic=__UpperCamelCase )
else:
__a : Dict = window_function(window_length=self.sample_size , name=self.win_function )
__a : Optional[Any] = mel_filter_bank(
num_frequency_bins=self.n_freqs , num_mel_filters=self.feature_size , min_frequency=0.0 , max_frequency=self.sampling_rate / 2.0 , sampling_rate=self.sampling_rate , )
__a : Any = spectrogram(
one_waveform * self.frame_signal_scale , window=__UpperCamelCase , frame_length=self.sample_size , hop_length=self.sample_stride , fft_length=self.n_fft , center=__UpperCamelCase , preemphasis=self.preemphasis_coeff , mel_filters=__UpperCamelCase , mel_floor=self.mel_floor , log_mel="""log""" , )
return msfc_features.T
def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ):
'''simple docstring'''
if self.normalize_means:
__a : int = x[:input_length].mean(axis=0 )
__a : str = np.subtract(__UpperCamelCase , __UpperCamelCase )
if self.normalize_vars:
__a : Dict = x[:input_length].std(axis=0 )
__a : Dict = np.divide(__UpperCamelCase , __UpperCamelCase )
if input_length < x.shape[0]:
__a : Union[str, Any] = padding_value
# make sure array is in float32
__a : Any = x.astype(np.floataa )
return x
def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase = None ):
'''simple docstring'''
__a : Tuple = attention_mask.sum(-1 ) if attention_mask is not None else [x.shape[0] for x in input_features]
return [self._normalize_one(__UpperCamelCase , __UpperCamelCase , self.padding_value ) for x, n in zip(__UpperCamelCase , __UpperCamelCase )]
def __call__( self , __UpperCamelCase , __UpperCamelCase = False , __UpperCamelCase = None , __UpperCamelCase = False , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , **__UpperCamelCase , ):
'''simple docstring'''
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
f"""The model corresponding to this feature extractor: {self} was trained using a sampling rate of"""
f""" {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with"""
f""" {self.sampling_rate} and not {sampling_rate}.""" )
else:
logger.warning(
"""It is strongly recommended to pass the ``sampling_rate`` argument to this function. """
"""Failing to do so can result in silent errors that might be hard to debug.""" )
__a : Tuple = isinstance(__UpperCamelCase , np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(f"""Only mono-channel audio is supported for input to {self}""" )
__a : Tuple = is_batched_numpy or (
isinstance(__UpperCamelCase , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
__a : Tuple = [np.asarray(__UpperCamelCase , dtype=np.floataa ) for speech in raw_speech]
elif not is_batched and not isinstance(__UpperCamelCase , np.ndarray ):
__a : List[str] = np.asarray(__UpperCamelCase , dtype=np.floataa )
elif isinstance(__UpperCamelCase , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
__a : str = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
__a : Any = [raw_speech]
# extract fbank features
__a : str = [self._extract_mfsc_features(__UpperCamelCase ) for one_waveform in raw_speech]
# convert into correct format for padding
__a : Optional[Any] = BatchFeature({"""input_features""": features} )
__a : Any = self.pad(
__UpperCamelCase , padding=__UpperCamelCase , max_length=__UpperCamelCase , truncation=__UpperCamelCase , pad_to_multiple_of=__UpperCamelCase , return_attention_mask=__UpperCamelCase , **__UpperCamelCase , )
# make sure list is in array format
__a : int = padded_inputs.get("""input_features""" )
if isinstance(input_features[0] , __UpperCamelCase ):
__a : Union[str, Any] = [np.asarray(__UpperCamelCase , dtype=np.floataa ) for feature in input_features]
__a : List[str] = padded_inputs.get("""attention_mask""" )
if attention_mask is not None:
__a : Optional[int] = [np.asarray(__UpperCamelCase , dtype=np.intaa ) for array in attention_mask]
if self.normalize_means or self.normalize_vars:
__a : Optional[Any] = (
np.array(__UpperCamelCase , dtype=np.intaa )
if self._get_padding_strategies(__UpperCamelCase , max_length=__UpperCamelCase ) is not PaddingStrategy.DO_NOT_PAD
and padding
else None
)
__a : int = self.normalize(
padded_inputs["""input_features"""] , attention_mask=__UpperCamelCase )
if return_tensors is not None:
__a : List[Any] = padded_inputs.convert_to_tensors(__UpperCamelCase )
return padded_inputs | 697 | 1 |
'''simple docstring'''
import itertools
import os
from collections import Counter, defaultdict
from concurrent.futures import ThreadPoolExecutor, as_completed
import numpy as np
import datasets
from .execute import check_correctness
__SCREAMING_SNAKE_CASE : Optional[Any] = '\\n@misc{chen2021evaluating,\n title={Evaluating Large Language Models Trained on Code},\n author={Mark Chen and Jerry Tworek and Heewoo Jun and Qiming Yuan \\nand Henrique Ponde de Oliveira Pinto and Jared Kaplan and Harri Edwards \\nand Yuri Burda and Nicholas Joseph and Greg Brockman and Alex Ray \\nand Raul Puri and Gretchen Krueger and Michael Petrov and Heidy Khlaaf \\nand Girish Sastry and Pamela Mishkin and Brooke Chan and Scott Gray \\nand Nick Ryder and Mikhail Pavlov and Alethea Power and Lukasz Kaiser \\nand Mohammad Bavarian and Clemens Winter and Philippe Tillet \\nand Felipe Petroski Such and Dave Cummings and Matthias Plappert \\nand Fotios Chantzis and Elizabeth Barnes and Ariel Herbert-Voss \\nand William Hebgen Guss and Alex Nichol and Alex Paino and Nikolas Tezak \\nand Jie Tang and Igor Babuschkin and Suchir Balaji and Shantanu Jain \\nand William Saunders and Christopher Hesse and Andrew N. Carr \\nand Jan Leike and Josh Achiam and Vedant Misra and Evan Morikawa \\nand Alec Radford and Matthew Knight and Miles Brundage and Mira Murati \\nand Katie Mayer and Peter Welinder and Bob McGrew and Dario Amodei \\nand Sam McCandlish and Ilya Sutskever and Wojciech Zaremba},\n year={2021},\n eprint={2107.03374},\n archivePrefix={arXiv},\n primaryClass={cs.LG}\n}\n'
__SCREAMING_SNAKE_CASE : Union[str, Any] = '\\nThis metric implements the evaluation harness for the HumanEval problem solving dataset\ndescribed in the paper "Evaluating Large Language Models Trained on Code"\n(https://arxiv.org/abs/2107.03374).\n'
__SCREAMING_SNAKE_CASE : Union[str, Any] = '\nCalculates how good the predictions are given some references, using certain scores\nArgs:\n    predictions: list of candidates to evaluate. Each candidate should be a list\n        of strings with several code candidates to solve the problem.\n    references: a list with a test for each prediction. Each test should evaluate the\n        correctness of a code candidate.\n    k: number of code candidates to consider in the evaluation (Default: [1, 10, 100])\n    num_workers: number of workers used to evaluate the candidate programs (Default: 4).\n    timeout: maximum time (in seconds) each candidate program may run before it is stopped (Default: 3.0)\nReturns:\n    pass_at_k: dict with pass rates for each k\n    results: dict with granular results of each unittest\nExamples:\n    >>> code_eval = datasets.load_metric("code_eval")\n    >>> test_cases = ["assert add(2,3)==5"]\n    >>> candidates = [["def add(a,b): return a*b", "def add(a, b): return a+b"]]\n    >>> pass_at_k, results = code_eval.compute(references=test_cases, predictions=candidates, k=[1, 2])\n    >>> print(pass_at_k)\n    {\'pass@1\': 0.5, \'pass@2\': 1.0}\n'
__SCREAMING_SNAKE_CASE : Dict = '\n################################################################################\n !!!WARNING!!!\n################################################################################\nThe "code_eval" metric executes untrusted model-generated code in Python.\nAlthough it is highly unlikely that model-generated code will do something\novertly malicious in response to this test suite, model-generated code may act\ndestructively due to a lack of model capability or alignment.\nUsers are strongly encouraged to sandbox this evaluation suite so that it\ndoes not perform destructive actions on their host or network. For more\ninformation on how OpenAI sandboxes its code, see the paper "Evaluating Large\nLanguage Models Trained on Code" (https://arxiv.org/abs/2107.03374).\n\nOnce you have read this disclaimer and taken appropriate precautions,\nset the environment variable HF_ALLOW_CODE_EVAL="1". Within Python you can do this\nwith:\n\n>>> import os\n>>> os.environ["HF_ALLOW_CODE_EVAL"] = "1"\n\n################################################################################\\n'
__SCREAMING_SNAKE_CASE : Dict = 'The MIT License\n\nCopyright (c) OpenAI (https://openai.com)\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the "Software"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in\nall copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\nTHE SOFTWARE.'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class SCREAMING_SNAKE_CASE__ ( datasets.Metric ):
def __lowerCamelCase ( self ):
'''simple docstring'''
return datasets.MetricInfo(
# This is the description that will appear on the metrics page.
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Sequence(datasets.Value("""string""" ) ),
"""references""": datasets.Value("""string""" ),
} ) , homepage="""https://github.com/openai/human-eval""" , codebase_urls=["""https://github.com/openai/human-eval"""] , reference_urls=["""https://github.com/openai/human-eval"""] , license=_LICENSE , )
def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase=[1, 10, 100] , __UpperCamelCase=4 , __UpperCamelCase=3.0 ):
'''simple docstring'''
if os.getenv("""HF_ALLOW_CODE_EVAL""" , 0 ) != "1":
raise ValueError(_WARNING )
if os.name == "nt":
raise NotImplementedError("""This metric is currently not supported on Windows.""" )
with ThreadPoolExecutor(max_workers=__UpperCamelCase ) as executor:
__a : Union[str, Any] = []
__a : List[str] = Counter()
__a : Optional[int] = 0
__a : Tuple = defaultdict(__UpperCamelCase )
for task_id, (candidates, test_case) in enumerate(zip(__UpperCamelCase , __UpperCamelCase ) ):
for candidate in candidates:
__a : List[Any] = candidate + """\n""" + test_case
__a : List[str] = (test_program, timeout, task_id, completion_id[task_id])
__a : Dict = executor.submit(__UpperCamelCase , *__UpperCamelCase )
futures.append(__UpperCamelCase )
completion_id[task_id] += 1
n_samples += 1
for future in as_completed(__UpperCamelCase ):
__a : Dict = future.result()
results[result["task_id"]].append((result["""completion_id"""], result) )
__a , __a : Tuple = [], []
for result in results.values():
result.sort()
__a : Union[str, Any] = [r[1]["""passed"""] for r in result]
total.append(len(__UpperCamelCase ) )
correct.append(sum(__UpperCamelCase ) )
__a : Any = np.array(__UpperCamelCase )
__a : Tuple = np.array(__UpperCamelCase )
__a : List[Any] = k
__a : List[str] = {f"""pass@{k}""": estimate_pass_at_k(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ).mean() for k in ks if (total >= k).all()}
return pass_at_k, results
def _snake_case ( lowercase , lowercase , lowercase ) -> Any:
def estimator(lowercase , lowercase , lowercase ) -> float:
if n - c < k:
return 1.0
return 1.0 - np.prod(1.0 - k / np.arange(n - c + 1 , n + 1 ) )
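# `estimator` is the unbiased pass@k estimator from the Codex paper:
#   pass@k = 1 - C(n - c, k) / C(n, k)
# computed as a running product for numerical stability. For example, with
# n=5 samples of which c=2 pass, pass@1 = 1 - (3/4)*(4/5) = 0.4.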
if isinstance(lowercase , lowercase ):
__a : Optional[int] = itertools.repeat(lowercase , len(lowercase ) )
else:
assert len(lowercase ) == len(lowercase )
__a : Union[str, Any] = iter(lowercase )
return np.array([estimator(int(lowercase ) , int(lowercase ) , lowercase ) for n, c in zip(lowercase , lowercase )] ) | 697 |
'''simple docstring'''
__SCREAMING_SNAKE_CASE : int = 9.80_665
def _snake_case ( lowercase , lowercase , lowercase = g ) -> float:
if fluid_density <= 0:
raise ValueError("""Impossible fluid density""" )
if volume < 0:
raise ValueError("""Impossible Object volume""" )
if gravity <= 0:
raise ValueError("""Impossible Gravity""" )
return fluid_density * gravity * volume
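# Example: a fully submerged 0.5 m^3 body in water (density 1000 kg/m^3)
# experiences 1000 * 9.80665 * 0.5 = 4903.325 N of buoyant force.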
if __name__ == "__main__":
import doctest
# run doctest
doctest.testmod() | 697 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__SCREAMING_SNAKE_CASE : Dict = {'configuration_mmbt': ['MMBTConfig']}
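# Lazy-module pattern: the torch-backed classes below are only imported when an
# attribute is first accessed, while the TYPE_CHECKING branch still exposes the
# real symbols to static type checkers.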
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE : Tuple = ['MMBTForClassification', 'MMBTModel', 'ModalEmbeddings']
if TYPE_CHECKING:
from .configuration_mmbt import MMBTConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mmbt import MMBTForClassification, MMBTModel, ModalEmbeddings
else:
import sys
__SCREAMING_SNAKE_CASE : str = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) | 697 |
'''simple docstring'''
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DetrImageProcessor
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
def __init__( self , __UpperCamelCase , __UpperCamelCase=7 , __UpperCamelCase=3 , __UpperCamelCase=30 , __UpperCamelCase=400 , __UpperCamelCase=True , __UpperCamelCase=None , __UpperCamelCase=True , __UpperCamelCase=1 / 255 , __UpperCamelCase=True , __UpperCamelCase=[0.5, 0.5, 0.5] , __UpperCamelCase=[0.5, 0.5, 0.5] , __UpperCamelCase=True , ):
'''simple docstring'''
__a : List[Any] = size if size is not None else {"""shortest_edge""": 18, """longest_edge""": 1333}
__a : Dict = parent
__a : Union[str, Any] = batch_size
__a : Optional[int] = num_channels
__a : Dict = min_resolution
__a : List[Any] = max_resolution
__a : int = do_resize
__a : str = size
__a : Optional[Any] = do_rescale
__a : Optional[Any] = rescale_factor
__a : str = do_normalize
__a : Any = image_mean
__a : Optional[Any] = image_std
__a : Dict = do_pad
def __lowerCamelCase ( self ):
'''simple docstring'''
return {
"do_resize": self.do_resize,
"size": self.size,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_pad": self.do_pad,
}
def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase=False ):
'''simple docstring'''
if not batched:
__a : Union[str, Any] = image_inputs[0]
if isinstance(__UpperCamelCase , Image.Image ):
__a , __a : Tuple = image.size
else:
__a , __a : Tuple = image.shape[1], image.shape[2]
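# Shortest-edge resizing: the shorter side is scaled to size["shortest_edge"]
# and the longer side keeps the aspect ratio, e.g. an 18-pixel shortest edge
# turns a 30x400 (h x w) input into 18x240.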
if w < h:
__a : Optional[int] = int(self.size["""shortest_edge"""] * h / w )
__a : Tuple = self.size["""shortest_edge"""]
elif w > h:
__a : Optional[Any] = self.size["""shortest_edge"""]
__a : Any = int(self.size["""shortest_edge"""] * w / h )
else:
__a : Any = self.size["""shortest_edge"""]
__a : Optional[int] = self.size["""shortest_edge"""]
else:
__a : Any = []
for image in image_inputs:
__a , __a : Any = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
__a : List[Any] = max(__UpperCamelCase , key=lambda __UpperCamelCase : item[0] )[0]
__a : Optional[Any] = max(__UpperCamelCase , key=lambda __UpperCamelCase : item[1] )[1]
return expected_height, expected_width
@require_torch
@require_vision
class SCREAMING_SNAKE_CASE__ ( __UpperCamelCase , unittest.TestCase ):
lowercase__ = DetrImageProcessor if is_vision_available() else None
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : str = DetrImageProcessingTester(self )
@property
def __lowerCamelCase ( self ):
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Optional[int] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__UpperCamelCase , """image_mean""" ) )
self.assertTrue(hasattr(__UpperCamelCase , """image_std""" ) )
self.assertTrue(hasattr(__UpperCamelCase , """do_normalize""" ) )
self.assertTrue(hasattr(__UpperCamelCase , """do_rescale""" ) )
self.assertTrue(hasattr(__UpperCamelCase , """rescale_factor""" ) )
self.assertTrue(hasattr(__UpperCamelCase , """do_resize""" ) )
self.assertTrue(hasattr(__UpperCamelCase , """size""" ) )
self.assertTrue(hasattr(__UpperCamelCase , """do_pad""" ) )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Optional[Any] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"""shortest_edge""": 18, """longest_edge""": 1333} )
self.assertEqual(image_processor.do_pad , __UpperCamelCase )
__a : List[Any] = self.image_processing_class.from_dict(
self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=__UpperCamelCase )
self.assertEqual(image_processor.size , {"""shortest_edge""": 42, """longest_edge""": 84} )
self.assertEqual(image_processor.do_pad , __UpperCamelCase )
def __lowerCamelCase ( self ):
'''simple docstring'''
pass
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__a : Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__UpperCamelCase )
for image in image_inputs:
self.assertIsInstance(__UpperCamelCase , Image.Image )
# Test not batched input
__a : Optional[Any] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
__a , __a : Any = self.image_processor_tester.get_expected_values(__UpperCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
__a , __a : Optional[int] = self.image_processor_tester.get_expected_values(__UpperCamelCase , batched=__UpperCamelCase )
__a : Any = image_processing(__UpperCamelCase , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Dict = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__a : List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__UpperCamelCase , numpify=__UpperCamelCase )
for image in image_inputs:
self.assertIsInstance(__UpperCamelCase , np.ndarray )
# Test not batched input
__a : Dict = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
__a , __a : Any = self.image_processor_tester.get_expected_values(__UpperCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
__a : List[str] = image_processing(__UpperCamelCase , return_tensors="""pt""" ).pixel_values
__a , __a : str = self.image_processor_tester.get_expected_values(__UpperCamelCase , batched=__UpperCamelCase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Any = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__a : Union[str, Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__UpperCamelCase , torchify=__UpperCamelCase )
for image in image_inputs:
self.assertIsInstance(__UpperCamelCase , torch.Tensor )
# Test not batched input
__a : Any = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
__a , __a : Any = self.image_processor_tester.get_expected_values(__UpperCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
__a : List[str] = image_processing(__UpperCamelCase , return_tensors="""pt""" ).pixel_values
__a , __a : Any = self.image_processor_tester.get_expected_values(__UpperCamelCase , batched=__UpperCamelCase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : List[Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
with open("""./tests/fixtures/tests_samples/COCO/coco_annotations.txt""" , """r""" ) as f:
__a : Dict = json.loads(f.read() )
__a : Optional[int] = {"""image_id""": 3_9769, """annotations""": target}
# encode them
__a : List[str] = DetrImageProcessor.from_pretrained("""facebook/detr-resnet-50""" )
__a : Tuple = image_processing(images=__UpperCamelCase , annotations=__UpperCamelCase , return_tensors="""pt""" )
# verify pixel values
__a : Union[str, Any] = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding["""pixel_values"""].shape , __UpperCamelCase )
__a : List[str] = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] )
self.assertTrue(torch.allclose(encoding["""pixel_values"""][0, 0, 0, :3] , __UpperCamelCase , atol=1E-4 ) )
# verify area
__a : List[Any] = torch.tensor([5_8_8_7.9_6_0_0, 1_1_2_5_0.2_0_6_1, 4_8_9_3_5_3.8_4_3_8, 8_3_7_1_2_2.7_5_0_0, 1_4_7_9_6_7.5_1_5_6, 1_6_5_7_3_2.3_4_3_8] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""area"""] , __UpperCamelCase ) )
# verify boxes
__a : Optional[int] = torch.Size([6, 4] )
self.assertEqual(encoding["""labels"""][0]["""boxes"""].shape , __UpperCamelCase )
__a : Any = torch.tensor([0.5_5_0_3, 0.2_7_6_5, 0.0_6_0_4, 0.2_2_1_5] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""boxes"""][0] , __UpperCamelCase , atol=1E-3 ) )
# verify image_id
__a : Union[str, Any] = torch.tensor([3_9769] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""image_id"""] , __UpperCamelCase ) )
# verify is_crowd
__a : List[Any] = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""iscrowd"""] , __UpperCamelCase ) )
# verify class_labels
__a : Any = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""class_labels"""] , __UpperCamelCase ) )
# verify orig_size
__a : Any = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""orig_size"""] , __UpperCamelCase ) )
# verify size
__a : str = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""size"""] , __UpperCamelCase ) )
@slow
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : List[Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
with open("""./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt""" , """r""" ) as f:
__a : Tuple = json.loads(f.read() )
__a : str = {"""file_name""": """000000039769.png""", """image_id""": 3_9769, """segments_info""": target}
__a : int = pathlib.Path("""./tests/fixtures/tests_samples/COCO/coco_panoptic""" )
# encode them
__a : List[str] = DetrImageProcessor.from_pretrained("""facebook/detr-resnet-50-panoptic""" )
__a : Tuple = image_processing(images=__UpperCamelCase , annotations=__UpperCamelCase , masks_path=__UpperCamelCase , return_tensors="""pt""" )
# verify pixel values
__a : List[str] = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding["""pixel_values"""].shape , __UpperCamelCase )
__a : Any = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] )
self.assertTrue(torch.allclose(encoding["""pixel_values"""][0, 0, 0, :3] , __UpperCamelCase , atol=1E-4 ) )
# verify area
__a : Optional[Any] = torch.tensor([1_4_7_9_7_9.6_8_7_5, 1_6_5_5_2_7.0_4_6_9, 4_8_4_6_3_8.5_9_3_8, 1_1_2_9_2.9_3_7_5, 5_8_7_9.6_5_6_2, 7_6_3_4.1_1_4_7] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""area"""] , __UpperCamelCase ) )
# verify boxes
__a : Optional[Any] = torch.Size([6, 4] )
self.assertEqual(encoding["""labels"""][0]["""boxes"""].shape , __UpperCamelCase )
__a : List[str] = torch.tensor([0.2_6_2_5, 0.5_4_3_7, 0.4_6_8_8, 0.8_6_2_5] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""boxes"""][0] , __UpperCamelCase , atol=1E-3 ) )
# verify image_id
__a : List[str] = torch.tensor([3_9769] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""image_id"""] , __UpperCamelCase ) )
# verify is_crowd
__a : Optional[int] = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""iscrowd"""] , __UpperCamelCase ) )
# verify class_labels
__a : Optional[int] = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""class_labels"""] , __UpperCamelCase ) )
# verify masks
__a : Union[str, Any] = 82_2873
self.assertEqual(encoding["""labels"""][0]["""masks"""].sum().item() , __UpperCamelCase )
# verify orig_size
__a : str = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""orig_size"""] , __UpperCamelCase ) )
# verify size
__a : List[Any] = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""size"""] , __UpperCamelCase ) ) | 697 | 1 |
'''simple docstring'''
import re
import tempfile
from pathlib import Path
import pytest
import yaml
from datasets.utils.readme import ReadMe
# @pytest.fixture
# def example_yaml_structure():
__SCREAMING_SNAKE_CASE : Optional[Any] = yaml.safe_load(
'\\nname: ""\nallow_empty: false\nallow_empty_text: true\nsubsections:\n - name: "Dataset Card for X" # First-level markdown heading\n allow_empty: false\n allow_empty_text: true\n subsections:\n - name: "Table of Contents"\n allow_empty: false\n allow_empty_text: false\n subsections: null\n - name: "Dataset Description"\n allow_empty: false\n allow_empty_text: false\n subsections:\n - name: "Dataset Summary"\n allow_empty: false\n allow_empty_text: false\n subsections: null\n - name: "Supported Tasks and Leaderboards"\n allow_empty: true\n allow_empty_text: true\n subsections: null\n - name: Languages\n allow_empty: false\n allow_empty_text: true\n subsections: null\n'
)
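# The schema above drives README validation: each node names a heading, states
# whether its body text may be empty, and lists the subsections it must contain.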
__SCREAMING_SNAKE_CASE : Any = {
'name': 'root',
'text': '',
'is_empty_text': True,
'subsections': [
{
'name': 'Dataset Card for My Dataset',
'text': '',
'is_empty_text': True,
'subsections': [
{'name': 'Table of Contents', 'text': 'Some text here.', 'is_empty_text': False, 'subsections': []},
{
'name': 'Dataset Description',
'text': 'Some text here.',
'is_empty_text': False,
'subsections': [
{
'name': 'Dataset Summary',
'text': 'Some text here.',
'is_empty_text': False,
'subsections': [],
},
{
'name': 'Supported Tasks and Leaderboards',
'text': '',
'is_empty_text': True,
'subsections': [],
},
{'name': 'Languages', 'text': 'Language Text', 'is_empty_text': False, 'subsections': []},
],
},
],
}
],
}
__SCREAMING_SNAKE_CASE : List[str] = '\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n'
__SCREAMING_SNAKE_CASE : Union[str, Any] = '\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n#### Extra Ignored Subsection\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n'
__SCREAMING_SNAKE_CASE : Dict = {
'name': 'root',
'text': '',
'is_empty_text': True,
'subsections': [
{
'name': 'Dataset Card for My Dataset',
'text': '',
'is_empty_text': True,
'subsections': [
{'name': 'Table of Contents', 'text': 'Some text here.', 'is_empty_text': False, 'subsections': []},
{
'name': 'Dataset Description',
'text': 'Some text here.',
'is_empty_text': False,
'subsections': [
{
'name': 'Dataset Summary',
'text': 'Some text here.',
'is_empty_text': False,
'subsections': [
{
'name': 'Extra Ignored Subsection',
'text': '',
'is_empty_text': True,
'subsections': [],
}
],
},
{
'name': 'Supported Tasks and Leaderboards',
'text': '',
'is_empty_text': True,
'subsections': [],
},
{'name': 'Languages', 'text': 'Language Text', 'is_empty_text': False, 'subsections': []},
],
},
],
}
],
}
__SCREAMING_SNAKE_CASE : List[Any] = '\\n---\n---\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n'
__SCREAMING_SNAKE_CASE : List[Any] = (
'The following issues were found for the README at `{path}`:\n-\tEmpty YAML markers are present in the README.'
)
__SCREAMING_SNAKE_CASE : str = '\\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n'
__SCREAMING_SNAKE_CASE : List[Any] = (
'The following issues were found for the README at `{path}`:\n-\tNo YAML markers are present in the README.'
)
__SCREAMING_SNAKE_CASE : Tuple = '\\n---\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n'
__SCREAMING_SNAKE_CASE : Dict = 'The following issues were found for the README at `{path}`:\n-\tOnly the start of YAML tags present in the README.'
__SCREAMING_SNAKE_CASE : Optional[Any] = '\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n'
__SCREAMING_SNAKE_CASE : str = 'The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Dataset Summary` but it is empty.\n-\tExpected some text in section `Dataset Summary` but it is empty (text in subsections are ignored).'
__SCREAMING_SNAKE_CASE : Optional[Any] = '\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n'
__SCREAMING_SNAKE_CASE : Dict = 'The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Dataset Card for My Dataset` but it is empty.\n-\tSection `Dataset Card for My Dataset` expected the following subsections: `Table of Contents`, `Dataset Description`. Found \'None\'.'
__SCREAMING_SNAKE_CASE : Optional[int] = '\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Languages\nLanguage Text\n'
__SCREAMING_SNAKE_CASE : List[Any] = 'The following issues were found for the README at `{path}`:\n-\tSection `Dataset Description` is missing subsection: `Supported Tasks and Leaderboards`.'
__SCREAMING_SNAKE_CASE : int = '\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\n'
__SCREAMING_SNAKE_CASE : Optional[int] = 'The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Languages` but it is empty.'
__SCREAMING_SNAKE_CASE : List[Any] = '\\n---\nlanguage:\n- zh\n- en\n---\n\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n'
__SCREAMING_SNAKE_CASE : Optional[int] = 'The following issues were found for the README at `{path}`:\n-\tThe README has no first-level headings. One heading is expected. Skipping further validation for this README.'
__SCREAMING_SNAKE_CASE : Tuple = '\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n# Dataset Card My Dataset\n'
__SCREAMING_SNAKE_CASE : List[Any] = 'The following issues were found for the README at `{path}`:\n-\tThe README has several first-level headings: `Dataset Card for My Dataset`, `Dataset Card My Dataset`. Only one heading is expected. Skipping further validation for this README.'
__SCREAMING_SNAKE_CASE : Optional[int] = '\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n'
__SCREAMING_SNAKE_CASE : Optional[int] = 'The following issues were found for the README at `{path}`:\n-\tNo first-level heading starting with `Dataset Card for` found in README. Skipping further validation for this README.'
__SCREAMING_SNAKE_CASE : List[Any] = ''
__SCREAMING_SNAKE_CASE : int = 'The following issues were found for the README at `{path}`:\n-\tThe README has no first-level headings. One heading is expected. Skipping further validation for this README.\n-\tNo YAML markers are present in the README.'
__SCREAMING_SNAKE_CASE : Union[str, Any] = '\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n'
__SCREAMING_SNAKE_CASE : str = 'The following issues were found while parsing the README at `{path}`:\n-\tMultiple sections with the same heading `Dataset Card for My Dataset` have been found. Please keep only one of these sections.'
@pytest.mark.parametrize(
"""readme_md, expected_dict""" , [
(README_CORRECT, CORRECT_DICT),
(README_CORRECT_FOUR_LEVEL, CORRECT_DICT_FOUR_LEVEL),
] , )
def _snake_case ( lowercase , lowercase ) -> Optional[int]:
assert ReadMe.from_string(lowercase , lowercase ).to_dict() == expected_dict
@pytest.mark.parametrize(
"""readme_md, expected_error""" , [
(README_NO_YAML, EXPECTED_ERROR_README_NO_YAML),
(README_EMPTY_YAML, EXPECTED_ERROR_README_EMPTY_YAML),
(README_INCORRECT_YAML, EXPECTED_ERROR_README_INCORRECT_YAML),
(README_EMPTY, EXPECTED_ERROR_README_EMPTY),
(README_NONE_SUBSECTION, EXPECTED_ERROR_README_NONE_SUBSECTION),
(README_MISSING_FIRST_LEVEL, EXPECTED_ERROR_README_MISSING_FIRST_LEVEL),
(README_MISSING_SUBSECTION, EXPECTED_ERROR_README_MISSING_SUBSECTION),
(README_MISSING_TEXT, EXPECTED_ERROR_README_MISSING_TEXT),
(README_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_WRONG_FIRST_LEVEL),
(README_MULTIPLE_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL),
(README_MISSING_CONTENT, EXPECTED_ERROR_README_MISSING_CONTENT),
] , )
def _snake_case ( lowercase , lowercase ) -> Optional[Any]:
with pytest.raises(lowercase , match=re.escape(expected_error.format(path="""root""" ) ) ):
__a : Any = ReadMe.from_string(lowercase , lowercase )
readme.validate()
@pytest.mark.parametrize(
"""readme_md, expected_error""" , [
(README_MULTIPLE_SAME_HEADING_1, EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1),
] , )
def _snake_case ( lowercase , lowercase ) -> Union[str, Any]:
with pytest.raises(lowercase , match=re.escape(expected_error.format(path="""root""" ) ) ):
ReadMe.from_string(lowercase , lowercase )
@pytest.mark.parametrize(
"""readme_md,""" , [
(README_MULTIPLE_SAME_HEADING_1),
] , )
def _snake_case ( lowercase ) -> Optional[Any]:
ReadMe.from_string(lowercase , lowercase , suppress_parsing_errors=lowercase )
@pytest.mark.parametrize(
"""readme_md, expected_dict""" , [
(README_CORRECT, CORRECT_DICT),
(README_CORRECT_FOUR_LEVEL, CORRECT_DICT_FOUR_LEVEL),
] , )
def _snake_case ( lowercase , lowercase ) -> Optional[int]:
with tempfile.TemporaryDirectory() as tmp_dir:
__a : Dict = Path(lowercase ) / """README.md"""
with open(lowercase , """w+""" ) as readme_file:
readme_file.write(lowercase )
__a : Any = ReadMe.from_readme(lowercase , lowercase ).to_dict()
assert out["name"] == path
assert out["text"] == ""
assert out["is_empty_text"]
assert out["subsections"] == expected_dict["subsections"]
@pytest.mark.parametrize(
"""readme_md, expected_error""" , [
(README_NO_YAML, EXPECTED_ERROR_README_NO_YAML),
(README_EMPTY_YAML, EXPECTED_ERROR_README_EMPTY_YAML),
(README_INCORRECT_YAML, EXPECTED_ERROR_README_INCORRECT_YAML),
(README_EMPTY, EXPECTED_ERROR_README_EMPTY),
(README_NONE_SUBSECTION, EXPECTED_ERROR_README_NONE_SUBSECTION),
(README_MISSING_FIRST_LEVEL, EXPECTED_ERROR_README_MISSING_FIRST_LEVEL),
(README_MISSING_SUBSECTION, EXPECTED_ERROR_README_MISSING_SUBSECTION),
(README_MISSING_TEXT, EXPECTED_ERROR_README_MISSING_TEXT),
(README_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_WRONG_FIRST_LEVEL),
(README_MULTIPLE_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL),
(README_MISSING_CONTENT, EXPECTED_ERROR_README_MISSING_CONTENT),
] , )
def _snake_case ( lowercase , lowercase ) -> Union[str, Any]:
with tempfile.TemporaryDirectory() as tmp_dir:
__a : List[str] = Path(lowercase ) / """README.md"""
with open(lowercase , """w+""" ) as readme_file:
readme_file.write(lowercase )
__a : Optional[Any] = expected_error.format(path=lowercase )
with pytest.raises(lowercase , match=re.escape(lowercase ) ):
__a : int = ReadMe.from_readme(lowercase , lowercase )
readme.validate()
@pytest.mark.parametrize(
"""readme_md, expected_error""" , [
(README_MULTIPLE_SAME_HEADING_1, EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1),
] , )
def _snake_case ( lowercase , lowercase ) -> Tuple:
with tempfile.TemporaryDirectory() as tmp_dir:
__a : Union[str, Any] = Path(lowercase ) / """README.md"""
with open(lowercase , """w+""" ) as readme_file:
readme_file.write(lowercase )
__a : Optional[Any] = expected_error.format(path=lowercase )
with pytest.raises(lowercase , match=re.escape(lowercase ) ):
ReadMe.from_readme(lowercase , lowercase )
@pytest.mark.parametrize(
"""readme_md,""" , [
(README_MULTIPLE_SAME_HEADING_1),
] , )
def _snake_case ( lowercase ) -> Any:
with tempfile.TemporaryDirectory() as tmp_dir:
__a : Tuple = Path(lowercase ) / """README.md"""
with open(lowercase , """w+""" ) as readme_file:
readme_file.write(lowercase )
ReadMe.from_readme(lowercase , lowercase , suppress_parsing_errors=lowercase ) | 697 |
'''simple docstring'''
import argparse
import logging
import os
import time
import timeit
import datasets
import numpy as np
import pycuda.autoinit # noqa: F401
import pycuda.driver as cuda
import tensorrt as trt
import torch
from absl import logging as absl_logging
from accelerate import Accelerator
from datasets import load_dataset, load_metric
from torch.utils.data import DataLoader
from utils_qa import postprocess_qa_predictions
import transformers
from transformers import AutoTokenizer, EvalPrediction, default_data_collator, set_seed
from transformers.trainer_pt_utils import nested_concat, nested_truncate
__SCREAMING_SNAKE_CASE : Optional[int] = trt.Logger(trt.Logger.WARNING)
__SCREAMING_SNAKE_CASE : Tuple = absl_logging.get_absl_logger()
absl_logger.setLevel(logging.WARNING)
__SCREAMING_SNAKE_CASE : Any = logging.getLogger(__name__)
__SCREAMING_SNAKE_CASE : int = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--onnx_model_path',
default=None,
type=str,
required=True,
    help='Path to the ONNX model.',
)
parser.add_argument(
'--output_dir',
default=None,
type=str,
required=True,
help='The output directory where the model checkpoints and predictions will be written.',
)
# Other parameters
parser.add_argument(
'--tokenizer_name',
default='',
type=str,
required=True,
help='Pretrained tokenizer name or path if not the same as model_name',
)
parser.add_argument(
'--version_2_with_negative',
action='store_true',
help='If true, the SQuAD examples contain some that do not have an answer.',
)
parser.add_argument(
'--null_score_diff_threshold',
type=float,
default=0.0,
help='If null_score - best_non_null is greater than the threshold predict null.',
)
parser.add_argument(
'--max_seq_length',
default=384,
type=int,
help=(
'The maximum total input sequence length after WordPiece tokenization. Sequences '
'longer than this will be truncated, and sequences shorter than this will be padded.'
),
)
parser.add_argument(
'--doc_stride',
default=128,
type=int,
help='When splitting up a long document into chunks, how much stride to take between chunks.',
)
parser.add_argument('--per_device_eval_batch_size', default=8, type=int, help='Batch size per GPU/CPU for evaluation.')
parser.add_argument(
'--n_best_size',
default=20,
type=int,
help='The total number of n-best predictions to generate in the nbest_predictions.json output file.',
)
parser.add_argument(
'--max_answer_length',
default=30,
type=int,
help=(
'The maximum length of an answer that can be generated. This is needed because the start '
'and end predictions are not conditioned on one another.'
),
)
parser.add_argument('--seed', type=int, default=42, help='random seed for initialization')
parser.add_argument(
'--dataset_name',
type=str,
default=None,
required=True,
help='The name of the dataset to use (via the datasets library).',
)
parser.add_argument(
'--dataset_config_name',
type=str,
default=None,
help='The configuration name of the dataset to use (via the datasets library).',
)
parser.add_argument(
    '--preprocessing_num_workers', type=int, default=4, help='The number of processes to use for the preprocessing.'
)
parser.add_argument('--overwrite_cache', action='store_true', help='Overwrite the cached training and evaluation sets')
parser.add_argument(
'--fp16',
action='store_true',
help='Whether to use 16-bit (mixed) precision instead of 32-bit',
)
parser.add_argument(
'--int8',
action='store_true',
help='Whether to use INT8',
)
__SCREAMING_SNAKE_CASE : Optional[int] = parser.parse_args()
if args.tokenizer_name:
__SCREAMING_SNAKE_CASE : str = AutoTokenizer.from_pretrained(args.tokenizer_name, use_fast=True)
else:
raise ValueError(
        'You are instantiating a new tokenizer from scratch. This is not supported by this script. '
'You can do it from another script, save it, and load it from here, using --tokenizer_name.'
)
logger.info('Training/evaluation parameters %s', args)
__SCREAMING_SNAKE_CASE : List[Any] = args.per_device_eval_batch_size
__SCREAMING_SNAKE_CASE : int = (args.eval_batch_size, args.max_seq_length)
# TRT Engine properties
__SCREAMING_SNAKE_CASE : Optional[Any] = True
__SCREAMING_SNAKE_CASE : Tuple = 'temp_engine/bert-fp32.engine'
if args.fpaa:
__SCREAMING_SNAKE_CASE : Dict = 'temp_engine/bert-fp16.engine'
if args.inta:
__SCREAMING_SNAKE_CASE : Tuple = 'temp_engine/bert-int8.engine'
# import ONNX file
if not os.path.exists('temp_engine'):
os.makedirs('temp_engine')
__SCREAMING_SNAKE_CASE : Optional[Any] = 1 << (int)(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)
with trt.Builder(TRT_LOGGER) as builder, builder.create_network(EXPLICIT_BATCH) as network, trt.OnnxParser(
network, TRT_LOGGER
) as parser:
with open(args.onnx_model_path, 'rb') as model:
if not parser.parse(model.read()):
for error in range(parser.num_errors):
print(parser.get_error(error))
# Query input names and shapes from parsed TensorRT network
__SCREAMING_SNAKE_CASE : List[Any] = [network.get_input(i) for i in range(network.num_inputs)]
__SCREAMING_SNAKE_CASE : List[Any] = [_input.name for _input in network_inputs] # ex: ["actual_input1"]
with builder.create_builder_config() as config:
__SCREAMING_SNAKE_CASE : Tuple = 1 << 50
if STRICT_TYPES:
config.set_flag(trt.BuilderFlag.STRICT_TYPES)
if args.fpaa:
config.set_flag(trt.BuilderFlag.FPaa)
if args.inta:
config.set_flag(trt.BuilderFlag.INTa)
__SCREAMING_SNAKE_CASE : Dict = builder.create_optimization_profile()
config.add_optimization_profile(profile)
for i in range(len(input_names)):
profile.set_shape(input_names[i], INPUT_SHAPE, INPUT_SHAPE, INPUT_SHAPE)
__SCREAMING_SNAKE_CASE : Union[str, Any] = builder.build_engine(network, config)
# serialize_engine and store in file (can be directly loaded and deserialized):
with open(engine_name, 'wb') as f:
f.write(engine.serialize())
def _snake_case ( lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase ) -> List[Any]:
__a : Dict = np.asarray(inputs["""input_ids"""] , dtype=np.intaa )
__a : List[Any] = np.asarray(inputs["""attention_mask"""] , dtype=np.intaa )
__a : str = np.asarray(inputs["""token_type_ids"""] , dtype=np.intaa )
# Copy inputs
cuda.memcpy_htod_async(d_inputs[0] , input_ids.ravel() , lowercase )
cuda.memcpy_htod_async(d_inputs[1] , attention_mask.ravel() , lowercase )
cuda.memcpy_htod_async(d_inputs[2] , token_type_ids.ravel() , lowercase )
# start time
__a : Optional[Any] = time.time()
# Run inference
context.execute_async(
bindings=[int(lowercase ) for d_inp in d_inputs] + [int(lowercase ), int(lowercase )] , stream_handle=stream.handle )
# Transfer predictions back from GPU
cuda.memcpy_dtoh_async(lowercase , lowercase , lowercase )
cuda.memcpy_dtoh_async(lowercase , lowercase , lowercase )
# Synchronize the stream and take time
stream.synchronize()
# end time
__a : str = time.time()
__a : Any = end_time - start_time
__a : Optional[int] = (h_outputa, h_outputa)
# print(outputs)
return outputs, infer_time
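# `infer_time` is measured from just after the host-to-device copies until
# `stream.synchronize()` returns, so it covers kernel execution plus the
# device-to-host copies of both logit buffers.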
# Initialize the accelerator. We will let the accelerator handle device placement for us in this example.
__SCREAMING_SNAKE_CASE : Optional[Any] = Accelerator()
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
datefmt='%m/%d/%Y %H:%M:%S',
level=logging.INFO,
)
# Setup logging, we only want one process per machine to log things on the screen.
# accelerator.is_local_main_process is only True for one process per machine.
logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR)
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_info()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
# If passed along, set the training seed now.
if args.seed is not None:
set_seed(args.seed)
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
if args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
__SCREAMING_SNAKE_CASE : List[str] = load_dataset(args.dataset_name, args.dataset_config_name)
else:
raise ValueError('Evaluation requires a dataset name')
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Preprocessing the datasets.
# Preprocessing is slightly different for training and evaluation.
__SCREAMING_SNAKE_CASE : int = raw_datasets['validation'].column_names
__SCREAMING_SNAKE_CASE : Tuple = 'question' if 'question' in column_names else column_names[0]
__SCREAMING_SNAKE_CASE : List[Any] = 'context' if 'context' in column_names else column_names[1]
__SCREAMING_SNAKE_CASE : Tuple = 'answers' if 'answers' in column_names else column_names[2]
# Padding side determines if we do (question|context) or (context|question).
__SCREAMING_SNAKE_CASE : Tuple = tokenizer.padding_side == 'right'
if args.max_seq_length > tokenizer.model_max_length:
logger.warning(
f'''The max_seq_length passed ({args.max_seq_length}) is larger than the maximum length for the'''
f'''model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.'''
)
__SCREAMING_SNAKE_CASE : Dict = min(args.max_seq_length, tokenizer.model_max_length)
def _snake_case ( lowercase ) -> Tuple:
# Some of the questions have lots of whitespace on the left, which is not useful and will make the
# truncation of the context fail (the tokenized question will take a lot of space). So we remove that
# left whitespace
__a : Optional[Any] = [q.lstrip() for q in examples[question_column_name]]
# Tokenize our examples with truncation and maybe padding, but keep the overflows using a stride. This results
# in one example possibly giving several features when a context is long, each of those features having a
# context that overlaps a bit the context of the previous feature.
__a : Optional[int] = tokenizer(
examples[question_column_name if pad_on_right else context_column_name] , examples[context_column_name if pad_on_right else question_column_name] , truncation="""only_second""" if pad_on_right else """only_first""" , max_length=lowercase , stride=args.doc_stride , return_overflowing_tokens=lowercase , return_offsets_mapping=lowercase , padding="""max_length""" , )
# Since one example might give us several features if it has a long context, we need a map from a feature to
# its corresponding example. This key gives us just that.
__a : Optional[Any] = tokenized_examples.pop("""overflow_to_sample_mapping""" )
# For evaluation, we will need to convert our predictions to substrings of the context, so we keep the
# corresponding example_id and we will store the offset mappings.
__a : Optional[Any] = []
for i in range(len(tokenized_examples["""input_ids"""] ) ):
# Grab the sequence corresponding to that example (to know what is the context and what is the question).
__a : Dict = tokenized_examples.sequence_ids(lowercase )
__a : Optional[Any] = 1 if pad_on_right else 0
# One example can give several spans, this is the index of the example containing this span of text.
__a : Union[str, Any] = sample_mapping[i]
tokenized_examples["example_id"].append(examples["""id"""][sample_index] )
# Set to None the offset_mapping entries that are not part of the context so it's easy to determine if a token
# position is part of the context or not.
__a : int = [
(o if sequence_ids[k] == context_index else None)
for k, o in enumerate(tokenized_examples["""offset_mapping"""][i] )
]
return tokenized_examples
__SCREAMING_SNAKE_CASE : int = raw_datasets['validation']
# Validation Feature Creation
__SCREAMING_SNAKE_CASE : Union[str, Any] = eval_examples.map(
prepare_validation_features,
batched=True,
num_proc=args.preprocessing_num_workers,
remove_columns=column_names,
load_from_cache_file=not args.overwrite_cache,
desc='Running tokenizer on validation dataset',
)
__SCREAMING_SNAKE_CASE : List[Any] = default_data_collator
__SCREAMING_SNAKE_CASE : Union[str, Any] = eval_dataset.remove_columns(['example_id', 'offset_mapping'])
__SCREAMING_SNAKE_CASE : List[str] = DataLoader(
eval_dataset_for_model, collate_fn=data_collator, batch_size=args.per_device_eval_batch_size
)
def _snake_case ( lowercase , lowercase , lowercase , lowercase="eval" ) -> Any:
# Post-processing: we match the start logits and end logits to answers in the original context.
__a : List[str] = postprocess_qa_predictions(
examples=lowercase , features=lowercase , predictions=lowercase , version_2_with_negative=args.version_2_with_negative , n_best_size=args.n_best_size , max_answer_length=args.max_answer_length , null_score_diff_threshold=args.null_score_diff_threshold , output_dir=args.output_dir , prefix=lowercase , )
# Format the result to the format the metric expects.
if args.version_2_with_negative:
__a : List[str] = [
{"""id""": k, """prediction_text""": v, """no_answer_probability""": 0.0} for k, v in predictions.items()
]
else:
__a : List[str] = [{"""id""": k, """prediction_text""": v} for k, v in predictions.items()]
__a : Optional[Any] = [{"""id""": ex["""id"""], """answers""": ex[answer_column_name]} for ex in examples]
return EvalPrediction(predictions=lowercase , label_ids=lowercase )
__SCREAMING_SNAKE_CASE : List[Any] = load_metric('squad_v2' if args.version_2_with_negative else 'squad')
# Evaluation!
logger.info('Loading ONNX model %s for evaluation', args.onnx_model_path)
with open(engine_name, 'rb') as f, trt.Runtime(TRT_LOGGER) as runtime, runtime.deserialize_cuda_engine(
f.read()
) as engine, engine.create_execution_context() as context:
    # setup for TRT inference
for i in range(len(input_names)):
context.set_binding_shape(i, INPUT_SHAPE)
assert context.all_binding_shapes_specified
def _snake_case ( lowercase ) -> Optional[int]:
return trt.volume(engine.get_binding_shape(lowercase ) ) * engine.get_binding_dtype(lowercase ).itemsize
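    # (byte size of a binding: product of its shape dimensions times the dtype's item size)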
# Allocate device memory for inputs and outputs.
__SCREAMING_SNAKE_CASE : List[str] = [cuda.mem_alloc(binding_nbytes(binding)) for binding in engine if engine.binding_is_input(binding)]
# Allocate output buffer
__SCREAMING_SNAKE_CASE : str = cuda.pagelocked_empty(tuple(context.get_binding_shape(3)), dtype=np.floataa)
__SCREAMING_SNAKE_CASE : Union[str, Any] = cuda.pagelocked_empty(tuple(context.get_binding_shape(4)), dtype=np.floataa)
__SCREAMING_SNAKE_CASE : str = cuda.mem_alloc(h_outputa.nbytes)
__SCREAMING_SNAKE_CASE : Tuple = cuda.mem_alloc(h_outputa.nbytes)
# Create a stream in which to copy inputs/outputs and run inference.
__SCREAMING_SNAKE_CASE : Tuple = cuda.Stream()
# Evaluation
logger.info('***** Running Evaluation *****')
logger.info(f''' Num examples = {len(eval_dataset)}''')
logger.info(f''' Batch size = {args.per_device_eval_batch_size}''')
__SCREAMING_SNAKE_CASE : Union[str, Any] = 0.0
__SCREAMING_SNAKE_CASE : str = 0
__SCREAMING_SNAKE_CASE : str = timeit.default_timer()
__SCREAMING_SNAKE_CASE : Dict = None
for step, batch in enumerate(eval_dataloader):
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : Union[str, Any] = model_infer(batch, context, d_inputs, h_outputa, h_outputa, d_outputa, d_outputa, stream)
total_time += infer_time
niter += 1
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : Optional[Any] = outputs
__SCREAMING_SNAKE_CASE : Union[str, Any] = torch.tensor(start_logits)
__SCREAMING_SNAKE_CASE : Tuple = torch.tensor(end_logits)
# necessary to pad predictions and labels for being gathered
__SCREAMING_SNAKE_CASE : Optional[int] = accelerator.pad_across_processes(start_logits, dim=1, pad_index=-100)
__SCREAMING_SNAKE_CASE : Dict = accelerator.pad_across_processes(end_logits, dim=1, pad_index=-100)
__SCREAMING_SNAKE_CASE : List[str] = (accelerator.gather(start_logits).cpu().numpy(), accelerator.gather(end_logits).cpu().numpy())
__SCREAMING_SNAKE_CASE : List[str] = logits if all_preds is None else nested_concat(all_preds, logits, padding_index=-100)
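    # gathering pads each process's logits to a common length; after the loop the
    # concatenated predictions are truncated back to the true dataset size.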
if all_preds is not None:
__SCREAMING_SNAKE_CASE : Tuple = nested_truncate(all_preds, len(eval_dataset))
__SCREAMING_SNAKE_CASE : str = timeit.default_timer() - start_time
logger.info(' Evaluation done in total %f secs (%f sec per example)', evalTime, evalTime / len(eval_dataset))
# Inference time from TRT
logger.info('Average Inference Time = {:.3f} ms'.format(total_time * 1_000 / niter))
logger.info('Total Inference Time = {:.3f} ms'.format(total_time * 1_000))
logger.info('Total Number of Inference = %d', niter)
__SCREAMING_SNAKE_CASE : Optional[int] = post_processing_function(eval_examples, eval_dataset, all_preds)
__SCREAMING_SNAKE_CASE : List[Any] = metric.compute(predictions=prediction.predictions, references=prediction.label_ids)
logger.info(f'''Evaluation metrics: {eval_metric}''') | 697 | 1 |
'''simple docstring'''
import tempfile
import unittest
import numpy as np
from diffusers import (
DDIMScheduler,
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionPipeline,
PNDMScheduler,
)
from diffusers.utils.testing_utils import is_onnx_available, nightly, require_onnxruntime, require_torch_gpu
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class SCREAMING_SNAKE_CASE__ ( __UpperCamelCase , unittest.TestCase ):
lowercase__ = "hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline"
def __lowerCamelCase ( self , __UpperCamelCase=0 ):
'''simple docstring'''
__a : Tuple = np.random.RandomState(__UpperCamelCase )
__a : int = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""generator""": generator,
"""num_inference_steps""": 2,
"""guidance_scale""": 7.5,
"""output_type""": """numpy""",
}
return inputs
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Tuple = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
pipe.set_progress_bar_config(disable=__UpperCamelCase )
__a : Tuple = self.get_dummy_inputs()
__a : Union[str, Any] = pipe(**__UpperCamelCase ).images
__a : Optional[Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
__a : Dict = np.array([0.6_5_0_7_2, 0.5_8_4_9_2, 0.4_8_2_1_9, 0.5_5_5_2_1, 0.5_3_1_8_0, 0.5_5_9_3_9, 0.5_0_6_9_7, 0.3_9_8_0_0, 0.4_6_4_5_5] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : str = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
__a : Union[str, Any] = PNDMScheduler.from_config(pipe.scheduler.config , skip_prk_steps=__UpperCamelCase )
pipe.set_progress_bar_config(disable=__UpperCamelCase )
__a : Union[str, Any] = self.get_dummy_inputs()
__a : Optional[int] = pipe(**__UpperCamelCase ).images
__a : List[str] = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
__a : List[str] = np.array([0.6_5_8_6_3, 0.5_9_4_2_5, 0.4_9_3_2_6, 0.5_6_3_1_3, 0.5_3_8_7_5, 0.5_6_6_2_7, 0.5_1_0_6_5, 0.3_9_7_7_7, 0.4_6_3_3_0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Any = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
__a : int = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=__UpperCamelCase )
__a : Dict = self.get_dummy_inputs()
__a : Dict = pipe(**__UpperCamelCase ).images
__a : Union[str, Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
__a : str = np.array([0.5_3_7_5_5, 0.6_0_7_8_6, 0.4_7_4_0_2, 0.4_9_4_8_8, 0.5_1_8_6_9, 0.4_9_8_1_9, 0.4_7_9_8_5, 0.3_8_9_5_7, 0.4_4_2_7_9] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : List[str] = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
__a : int = EulerDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=__UpperCamelCase )
__a : Tuple = self.get_dummy_inputs()
__a : List[Any] = pipe(**__UpperCamelCase ).images
__a : Union[str, Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
__a : Optional[Any] = np.array([0.5_3_7_5_5, 0.6_0_7_8_6, 0.4_7_4_0_2, 0.4_9_4_8_8, 0.5_1_8_6_9, 0.4_9_8_1_9, 0.4_7_9_8_5, 0.3_8_9_5_7, 0.4_4_2_7_9] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : List[str] = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
__a : int = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=__UpperCamelCase )
__a : int = self.get_dummy_inputs()
__a : List[str] = pipe(**__UpperCamelCase ).images
__a : Tuple = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
__a : int = np.array([0.5_3_8_1_7, 0.6_0_8_1_2, 0.4_7_3_8_4, 0.4_9_5_3_0, 0.5_1_8_9_4, 0.4_9_8_1_4, 0.4_7_9_8_4, 0.3_8_9_5_8, 0.4_4_2_7_1] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Tuple = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
__a : Union[str, Any] = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=__UpperCamelCase )
__a : int = self.get_dummy_inputs()
__a : Optional[int] = pipe(**__UpperCamelCase ).images
__a : List[Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
__a : Dict = np.array([0.5_3_8_9_5, 0.6_0_8_0_8, 0.4_7_9_3_3, 0.4_9_6_0_8, 0.5_1_8_8_6, 0.4_9_9_5_0, 0.4_8_0_5_3, 0.3_8_9_5_7, 0.4_4_2_0_0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : List[Any] = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
pipe.set_progress_bar_config(disable=__UpperCamelCase )
__a : Tuple = self.get_dummy_inputs()
__a : Optional[int] = 3 * [inputs["""prompt"""]]
# forward
__a : Union[str, Any] = pipe(**__UpperCamelCase )
__a : Optional[Any] = output.images[0, -3:, -3:, -1]
__a : Dict = self.get_dummy_inputs()
__a : int = 3 * [inputs.pop("""prompt""" )]
__a : Union[str, Any] = pipe.tokenizer(
__UpperCamelCase , padding="""max_length""" , max_length=pipe.tokenizer.model_max_length , truncation=__UpperCamelCase , return_tensors="""np""" , )
__a : Tuple = text_inputs["""input_ids"""]
__a : int = pipe.text_encoder(input_ids=text_inputs.astype(np.intaa ) )[0]
__a : Union[str, Any] = prompt_embeds
# forward
__a : Union[str, Any] = pipe(**__UpperCamelCase )
__a : Optional[Any] = output.images[0, -3:, -3:, -1]
assert np.abs(image_slice_a.flatten() - image_slice_a.flatten() ).max() < 1E-4
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : int = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
pipe.set_progress_bar_config(disable=__UpperCamelCase )
__a : str = self.get_dummy_inputs()
__a : Dict = 3 * ["""this is a negative prompt"""]
__a : int = negative_prompt
__a : str = 3 * [inputs["""prompt"""]]
# forward
__a : Tuple = pipe(**__UpperCamelCase )
__a : Optional[Any] = output.images[0, -3:, -3:, -1]
__a : Any = self.get_dummy_inputs()
__a : str = 3 * [inputs.pop("""prompt""" )]
__a : int = []
for p in [prompt, negative_prompt]:
__a : str = pipe.tokenizer(
__UpperCamelCase , padding="""max_length""" , max_length=pipe.tokenizer.model_max_length , truncation=__UpperCamelCase , return_tensors="""np""" , )
__a : Optional[int] = text_inputs["""input_ids"""]
embeds.append(pipe.text_encoder(input_ids=text_inputs.astype(np.intaa ) )[0] )
__a , __a : str = embeds
# forward
__a : List[Any] = pipe(**__UpperCamelCase )
__a : str = output.images[0, -3:, -3:, -1]
assert np.abs(image_slice_a.flatten() - image_slice_a.flatten() ).max() < 1E-4
@nightly
@require_onnxruntime
@require_torch_gpu
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
@property
def __lowerCamelCase ( self ):
'''simple docstring'''
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Optional[int] = ort.SessionOptions()
__a : int = False
return options
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Union[str, Any] = OnnxStableDiffusionPipeline.from_pretrained(
"""CompVis/stable-diffusion-v1-4""" , revision="""onnx""" , safety_checker=__UpperCamelCase , feature_extractor=__UpperCamelCase , provider=self.gpu_provider , sess_options=self.gpu_options , )
sd_pipe.set_progress_bar_config(disable=__UpperCamelCase )
__a : Any = """A painting of a squirrel eating a burger"""
np.random.seed(0 )
__a : List[Any] = sd_pipe([prompt] , guidance_scale=6.0 , num_inference_steps=10 , output_type="""np""" )
__a : Any = output.images
__a : Dict = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
__a : Dict = np.array([0.0_4_5_2, 0.0_3_9_0, 0.0_0_8_7, 0.0_3_5_0, 0.0_6_1_7, 0.0_3_6_4, 0.0_5_4_4, 0.0_5_2_3, 0.0_7_2_0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : List[str] = DDIMScheduler.from_pretrained(
"""runwayml/stable-diffusion-v1-5""" , subfolder="""scheduler""" , revision="""onnx""" )
__a : Optional[Any] = OnnxStableDiffusionPipeline.from_pretrained(
"""runwayml/stable-diffusion-v1-5""" , revision="""onnx""" , scheduler=__UpperCamelCase , safety_checker=__UpperCamelCase , feature_extractor=__UpperCamelCase , provider=self.gpu_provider , sess_options=self.gpu_options , )
sd_pipe.set_progress_bar_config(disable=__UpperCamelCase )
__a : Optional[Any] = """open neural network exchange"""
__a : Union[str, Any] = np.random.RandomState(0 )
__a : List[Any] = sd_pipe([prompt] , guidance_scale=7.5 , num_inference_steps=10 , generator=__UpperCamelCase , output_type="""np""" )
__a : Union[str, Any] = output.images
__a : int = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
__a : Optional[Any] = np.array([0.2_8_6_7, 0.1_9_7_4, 0.1_4_8_1, 0.7_2_9_4, 0.7_2_5_1, 0.6_6_6_7, 0.4_1_9_4, 0.5_6_4_2, 0.6_4_8_6] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Union[str, Any] = LMSDiscreteScheduler.from_pretrained(
"""runwayml/stable-diffusion-v1-5""" , subfolder="""scheduler""" , revision="""onnx""" )
__a : Union[str, Any] = OnnxStableDiffusionPipeline.from_pretrained(
"""runwayml/stable-diffusion-v1-5""" , revision="""onnx""" , scheduler=__UpperCamelCase , safety_checker=__UpperCamelCase , feature_extractor=__UpperCamelCase , provider=self.gpu_provider , sess_options=self.gpu_options , )
sd_pipe.set_progress_bar_config(disable=__UpperCamelCase )
__a : str = """open neural network exchange"""
__a : str = np.random.RandomState(0 )
__a : Tuple = sd_pipe([prompt] , guidance_scale=7.5 , num_inference_steps=10 , generator=__UpperCamelCase , output_type="""np""" )
__a : str = output.images
__a : int = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
__a : List[Any] = np.array([0.2_3_0_6, 0.1_9_5_9, 0.1_5_9_3, 0.6_5_4_9, 0.6_3_9_4, 0.5_4_0_8, 0.5_0_6_5, 0.6_0_1_0, 0.6_1_6_1] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Dict = 0
def test_callback_fn(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> None:
__a : int = True
nonlocal number_of_steps
number_of_steps += 1
if step == 0:
assert latents.shape == (1, 4, 64, 64)
__a : List[str] = latents[0, -3:, -3:, -1]
__a : str = np.array(
[-0.6_7_7_2, -0.3_8_3_5, -1.2_4_5_6, 0.1_9_0_5, -1.0_9_7_4, 0.6_9_6_7, -1.9_3_5_3, 0.0_1_7_8, 1.0_1_6_7] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 1E-3
elif step == 5:
assert latents.shape == (1, 4, 64, 64)
__a : Optional[int] = latents[0, -3:, -3:, -1]
__a : Any = np.array(
[-0.3_3_5_1, 0.2_2_4_1, -0.1_8_3_7, -0.2_3_2_5, -0.6_5_7_7, 0.3_3_9_3, -0.0_2_4_1, 0.5_8_9_9, 1.3_8_7_5] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 1E-3
__a : Any = False
__a : List[Any] = OnnxStableDiffusionPipeline.from_pretrained(
"""runwayml/stable-diffusion-v1-5""" , revision="""onnx""" , safety_checker=__UpperCamelCase , feature_extractor=__UpperCamelCase , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=__UpperCamelCase )
__a : int = """Andromeda galaxy in a bottle"""
__a : List[Any] = np.random.RandomState(0 )
pipe(
prompt=__UpperCamelCase , num_inference_steps=5 , guidance_scale=7.5 , generator=__UpperCamelCase , callback=__UpperCamelCase , callback_steps=1 , )
assert test_callback_fn.has_been_called
assert number_of_steps == 6
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Tuple = OnnxStableDiffusionPipeline.from_pretrained(
"""runwayml/stable-diffusion-v1-5""" , revision="""onnx""" , safety_checker=__UpperCamelCase , feature_extractor=__UpperCamelCase , provider=self.gpu_provider , sess_options=self.gpu_options , )
assert isinstance(__UpperCamelCase , __UpperCamelCase )
assert pipe.safety_checker is None
__a : Tuple = pipe("""example prompt""" , num_inference_steps=2 ).images[0]
assert image is not None
# check that there's no error when saving a pipeline with one of the models being None
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(__UpperCamelCase )
__a : Tuple = OnnxStableDiffusionPipeline.from_pretrained(__UpperCamelCase )
# sanity check that the pipeline still works
assert pipe.safety_checker is None
__a : Tuple = pipe("""example prompt""" , num_inference_steps=2 ).images[0]
assert image is not None | 697 |
'''simple docstring'''
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNetaDModel
from ...schedulers import KarrasVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class KarrasVePipeline( DiffusionPipeline ):
    unet: UNetaDModel
    scheduler: KarrasVeScheduler
    def __init__( self , unet , scheduler ):
        '''simple docstring'''
        super().__init__()
        self.register_modules(unet=unet , scheduler=scheduler )
    @torch.no_grad()
    def __call__( self , batch_size = 1 , num_inference_steps = 50 , generator = None , output_type = "pil" , return_dict = True , **kwargs ):
        '''simple docstring'''
        img_size = self.unet.config.sample_size
        shape = (batch_size, 3, img_size, img_size)
        model = self.unet
        # sample x_0 ~ N(0, sigma_0^2 * I)
        sample = randn_tensor(shape , generator=generator , device=self.device ) * self.scheduler.init_noise_sigma
        self.scheduler.set_timesteps(num_inference_steps )
        for t in self.progress_bar(self.scheduler.timesteps ):
            # here sigma_t == t_i from the paper
            sigma = self.scheduler.schedule[t]
            sigma_prev = self.scheduler.schedule[t - 1] if t > 0 else 0
            # 1. Select temporarily increased noise level sigma_hat
            # 2. Add new noise to move from sample_i to sample_hat
            sample_hat , sigma_hat = self.scheduler.add_noise_to_input(sample , sigma , generator=generator )
            # 3. Predict the noise residual given the noise magnitude `sigma_hat`
            # The model inputs and output are adjusted by following eq. (213) in [1].
            model_output = (sigma_hat / 2) * model((sample_hat + 1) / 2 , sigma_hat / 2 ).sample
            # 4. Evaluate dx/dt at sigma_hat
            # 5. Take Euler step from sigma to sigma_prev
            step_output = self.scheduler.step(model_output , sigma_hat , sigma_prev , sample_hat )
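            # `step_output` carries both the Euler-updated sample and the ODE
            # derivative at sigma_hat; the branch below reuses that derivative
            # for the Heun-style second-order correction.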
            if sigma_prev != 0:
                # 6. Apply 2nd order correction
                # The model inputs and output are adjusted by following eq. (213) in [1].
                model_output = (sigma_prev / 2) * model((step_output.prev_sample + 1) / 2 , sigma_prev / 2 ).sample
                step_output = self.scheduler.step_correct(
                    model_output , sigma_hat , sigma_prev , sample_hat , step_output.prev_sample , step_output["""derivative"""] , )
            sample = step_output.prev_sample
        sample = (sample / 2 + 0.5).clamp(0 , 1 )
        image = sample.cpu().permute(0 , 2 , 3 , 1 ).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image )
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=image )
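
# Usage sketch (assumes a UNet2DModel checkpoint trained for Karras-VE
# sampling is available; the model id below is illustrative):
#
#   pipe = KarrasVePipeline.from_pretrained("google/ncsnpp-celebahq-256")
#   image = pipe(batch_size=1, num_inference_steps=50).images[0]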
'''simple docstring'''
import sys
__SCREAMING_SNAKE_CASE : int = (
'73167176531330624919225119674426574742355349194934'
'96983520312774506326239578318016984801869478851843'
'85861560789112949495459501737958331952853208805511'
'12540698747158523863050715693290963295227443043557'
'66896648950445244523161731856403098711121722383113'
'62229893423380308135336276614282806444486645238749'
'30358907296290491560440772390713810515859307960866'
'70172427121883998797908792274921901699720888093776'
'65727333001053367881220235421809751254540594752243'
'52584907711670556013604839586446706324415722155397'
'53697817977846174064955149290862569321978468622482'
'83972241375657056057490261407972968652414535100474'
'82166370484403199890008895243450658541227588666881'
'16427171479924442928230863465674813919123162824586'
'17866458359124566529476545682848912883142607690042'
'24219022671055626321111109370544217506941658960408'
'07198403850962455444362981230987879927244284909188'
'84580156166097919133875499200524063689912560717606'
'05886116467109405077541002256983155200055935729725'
'71636269561882670428252483600823257530420752963450'
)
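# Project Euler problem 8: find the thirteen adjacent digits in the
# 1000-digit number above that have the greatest product.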
def _snake_case ( lowercase = __SCREAMING_SNAKE_CASE ) -> int:
    largest_product = -sys.maxsize - 1
    for i in range(len(lowercase ) - 1_2 ):
        product = 1
        for j in range(1_3 ):
            product *= int(lowercase[i + j] )
        if product > largest_product:
            largest_product = product
    return largest_product
if __name__ == "__main__":
print(f'''{solution() = }''') | 697 |
'''simple docstring'''
def check_bouncy ( lowercase ) -> bool:
    if not isinstance(lowercase , int ):
        raise ValueError("""check_bouncy() accepts only integer arguments""" )
    str_n = str(lowercase )
    sorted_str_n = """""".join(sorted(str_n ) )
    return sorted_str_n != str_n and sorted_str_n[::-1] != str_n
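
# The smallest bouncy number is 101: its digits are neither entirely
# non-decreasing nor entirely non-increasing.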
def solution ( lowercase = 9_9 ) -> int:
    if not 0 < lowercase < 1_0_0:
        raise ValueError("""solution() only accepts values from 0 to 100""" )
    bouncy_num = 0
    num = 1
    while True:
        if check_bouncy(num ):
            bouncy_num += 1
        if (bouncy_num / num) * 1_0_0 >= lowercase:
            return num
        num += 1
if __name__ == "__main__":
from doctest import testmod
testmod()
print(f'''{solution(99)}''') | 697 | 1 |
'''simple docstring'''
def odd_even_transposition ( arr ) -> list:
    arr_size = len(arr )
    for _ in range(arr_size ):
        for i in range(_ % 2 , arr_size - 1 , 2 ):
            if arr[i + 1] < arr[i]:
                arr[i + 1], arr[i] = arr[i], arr[i + 1]
    return arr
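
# Example (sketch): odd_even_transposition([3, 1, 2]) returns [1, 2, 3]. Each
# outer pass only compares disjoint pairs (even- or odd-indexed), which is
# what makes this sort easy to parallelize.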
if __name__ == "__main__":
    arr = list(range(10, 0, -1))
print(f'''Original: {arr}. Sorted: {odd_even_transposition(arr)}''') | 697 |
'''simple docstring'''
import argparse
import torch
from transformers import GPTaConfig, GPTaModel, load_tf_weights_in_gpta
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def _snake_case ( gpta_checkpoint_path , gpta_config_file , pytorch_dump_folder_path ) -> Any:
    # Construct model
    if gpta_config_file == "":
        config = GPTaConfig()
    else:
        config = GPTaConfig.from_json_file(gpta_config_file )
    model = GPTaModel(config )
    # Load weights from numpy
    load_tf_weights_in_gpta(model , config , gpta_checkpoint_path )
    # Save pytorch-model
    pytorch_weights_dump_path = pytorch_dump_folder_path + """/""" + WEIGHTS_NAME
    pytorch_config_dump_path = pytorch_dump_folder_path + """/""" + CONFIG_NAME
    print(F"""Save PyTorch model to {pytorch_weights_dump_path}""" )
    torch.save(model.state_dict() , pytorch_weights_dump_path )
    print(F"""Save configuration file to {pytorch_config_dump_path}""" )
    with open(pytorch_config_dump_path , """w""" , encoding="""utf-8""" ) as f:
        f.write(config.to_json_string() )
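
# Example invocation (sketch; the script name and paths are placeholders):
#   python convert_gpt2_checkpoint.py \
#       --gpt2_checkpoint_path /path/to/tf_checkpoint \
#       --pytorch_dump_folder_path /path/to/output_dir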
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE : Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--gpt2_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--gpt2_config_file',
default='',
type=str,
help=(
'An optional config json file corresponding to the pre-trained OpenAI model. \n'
'This specifies the model architecture.'
),
)
__SCREAMING_SNAKE_CASE : Dict = parser.parse_args()
    _snake_case(args.gpt2_checkpoint_path, args.gpt2_config_file, args.pytorch_dump_folder_path)
'''simple docstring'''
# Function to print upper half of diamond (pyramid)
def floyd ( lowercase ) -> Dict:
    for i in range(0 , lowercase ):
        for _ in range(0 , lowercase - i - 1 ): # printing spaces
            print(""" """ , end="""""" )
        for _ in range(0 , i + 1 ): # printing stars
            print("""* """ , end="""""" )
        print()
def reverse_floyd ( lowercase ) -> int:
    for i in range(lowercase , 0 , -1 ):
        for _ in range(i , 0 , -1 ): # printing stars
            print("""* """ , end="""""" )
        print()
        for _ in range(lowercase - i + 1 , 0 , -1 ): # printing spaces
            print(""" """ , end="""""" )
def pretty_print ( lowercase ) -> Optional[Any]:
    if lowercase <= 0:
print(""" ... .... nothing printing :(""" )
return
floyd(lowercase ) # upper half
reverse_floyd(lowercase ) # lower half
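
# For example, pretty_print(3) prints a diamond:
#     *
#    * *
#   * * *
#   * * *
#    * *
#     *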
if __name__ == "__main__":
print(r'| /\ | |- | |- |--| |\ /| |-')
print(r'|/ \| |- |_ |_ |__| | \/ | |_')
    K = 1
    while K:
        user_number = int(input('enter the number, and see the magic : '))
        print()
        pretty_print(user_number)
        K = int(input('press 0 to exit... and 1 to continue...'))
print('Good Bye...') | 697 |
'''simple docstring'''
import unittest
from transformers import (
MODEL_FOR_OBJECT_DETECTION_MAPPING,
AutoFeatureExtractor,
AutoModelForObjectDetection,
ObjectDetectionPipeline,
is_vision_available,
pipeline,
)
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_pytesseract,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class SCREAMING_SNAKE_CASE__ :
@staticmethod
def __lowerCamelCase ( *__UpperCamelCase , **__UpperCamelCase ):
'''simple docstring'''
pass
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
lowercase__ = MODEL_FOR_OBJECT_DETECTION_MAPPING
def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ):
'''simple docstring'''
__a : Optional[Any] = ObjectDetectionPipeline(model=__UpperCamelCase , image_processor=__UpperCamelCase )
return object_detector, ["./tests/fixtures/tests_samples/COCO/000000039769.png"]
def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase ):
'''simple docstring'''
__a : List[str] = object_detector("""./tests/fixtures/tests_samples/COCO/000000039769.png""" , threshold=0.0 )
self.assertGreater(len(__UpperCamelCase ) , 0 )
for detected_object in outputs:
self.assertEqual(
__UpperCamelCase , {
"""score""": ANY(__UpperCamelCase ),
"""label""": ANY(__UpperCamelCase ),
"""box""": {"""xmin""": ANY(__UpperCamelCase ), """ymin""": ANY(__UpperCamelCase ), """xmax""": ANY(__UpperCamelCase ), """ymax""": ANY(__UpperCamelCase )},
} , )
import datasets
__a : Optional[int] = datasets.load_dataset("""hf-internal-testing/fixtures_image_utils""" , """image""" , split="""test""" )
__a : Tuple = [
Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ),
"""http://images.cocodataset.org/val2017/000000039769.jpg""",
# RGBA
dataset[0]["""file"""],
# LA
dataset[1]["""file"""],
# L
dataset[2]["""file"""],
]
__a : Any = object_detector(__UpperCamelCase , threshold=0.0 )
self.assertEqual(len(__UpperCamelCase ) , len(__UpperCamelCase ) )
for outputs in batch_outputs:
self.assertGreater(len(__UpperCamelCase ) , 0 )
for detected_object in outputs:
self.assertEqual(
__UpperCamelCase , {
"""score""": ANY(__UpperCamelCase ),
"""label""": ANY(__UpperCamelCase ),
"""box""": {"""xmin""": ANY(__UpperCamelCase ), """ymin""": ANY(__UpperCamelCase ), """xmax""": ANY(__UpperCamelCase ), """ymax""": ANY(__UpperCamelCase )},
} , )
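
    # `threshold=0.0` keeps every candidate detection, so the smoke test above
    # only checks the output structure, not model quality.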
@require_tf
@unittest.skip("""Object detection not implemented in TF""" )
def __lowerCamelCase ( self ):
'''simple docstring'''
pass
@require_torch
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Optional[Any] = """hf-internal-testing/tiny-detr-mobilenetsv3"""
__a : Dict = AutoModelForObjectDetection.from_pretrained(__UpperCamelCase )
__a : Optional[Any] = AutoFeatureExtractor.from_pretrained(__UpperCamelCase )
__a : str = ObjectDetectionPipeline(model=__UpperCamelCase , feature_extractor=__UpperCamelCase )
__a : Optional[int] = object_detector("""http://images.cocodataset.org/val2017/000000039769.jpg""" , threshold=0.0 )
self.assertEqual(
nested_simplify(__UpperCamelCase , decimals=4 ) , [
{"""score""": 0.3_3_7_6, """label""": """LABEL_0""", """box""": {"""xmin""": 159, """ymin""": 120, """xmax""": 480, """ymax""": 359}},
{"""score""": 0.3_3_7_6, """label""": """LABEL_0""", """box""": {"""xmin""": 159, """ymin""": 120, """xmax""": 480, """ymax""": 359}},
] , )
__a : Union[str, Any] = object_detector(
[
"""http://images.cocodataset.org/val2017/000000039769.jpg""",
"""http://images.cocodataset.org/val2017/000000039769.jpg""",
] , threshold=0.0 , )
self.assertEqual(
nested_simplify(__UpperCamelCase , decimals=4 ) , [
[
{"""score""": 0.3_3_7_6, """label""": """LABEL_0""", """box""": {"""xmin""": 159, """ymin""": 120, """xmax""": 480, """ymax""": 359}},
{"""score""": 0.3_3_7_6, """label""": """LABEL_0""", """box""": {"""xmin""": 159, """ymin""": 120, """xmax""": 480, """ymax""": 359}},
],
[
{"""score""": 0.3_3_7_6, """label""": """LABEL_0""", """box""": {"""xmin""": 159, """ymin""": 120, """xmax""": 480, """ymax""": 359}},
{"""score""": 0.3_3_7_6, """label""": """LABEL_0""", """box""": {"""xmin""": 159, """ymin""": 120, """xmax""": 480, """ymax""": 359}},
],
] , )
@require_torch
@slow
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : str = """facebook/detr-resnet-50"""
__a : Dict = AutoModelForObjectDetection.from_pretrained(__UpperCamelCase )
__a : int = AutoFeatureExtractor.from_pretrained(__UpperCamelCase )
__a : int = ObjectDetectionPipeline(model=__UpperCamelCase , feature_extractor=__UpperCamelCase )
__a : Any = object_detector("""http://images.cocodataset.org/val2017/000000039769.jpg""" )
self.assertEqual(
nested_simplify(__UpperCamelCase , decimals=4 ) , [
{"""score""": 0.9_9_8_2, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 70, """xmax""": 175, """ymax""": 117}},
{"""score""": 0.9_9_6_0, """label""": """remote""", """box""": {"""xmin""": 333, """ymin""": 72, """xmax""": 368, """ymax""": 187}},
{"""score""": 0.9_9_5_5, """label""": """couch""", """box""": {"""xmin""": 0, """ymin""": 1, """xmax""": 639, """ymax""": 473}},
{"""score""": 0.9_9_8_8, """label""": """cat""", """box""": {"""xmin""": 13, """ymin""": 52, """xmax""": 314, """ymax""": 470}},
{"""score""": 0.9_9_8_7, """label""": """cat""", """box""": {"""xmin""": 345, """ymin""": 23, """xmax""": 640, """ymax""": 368}},
] , )
__a : Optional[Any] = object_detector(
[
"""http://images.cocodataset.org/val2017/000000039769.jpg""",
"""http://images.cocodataset.org/val2017/000000039769.jpg""",
] )
self.assertEqual(
nested_simplify(__UpperCamelCase , decimals=4 ) , [
[
{"""score""": 0.9_9_8_2, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 70, """xmax""": 175, """ymax""": 117}},
{"""score""": 0.9_9_6_0, """label""": """remote""", """box""": {"""xmin""": 333, """ymin""": 72, """xmax""": 368, """ymax""": 187}},
{"""score""": 0.9_9_5_5, """label""": """couch""", """box""": {"""xmin""": 0, """ymin""": 1, """xmax""": 639, """ymax""": 473}},
{"""score""": 0.9_9_8_8, """label""": """cat""", """box""": {"""xmin""": 13, """ymin""": 52, """xmax""": 314, """ymax""": 470}},
{"""score""": 0.9_9_8_7, """label""": """cat""", """box""": {"""xmin""": 345, """ymin""": 23, """xmax""": 640, """ymax""": 368}},
],
[
{"""score""": 0.9_9_8_2, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 70, """xmax""": 175, """ymax""": 117}},
{"""score""": 0.9_9_6_0, """label""": """remote""", """box""": {"""xmin""": 333, """ymin""": 72, """xmax""": 368, """ymax""": 187}},
{"""score""": 0.9_9_5_5, """label""": """couch""", """box""": {"""xmin""": 0, """ymin""": 1, """xmax""": 639, """ymax""": 473}},
{"""score""": 0.9_9_8_8, """label""": """cat""", """box""": {"""xmin""": 13, """ymin""": 52, """xmax""": 314, """ymax""": 470}},
{"""score""": 0.9_9_8_7, """label""": """cat""", """box""": {"""xmin""": 345, """ymin""": 23, """xmax""": 640, """ymax""": 368}},
],
] , )
@require_torch
@slow
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : int = """facebook/detr-resnet-50"""
__a : Optional[int] = pipeline("""object-detection""" , model=__UpperCamelCase )
__a : Optional[int] = object_detector("""http://images.cocodataset.org/val2017/000000039769.jpg""" )
self.assertEqual(
nested_simplify(__UpperCamelCase , decimals=4 ) , [
{"""score""": 0.9_9_8_2, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 70, """xmax""": 175, """ymax""": 117}},
{"""score""": 0.9_9_6_0, """label""": """remote""", """box""": {"""xmin""": 333, """ymin""": 72, """xmax""": 368, """ymax""": 187}},
{"""score""": 0.9_9_5_5, """label""": """couch""", """box""": {"""xmin""": 0, """ymin""": 1, """xmax""": 639, """ymax""": 473}},
{"""score""": 0.9_9_8_8, """label""": """cat""", """box""": {"""xmin""": 13, """ymin""": 52, """xmax""": 314, """ymax""": 470}},
{"""score""": 0.9_9_8_7, """label""": """cat""", """box""": {"""xmin""": 345, """ymin""": 23, """xmax""": 640, """ymax""": 368}},
] , )
__a : List[str] = object_detector(
[
"""http://images.cocodataset.org/val2017/000000039769.jpg""",
"""http://images.cocodataset.org/val2017/000000039769.jpg""",
] )
self.assertEqual(
nested_simplify(__UpperCamelCase , decimals=4 ) , [
[
{"""score""": 0.9_9_8_2, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 70, """xmax""": 175, """ymax""": 117}},
{"""score""": 0.9_9_6_0, """label""": """remote""", """box""": {"""xmin""": 333, """ymin""": 72, """xmax""": 368, """ymax""": 187}},
{"""score""": 0.9_9_5_5, """label""": """couch""", """box""": {"""xmin""": 0, """ymin""": 1, """xmax""": 639, """ymax""": 473}},
{"""score""": 0.9_9_8_8, """label""": """cat""", """box""": {"""xmin""": 13, """ymin""": 52, """xmax""": 314, """ymax""": 470}},
{"""score""": 0.9_9_8_7, """label""": """cat""", """box""": {"""xmin""": 345, """ymin""": 23, """xmax""": 640, """ymax""": 368}},
],
[
{"""score""": 0.9_9_8_2, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 70, """xmax""": 175, """ymax""": 117}},
{"""score""": 0.9_9_6_0, """label""": """remote""", """box""": {"""xmin""": 333, """ymin""": 72, """xmax""": 368, """ymax""": 187}},
{"""score""": 0.9_9_5_5, """label""": """couch""", """box""": {"""xmin""": 0, """ymin""": 1, """xmax""": 639, """ymax""": 473}},
{"""score""": 0.9_9_8_8, """label""": """cat""", """box""": {"""xmin""": 13, """ymin""": 52, """xmax""": 314, """ymax""": 470}},
{"""score""": 0.9_9_8_7, """label""": """cat""", """box""": {"""xmin""": 345, """ymin""": 23, """xmax""": 640, """ymax""": 368}},
],
] , )
@require_torch
@slow
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Union[str, Any] = 0.9_9_8_5
__a : Union[str, Any] = """facebook/detr-resnet-50"""
__a : Optional[int] = pipeline("""object-detection""" , model=__UpperCamelCase )
__a : Union[str, Any] = object_detector("""http://images.cocodataset.org/val2017/000000039769.jpg""" , threshold=__UpperCamelCase )
self.assertEqual(
nested_simplify(__UpperCamelCase , decimals=4 ) , [
{"""score""": 0.9_9_8_8, """label""": """cat""", """box""": {"""xmin""": 13, """ymin""": 52, """xmax""": 314, """ymax""": 470}},
{"""score""": 0.9_9_8_7, """label""": """cat""", """box""": {"""xmin""": 345, """ymin""": 23, """xmax""": 640, """ymax""": 368}},
] , )
@require_torch
@require_pytesseract
@slow
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : str = """Narsil/layoutlmv3-finetuned-funsd"""
__a : List[Any] = 0.9_9_9_3
__a : Dict = pipeline("""object-detection""" , model=__UpperCamelCase , threshold=__UpperCamelCase )
__a : List[str] = object_detector(
"""https://huggingface.co/spaces/impira/docquery/resolve/2359223c1837a7587402bda0f2643382a6eefeab/invoice.png""" )
self.assertEqual(
nested_simplify(__UpperCamelCase , decimals=4 ) , [
{"""score""": 0.9_9_9_3, """label""": """I-ANSWER""", """box""": {"""xmin""": 294, """ymin""": 254, """xmax""": 343, """ymax""": 264}},
{"""score""": 0.9_9_9_3, """label""": """I-ANSWER""", """box""": {"""xmin""": 294, """ymin""": 254, """xmax""": 343, """ymax""": 264}},
] , ) | 697 | 1 |
'''simple docstring'''
from collections import OrderedDict
from typing import List, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__SCREAMING_SNAKE_CASE : Dict = logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE : Union[str, Any] = {
'google/efficientnet-b7': 'https://huggingface.co/google/efficientnet-b7/resolve/main/config.json',
}
class SCREAMING_SNAKE_CASE__ ( __UpperCamelCase ):
lowercase__ = "efficientnet"
def __init__( self , __UpperCamelCase = 3 , __UpperCamelCase = 600 , __UpperCamelCase = 2.0 , __UpperCamelCase = 3.1 , __UpperCamelCase = 8 , __UpperCamelCase = [3, 3, 5, 3, 5, 5, 3] , __UpperCamelCase = [32, 16, 24, 40, 80, 112, 192] , __UpperCamelCase = [16, 24, 40, 80, 112, 192, 320] , __UpperCamelCase = [] , __UpperCamelCase = [1, 2, 2, 2, 1, 2, 1] , __UpperCamelCase = [1, 2, 2, 3, 3, 4, 1] , __UpperCamelCase = [1, 6, 6, 6, 6, 6, 6] , __UpperCamelCase = 0.2_5 , __UpperCamelCase = "swish" , __UpperCamelCase = 2560 , __UpperCamelCase = "mean" , __UpperCamelCase = 0.0_2 , __UpperCamelCase = 0.0_0_1 , __UpperCamelCase = 0.9_9 , __UpperCamelCase = 0.5 , __UpperCamelCase = 0.2 , **__UpperCamelCase , ):
'''simple docstring'''
super().__init__(**__UpperCamelCase )
__a : Union[str, Any] = num_channels
__a : List[Any] = image_size
__a : Optional[int] = width_coefficient
__a : List[Any] = depth_coefficient
__a : Dict = depth_divisor
__a : str = kernel_sizes
__a : Any = in_channels
__a : Tuple = out_channels
__a : List[Any] = depthwise_padding
__a : List[Any] = strides
__a : Optional[Any] = num_block_repeats
__a : Tuple = expand_ratios
__a : Optional[int] = squeeze_expansion_ratio
__a : Dict = hidden_act
__a : Union[str, Any] = hidden_dim
__a : str = pooling_type
__a : Union[str, Any] = initializer_range
__a : Union[str, Any] = batch_norm_eps
__a : List[str] = batch_norm_momentum
__a : str = dropout_rate
__a : List[Any] = drop_connect_rate
__a : Union[str, Any] = sum(__UpperCamelCase ) * 4
class SCREAMING_SNAKE_CASE__ ( __UpperCamelCase ):
lowercase__ = version.parse("1.11" )
@property
def __lowerCamelCase ( self ):
'''simple docstring'''
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
@property
def __lowerCamelCase ( self ):
'''simple docstring'''
return 1E-5 | 697 |
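
    # The property above is OnnxConfig.atol_for_validation: the absolute
    # tolerance used to compare ONNX-exported outputs with the PyTorch model.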
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
__SCREAMING_SNAKE_CASE : List[str] = {
'configuration_blenderbot_small': [
'BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP',
'BlenderbotSmallConfig',
'BlenderbotSmallOnnxConfig',
],
'tokenization_blenderbot_small': ['BlenderbotSmallTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE : Union[str, Any] = ['BlenderbotSmallTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE : List[str] = [
'BLENDERBOT_SMALL_PRETRAINED_MODEL_ARCHIVE_LIST',
'BlenderbotSmallForCausalLM',
'BlenderbotSmallForConditionalGeneration',
'BlenderbotSmallModel',
'BlenderbotSmallPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE : Optional[int] = [
'TFBlenderbotSmallForConditionalGeneration',
'TFBlenderbotSmallModel',
'TFBlenderbotSmallPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE : Optional[Any] = [
'FlaxBlenderbotSmallForConditionalGeneration',
'FlaxBlenderbotSmallModel',
'FlaxBlenderbotSmallPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_blenderbot_small import (
BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlenderbotSmallConfig,
BlenderbotSmallOnnxConfig,
)
from .tokenization_blenderbot_small import BlenderbotSmallTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_blenderbot_small_fast import BlenderbotSmallTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blenderbot_small import (
BLENDERBOT_SMALL_PRETRAINED_MODEL_ARCHIVE_LIST,
BlenderbotSmallForCausalLM,
BlenderbotSmallForConditionalGeneration,
BlenderbotSmallModel,
BlenderbotSmallPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blenderbot_small import (
TFBlenderbotSmallForConditionalGeneration,
TFBlenderbotSmallModel,
TFBlenderbotSmallPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_blenderbot_small import (
FlaxBlenderbotSmallForConditionalGeneration,
FlaxBlenderbotSmallModel,
FlaxBlenderbotSmallPreTrainedModel,
)
else:
import sys
__SCREAMING_SNAKE_CASE : List[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) | 697 | 1 |
'''simple docstring'''
import unittest
from transformers import DonutProcessor
DONUT_PRETRAINED_MODEL_NAME = 'naver-clova-ix/donut-base'
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
    def setUp( self ):
        '''simple docstring'''
        self.processor = DonutProcessor.from_pretrained(DONUT_PRETRAINED_MODEL_NAME )
    def test_token2json( self ):
        '''simple docstring'''
        expected_json = {
            """name""": """John Doe""",
            """age""": """99""",
            """city""": """Atlanta""",
            """state""": """GA""",
            """zip""": """30301""",
            """phone""": """123-4567""",
            """nicknames""": [{"""nickname""": """Johnny"""}, {"""nickname""": """JD"""}],
        }
        sequence = (
            """<s_name>John Doe</s_name><s_age>99</s_age><s_city>Atlanta</s_city>"""
            """<s_state>GA</s_state><s_zip>30301</s_zip><s_phone>123-4567</s_phone>"""
            """<s_nicknames><s_nickname>Johnny</s_nickname>"""
            """<sep/><s_nickname>JD</s_nickname></s_nicknames>"""
        )
        actual_json = self.processor.token2json(sequence )
        self.assertDictEqual(actual_json , expected_json )
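
    # `token2json` is the inverse of `json2token`: it parses Donut's XML-like
    # tag stream back into a (possibly nested) Python dict.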
'''simple docstring'''
import numpy as np
import torch
from torch.utils.data import Dataset
from utils import logger
class SCREAMING_SNAKE_CASE__ ( __UpperCamelCase ):
def __init__( self , __UpperCamelCase , __UpperCamelCase ):
'''simple docstring'''
__a : Any = params
__a : Optional[Any] = np.array(__UpperCamelCase )
__a : Union[str, Any] = np.array([len(__UpperCamelCase ) for t in data] )
self.check()
self.remove_long_sequences()
self.remove_empty_sequences()
self.remove_unknown_sequences()
self.check()
self.print_statistics()
def __getitem__( self , __UpperCamelCase ):
'''simple docstring'''
return (self.token_ids[index], self.lengths[index])
def __len__( self ):
'''simple docstring'''
return len(self.lengths )
def __lowerCamelCase ( self ):
'''simple docstring'''
assert len(self.token_ids ) == len(self.lengths )
assert all(self.lengths[i] == len(self.token_ids[i] ) for i in range(len(self.lengths ) ) )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Tuple = self.params.max_model_input_size
__a : Union[str, Any] = self.lengths > max_len
logger.info(f"""Splitting {sum(__UpperCamelCase )} too long sequences.""" )
        def divide_chunks(l , n ):
            return [l[i : i + n] for i in range(0 , len(l ) , n )]
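
        # Long sequences are split into chunks of at most max_len - 2 tokens,
        # and the special start/end tokens are re-attached below so that each
        # chunk looks like a complete sequence.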
__a : int = []
__a : Union[str, Any] = []
if self.params.mlm:
__a , __a : Any = self.params.special_tok_ids["""cls_token"""], self.params.special_tok_ids["""sep_token"""]
else:
__a , __a : str = self.params.special_tok_ids["""bos_token"""], self.params.special_tok_ids["""eos_token"""]
for seq_, len_ in zip(self.token_ids , self.lengths ):
assert (seq_[0] == cls_id) and (seq_[-1] == sep_id), seq_
if len_ <= max_len:
new_tok_ids.append(seq_ )
new_lengths.append(len_ )
else:
__a : Any = []
for sub_s in divide_chunks(seq_ , max_len - 2 ):
if sub_s[0] != cls_id:
__a : int = np.insert(__UpperCamelCase , 0 , __UpperCamelCase )
if sub_s[-1] != sep_id:
__a : str = np.insert(__UpperCamelCase , len(__UpperCamelCase ) , __UpperCamelCase )
assert len(__UpperCamelCase ) <= max_len
assert (sub_s[0] == cls_id) and (sub_s[-1] == sep_id), sub_s
sub_seqs.append(__UpperCamelCase )
new_tok_ids.extend(__UpperCamelCase )
new_lengths.extend([len(__UpperCamelCase ) for l in sub_seqs] )
__a : Dict = np.array(__UpperCamelCase )
__a : Tuple = np.array(__UpperCamelCase )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : List[str] = len(self )
__a : List[str] = self.lengths > 11
__a : int = self.token_ids[indices]
__a : Union[str, Any] = self.lengths[indices]
__a : Any = len(self )
logger.info(f"""Remove {init_size - new_size} too short (<=11 tokens) sequences.""" )
def __lowerCamelCase ( self ):
'''simple docstring'''
if "unk_token" not in self.params.special_tok_ids:
return
else:
__a : List[str] = self.params.special_tok_ids["""unk_token"""]
__a : str = len(self )
__a : str = np.array([np.count_nonzero(a == unk_token_id ) for a in self.token_ids] )
__a : Optional[Any] = (unk_occs / self.lengths) < 0.5
__a : List[str] = self.token_ids[indices]
__a : Optional[int] = self.lengths[indices]
__a : Any = len(self )
logger.info(f"""Remove {init_size - new_size} sequences with a high level of unknown tokens (50%).""" )
def __lowerCamelCase ( self ):
'''simple docstring'''
if not self.params.is_master:
return
logger.info(f"""{len(self )} sequences""" )
# data_len = sum(self.lengths)
# nb_unique_tokens = len(Counter(list(chain(*self.token_ids))))
# logger.info(f'{data_len} tokens ({nb_unique_tokens} unique)')
# unk_idx = self.params.special_tok_ids['unk_token']
# nb_unknown = sum([(t==unk_idx).sum() for t in self.token_ids])
# logger.info(f'{nb_unknown} unknown tokens (covering {100*nb_unknown/data_len:.2f}% of the data)')
def __lowerCamelCase ( self , __UpperCamelCase ):
'''simple docstring'''
__a : List[str] = [t[0] for t in batch]
__a : str = [t[1] for t in batch]
assert len(__UpperCamelCase ) == len(__UpperCamelCase )
# Max for paddings
__a : Optional[int] = max(__UpperCamelCase )
# Pad token ids
if self.params.mlm:
__a : int = self.params.special_tok_ids["""pad_token"""]
else:
__a : Tuple = self.params.special_tok_ids["""unk_token"""]
__a : Any = [list(t.astype(__UpperCamelCase ) ) + [pad_idx] * (max_seq_len_ - len(__UpperCamelCase )) for t in token_ids]
assert len(tk_ ) == len(__UpperCamelCase )
assert all(len(__UpperCamelCase ) == max_seq_len_ for t in tk_ )
__a : Any = torch.tensor(tk_ ) # (bs, max_seq_len_)
__a : Optional[Any] = torch.tensor(__UpperCamelCase ) # (bs)
return tk_t, lg_t | 697 | 1 |
'''simple docstring'''
from pathlib import PurePosixPath
from typing import Optional
import fsspec
from fsspec import AbstractFileSystem
from huggingface_hub.hf_api import DatasetInfo
from ..utils.file_utils import get_authentication_headers_for_url
from ..utils.hub import hf_hub_url
class SCREAMING_SNAKE_CASE__ ( __UpperCamelCase ):
lowercase__ = ""
lowercase__ = "hf-legacy" # "hf://"" is reserved for hffs
def __init__( self , __UpperCamelCase = None , __UpperCamelCase = None , **__UpperCamelCase , ):
'''simple docstring'''
super().__init__(self , **__UpperCamelCase )
__a : int = repo_info
__a : int = token
__a : Any = None
def __lowerCamelCase ( self ):
'''simple docstring'''
if self.dir_cache is None:
__a : Union[str, Any] = {}
for hf_file in self.repo_info.siblings:
# TODO(QL): add sizes
__a : List[str] = {
"""name""": hf_file.rfilename,
"""size""": None,
"""type""": """file""",
}
self.dir_cache.update(
{
str(__UpperCamelCase ): {"""name""": str(__UpperCamelCase ), """size""": None, """type""": """directory"""}
for d in list(PurePosixPath(hf_file.rfilename ).parents )[:-1]
} )
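
    # Directory entries are derived implicitly from file paths: every parent
    # of a sibling file gets a "directory" record in `dir_cache`.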
def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase = "rb" , **__UpperCamelCase , ):
'''simple docstring'''
if not isinstance(self.repo_info , __UpperCamelCase ):
raise NotImplementedError(f"""Open is only implemented for dataset repositories, but got {self.repo_info}""" )
__a : Any = hf_hub_url(self.repo_info.id , __UpperCamelCase , revision=self.repo_info.sha )
return fsspec.open(
__UpperCamelCase , mode=__UpperCamelCase , headers=get_authentication_headers_for_url(__UpperCamelCase , use_auth_token=self.token ) , client_kwargs={"""trust_env""": True} , ).open()
def __lowerCamelCase ( self , __UpperCamelCase , **__UpperCamelCase ):
'''simple docstring'''
self._get_dirs()
__a : str = self._strip_protocol(__UpperCamelCase )
if path in self.dir_cache:
return self.dir_cache[path]
else:
raise FileNotFoundError(__UpperCamelCase )
def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase=False , **__UpperCamelCase ):
'''simple docstring'''
self._get_dirs()
__a : int = PurePosixPath(path.strip("""/""" ) )
__a : List[str] = {}
for p, f in self.dir_cache.items():
__a : str = PurePosixPath(p.strip("""/""" ) )
__a : Optional[int] = p.parent
if root == path:
__a : List[str] = f
__a : str = list(paths.values() )
if detail:
return out
else:
return sorted(f["""name"""] for f in out ) | 697 | 1 |
'''simple docstring'''
def jaro_winkler ( stra , strb ) -> float:
    def get_matched_characters(_stra , _strb ) -> str:
        matched = []
        limit = min(len(_stra ) , len(_strb ) ) // 2
        for i, l in enumerate(_stra ):
            left = int(max(0 , i - limit ) )
            right = int(min(i + limit + 1 , len(_strb ) ) )
            if l in _strb[left:right]:
                matched.append(l )
                _strb = F"""{_strb[0:_strb.index(l )]} {_strb[_strb.index(l ) + 1:]}"""
        return "".join(matched )

    # matching characters
    matching_a = get_matched_characters(stra , strb )
    matching_b = get_matched_characters(strb , stra )
    match_count = len(matching_a )

    # transposition
    transpositions = (
        len([(ca, cb) for ca, cb in zip(matching_a , matching_b ) if ca != cb] ) // 2
    )

    if not match_count:
        jaro = 0.0
    else:
        jaro = (
            1
            / 3
            * (
                match_count / len(stra )
                + match_count / len(strb )
                + (match_count - transpositions) / match_count
            )
        )

    # common prefix up to 4 characters
    prefix_len = 0
    for ca, cb in zip(stra[:4] , strb[:4] ):
        if ca == cb:
            prefix_len += 1
        else:
            break
    return jaro + 0.1 * prefix_len * (1 - jaro)
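
# Classic example: jaro_winkler("martha", "marhta") gives ~0.9611, versus a
# plain Jaro similarity of ~0.9444; the Winkler bonus rewards the shared
# "mar" prefix.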
if __name__ == "__main__":
import doctest
doctest.testmod()
print(jaro_winkler('hello', 'world')) | 697 |
'''simple docstring'''
import inspect
import unittest
from transformers import DPTConfig
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MODEL_MAPPING, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTModel
from transformers.models.dpt.modeling_dpt import DPT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class SCREAMING_SNAKE_CASE__ :
def __init__( self , __UpperCamelCase , __UpperCamelCase=2 , __UpperCamelCase=32 , __UpperCamelCase=16 , __UpperCamelCase=3 , __UpperCamelCase=True , __UpperCamelCase=True , __UpperCamelCase=32 , __UpperCamelCase=4 , __UpperCamelCase=[0, 1, 2, 3] , __UpperCamelCase=4 , __UpperCamelCase=37 , __UpperCamelCase="gelu" , __UpperCamelCase=0.1 , __UpperCamelCase=0.1 , __UpperCamelCase=0.0_2 , __UpperCamelCase=3 , __UpperCamelCase=[1, 384, 24, 24] , __UpperCamelCase=True , __UpperCamelCase=None , ):
'''simple docstring'''
__a : List[str] = parent
__a : Tuple = batch_size
__a : str = image_size
__a : int = patch_size
__a : Dict = num_channels
__a : int = is_training
__a : Dict = use_labels
__a : Union[str, Any] = hidden_size
__a : Dict = num_hidden_layers
__a : Dict = backbone_out_indices
__a : Optional[int] = num_attention_heads
__a : List[str] = intermediate_size
__a : Optional[Any] = hidden_act
__a : Dict = hidden_dropout_prob
__a : Tuple = attention_probs_dropout_prob
__a : Any = initializer_range
__a : Any = num_labels
__a : Optional[Any] = backbone_featmap_shape
__a : List[Any] = scope
__a : List[str] = is_hybrid
# sequence length of DPT = num_patches + 1 (we add 1 for the [CLS] token)
__a : Union[str, Any] = (image_size // patch_size) ** 2
__a : List[str] = num_patches + 1
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Dict = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__a : Union[str, Any] = None
if self.use_labels:
__a : str = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
__a : Tuple = self.get_config()
return config, pixel_values, labels
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : List[str] = {
"""global_padding""": """same""",
"""layer_type""": """bottleneck""",
"""depths""": [3, 4, 9],
"""out_features""": ["""stage1""", """stage2""", """stage3"""],
"""embedding_dynamic_padding""": True,
"""hidden_sizes""": [96, 192, 384, 768],
"""num_groups""": 2,
}
return DPTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , backbone_out_indices=self.backbone_out_indices , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__UpperCamelCase , initializer_range=self.initializer_range , is_hybrid=self.is_hybrid , backbone_config=__UpperCamelCase , backbone_featmap_shape=self.backbone_featmap_shape , )
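
    # The `backbone_config` above describes a BiT/ResNet-style stem: DPT-hybrid
    # feeds ResNet stage features into the ViT encoder instead of raw patches.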
def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ):
'''simple docstring'''
__a : Optional[Any] = DPTModel(config=__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
__a : List[str] = model(__UpperCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ):
'''simple docstring'''
__a : List[str] = self.num_labels
__a : Union[str, Any] = DPTForDepthEstimation(__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
__a : Tuple = model(__UpperCamelCase )
self.parent.assertEqual(result.predicted_depth.shape , (self.batch_size, self.image_size, self.image_size) )
def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ):
'''simple docstring'''
__a : Dict = self.num_labels
__a : Tuple = DPTForSemanticSegmentation(__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
__a : str = model(__UpperCamelCase , labels=__UpperCamelCase )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size, self.image_size) )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Optional[int] = self.prepare_config_and_inputs()
__a , __a , __a : Tuple = config_and_inputs
__a : List[str] = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class SCREAMING_SNAKE_CASE__ ( __UpperCamelCase , __UpperCamelCase , unittest.TestCase ):
lowercase__ = (DPTModel, DPTForDepthEstimation, DPTForSemanticSegmentation) if is_torch_available() else ()
lowercase__ = (
{
"depth-estimation": DPTForDepthEstimation,
"feature-extraction": DPTModel,
"image-segmentation": DPTForSemanticSegmentation,
}
if is_torch_available()
else {}
)
lowercase__ = False
lowercase__ = False
lowercase__ = False
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Optional[int] = DPTModelTester(self )
__a : List[Any] = ConfigTester(self , config_class=__UpperCamelCase , has_text_modality=__UpperCamelCase , hidden_size=37 )
def __lowerCamelCase ( self ):
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason="""DPT does not use inputs_embeds""" )
def __lowerCamelCase ( self ):
'''simple docstring'''
pass
def __lowerCamelCase ( self ):
'''simple docstring'''
__a , __a : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__a : str = model_class(__UpperCamelCase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
__a : Any = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__UpperCamelCase , nn.Linear ) )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a , __a : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__a : Any = model_class(__UpperCamelCase )
__a : List[str] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__a : int = [*signature.parameters.keys()]
__a : List[str] = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , __UpperCamelCase )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__UpperCamelCase )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_depth_estimation(*__UpperCamelCase )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*__UpperCamelCase )
def __lowerCamelCase ( self ):
'''simple docstring'''
for model_class in self.all_model_classes:
if model_class.__name__ == "DPTForDepthEstimation":
continue
__a , __a : Dict = self.model_tester.prepare_config_and_inputs_for_common()
__a : List[Any] = True
if model_class in get_values(__UpperCamelCase ):
continue
__a : str = model_class(__UpperCamelCase )
model.to(__UpperCamelCase )
model.train()
__a : Union[str, Any] = self._prepare_for_class(__UpperCamelCase , __UpperCamelCase , return_labels=__UpperCamelCase )
__a : List[Any] = model(**__UpperCamelCase ).loss
loss.backward()
def __lowerCamelCase ( self ):
'''simple docstring'''
for model_class in self.all_model_classes:
if model_class.__name__ == "DPTForDepthEstimation":
continue
__a , __a : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
__a : Any = False
__a : Dict = True
if model_class in get_values(__UpperCamelCase ) or not model_class.supports_gradient_checkpointing:
continue
__a : Any = model_class(__UpperCamelCase )
model.to(__UpperCamelCase )
model.gradient_checkpointing_enable()
model.train()
__a : List[str] = self._prepare_for_class(__UpperCamelCase , __UpperCamelCase , return_labels=__UpperCamelCase )
__a : Dict = model(**__UpperCamelCase ).loss
loss.backward()
def __lowerCamelCase ( self ):
'''simple docstring'''
__a , __a : Any = self.model_tester.prepare_config_and_inputs_for_common()
__a : Any = _config_zero_init(__UpperCamelCase )
for model_class in self.all_model_classes:
__a : Any = model_class(config=__UpperCamelCase )
# Skip the check for the backbone
__a : Optional[Any] = []
for name, module in model.named_modules():
if module.__class__.__name__ == "DPTViTHybridEmbeddings":
__a : Optional[int] = [f"""{name}.{key}""" for key in module.state_dict().keys()]
break
for name, param in model.named_parameters():
if param.requires_grad:
if name in backbone_params:
continue
self.assertIn(
((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=f"""Parameter {name} of model {model_class} seems not properly initialized""" , )
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def __lowerCamelCase ( self ):
'''simple docstring'''
pass
@slow
def __lowerCamelCase ( self ):
'''simple docstring'''
for model_name in DPT_PRETRAINED_MODEL_ARCHIVE_LIST[1:]:
__a : int = DPTModel.from_pretrained(__UpperCamelCase )
self.assertIsNotNone(__UpperCamelCase )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a , __a : int = self.model_tester.prepare_config_and_inputs_for_common()
__a : Optional[int] = """add"""
with self.assertRaises(__UpperCamelCase ):
__a : int = DPTForDepthEstimation(__UpperCamelCase )
def _snake_case ( ) -> Any:
__a : Dict = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
@slow
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : int = DPTImageProcessor.from_pretrained("""Intel/dpt-hybrid-midas""" )
__a : int = DPTForDepthEstimation.from_pretrained("""Intel/dpt-hybrid-midas""" ).to(__UpperCamelCase )
__a : Union[str, Any] = prepare_img()
__a : Any = image_processor(images=__UpperCamelCase , return_tensors="""pt""" ).to(__UpperCamelCase )
# forward pass
with torch.no_grad():
__a : Optional[Any] = model(**__UpperCamelCase )
__a : int = outputs.predicted_depth
# verify the predicted depth
__a : Any = torch.Size((1, 384, 384) )
self.assertEqual(predicted_depth.shape , __UpperCamelCase )
__a : int = torch.tensor(
[[[5.6_4_3_7, 5.6_1_4_6, 5.6_5_1_1], [5.4_3_7_1, 5.5_6_4_9, 5.5_9_5_8], [5.5_2_1_5, 5.5_1_8_4, 5.5_2_9_3]]] ).to(__UpperCamelCase )
self.assertTrue(torch.allclose(outputs.predicted_depth[:3, :3, :3] / 100 , __UpperCamelCase , atol=1E-4 ) ) | 697 | 1 |
'''simple docstring'''
from ...utils import logging
from ..ta.modeling_tf_ta import TFTaEncoderModel, TFTaForConditionalGeneration, TFTaModel
from .configuration_mta import MTaConfig
__SCREAMING_SNAKE_CASE : Optional[int] = logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE : Dict = 'T5Config'
class SCREAMING_SNAKE_CASE__ ( __UpperCamelCase ):
lowercase__ = "mt5"
lowercase__ = MTaConfig
class SCREAMING_SNAKE_CASE__ ( __UpperCamelCase ):
lowercase__ = "mt5"
lowercase__ = MTaConfig
class SCREAMING_SNAKE_CASE__ ( __UpperCamelCase ):
lowercase__ = "mt5"
lowercase__ = MTaConfig | 697 |
'''simple docstring'''
import unittest
from .lib import (
Matrix,
Vector,
axpy,
square_zero_matrix,
unit_basis_vector,
zero_vector,
)
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Dict = Vector([1, 2, 3] )
self.assertEqual(x.component(0 ) , 1 )
self.assertEqual(x.component(2 ) , 3 )
__a : Optional[int] = Vector()
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Any = Vector([0, 0, 0, 0, 0, 1] )
self.assertEqual(str(__UpperCamelCase ) , """(0,0,0,0,0,1)""" )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Tuple = Vector([1, 2, 3, 4] )
self.assertEqual(len(__UpperCamelCase ) , 4 )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Optional[Any] = Vector([1, 2] )
__a : List[str] = Vector([1, 2, 3, 4, 5] )
__a : Optional[int] = Vector([0, 0, 0, 0, 0, 0, 0, 0, 0, 0] )
__a : Dict = Vector([1, -1, 1, -1, 2, -3, 4, -5] )
self.assertAlmostEqual(x.euclidean_length() , 2.2_3_6 , 3 )
self.assertAlmostEqual(y.euclidean_length() , 7.4_1_6 , 3 )
self.assertEqual(z.euclidean_length() , 0 )
self.assertAlmostEqual(w.euclidean_length() , 7.6_1_6 , 3 )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Dict = Vector([1, 2, 3] )
__a : Union[str, Any] = Vector([1, 1, 1] )
self.assertEqual((x + y).component(0 ) , 2 )
self.assertEqual((x + y).component(1 ) , 3 )
self.assertEqual((x + y).component(2 ) , 4 )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : List[str] = Vector([1, 2, 3] )
__a : Any = Vector([1, 1, 1] )
self.assertEqual((x - y).component(0 ) , 0 )
self.assertEqual((x - y).component(1 ) , 1 )
self.assertEqual((x - y).component(2 ) , 2 )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Tuple = Vector([1, 2, 3] )
__a : Optional[Any] = Vector([2, -1, 4] ) # for test of dot product
__a : Union[str, Any] = Vector([1, -2, -1] )
self.assertEqual(str(x * 3.0 ) , """(3.0,6.0,9.0)""" )
self.assertEqual((a * b) , 0 )
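        # (2, -1, 4) . (1, -2, -1) = 2 + 2 - 4 = 0, i.e. the vectors are
        # orthogonal, so the dot product test expects 0.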
def __lowerCamelCase ( self ):
'''simple docstring'''
self.assertEqual(str(zero_vector(10 ) ).count("""0""" ) , 10 )
def __lowerCamelCase ( self ):
'''simple docstring'''
self.assertEqual(str(unit_basis_vector(3 , 1 ) ) , """(0,1,0)""" )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Dict = Vector([1, 2, 3] )
__a : Optional[int] = Vector([1, 0, 1] )
self.assertEqual(str(axpy(2 , __UpperCamelCase , __UpperCamelCase ) ) , """(3,4,7)""" )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : int = Vector([1, 0, 0, 0, 0, 0] )
__a : Any = x.copy()
self.assertEqual(str(__UpperCamelCase ) , str(__UpperCamelCase ) )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Union[str, Any] = Vector([1, 0, 0] )
x.change_component(0 , 0 )
x.change_component(1 , 1 )
self.assertEqual(str(__UpperCamelCase ) , """(0,1,0)""" )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Dict = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
self.assertEqual("""|1,2,3|\n|2,4,5|\n|6,7,8|\n""" , str(__UpperCamelCase ) )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : List[Any] = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
__a : List[Any] = [[-3, -14, -10], [-5, -10, -5], [-2, -1, 0]]
for x in range(a.height() ):
for y in range(a.width() ):
self.assertEqual(minors[x][y] , a.minor(__UpperCamelCase , __UpperCamelCase ) )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : List[Any] = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
__a : Any = [[-3, 14, -10], [5, -10, 5], [-2, 1, 0]]
for x in range(a.height() ):
for y in range(a.width() ):
self.assertEqual(cofactors[x][y] , a.cofactor(__UpperCamelCase , __UpperCamelCase ) )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : List[Any] = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
self.assertEqual(-5 , a.determinant() )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Any = Matrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]] , 3 , 3 )
__a : List[Any] = Vector([1, 2, 3] )
self.assertEqual("""(14,32,50)""" , str(a * x ) )
self.assertEqual("""|2,4,6|\n|8,10,12|\n|14,16,18|\n""" , str(a * 2 ) )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : List[str] = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
a.change_component(0 , 2 , 5 )
self.assertEqual("""|1,2,5|\n|2,4,5|\n|6,7,8|\n""" , str(__UpperCamelCase ) )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Tuple = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
self.assertEqual(7 , a.component(2 , 1 ) , 0.0_1 )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Dict = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
__a : Union[str, Any] = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]] , 3 , 3 )
self.assertEqual("""|2,4,10|\n|4,8,10|\n|12,14,18|\n""" , str(a + b ) )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : List[Any] = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
__a : List[str] = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]] , 3 , 3 )
self.assertEqual("""|0,0,-4|\n|0,0,0|\n|0,0,-2|\n""" , str(a - b ) )
def __lowerCamelCase ( self ):
'''simple docstring'''
self.assertEqual(
"""|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n""" , str(square_zero_matrix(5 ) ) , )
if __name__ == "__main__":
unittest.main() | 697 | 1 |
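# Quick usage sketch for the library under test (commented out; assumption:
# the Vector/Matrix API is exactly what the assertions above exercise):
#   v = Vector([3, 4]); v.euclidean_length()        # -> 5.0
#   m = Matrix([[2, 0], [0, 2]], 2, 2); str(m * v)  # -> roughly "(6,8)", uniform scaling by 2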
'''simple docstring'''
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import DiffusionPipeline
from diffusers.models import AutoencoderKL, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import logging
__SCREAMING_SNAKE_CASE : Union[str, Any] = logging.get_logger(__name__) # pylint: disable=invalid-name
class SCREAMING_SNAKE_CASE__ ( __UpperCamelCase ):
def __init__( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , ):
'''simple docstring'''
super().__init__()
self.register_modules(
vae=__UpperCamelCase , text_encoder=__UpperCamelCase , tokenizer=__UpperCamelCase , unet=__UpperCamelCase , scheduler=__UpperCamelCase , safety_checker=__UpperCamelCase , feature_extractor=__UpperCamelCase , )
def __lowerCamelCase ( self , __UpperCamelCase = "auto" ):
'''simple docstring'''
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
__a : Union[str, Any] = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(__UpperCamelCase )
def __lowerCamelCase ( self ):
'''simple docstring'''
self.enable_attention_slicing(__UpperCamelCase )
@torch.no_grad()
def __call__( self , __UpperCamelCase , __UpperCamelCase = 512 , __UpperCamelCase = 512 , __UpperCamelCase = 50 , __UpperCamelCase = 7.5 , __UpperCamelCase = None , __UpperCamelCase = 1 , __UpperCamelCase = 0.0 , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = "pil" , __UpperCamelCase = True , __UpperCamelCase = None , __UpperCamelCase = 1 , __UpperCamelCase = None , **__UpperCamelCase , ):
'''simple docstring'''
if isinstance(__UpperCamelCase , __UpperCamelCase ):
__a : Union[str, Any] = 1
elif isinstance(__UpperCamelCase , __UpperCamelCase ):
__a : Tuple = len(__UpperCamelCase )
else:
raise ValueError(f"""`prompt` has to be of type `str` or `list` but is {type(__UpperCamelCase )}""" )
if height % 8 != 0 or width % 8 != 0:
raise ValueError(f"""`height` and `width` have to be divisible by 8 but are {height} and {width}.""" )
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(__UpperCamelCase , __UpperCamelCase ) or callback_steps <= 0)
):
raise ValueError(
f"""`callback_steps` has to be a positive integer but is {callback_steps} of type"""
f""" {type(__UpperCamelCase )}.""" )
# get prompt text embeddings
__a : Tuple = self.tokenizer(
__UpperCamelCase , padding="""max_length""" , max_length=self.tokenizer.model_max_length , return_tensors="""pt""" , )
__a : Union[str, Any] = text_inputs.input_ids
if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
__a : str = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
logger.warning(
"""The following part of your input was truncated because CLIP can only handle sequences up to"""
f""" {self.tokenizer.model_max_length} tokens: {removed_text}""" )
__a : Optional[int] = text_input_ids[:, : self.tokenizer.model_max_length]
if text_embeddings is None:
__a : int = self.text_encoder(text_input_ids.to(self.device ) )[0]
# duplicate text embeddings for each generation per prompt, using mps friendly method
        bs_embed , seq_len , _ = text_embeddings.shape
__a : Optional[Any] = text_embeddings.repeat(1 , __UpperCamelCase , 1 )
__a : Union[str, Any] = text_embeddings.view(bs_embed * num_images_per_prompt , __UpperCamelCase , -1 )
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
__a : Any = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
__a : List[str]
if negative_prompt is None:
__a : Optional[Any] = [""""""]
elif type(__UpperCamelCase ) is not type(__UpperCamelCase ):
raise TypeError(
f"""`negative_prompt` should be the same type to `prompt`, but got {type(__UpperCamelCase )} !="""
f""" {type(__UpperCamelCase )}.""" )
elif isinstance(__UpperCamelCase , __UpperCamelCase ):
__a : Any = [negative_prompt]
elif batch_size != len(__UpperCamelCase ):
raise ValueError(
f"""`negative_prompt`: {negative_prompt} has batch size {len(__UpperCamelCase )}, but `prompt`:"""
f""" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"""
""" the batch size of `prompt`.""" )
else:
__a : Tuple = negative_prompt
__a : Any = text_input_ids.shape[-1]
__a : List[str] = self.tokenizer(
__UpperCamelCase , padding="""max_length""" , max_length=__UpperCamelCase , truncation=__UpperCamelCase , return_tensors="""pt""" , )
__a : str = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
__a : List[str] = uncond_embeddings.shape[1]
__a : List[Any] = uncond_embeddings.repeat(__UpperCamelCase , __UpperCamelCase , 1 )
__a : Tuple = uncond_embeddings.view(batch_size * num_images_per_prompt , __UpperCamelCase , -1 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
__a : List[Any] = torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
__a : Tuple = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
__a : List[Any] = (batch_size * num_images_per_prompt, self.unet.config.in_channels, 64, 64)
__a : int = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not exist on mps
__a : Any = torch.randn(
__UpperCamelCase , generator=__UpperCamelCase , device="""cpu""" , dtype=__UpperCamelCase ).to(self.device )
__a : Optional[Any] = torch.randn(__UpperCamelCase , generator=__UpperCamelCase , device="""cpu""" , dtype=__UpperCamelCase ).to(
self.device )
else:
__a : Optional[int] = torch.randn(
__UpperCamelCase , generator=__UpperCamelCase , device=self.device , dtype=__UpperCamelCase )
__a : str = torch.randn(__UpperCamelCase , generator=__UpperCamelCase , device=self.device , dtype=__UpperCamelCase )
else:
if latents_reference.shape != latents_shape:
raise ValueError(f"""Unexpected latents shape, got {latents.shape}, expected {latents_shape}""" )
__a : Optional[Any] = latents_reference.to(self.device )
__a : str = latents.to(self.device )
# This is the key part of the pipeline where we
# try to ensure that the generated images w/ the same seed
# but different sizes actually result in similar images
__a : List[str] = (latents_shape[3] - latents_shape_reference[3]) // 2
__a : int = (latents_shape[2] - latents_shape_reference[2]) // 2
__a : int = latents_shape_reference[3] if dx >= 0 else latents_shape_reference[3] + 2 * dx
__a : Tuple = latents_shape_reference[2] if dy >= 0 else latents_shape_reference[2] + 2 * dy
__a : Optional[Any] = 0 if dx < 0 else dx
__a : Optional[Any] = 0 if dy < 0 else dy
__a : Optional[int] = max(-dx , 0 )
__a : Optional[Any] = max(-dy , 0 )
# import pdb
# pdb.set_trace()
__a : Optional[int] = latents_reference[:, :, dy : dy + h, dx : dx + w]
# set timesteps
self.scheduler.set_timesteps(__UpperCamelCase )
# Some schedulers like PNDM have timesteps as arrays
# It's more optimized to move all timesteps to correct device beforehand
__a : Dict = self.scheduler.timesteps.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
__a : Any = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
__a : List[Any] = """eta""" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
__a : Optional[Any] = {}
if accepts_eta:
__a : Union[str, Any] = eta
for i, t in enumerate(self.progress_bar(__UpperCamelCase ) ):
# expand the latents if we are doing classifier free guidance
__a : List[str] = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
__a : Tuple = self.scheduler.scale_model_input(__UpperCamelCase , __UpperCamelCase )
# predict the noise residual
__a : Union[str, Any] = self.unet(__UpperCamelCase , __UpperCamelCase , encoder_hidden_states=__UpperCamelCase ).sample
# perform guidance
if do_classifier_free_guidance:
                noise_pred_uncond , noise_pred_text = noise_pred.chunk(2 )
__a : Optional[int] = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# compute the previous noisy sample x_t -> x_t-1
__a : List[Any] = self.scheduler.step(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , **__UpperCamelCase ).prev_sample
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
        latents = 1 / 0.1_8_2_1_5 * latents
        image = self.vae.decode(latents ).sample
        image = (image / 2 + 0.5).clamp(0 , 1 )
        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        image = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
        if self.safety_checker is not None:
            safety_checker_input = self.feature_extractor(self.numpy_to_pil(image ) , return_tensors="""pt""" ).to(
                self.device )
            image , has_nsfw_concept = self.safety_checker(
                images=image , clip_input=safety_checker_input.pixel_values.to(text_embeddings.dtype ) )
        else:
            has_nsfw_concept = None
        if output_type == "pil":
            image = self.numpy_to_pil(image )
        if not return_dict:
            return (image, has_nsfw_concept)
        return StableDiffusionPipelineOutput(images=image , nsfw_content_detected=has_nsfw_concept )
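# Classifier-free guidance in isolation (a sketch of the update used in the
# denoising loop above; assumption: plain tensors stand in for the two UNet
# passes):
import torch

_uncond = torch.zeros(1, 4, 8, 8)  # noise prediction without the prompt
_text = torch.ones(1, 4, 8, 8)     # noise prediction with the prompt
_scale = 7.5
# move the prediction past the text-conditioned one, away from unconditional
_guided = _uncond + _scale * (_text - _uncond)
assert torch.allclose(_guided, torch.full((1, 4, 8, 8), 7.5))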
'''simple docstring'''
import os
from itertools import chain
from random import randrange, shuffle
import pytest
from .sola import PokerHand
SORTED_HANDS = (
'4S 3H 2C 7S 5H',
'9D 8H 2C 6S 7H',
'2D 6D 9D TH 7D',
'TC 8C 2S JH 6C',
'JH 8S TH AH QH',
'TS KS 5S 9S AC',
'KD 6S 9D TH AD',
'KS 8D 4D 9S 4S', # pair
'8C 4S KH JS 4D', # pair
'QH 8H KD JH 8S', # pair
'KC 4H KS 2H 8D', # pair
'KD 4S KC 3H 8S', # pair
'AH 8S AS KC JH', # pair
'3H 4C 4H 3S 2H', # 2 pairs
'5S 5D 2C KH KH', # 2 pairs
'3C KH 5D 5S KH', # 2 pairs
'AS 3C KH AD KH', # 2 pairs
'7C 7S 3S 7H 5S', # 3 of a kind
'7C 7S KH 2H 7H', # 3 of a kind
'AC KH QH AH AS', # 3 of a kind
'2H 4D 3C AS 5S', # straight (low ace)
'3C 5C 4C 2C 6H', # straight
'6S 8S 7S 5H 9H', # straight
'JS QS 9H TS KH', # straight
'QC KH TS JS AH', # straight (high ace)
'8C 9C 5C 3C TC', # flush
'3S 8S 9S 5S KS', # flush
'4C 5C 9C 8C KC', # flush
'JH 8H AH KH QH', # flush
'3D 2H 3H 2C 2D', # full house
'2H 2C 3S 3H 3D', # full house
'KH KC 3S 3H 3D', # full house
'JC 6H JS JD JH', # 4 of a kind
'JC 7H JS JD JH', # 4 of a kind
'JC KH JS JD JH', # 4 of a kind
'2S AS 4S 5S 3S', # straight flush (low ace)
'2D 6D 3D 4D 5D', # straight flush
'5C 6C 3C 7C 4C', # straight flush
'JH 9H TH KH QH', # straight flush
'JH AH TH KH QH', # royal flush (high ace straight flush)
)
TEST_COMPARE = (
('2H 3H 4H 5H 6H', 'KS AS TS QS JS', 'Loss'),
('2H 3H 4H 5H 6H', 'AS AD AC AH JD', 'Win'),
('AS AH 2H AD AC', 'JS JD JC JH 3D', 'Win'),
('2S AH 2H AS AC', 'JS JD JC JH AD', 'Loss'),
('2S AH 2H AS AC', '2H 3H 5H 6H 7H', 'Win'),
('AS 3S 4S 8S 2S', '2H 3H 5H 6H 7H', 'Win'),
('2H 3H 5H 6H 7H', '2S 3H 4H 5S 6C', 'Win'),
('2S 3H 4H 5S 6C', '3D 4C 5H 6H 2S', 'Tie'),
('2S 3H 4H 5S 6C', 'AH AC 5H 6H AS', 'Win'),
('2S 2H 4H 5S 4C', 'AH AC 5H 6H AS', 'Loss'),
('2S 2H 4H 5S 4C', 'AH AC 5H 6H 7S', 'Win'),
('6S AD 7H 4S AS', 'AH AC 5H 6H 7S', 'Loss'),
('2S AH 4H 5S KC', 'AH AC 5H 6H 7S', 'Loss'),
('2S 3H 6H 7S 9C', '7H 3C TH 6H 9S', 'Loss'),
('4S 5H 6H TS AC', '3S 5H 6H TS AC', 'Win'),
('2S AH 4H 5S 6C', 'AD 4C 5H 6H 2C', 'Tie'),
('AS AH 3H AD AC', 'AS AH 2H AD AC', 'Win'),
('AH AC 5H 5C QS', 'AH AC 5H 5C KS', 'Loss'),
('AH AC 5H 5C QS', 'KH KC 5H 5C QS', 'Win'),
('7C 7S KH 2H 7H', '3C 3S AH 2H 3H', 'Win'),
('3C 3S AH 2H 3H', '7C 7S KH 2H 7H', 'Loss'),
('6H 5H 4H 3H 2H', '5H 4H 3H 2H AH', 'Win'),
('5H 4H 3H 2H AH', '5H 4H 3H 2H AH', 'Tie'),
('5H 4H 3H 2H AH', '6H 5H 4H 3H 2H', 'Loss'),
('AH AD KS KC AC', 'AH KD KH AC KC', 'Win'),
('2H 4D 3C AS 5S', '2H 4D 3C 6S 5S', 'Loss'),
('2H 3S 3C 3H 2S', '3S 3C 2S 2H 2D', 'Win'),
('4D 6D 5D 2D JH', '3S 8S 3H TC KH', 'Loss'),
('4S 6C 8S 3S 7S', 'AD KS 2D 7D 7C', 'Loss'),
('6S 4C 7H 8C 3H', '5H JC AH 9D 9C', 'Loss'),
('9D 9H JH TC QH', '3C 2S JS 5C 7H', 'Win'),
('2H TC 8S AD 9S', '4H TS 7H 2C 5C', 'Win'),
('9D 3S 2C 7S 7C', 'JC TD 3C TC 9H', 'Loss'),
)
TEST_FLUSH = (
('2H 3H 4H 5H 6H', True),
('AS AH 2H AD AC', False),
('2H 3H 5H 6H 7H', True),
('KS AS TS QS JS', True),
('8H 9H QS JS TH', False),
('AS 3S 4S 8S 2S', True),
)
TEST_STRAIGHT = (
('2H 3H 4H 5H 6H', True),
('AS AH 2H AD AC', False),
('2H 3H 5H 6H 7H', False),
('KS AS TS QS JS', True),
('8H 9H QS JS TH', True),
)
TEST_FIVE_HIGH_STRAIGHT = (
('2H 4D 3C AS 5S', True, [5, 4, 3, 2, 14]),
('2H 5D 3C AS 5S', False, [14, 5, 5, 3, 2]),
('JH QD KC AS TS', False, [14, 13, 12, 11, 10]),
('9D 3S 2C 7S 7C', False, [9, 7, 7, 3, 2]),
)
TEST_KIND = (
('JH AH TH KH QH', 0),
('JH 9H TH KH QH', 0),
('JC KH JS JD JH', 7),
('KH KC 3S 3H 3D', 6),
('8C 9C 5C 3C TC', 0),
('JS QS 9H TS KH', 0),
('7C 7S KH 2H 7H', 3),
('3C KH 5D 5S KH', 2),
('QH 8H KD JH 8S', 1),
('2D 6D 9D TH 7D', 0),
)
TEST_TYPES = (
('JH AH TH KH QH', 23),
('JH 9H TH KH QH', 22),
('JC KH JS JD JH', 21),
('KH KC 3S 3H 3D', 20),
('8C 9C 5C 3C TC', 19),
('JS QS 9H TS KH', 18),
('7C 7S KH 2H 7H', 17),
('3C KH 5D 5S KH', 16),
('QH 8H KD JH 8S', 15),
('2D 6D 9D TH 7D', 14),
)
def generate_random_hand() -> tuple[str, str, str]:
    play , oppo = randrange(len(SORTED_HANDS ) ), randrange(len(SORTED_HANDS ) )
    expected = ["""Loss""", """Tie""", """Win"""][(play >= oppo) + (play > oppo)]
    hand , other = SORTED_HANDS[play], SORTED_HANDS[oppo]
    return hand, other, expected
def generate_random_hands(number_of_hands = 1_0_0 ):
    return (generate_random_hand() for _ in range(number_of_hands ))
@pytest.mark.parametrize("""hand, expected""" , TEST_FLUSH )
def test_hand_is_flush(hand , expected ) -> None:
    assert PokerHand(hand )._is_flush() == expected
@pytest.mark.parametrize("""hand, expected""" , TEST_STRAIGHT )
def test_hand_is_straight(hand , expected ) -> None:
    assert PokerHand(hand )._is_straight() == expected
@pytest.mark.parametrize("""hand, expected, card_values""" , TEST_FIVE_HIGH_STRAIGHT )
def test_hand_is_five_high_straight(hand , expected , card_values ) -> None:
    player = PokerHand(hand )
    assert player._is_five_high_straight() == expected
    assert player._card_values == card_values
@pytest.mark.parametrize("""hand, expected""" , TEST_KIND )
def test_hand_is_same_kind(hand , expected ) -> None:
    assert PokerHand(hand )._is_same_kind() == expected
@pytest.mark.parametrize("""hand, expected""" , TEST_TYPES )
def test_hand_values(hand , expected ) -> None:
    assert PokerHand(hand )._hand_type == expected
@pytest.mark.parametrize("""hand, other, expected""" , TEST_COMPARE )
def test_compare_simple(hand , other , expected ) -> None:
    assert PokerHand(hand ).compare_with(PokerHand(other ) ) == expected
@pytest.mark.parametrize("""hand, other, expected""" , generate_random_hands() )
def test_compare_random(hand , other , expected ) -> None:
    assert PokerHand(hand ).compare_with(PokerHand(other ) ) == expected
def test_hand_sorted() -> None:
    poker_hands = [PokerHand(hand ) for hand in SORTED_HANDS]
    list_copy = poker_hands.copy()
    shuffle(list_copy )
    user_sorted = chain(sorted(list_copy ) )
    for index, hand in enumerate(user_sorted ):
        assert hand == poker_hands[index]
def test_custom_sort_five_high_straight() -> None:
    # Test that five high straights are compared correctly.
    pokerhands = [PokerHand("""2D AC 3H 4H 5S""" ), PokerHand("""2S 3H 4H 5S 6C""" )]
    pokerhands.sort(reverse=True )
    assert pokerhands[0].__str__() == "2S 3H 4H 5S 6C"
def test_multiple_calls_five_high_straight() -> None:
    # Multiple calls to five_high_straight function should still return True
    # and shouldn't mutate the list in every call other than the first.
    pokerhand = PokerHand("""2C 4S AS 3D 5C""" )
    expected = True
    expected_card_values = [5, 4, 3, 2, 1_4]
    for _ in range(1_0 ):
        assert pokerhand._is_five_high_straight() == expected
        assert pokerhand._card_values == expected_card_values
def test_euler_project() -> None:
    # Problem number 54 from Project Euler
    # Testing from poker_hands.txt file
    answer = 0
    current_path = os.path.abspath(os.path.dirname(__file__ ) )
    file_path = os.path.join(current_path , """poker_hands.txt""" )
    with open(file_path ) as file_hand:
        for line in file_hand:
            player_hand = line[:1_4].strip()
            opponent_hand = line[1_5:].strip()
            player , opponent = PokerHand(player_hand ), PokerHand(opponent_hand )
            output = player.compare_with(opponent )
if output == "Win":
answer += 1
assert answer == 3_7_6 | 697 | 1 |
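# The outcome-index trick in generate_random_hand, spelled out (pure Python;
# the two boolean comparisons sum to 0, 1 or 2 and index the outcome list):
for _play, _oppo, _want in [(0, 5, "Loss"), (3, 3, "Tie"), (7, 2, "Win")]:
    assert ["Loss", "Tie", "Win"][(_play >= _oppo) + (_play > _oppo)] == _want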
'''simple docstring'''
import gc
import unittest
from transformers import CTRLConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
CTRLForSequenceClassification,
CTRLLMHeadModel,
CTRLModel,
)
class CTRLModelTester :
    def __init__( self , parent , batch_size=14 , seq_length=7 , is_training=True , use_token_type_ids=True , use_input_mask=True , use_labels=True , use_mc_token_ids=True , vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.0_2 , num_labels=3 , num_choices=4 , scope=None , ):
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_token_type_ids = use_token_type_ids
        self.use_input_mask = use_input_mask
        self.use_labels = use_labels
        self.use_mc_token_ids = use_mc_token_ids
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.pad_token_id = self.vocab_size - 1
    def prepare_config_and_inputs( self ):
        '''simple docstring'''
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        mc_token_ids = None
        if self.use_mc_token_ids:
            mc_token_ids = ids_tensor([self.batch_size, self.num_choices] , self.seq_length )
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            choice_labels = ids_tensor([self.batch_size] , self.num_choices )
        config = self.get_config()
        head_mask = ids_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 )
        return (
            config,
            input_ids,
            input_mask,
            head_mask,
            token_type_ids,
            mc_token_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        )
    def get_config( self ):
'''simple docstring'''
return CTRLConfig(
vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , pad_token_id=self.pad_token_id , )
    def create_and_check_ctrl_model( self , config , input_ids , input_mask , head_mask , token_type_ids , *args ):
        '''simple docstring'''
        model = CTRLModel(config=config )
        model.to(torch_device )
        model.eval()
        model(input_ids , token_type_ids=token_type_ids , head_mask=head_mask )
        model(input_ids , token_type_ids=token_type_ids )
        result = model(input_ids )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
        self.parent.assertEqual(len(result.past_key_values ) , config.n_layer )
    def create_and_check_lm_head_model( self , config , input_ids , input_mask , head_mask , token_type_ids , *args ):
        '''simple docstring'''
        model = CTRLLMHeadModel(config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , token_type_ids=token_type_ids , labels=input_ids )
        self.parent.assertEqual(result.loss.shape , () )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    def prepare_config_and_inputs_for_common( self ):
'''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            head_mask,
            token_type_ids,
            mc_token_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """head_mask""": head_mask}
return config, inputs_dict
    def create_and_check_ctrl_for_sequence_classification( self , config , input_ids , head_mask , token_type_ids , *args ):
        '''simple docstring'''
        config.num_labels = self.num_labels
        model = CTRLForSequenceClassification(config )
        model.to(torch_device )
        model.eval()
        sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
        result = model(input_ids , token_type_ids=token_type_ids , labels=sequence_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
@require_torch
class CTRLModelTest(ModelTesterMixin , GenerationTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (CTRLModel, CTRLLMHeadModel, CTRLForSequenceClassification) if is_torch_available() else ()
    all_generative_model_classes = (CTRLLMHeadModel,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": CTRLModel,
            "text-classification": CTRLForSequenceClassification,
            "text-generation": CTRLLMHeadModel,
            "zero-shot": CTRLForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = True
    test_resize_embeddings = False
    test_head_masking = False
    def is_pipeline_test_to_skip( self , pipeline_test_casse_name , config_class , model_architecture , tokenizer_name , processor_name ):
'''simple docstring'''
if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
# Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
# `CTRLConfig` was never used in pipeline tests, either because of a missing checkpoint or because a tiny
# config could not be created.
return True
return False
    def setUp( self ):
        '''simple docstring'''
        self.model_tester = CTRLModelTester(self )
        self.config_tester = ConfigTester(self , config_class=CTRLConfig , n_embd=37 )
    def tearDown( self ):
'''simple docstring'''
super().tearDown()
# clean-up as much as possible GPU memory occupied by PyTorch
gc.collect()
torch.cuda.empty_cache()
    def test_config( self ):
'''simple docstring'''
self.config_tester.run_common_tests()
    def test_ctrl_model( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_ctrl_model(*config_and_inputs )
    def test_ctrl_lm_head_model( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_lm_head_model(*config_and_inputs )
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def __lowerCamelCase ( self ):
'''simple docstring'''
pass
@slow
    def test_model_from_pretrained( self ):
        '''simple docstring'''
        for model_name in CTRL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = CTRLModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
@unittest.skip("""The model doesn't support left padding""" ) # and it's not used enough to be worth fixing :)
def __lowerCamelCase ( self ):
'''simple docstring'''
pass
@require_torch
class CTRLModelLanguageGenerationTest(unittest.TestCase ):
    def tearDown( self ):
'''simple docstring'''
super().tearDown()
# clean-up as much as possible GPU memory occupied by PyTorch
gc.collect()
torch.cuda.empty_cache()
@slow
    def test_lm_generate_ctrl( self ):
        '''simple docstring'''
        model = CTRLLMHeadModel.from_pretrained("""ctrl""" )
        model.to(torch_device )
        input_ids = torch.tensor(
            [[1_1859, 0, 1611, 8]] , dtype=torch.long , device=torch_device )  # Legal the president is
        expected_output_ids = [
            1_1859,
            0,
            1611,
            8,
            5,
            150,
            2_6449,
            2,
            19,
            348,
            469,
            3,
            2595,
            48,
            2_0740,
            24_6533,
            24_6533,
            19,
            30,
            5,
        ]  # Legal the president is a good guy and I don't want to lose my job. \n \n I have a
        output_ids = model.generate(input_ids , do_sample=False )
        self.assertListEqual(output_ids[0].tolist() , expected_output_ids )
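# What `ids_tensor` stands in for (sketch; assumption: plain PyTorch):
# torch.randint(0, vocab_size, (batch_size, seq_length)) yields the same kind
# of random token-id tensor the tester feeds the model above, and
# do_sample=False in generate() makes the integration check deterministic.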
'''simple docstring'''
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {'configuration_focalnet': ['FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP', 'FocalNetConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_focalnet'] = [
'FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST',
'FocalNetForImageClassification',
'FocalNetForMaskedImageModeling',
'FocalNetBackbone',
'FocalNetModel',
'FocalNetPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_focalnet import (
FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
FocalNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
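# How the lazy pattern behaves from the caller's side (sketch; assumption:
# standard transformers-style _LazyModule semantics):
#   import transformers.models.focalnet as focalnet  # cheap: nothing heavy loaded yet
#   focalnet.FocalNetConfig                          # first attribute access triggers
#                                                    # the real submodule import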
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_layoutlmva import LayoutLMvaImageProcessor
__SCREAMING_SNAKE_CASE : Optional[int] = logging.get_logger(__name__)
class LayoutLMvaFeatureExtractor(LayoutLMvaImageProcessor ):
    def __init__( self , *args , **kwargs ):
        '''simple docstring'''
        warnings.warn(
            """The class LayoutLMv2FeatureExtractor is deprecated and will be removed in version 5 of Transformers."""
            """ Please use LayoutLMv2ImageProcessor instead.""" , FutureWarning , )
        super().__init__(*args , **kwargs )
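# The same deprecation-shim pattern in isolation (runnable sketch, stdlib only):
import warnings as _warnings

class _NewAPI:
    pass

class _OldAPI(_NewAPI):
    def __init__(self, *args, **kwargs):
        _warnings.warn("_OldAPI is deprecated; use _NewAPI instead.", FutureWarning)
        super().__init__(*args, **kwargs)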
'''simple docstring'''
from __future__ import annotations
import bisect
def bisect_left(sorted_collection , item , lo = 0 , hi = -1 ) -> int:
    if hi < 0:
        hi = len(sorted_collection )
    while lo < hi:
        mid = lo + (hi - lo) // 2
        if sorted_collection[mid] < item:
            lo = mid + 1
        else:
            hi = mid
    return lo
def bisect_right(sorted_collection , item , lo = 0 , hi = -1 ) -> int:
    if hi < 0:
        hi = len(sorted_collection )
    while lo < hi:
        mid = lo + (hi - lo) // 2
        if sorted_collection[mid] <= item:
            lo = mid + 1
        else:
            hi = mid
    return lo
def insort_left(sorted_collection , item , lo = 0 , hi = -1 ) -> None:
    sorted_collection.insert(bisect_left(sorted_collection , item , lo , hi ) , item )
def insort_right(sorted_collection , item , lo = 0 , hi = -1 ) -> None:
    sorted_collection.insert(bisect_right(sorted_collection , item , lo , hi ) , item )
def binary_search(sorted_collection , item ) -> int | None:
    left = 0
    right = len(sorted_collection ) - 1
    while left <= right:
        midpoint = left + (right - left) // 2
        current_item = sorted_collection[midpoint]
        if current_item == item:
            return midpoint
        elif item < current_item:
            right = midpoint - 1
        else:
            left = midpoint + 1
    return None
def binary_search_std_lib(sorted_collection , item ) -> int | None:
    index = bisect.bisect_left(sorted_collection , item )
    if index != len(sorted_collection ) and sorted_collection[index] == item:
        return index
    return None
def binary_search_by_recursion(sorted_collection , item , left , right ) -> int | None:
    if right < left:
        return None
    midpoint = left + (right - left) // 2
    if sorted_collection[midpoint] == item:
        return midpoint
    elif sorted_collection[midpoint] > item:
        return binary_search_by_recursion(sorted_collection , item , left , midpoint - 1 )
    else:
        return binary_search_by_recursion(sorted_collection , item , midpoint + 1 , right )
if __name__ == "__main__":
    user_input = input('Enter numbers separated by comma:\n').strip()
    collection = sorted(int(item) for item in user_input.split(','))
    target = int(input('Enter a single number to be found in the list:\n'))
    result = binary_search(collection, target)
if result is None:
print(f'''{target} was not found in {collection}.''')
else:
print(f'''{target} was found at position {result} in {collection}.''') | 697 | 1 |
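# Quick self-checks against the standard library (runnable as-is; stdlib
# `bisect` is already imported above):
_data = [1, 3, 3, 5, 9]
assert bisect_left(_data, 3) == bisect.bisect_left(_data, 3) == 1   # first index >= item
assert bisect_right(_data, 3) == bisect.bisect_right(_data, 3) == 3  # first index > item
assert binary_search(_data, 5) == 3 and binary_search(_data, 4) is None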
'''simple docstring'''
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class GitProcessor(ProcessorMixin ):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "AutoImageProcessor"
    tokenizer_class = "AutoTokenizer"
    def __init__( self , image_processor , tokenizer ):
        '''simple docstring'''
        super().__init__(image_processor , tokenizer )
        self.current_processor = self.image_processor
    def __call__( self , text=None , images=None , return_tensors=None , **kwargs ):
        '''simple docstring'''
        if text is None and images is None:
            raise ValueError("""You have to specify either text or images. Both cannot be none.""" )
        if text is not None:
            encoding = self.tokenizer(text , return_tensors=return_tensors , **kwargs )
        if images is not None:
            image_features = self.image_processor(images , return_tensors=return_tensors , **kwargs )
        if text is not None and images is not None:
            encoding["""pixel_values"""] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features ) , tensor_type=return_tensors )
    def batch_decode( self , *args , **kwargs ):
        '''simple docstring'''
        return self.tokenizer.batch_decode(*args , **kwargs )
    def decode( self , *args , **kwargs ):
        '''simple docstring'''
        return self.tokenizer.decode(*args , **kwargs )
    @property
    def model_input_names( self ):
        '''simple docstring'''
        return ["input_ids", "attention_mask", "pixel_values"]
'''simple docstring'''
from itertools import product
def total_frequency_distribution(sides_number , dice_number ) -> list[int]:
    max_face_number = sides_number
    max_total = max_face_number * dice_number
    totals_frequencies = [0] * (max_total + 1)
    min_face_number = 1
    face_numbers = range(min_face_number , max_face_number + 1 )
    for dice_numbers in product(face_numbers , repeat=dice_number ):
        total = sum(dice_numbers )
        totals_frequencies[total] += 1
    return totals_frequencies
def solution() -> float:
    peter_totals_frequencies = total_frequency_distribution(
        sides_number=4 , dice_number=9 )
    colin_totals_frequencies = total_frequency_distribution(
        sides_number=6 , dice_number=6 )
    peter_wins_count = 0
    min_peter_total = 9
    max_peter_total = 4 * 9
    min_colin_total = 6
    for peter_total in range(min_peter_total , max_peter_total + 1 ):
        peter_wins_count += peter_totals_frequencies[peter_total] * sum(
            colin_totals_frequencies[min_colin_total:peter_total] )
    total_games_number = (4**9) * (6**6)
    peter_win_probability = peter_wins_count / total_games_number
    rounded_peter_win_probability = round(peter_win_probability , ndigits=7 )
    return rounded_peter_win_probability
if __name__ == "__main__":
print(f'''{solution() = }''') | 697 | 1 |
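# Sanity check of the frequency helper on a tiny case (runnable as-is): two
# 2-sided dice give totals 2, 3, 4 with frequencies 1, 2, 1.
assert total_frequency_distribution(sides_number=2 , dice_number=2 ) == [0, 0, 1, 2, 1]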
'''simple docstring'''
def naive_pattern_search(s , pattern ) -> list:
    pat_len = len(pattern )
    position = []
    for i in range(len(s ) - pat_len + 1 ):
        match_found = True
        for j in range(pat_len ):
            if s[i + j] != pattern[j]:
                match_found = False
                break
        if match_found:
            position.append(i )
    return position
if __name__ == "__main__":
assert naive_pattern_search('ABCDEFG', 'DE') == [3]
print(naive_pattern_search('ABAAABCDBBABCDDEBCABC', 'ABC')) | 697 |
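# Worst-case cost note: the scan above is O(len(s) * len(pattern)); the input
# below forces the inner loop deep on every alignment (runnable as-is):
assert naive_pattern_search('a' * 10 + 'b', 'a' * 5 + 'b') == [5]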
'''simple docstring'''
import json
import multiprocessing as mp
import re
from collections import defaultdict
from functools import partial
from typing import Dict, List, Optional, Set, Tuple, Type
from datasets import Dataset
from datasketch import MinHash, MinHashLSH
from dpu_utils.utils.iterators import ThreadedIterator
from tqdm import tqdm
NON_ALPHA = re.compile('[^A-Za-z_0-9]')
# parameters used in DuplicationIndex
MIN_NUM_TOKENS = 10
NUM_PERM = 256
def get_min_hash(tokens ) -> Optional[MinHash]:
    if len(tokens ) < MIN_NUM_TOKENS:
        return None
    min_hash = MinHash(num_perm=NUM_PERM )
    for token in set(tokens ):
        min_hash.update(token.encode() )
    return min_hash
def get_tokens(code ) -> Set[str]:
    return {t for t in NON_ALPHA.split(code ) if len(t.strip() ) > 0}
class DuplicationIndex :
    def __init__( self , * , duplication_jaccard_threshold = 0.8_5 ):
        '''simple docstring'''
        self._duplication_jaccard_threshold = duplication_jaccard_threshold
        self._num_perm = NUM_PERM
        self._index = MinHashLSH(threshold=self._duplication_jaccard_threshold , num_perm=self._num_perm )
        self._duplicate_clusters = defaultdict(set )
    def add( self , code_key , min_hash ):
        '''simple docstring'''
        close_duplicates = self._index.query(min_hash )
        if code_key in self._index.keys:
            print(f"""Duplicate key {code_key}""" )
            return
        self._index.insert(code_key , min_hash )
        if len(close_duplicates ) > 0:
            for base_duplicate in close_duplicates:
                if base_duplicate in self._duplicate_clusters:
                    self._duplicate_clusters[base_duplicate].add(code_key )
                    break
            else:
                self._duplicate_clusters[close_duplicates[0]].add(code_key )
    def get_duplicate_clusters( self ):
        '''simple docstring'''
        duplicate_clusters = []
        for base, duplicates in self._duplicate_clusters.items():
            cluster = [base] + list(duplicates )
            # reformat the cluster to be a list of dict
            cluster = [{"""base_index""": el[0], """repo_name""": el[1], """path""": el[2]} for el in cluster]
            duplicate_clusters.append(cluster )
        return duplicate_clusters
    def save( self , filepath ):
        '''simple docstring'''
        duplicate_clusters = self.get_duplicate_clusters()
        with open(filepath , """w""" ) as f:
            json.dump(duplicate_clusters , f )
def _compute_min_hash(element ):
    index , data = element
    min_hash = get_min_hash([t for t in NON_ALPHA.split(data["""content"""] ) if len(t.strip() ) > 0] )
    if min_hash is not None:
        return (index, data["repo_name"], data["path"]), min_hash
def minhash_iter(dataset_iterator ):
    with mp.Pool() as pool:
        for data in pool.imap_unordered(
            _compute_min_hash , ThreadedIterator(dataset_iterator , max_queue_size=1_0_0_0_0 ) , chunksize=1_0_0 , ):
            if data is not None:
                yield data
def make_duplicate_clusters(dataset_iterator , jaccard_threshold ):
    di = DuplicationIndex(duplication_jaccard_threshold=jaccard_threshold )
    for filename, min_hash in tqdm(ThreadedIterator(minhash_iter(enumerate(dataset_iterator ) ) , max_queue_size=1_0_0 ) ):
        di.add(filename , min_hash )
    # Returns a List[Cluster] where Cluster is List[str] with the filenames.
    return di.get_duplicate_clusters()
def jaccard_similarity(code1 , code2 ) -> float:
    tokens1 = get_tokens(code1 )
    tokens2 = get_tokens(code2 )
    return len(tokens1 & tokens2 ) / len(tokens1 | tokens2 )
_shared_dataset = None
def _find_cluster_extremes_shared(cluster , jaccard_threshold ):
    extremes = []
    for element1 in cluster:
        code1 = _shared_dataset[element1["""base_index"""]]["""content"""]
        for element2 in extremes:
            code2 = _shared_dataset[element2["""base_index"""]]["""content"""]
            if jaccard_similarity(code1 , code2 ) >= jaccard_threshold:
                element2["copies"] += 1
                break
        else:
            element1["copies"] = 1
            extremes.append(element1 )
    return extremes
def find_extremes(cluster_list , dataset , jaccard_threshold ):
    global _shared_dataset
    _shared_dataset = dataset
    extremes_list = []
    f = partial(_find_cluster_extremes_shared , jaccard_threshold=jaccard_threshold )
    with mp.Pool() as pool:
        for extremes in tqdm(
            pool.imap_unordered(
                f , cluster_list , ) , total=len(cluster_list ) , ):
            extremes_list.append(extremes )
    return extremes_list
def deduplicate_dataset(dataset , jaccard_threshold = 0.8_5 ) -> Tuple[Type[Dataset], List[List[Dict]]]:
    duplicate_clusters = make_duplicate_clusters(dataset , jaccard_threshold )
    duplicate_indices = {x["""base_index"""] for cluster in duplicate_clusters for x in cluster}
    extreme_dict = {}
    extremes_clusters = find_extremes(duplicate_clusters , dataset , jaccard_threshold )
    for extremes in extremes_clusters:
        for element in extremes:
            extreme_dict[element["""base_index"""]] = element
    remove_indices = duplicate_indices - set(extreme_dict.keys() )
    ds_filter = dataset.filter(lambda x , idx : idx not in remove_indices , with_indices=True )
    # update duplicate_clusters
    for cluster in duplicate_clusters:
        for element in cluster:
            element["""is_extreme"""] = element["""base_index"""] in extreme_dict
            if element["is_extreme"]:
                element["""copies"""] = extreme_dict[element["""base_index"""]]["""copies"""]
    print(F"""Original dataset size: {len(dataset )}""" )
    print(F"""Number of duplicate clusters: {len(duplicate_clusters )}""" )
    print(F"""Files in duplicate cluster: {len(duplicate_indices )}""" )
    print(F"""Unique files in duplicate cluster: {len(extreme_dict )}""" )
    print(F"""Filtered dataset size: {len(ds_filter )}""" )
    return ds_filter, duplicate_clusters
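# End-to-end usage sketch (commented out; assumes `datasets` and `datasketch`
# are installed and the Dataset has "content", "repo_name" and "path" columns;
# note that very short contents fall under MIN_NUM_TOKENS and are ignored):
# from datasets import Dataset
# ds = Dataset.from_dict({
#     "content": ["def f():\n    return 1"] * 2 + ["print('x')"],
#     "repo_name": ["r1", "r2", "r3"],
#     "path": ["a.py", "b.py", "c.py"],
# })
# ds_dedup, clusters = deduplicate_dataset(ds, jaccard_threshold=0.85)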
'''simple docstring'''
import numpy as np
from PIL import Image
def maxpooling(arr , size , stride ) -> np.ndarray:
    arr = np.array(arr )
    if arr.shape[0] != arr.shape[1]:
        raise ValueError("""The input array is not a square matrix""" )
    i = 0
    j = 0
    mat_i = 0
    mat_j = 0
    # compute the shape of the output matrix
    maxpool_shape = (arr.shape[0] - size) // stride + 1
    # initialize the output matrix with zeros of shape maxpool_shape
    updated_arr = np.zeros((maxpool_shape, maxpool_shape) )
    while i < arr.shape[0]:
        if i + size > arr.shape[0]:
            # if the end of the matrix is reached, break
            break
        while j < arr.shape[1]:
            # if the end of the matrix is reached, break
            if j + size > arr.shape[1]:
                break
            # compute the maximum of the pooling matrix
            updated_arr[mat_i][mat_j] = np.max(arr[i : i + size, j : j + size] )
            # shift the pooling matrix by stride of column pixels
            j += stride
            mat_j += 1
        # shift the pooling matrix by stride of row pixels
        i += stride
        mat_i += 1
        # reset the column index to 0
        j = 0
        mat_j = 0
    return updated_arr
def avgpooling(arr , size , stride ) -> np.ndarray:
    arr = np.array(arr )
    if arr.shape[0] != arr.shape[1]:
        raise ValueError("""The input array is not a square matrix""" )
    i = 0
    j = 0
    mat_i = 0
    mat_j = 0
    # compute the shape of the output matrix
    avgpool_shape = (arr.shape[0] - size) // stride + 1
    # initialize the output matrix with zeros of shape avgpool_shape
    updated_arr = np.zeros((avgpool_shape, avgpool_shape) )
    while i < arr.shape[0]:
        # if the end of the matrix is reached, break
        if i + size > arr.shape[0]:
            break
        while j < arr.shape[1]:
            # if the end of the matrix is reached, break
            if j + size > arr.shape[1]:
                break
            # compute the average of the pooling matrix
            updated_arr[mat_i][mat_j] = int(np.average(arr[i : i + size, j : j + size] ) )
            # shift the pooling matrix by stride of column pixels
            j += stride
            mat_j += 1
        # shift the pooling matrix by stride of row pixels
        i += stride
        mat_i += 1
        # reset the column index to 0
        j = 0
        mat_j = 0
    return updated_arr
# Main Function
if __name__ == "__main__":
from doctest import testmod
testmod(name='avgpooling', verbose=True)
# Loading the image
__SCREAMING_SNAKE_CASE : str = Image.open('path_to_image')
# Converting the image to numpy array and maxpooling, displaying the result
# Ensure that the image is a square matrix
Image.fromarray(maxpooling(np.array(image), size=3, stride=2)).show()
# Converting the image to numpy array and averagepooling, displaying the result
# Ensure that the image is a square matrix
Image.fromarray(avgpooling(np.array(image), size=3, stride=2)).show() | 697 | 1 |
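# Tiny numeric check of both pools on a 4x4 ramp (runnable without any image):
_demo = [[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12], [13, 14, 15, 16]]
assert maxpooling(_demo, size=2, stride=2).tolist() == [[6, 8], [14, 16]]
assert avgpooling(_demo, size=2, stride=2).tolist() == [[3, 5], [11, 13]]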
'''simple docstring'''
from numpy import exp, pi, sqrt
def gaussian(x , mu = 0.0 , sigma = 1.0 ) -> float:
    return 1 / sqrt(2 * pi * sigma**2 ) * exp(-((x - mu) ** 2) / (2 * sigma**2) )
if __name__ == "__main__":
import doctest
doctest.testmod() | 697 |
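# Peak-value check (runnable as-is): at x == mu the standard normal density is
# 1 / sqrt(2 * pi) ~= 0.3989.
assert abs(gaussian(0 ) - 0.3989422804014327 ) < 1E-12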
'''simple docstring'''
import qiskit
def single_qubit_measure(qubits , classical_bits ) -> qiskit.result.counts.Counts:
    simulator = qiskit.Aer.get_backend("""aer_simulator""" )
    # Create a Quantum Circuit acting on the q register
    circuit = qiskit.QuantumCircuit(qubits , classical_bits )
    # Map the quantum measurement to the classical bits
    circuit.measure([0] , [0] )
    # Execute the circuit on the simulator
    job = qiskit.execute(circuit , simulator , shots=1_0_0_0 )
    # Return the histogram data of the results of the experiment.
    return job.result().get_counts(circuit )
if __name__ == "__main__":
print(f'''Total count for various states are: {single_qubit_measure(1, 1)}''') | 697 | 1 |
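# Expected result shape (assumption: qiskit with the Aer simulator installed;
# the circuit above applies no gates, so all 1000 shots of measuring |0> land
# on the '0' outcome):
# single_qubit_measure(1, 1)  ->  {'0': 1000}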
'''simple docstring'''
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class SCREAMING_SNAKE_CASE__ ( __UpperCamelCase ):
    @staticmethod
    @abstractmethod
    def register_subcommand( __UpperCamelCase ):
        '''simple docstring'''
        raise NotImplementedError()
    @abstractmethod
    def run( self ):
        '''simple docstring'''
        raise NotImplementedError()
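# A minimal concrete subcommand, sketched to show how the abstract interface
# above is meant to be used (the class below is illustrative, not part of the
# original file):
class EnvCommand(SCREAMING_SNAKE_CASE__):
    @staticmethod
    def register_subcommand(parser):
        # `parser` is expected to be the sub-parsers action of a root parser,
        # i.e. the result of ArgumentParser().add_subparsers().
        env_parser = parser.add_parser("env")
        env_parser.set_defaults(factory=EnvCommand)
    def run(self):
        print("environment info")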
'''simple docstring'''
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
WavaVecaConformerConfig,
WavaVecaConformerForCTC,
WavaVecaConformerForPreTraining,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
__SCREAMING_SNAKE_CASE : str = logging.get_logger(__name__)
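# Maps fairseq parameter names (keys) to their Hugging Face
# Wav2Vec2Conformer counterparts (values); '*' is a placeholder that is
# filled in with the encoder layer index during conversion.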
__SCREAMING_SNAKE_CASE : Any = {
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.linear_k': 'encoder.layers.*.self_attn.linear_k',
'self_attn.linear_v': 'encoder.layers.*.self_attn.linear_v',
'self_attn.linear_q': 'encoder.layers.*.self_attn.linear_q',
'self_attn.pos_bias_u': 'encoder.layers.*.self_attn.pos_bias_u',
'self_attn.pos_bias_v': 'encoder.layers.*.self_attn.pos_bias_v',
'self_attn.linear_out': 'encoder.layers.*.self_attn.linear_out',
'self_attn.linear_pos': 'encoder.layers.*.self_attn.linear_pos',
'self_attn.rotary_emb': 'encoder.embed_positions',
'self_attn_layer_norm': 'encoder.layers.*.self_attn_layer_norm',
'conv_module.pointwise_conv1': 'encoder.layers.*.conv_module.pointwise_conv1',
'conv_module.pointwise_conv2': 'encoder.layers.*.conv_module.pointwise_conv2',
'conv_module.depthwise_conv': 'encoder.layers.*.conv_module.depthwise_conv',
'conv_module.batch_norm': 'encoder.layers.*.conv_module.batch_norm',
'conv_module.layer_norm': 'encoder.layers.*.conv_module.layer_norm',
'ffn1.w_1': 'encoder.layers.*.ffn1.intermediate_dense',
'ffn1.w_2': 'encoder.layers.*.ffn1.output_dense',
'ffn1.layer_norm': 'encoder.layers.*.ffn1_layer_norm',
'ffn2.w_1': 'encoder.layers.*.ffn2.intermediate_dense',
'ffn2.w_2': 'encoder.layers.*.ffn2.output_dense',
'ffn2.layer_norm': 'encoder.layers.*.ffn2_layer_norm',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'quantizer.weight_proj': 'quantizer.weight_proj',
'quantizer.vars': 'quantizer.codevectors',
'project_q': 'project_q',
'final_proj': 'project_hid',
'w2v_encoder.proj': 'lm_head',
'mask_emb': 'masked_spec_embed',
}
__SCREAMING_SNAKE_CASE : Optional[Any] = [
'lm_head',
'quantizer.weight_proj',
'quantizer.codevectors',
'project_q',
'project_hid',
]
def set_recursively( hf_pointer , key , value , full_name , weight_type ):
    for attribute in key.split(""".""" ):
        hf_pointer = getattr(hf_pointer , attribute )
    if weight_type is not None:
        hf_shape = getattr(hf_pointer , weight_type ).shape
    else:
        hf_shape = hf_pointer.shape
    if hf_shape != value.shape:
        raise ValueError(
            F"""Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be"""
            F""" {value.shape} for {full_name}""" )
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "running_mean":
        hf_pointer.running_mean.data = value
    elif weight_type == "running_var":
        hf_pointer.running_var.data = value
    elif weight_type == "num_batches_tracked":
        hf_pointer.num_batches_tracked.data = value
    elif weight_type == "inv_freq":
        hf_pointer.inv_freq.data = value
    else:
        hf_pointer.data = value
    logger.info(F"""{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.""" )
def recursively_load_weights( fairseq_model , hf_model , is_headless ):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.wavaveca_conformer.feature_extractor
    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name , value , feature_extractor , unused_weights , hf_model.config.feat_extract_norm == """group""" , )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = """wav2vec2_conformer.""" + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if key in name or key.split("""w2v_model.""" )[-1] == name.split(""".""" )[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key )[0].split(""".""" )[-2]
                        mapped_key = mapped_key.replace("""*""" , layer_index )
                    if "pos_bias_u" in name:
                        weight_type = None
                    elif "pos_bias_v" in name:
                        weight_type = None
                    elif "weight_g" in name:
                        weight_type = """weight_g"""
                    elif "weight_v" in name:
                        weight_type = """weight_v"""
                    elif "bias" in name:
                        weight_type = """bias"""
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = """weight"""
                    elif "running_mean" in name:
                        weight_type = """running_mean"""
                    elif "inv_freq" in name:
                        weight_type = """inv_freq"""
                    elif "running_var" in name:
                        weight_type = """running_var"""
                    elif "num_batches_tracked" in name:
                        weight_type = """num_batches_tracked"""
                    else:
                        weight_type = None
                    set_recursively(hf_model , mapped_key , value , name , weight_type )
                continue
        if not is_used:
            unused_weights.append(name )
    logger.warning(F"""Unused weights: {unused_weights}""" )
def load_conv_layer( full_name , value , feature_extractor , unused_weights , use_group_norm ):
    name = full_name.split("""conv_layers.""" )[-1]
    items = name.split(""".""" )
    layer_id = int(items[0] )
    type_id = int(items[1] )
    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    F"""{full_name} has size {value.shape}, but"""
                    F""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    F"""{full_name} has size {value.shape}, but"""
                    F""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    F"""{full_name} has size {value.shape}, but"""
                    F""" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.""" )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(F"""Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.""" )
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    F"""{full_name} has size {value.shape}, but"""
                    F""" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.""" )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
    else:
        unused_weights.append(full_name )
@torch.no_grad()
def convert_wavaveca_conformer_checkpoint( checkpoint_path , pytorch_dump_folder_path , config_path=None , dict_path=None , is_finetuned=True ):
    if config_path is not None:
        config = WavaVecaConformerConfig.from_pretrained(config_path , hidden_act="""swish""" )
    else:
        config = WavaVecaConformerConfig()
    if "rope" in checkpoint_path:
        config.position_embeddings_type = """rotary"""
    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path )
            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols )
            vocab_path = os.path.join(pytorch_dump_folder_path , """vocab.json""" )
            if not os.path.isdir(pytorch_dump_folder_path ):
                logger.error("""--pytorch_dump_folder_path ({}) should be a directory""".format(pytorch_dump_folder_path ) )
                return
            os.makedirs(pytorch_dump_folder_path , exist_ok=True )
            vocab_dict = target_dict.indices
            # fairseq has the <pad> and <s> switched
            vocab_dict["""<pad>"""] = 0
            vocab_dict["""<s>"""] = 1
            with open(vocab_path , """w""" , encoding="""utf-8""" ) as vocab_handle:
                json.dump(vocab_dict , vocab_handle )
            tokenizer = WavaVecaCTCTokenizer(
                vocab_path , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token="""|""" , do_lower_case=False , )
            return_attention_mask = True if config.feat_extract_norm == """layer""" else False
            feature_extractor = WavaVecaFeatureExtractor(
                feature_size=1 , sampling_rate=1_6_0_0_0 , padding_value=0 , do_normalize=True , return_attention_mask=return_attention_mask , )
            processor = WavaVecaProcessor(feature_extractor=feature_extractor , tokenizer=tokenizer )
            processor.save_pretrained(pytorch_dump_folder_path )
        hf_wavavec = WavaVecaConformerForCTC(config )
    else:
        hf_wavavec = WavaVecaConformerForPreTraining(config )
    if is_finetuned:
        model , _ , _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path] , arg_overrides={"""data""": """/""".join(dict_path.split("""/""" )[:-1] )} )
    else:
        task_arg = argparse.Namespace(task="""audio_pretraining""" )
        task = fairseq.tasks.setup_task(task_arg )
        model , _ , _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] , task=task )
    model = model[0].eval()
    recursively_load_weights(model , hf_wavavec , not is_finetuned )
    hf_wavavec.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE : Dict = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--not_finetuned', action='store_true', help='Whether the model to convert is a fine-tuned model or not'
)
__SCREAMING_SNAKE_CASE : int = parser.parse_args()
convert_wavaveca_conformer_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
) | 697 | 1 |
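# Hypothetical invocation (script name and paths are placeholders):
# python convert_wav2vec2_conformer.py \
#     --checkpoint_path /path/to/fairseq_ckpt.pt \
#     --pytorch_dump_folder_path ./wav2vec2-conformer-hf \
#     --not_finetuned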
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
__SCREAMING_SNAKE_CASE : Dict = logging.get_logger(__name__)
if is_vision_available():
import PIL
class SCREAMING_SNAKE_CASE__ ( BaseImageProcessor ):
lowercase__ = ["pixel_values"]
    def __init__( self , do_resize = True , size = None , resample = PILImageResampling.BICUBIC , do_center_crop = True , crop_size = None , do_rescale = True , rescale_factor = 1 / 255 , do_normalize = True , image_mean = None , image_std = None , do_convert_rgb = True , **kwargs , ):
        '''simple docstring'''
        super().__init__(**kwargs )
        size = size if size is not None else {"""shortest_edge""": 224}
        size = get_size_dict(size , default_to_square=False )
        crop_size = crop_size if crop_size is not None else {"""height""": 224, """width""": 224}
        crop_size = get_size_dict(crop_size , default_to_square=True , param_name="""crop_size""" )
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
        self.do_convert_rgb = do_convert_rgb
    def resize( self , image , size , resample = PILImageResampling.BICUBIC , data_format = None , **kwargs , ):
        '''simple docstring'''
        size = get_size_dict(size , default_to_square=False )
        if "shortest_edge" not in size:
            raise ValueError(f"""The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}""" )
        output_size = get_resize_output_image_size(image , size=size["""shortest_edge"""] , default_to_square=False )
        return resize(image , size=output_size , resample=resample , data_format=data_format , **kwargs )
    def center_crop( self , image , size , data_format = None , **kwargs , ):
        '''simple docstring'''
        size = get_size_dict(size )
        if "height" not in size or "width" not in size:
            raise ValueError(f"""The `size` parameter must contain the keys (height, width). Got {size.keys()}""" )
        return center_crop(image , size=(size["""height"""], size["""width"""]) , data_format=data_format , **kwargs )
    def rescale( self , image , scale , data_format = None , **kwargs , ):
        '''simple docstring'''
        return rescale(image , scale=scale , data_format=data_format , **kwargs )
    def normalize( self , image , mean , std , data_format = None , **kwargs , ):
        '''simple docstring'''
        return normalize(image , mean=mean , std=std , data_format=data_format , **kwargs )
    def preprocess( self , images , do_resize = None , size = None , resample = None , do_center_crop = None , crop_size = None , do_rescale = None , rescale_factor = None , do_normalize = None , image_mean = None , image_std = None , do_convert_rgb = None , return_tensors = None , data_format = ChannelDimension.FIRST , **kwargs , ):
        '''simple docstring'''
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size , param_name="""size""" , default_to_square=False )
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size , param_name="""crop_size""" , default_to_square=True )
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
        images = make_list_of_images(images )
        if not valid_images(images ):
            raise ValueError(
                """Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
                """torch.Tensor, tf.Tensor or jax.ndarray.""" )
        if do_resize and size is None:
            raise ValueError("""Size must be specified if do_resize is True.""" )
        if do_center_crop and crop_size is None:
            raise ValueError("""Crop size must be specified if do_center_crop is True.""" )
        if do_rescale and rescale_factor is None:
            raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("""Image mean and std must be specified if do_normalize is True.""" )
        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image ) for image in images]
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image ) for image in images]
        if do_resize:
            images = [self.resize(image=image , size=size , resample=resample ) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image , size=crop_size ) for image in images]
        if do_rescale:
            images = [self.rescale(image=image , scale=rescale_factor ) for image in images]
        if do_normalize:
            images = [self.normalize(image=image , mean=image_mean , std=image_std ) for image in images]
        images = [to_channel_dimension_format(image , data_format ) for image in images]
        data = {"""pixel_values""": images}
        return BatchFeature(data=data , tensor_type=return_tensors )
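# Sketch of typical use (values are illustrative):
# ip = SCREAMING_SNAKE_CASE__()  # defaults: shortest edge 224, 224x224 crop
# out = ip(images=[PIL.Image.new("RGB", (640, 480))], return_tensors="np")
# out["pixel_values"].shape  ->  (1, 3, 224, 224)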
'''simple docstring'''
import warnings
from functools import wraps
from typing import Callable
def _snake_case ( fn ) -> Callable:
    @wraps(fn )
    def _inner_fn(*args , **kwargs ):
        warnings.warn(
            (F"""'{fn.__name__}' is experimental and might be subject to breaking changes in the future.""") , UserWarning , )
        return fn(*args , **kwargs )
    return _inner_fn
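# Usage sketch (the decorated function below is illustrative):
#
# @_snake_case
# def risky_feature(x):
#     return x * 2
#
# risky_feature(3)  # returns 6, after emitting an "is experimental" UserWarning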
'''simple docstring'''
from __future__ import annotations
import math
import random
from collections.abc import Collection
from typing import overload
class Vector :
    def __init__( self , components = None ):
        '''simple docstring'''
        if components is None:
            components = []
        self.__components = list(components )
def __len__( self ):
'''simple docstring'''
return len(self.__components )
def __str__( self ):
'''simple docstring'''
return "(" + ",".join(map(__UpperCamelCase , self.__components ) ) + ")"
    def __add__( self , other ):
        '''simple docstring'''
        size = len(self )
        if size == len(other ):
            result = [self.__components[i] + other.component(i ) for i in range(size )]
            return Vector(result )
else:
raise Exception("""must have the same size""" )
    def __sub__( self , other ):
        '''simple docstring'''
        size = len(self )
        if size == len(other ):
            result = [self.__components[i] - other.component(i ) for i in range(size )]
            return Vector(result )
else: # error case
raise Exception("""must have the same size""" )
@overload
def __mul__( self , __UpperCamelCase ):
'''simple docstring'''
...
@overload
def __mul__( self , __UpperCamelCase ):
'''simple docstring'''
...
    def __mul__( self , other ):
        '''simple docstring'''
        if isinstance(other , (float, int) ):
            ans = [c * other for c in self.__components]
            return Vector(ans )
        elif isinstance(other , Vector ) and len(self ) == len(other ):
            size = len(self )
            summands = [self.__components[i] * other.component(i ) for i in range(size )]
            return sum(summands )
else: # error case
raise Exception("""invalid operand!""" )
    def copy( self ):
'''simple docstring'''
return Vector(self.__components )
    def component( self , i ):
        '''simple docstring'''
        if isinstance(i , int ) and -len(self.__components ) <= i < len(self.__components ):
            return self.__components[i]
        else:
            raise Exception("""index out of range""" )
    def change_component( self , pos , value ):
        '''simple docstring'''
        assert -len(self.__components ) <= pos < len(self.__components )
        self.__components[pos] = value
    def euclidean_length( self ):
        '''simple docstring'''
        if len(self.__components ) == 0:
            raise Exception("""Vector is empty""" )
        squares = [c**2 for c in self.__components]
        return math.sqrt(sum(squares ) )
    def angle( self , other , deg = False ):
        '''simple docstring'''
        num = self * other
        den = self.euclidean_length() * other.euclidean_length()
if deg:
return math.degrees(math.acos(num / den ) )
else:
return math.acos(num / den )
def zero_vector( dimension ) -> Vector:
    assert isinstance(dimension , int )
    return Vector([0] * dimension )
def unit_basis_vector( dimension , pos ) -> Vector:
    assert isinstance(dimension , int ) and (isinstance(pos , int ))
    ans = [0] * dimension
    ans[pos] = 1
    return Vector(ans )
def axpy( scalar , x , y ) -> Vector:
    assert (
        isinstance(x , Vector )
        and isinstance(y , Vector )
        and (isinstance(scalar , (int, float) ))
    )
    return x * scalar + y
def _snake_case ( lowercase , lowercase , lowercase ) -> Vector:
random.seed(lowercase )
__a : Union[str, Any] = [random.randint(lowercase , lowercase ) for _ in range(lowercase )]
return Vector(lowercase )
class Matrix :
    def __init__( self , matrix , w , h ):
        '''simple docstring'''
        self.__matrix = matrix
        self.__width = w
        self.__height = h
def __str__( self ):
'''simple docstring'''
__a : Dict = """"""
for i in range(self.__height ):
ans += "|"
for j in range(self.__width ):
if j < self.__width - 1:
ans += str(self.__matrix[i][j] ) + ","
else:
ans += str(self.__matrix[i][j] ) + "|\n"
return ans
    def __add__( self , other ):
        '''simple docstring'''
        if self.__width == other.width() and self.__height == other.height():
            matrix = []
            for i in range(self.__height ):
                row = [
                    self.__matrix[i][j] + other.component(i , j )
                    for j in range(self.__width )
                ]
                matrix.append(row )
            return Matrix(matrix , self.__width , self.__height )
        else:
            raise Exception("""matrices must have the same dimension!""" )
    def __sub__( self , other ):
        '''simple docstring'''
        if self.__width == other.width() and self.__height == other.height():
            matrix = []
            for i in range(self.__height ):
                row = [
                    self.__matrix[i][j] - other.component(i , j )
                    for j in range(self.__width )
                ]
                matrix.append(row )
            return Matrix(matrix , self.__width , self.__height )
        else:
            raise Exception("""matrices must have the same dimension!""" )
@overload
def __mul__( self , __UpperCamelCase ):
'''simple docstring'''
...
@overload
def __mul__( self , __UpperCamelCase ):
'''simple docstring'''
...
    def __mul__( self , other ):
        '''simple docstring'''
        if isinstance(other , Vector ): # matrix-vector
            if len(other ) == self.__width:
                ans = zero_vector(self.__height )
                for i in range(self.__height ):
                    summands = [
                        self.__matrix[i][j] * other.component(j )
                        for j in range(self.__width )
                    ]
                    ans.change_component(i , sum(summands ) )
                return ans
            else:
                raise Exception(
                    """vector must have the same size as the """
                    """number of columns of the matrix!""" )
        elif isinstance(other , (int, float) ): # matrix-scalar
            matrix = [
                [self.__matrix[i][j] * other for j in range(self.__width )]
                for i in range(self.__height )
            ]
            return Matrix(matrix , self.__width , self.__height )
return None
    def height( self ):
        '''simple docstring'''
        return self.__height
    def width( self ):
        '''simple docstring'''
        return self.__width
    def component( self , x , y ):
'''simple docstring'''
if 0 <= x < self.__height and 0 <= y < self.__width:
return self.__matrix[x][y]
else:
raise Exception("""change_component: indices out of bounds""" )
    def change_component( self , x , y , value ):
        '''simple docstring'''
        if 0 <= x < self.__height and 0 <= y < self.__width:
            self.__matrix[x][y] = value
else:
raise Exception("""change_component: indices out of bounds""" )
    def minor( self , x , y ):
        '''simple docstring'''
        if self.__height != self.__width:
            raise Exception("""Matrix is not square""" )
        minor = self.__matrix[:x] + self.__matrix[x + 1 :]
        for i in range(len(minor ) ):
            minor[i] = minor[i][:y] + minor[i][y + 1 :]
        return Matrix(minor , self.__width - 1 , self.__height - 1 ).determinant()
    def cofactor( self , x , y ):
        '''simple docstring'''
        if self.__height != self.__width:
            raise Exception("""Matrix is not square""" )
        if 0 <= x < self.__height and 0 <= y < self.__width:
            return (-1) ** (x + y) * self.minor(x , y )
else:
raise Exception("""Indices out of bounds""" )
    def determinant( self ):
'''simple docstring'''
if self.__height != self.__width:
raise Exception("""Matrix is not square""" )
if self.__height < 1:
raise Exception("""Matrix has no element""" )
elif self.__height == 1:
return self.__matrix[0][0]
elif self.__height == 2:
return (
self.__matrix[0][0] * self.__matrix[1][1]
- self.__matrix[0][1] * self.__matrix[1][0]
)
else:
            cofactor_sum = [
                self.__matrix[0][y] * self.cofactor(0 , y ) for y in range(self.__width )
            ]
            return sum(cofactor_sum )
def square_zero_matrix( n ) -> Matrix:
    ans: list[list[float]] = [[0] * n for _ in range(n )]
    return Matrix(ans , n , n )
def _snake_case ( lowercase , lowercase , lowercase , lowercase ) -> Matrix:
random.seed(lowercase )
__a : list[list[float]] = [
[random.randint(lowercase , lowercase ) for _ in range(lowercase )] for _ in range(lowercase )
]
return Matrix(lowercase , lowercase , lowercase ) | 697 |
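# Illustrative usage of the classes above:
# v = Vector([1, 2, 3]); w = Vector([4, 5, 6])
# str(v + w)  ->  "(5,7,9)"        v * w  ->  32 (dot product)
# zero_vector(4)                   ->  a 4-dimensional null vector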
'''simple docstring'''
from typing import List, Optional, Union
import numpy as np
from ....audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function
from ....feature_extraction_sequence_utils import SequenceFeatureExtractor
from ....feature_extraction_utils import BatchFeature
from ....file_utils import PaddingStrategy, TensorType
from ....utils import logging
__SCREAMING_SNAKE_CASE : Tuple = logging.get_logger(__name__)
class SCREAMING_SNAKE_CASE__ ( SequenceFeatureExtractor ):
lowercase__ = ["input_features", "attention_mask"]
    def __init__( self , feature_size=80 , sampling_rate=1_6000 , padding_value=0.0 , hop_length=10 , win_length=25 , win_function="hamming_window" , frame_signal_scale=3_2_7_6_8.0 , preemphasis_coeff=0.9_7 , mel_floor=1.0 , normalize_means=True , normalize_vars=True , return_attention_mask=False , **kwargs , ):
        '''simple docstring'''
        super().__init__(feature_size=feature_size , sampling_rate=sampling_rate , padding_value=padding_value , **kwargs )
        self.feature_size = feature_size
        self.sampling_rate = sampling_rate
        self.padding_value = padding_value
        self.hop_length = hop_length
        self.win_length = win_length
        self.frame_signal_scale = frame_signal_scale
        self.preemphasis_coeff = preemphasis_coeff
        self.mel_floor = mel_floor
        self.normalize_means = normalize_means
        self.normalize_vars = normalize_vars
        self.win_function = win_function
        self.return_attention_mask = return_attention_mask
        self.sample_size = win_length * sampling_rate // 1000
        self.sample_stride = hop_length * sampling_rate // 1000
        self.n_fft = optimal_fft_length(self.sample_size )
        self.n_freqs = (self.n_fft // 2) + 1
    def _extract_mfsc_features( self , one_waveform ):
        '''simple docstring'''
        if self.win_function == "hamming_window":
            window = window_function(window_length=self.sample_size , name=self.win_function , periodic=False )
        else:
            window = window_function(window_length=self.sample_size , name=self.win_function )
        fbanks = mel_filter_bank(
            num_frequency_bins=self.n_freqs , num_mel_filters=self.feature_size , min_frequency=0.0 , max_frequency=self.sampling_rate / 2.0 , sampling_rate=self.sampling_rate , )
        msfc_features = spectrogram(
            one_waveform * self.frame_signal_scale , window=window , frame_length=self.sample_size , hop_length=self.sample_stride , fft_length=self.n_fft , center=False , preemphasis=self.preemphasis_coeff , mel_filters=fbanks , mel_floor=self.mel_floor , log_mel="""log""" , )
        return msfc_features.T
    def _normalize_one( self , x , input_length , padding_value ):
        '''simple docstring'''
        if self.normalize_means:
            mean = x[:input_length].mean(axis=0 )
            x = np.subtract(x , mean )
        if self.normalize_vars:
            std = x[:input_length].std(axis=0 )
            x = np.divide(x , std )
        if input_length < x.shape[0]:
            x[input_length:] = padding_value
        # make sure array is in float32
        x = x.astype(np.float32 )
        return x
    def normalize( self , input_features , attention_mask = None ):
        '''simple docstring'''
        lengths = attention_mask.sum(-1 ) if attention_mask is not None else [x.shape[0] for x in input_features]
        return [self._normalize_one(x , n , self.padding_value ) for x, n in zip(input_features , lengths )]
    def __call__( self , raw_speech , padding = False , max_length = None , truncation = False , pad_to_multiple_of = None , return_attention_mask = None , return_tensors = None , sampling_rate = None , **kwargs , ):
        '''simple docstring'''
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"""The model corresponding to this feature extractor: {self} was trained using a sampling rate of"""
                    f""" {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with"""
                    f""" {self.sampling_rate} and not {sampling_rate}.""" )
        else:
            logger.warning(
                """It is strongly recommended to pass the ``sampling_rate`` argument to this function. """
                """Failing to do so can result in silent errors that might be hard to debug.""" )
        is_batched_numpy = isinstance(raw_speech , np.ndarray ) and len(raw_speech.shape ) > 1
        if is_batched_numpy and len(raw_speech.shape ) > 2:
            raise ValueError(f"""Only mono-channel audio is supported for input to {self}""" )
        is_batched = is_batched_numpy or (
            isinstance(raw_speech , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
        )
        if is_batched:
            raw_speech = [np.asarray(speech , dtype=np.float32 ) for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech , np.ndarray ):
            raw_speech = np.asarray(raw_speech , dtype=np.float32 )
        elif isinstance(raw_speech , np.ndarray ) and raw_speech.dtype is np.dtype(np.float64 ):
            raw_speech = raw_speech.astype(np.float32 )
        # always return batch
        if not is_batched:
            raw_speech = [raw_speech]
        # extract fbank features
        features = [self._extract_mfsc_features(one_waveform ) for one_waveform in raw_speech]
        # convert into correct format for padding
        encoded_inputs = BatchFeature({"""input_features""": features} )
        padded_inputs = self.pad(
            encoded_inputs , padding=padding , max_length=max_length , truncation=truncation , pad_to_multiple_of=pad_to_multiple_of , return_attention_mask=return_attention_mask , **kwargs , )
        # make sure list is in array format
        input_features = padded_inputs.get("""input_features""" )
        if isinstance(input_features[0] , list ):
            padded_inputs["""input_features"""] = [np.asarray(feature , dtype=np.float32 ) for feature in input_features]
        attention_mask = padded_inputs.get("""attention_mask""" )
        if attention_mask is not None:
            padded_inputs["""attention_mask"""] = [np.asarray(array , dtype=np.int32 ) for array in attention_mask]
        if self.normalize_means or self.normalize_vars:
            attention_mask = (
                np.array(attention_mask , dtype=np.int32 )
                if self._get_padding_strategies(padding , max_length=max_length ) is not PaddingStrategy.DO_NOT_PAD
                and padding
                else None
            )
            padded_inputs["""input_features"""] = self.normalize(
                padded_inputs["""input_features"""] , attention_mask=attention_mask )
        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors )
        return padded_inputs
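# Sketch of typical use (shapes are illustrative):
# fe = SCREAMING_SNAKE_CASE__(feature_size=80, sampling_rate=16000, padding_value=0.0)
# batch = fe([np.zeros(16000)], sampling_rate=16000)
# np.asarray(batch["input_features"]).shape  ->  (1, num_frames, 80)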
'''simple docstring'''
import unittest
from transformers import BarthezTokenizer, BarthezTokenizerFast, BatchEncoding
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
@require_sentencepiece
@slow # see https://github.com/huggingface/transformers/issues/11457
class SCREAMING_SNAKE_CASE__ ( TokenizerTesterMixin , unittest.TestCase ):
lowercase__ = BarthezTokenizer
lowercase__ = BarthezTokenizerFast
lowercase__ = True
lowercase__ = True
    def setUp( self ):
        '''simple docstring'''
        super().setUp()
        tokenizer = BarthezTokenizerFast.from_pretrained("""moussaKam/mbarthez""" )
        tokenizer.save_pretrained(self.tmpdirname )
        tokenizer.save_pretrained(self.tmpdirname , legacy_format=False )
        self.tokenizer = tokenizer
    def test_convert_token_and_id( self ):
        '''simple docstring'''
        token = """<pad>"""
        token_id = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token ) , token_id )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id ) , token )
    def test_get_vocab( self ):
        '''simple docstring'''
        vocab_keys = list(self.get_tokenizer().get_vocab().keys() )
        self.assertEqual(vocab_keys[0] , """<s>""" )
        self.assertEqual(vocab_keys[1] , """<pad>""" )
        self.assertEqual(vocab_keys[-1] , """<mask>""" )
        self.assertEqual(len(vocab_keys ) , 10_1122 )
    def test_vocab_size( self ):
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size , 10_1122 )
@require_torch
    def test_prepare_batch( self ):
        '''simple docstring'''
        src_text = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""]
        expected_src_tokens = [0, 57, 3018, 7_0307, 91, 2]
        batch = self.tokenizer(
            src_text , max_length=len(expected_src_tokens ) , padding=True , truncation=True , return_tensors="""pt""" )
        self.assertIsInstance(batch , BatchEncoding )
        self.assertEqual((2, 6) , batch.input_ids.shape )
        self.assertEqual((2, 6) , batch.attention_mask.shape )
        result = batch.input_ids.tolist()[0]
        self.assertListEqual(expected_src_tokens , result )
    def test_rust_and_python_full_tokenizers( self ):
        '''simple docstring'''
        if not self.test_rust_tokenizer:
            return
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()
        sequence = """I was born in 92000, and this is falsé."""
        tokens = tokenizer.tokenize(sequence )
        rust_tokens = rust_tokenizer.tokenize(sequence )
        self.assertListEqual(tokens , rust_tokens )
        ids = tokenizer.encode(sequence , add_special_tokens=False )
        rust_ids = rust_tokenizer.encode(sequence , add_special_tokens=False )
        self.assertListEqual(ids , rust_ids )
        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence )
        rust_ids = rust_tokenizer.encode(sequence )
        self.assertListEqual(ids , rust_ids )
@slow
    def test_tokenizer_integration( self ):
'''simple docstring'''
__a : Optional[int] = {"""input_ids""": [[0, 490, 1_4328, 4507, 354, 47, 4_3669, 95, 25, 7_8117, 2_0215, 1_9779, 190, 22, 400, 4, 3_5343, 8_0310, 603, 86, 2_4937, 105, 3_3438, 9_4762, 196, 3_9642, 7, 15, 1_5933, 173, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 1_0534, 87, 25, 66, 3358, 196, 5_5289, 8, 8_2961, 81, 2204, 7_5203, 7, 15, 763, 1_2956, 216, 178, 1_4328, 9595, 1377, 6_9693, 7, 448, 7_1021, 196, 1_8106, 1437, 1_3974, 108, 9083, 4, 4_9315, 7, 39, 86, 1326, 2793, 4_6333, 4, 448, 196, 7_4588, 7, 4_9315, 7, 39, 21, 822, 3_8470, 74, 21, 6_6723, 6_2480, 8, 2_2050, 5, 2]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
# moussaKam/mbarthez is a french model. So we also use french texts.
__a : Optional[Any] = [
"""Le transformeur est un modèle d'apprentissage profond introduit en 2017, """
"""utilisé principalement dans le domaine du traitement automatique des langues (TAL).""",
"""À l'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus """
"""pour gérer des données séquentielles, telles que le langage naturel, pour des tâches """
"""telles que la traduction et la synthèse de texte.""",
]
self.tokenizer_integration_test_util(
expected_encoding=__UpperCamelCase , model_name="""moussaKam/mbarthez""" , revision="""c2e4ecbca5e3cd2c37fe1ac285ca4fbdf1366fb6""" , sequences=__UpperCamelCase , ) | 697 |
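# Note: the checks above download 'moussaKam/mbarthez' from the Hugging Face
# Hub, which is why the whole test class is marked @slow.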
'''simple docstring'''
__SCREAMING_SNAKE_CASE : int = 9.80_665
def _snake_case ( fluid_density , volume , gravity = g ) -> float:
if fluid_density <= 0:
raise ValueError("""Impossible fluid density""" )
if volume < 0:
raise ValueError("""Impossible Object volume""" )
if gravity <= 0:
raise ValueError("""Impossible Gravity""" )
return fluid_density * gravity * volume
if __name__ == "__main__":
import doctest
# run doctest
doctest.testmod() | 697 | 1 |
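# Worked example: fresh water (1000 kg/m^3) displaced by a 0.5 m^3 object:
# _snake_case(1000, 0.5)  ->  1000 * 9.80665 * 0.5  =  4903.325 N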
'''simple docstring'''
class Graph :
def __init__( self ):
'''simple docstring'''
        self.num_vertices = 0
        self.num_edges = 0
        self.adjacency = {}
    def add_vertex( self , vertex ):
        '''simple docstring'''
        if vertex not in self.adjacency:
            self.adjacency[vertex] = {}
            self.num_vertices += 1
    def add_edge( self , head , tail , weight ):
        '''simple docstring'''
        self.add_vertex(head )
        self.add_vertex(tail )
        if head == tail:
            return
        self.adjacency[head][tail] = weight
        self.adjacency[tail][head] = weight
    def distinct_weight( self ):
        '''simple docstring'''
        edges = self.get_edges()
        for edge in edges:
            head , tail , weight = edge
            edges.remove((tail, head, weight) )
        for i in range(len(edges ) ):
            edges[i] = list(edges[i] )
        edges.sort(key=lambda e : e[2] )
        for i in range(len(edges ) - 1 ):
            if edges[i][2] >= edges[i + 1][2]:
                edges[i + 1][2] = edges[i][2] + 1
        for edge in edges:
            head , tail , weight = edge
            self.adjacency[head][tail] = weight
            self.adjacency[tail][head] = weight
def __str__( self ):
'''simple docstring'''
__a : Union[str, Any] = """"""
for tail in self.adjacency:
for head in self.adjacency[tail]:
                weight = self.adjacency[head][tail]
string += f"""{head} -> {tail} == {weight}\n"""
return string.rstrip("""\n""" )
    def get_edges( self ):
        '''simple docstring'''
        output = []
        for tail in self.adjacency:
            for head in self.adjacency[tail]:
                output.append((tail, head, self.adjacency[head][tail]) )
        return output
    def get_vertices( self ):
        '''simple docstring'''
        return self.adjacency.keys()
@staticmethod
    def build( vertices=None , edges=None ):
        '''simple docstring'''
        g = Graph()
        if vertices is None:
            vertices = []
        if edges is None:
            edges = []
        for vertex in vertices:
            g.add_vertex(vertex )
        for edge in edges:
            g.add_edge(*edge )
        return g
    class UnionFind :
def __init__( self ):
'''simple docstring'''
            self.parent = {}
            self.rank = {}
def __len__( self ):
'''simple docstring'''
return len(self.parent )
        def make_set( self , item ):
            '''simple docstring'''
            if item in self.parent:
                return self.find(item )
            self.parent[item] = item
            self.rank[item] = 0
            return item
        def find( self , item ):
            '''simple docstring'''
            if item not in self.parent:
                return self.make_set(item )
            if item != self.parent[item]:
                self.parent[item] = self.find(self.parent[item] )
            return self.parent[item]
        def union( self , item1 , item2 ):
            '''simple docstring'''
            root1 = self.find(item1 )
            root2 = self.find(item2 )
            if root1 == root2:
                return root1
            if self.rank[root1] > self.rank[root2]:
                self.parent[root2] = root1
                return root1
            if self.rank[root1] < self.rank[root2]:
                self.parent[root1] = root2
                return root2
            if self.rank[root1] == self.rank[root2]:
                self.rank[root1] += 1
                self.parent[root2] = root1
                return root1
            return None
@staticmethod
    def boruvka_mst( graph ):
        '''simple docstring'''
        num_components = graph.num_vertices
        union_find = Graph.UnionFind()
        mst_edges = []
        while num_components > 1:
            cheap_edge = {}
            for vertex in graph.get_vertices():
                cheap_edge[vertex] = -1
            edges = graph.get_edges()
            for edge in edges:
                head , tail , weight = edge
                edges.remove((tail, head, weight) )
            for edge in edges:
                head , tail , weight = edge
                set1 = union_find.find(head )
                set2 = union_find.find(tail )
                if set1 != set2:
                    if cheap_edge[set1] == -1 or cheap_edge[set1][2] > weight:
                        cheap_edge[set1] = [head, tail, weight]
                    if cheap_edge[set2] == -1 or cheap_edge[set2][2] > weight:
                        cheap_edge[set2] = [head, tail, weight]
            for vertex in cheap_edge:
                if cheap_edge[vertex] != -1:
                    head , tail , weight = cheap_edge[vertex]
                    if union_find.find(head ) != union_find.find(tail ):
                        union_find.union(head , tail )
                        mst_edges.append(cheap_edge[vertex] )
                        num_components = num_components - 1
        mst = Graph.build(edges=mst_edges )
return mst | 697 |
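# Illustrative walk-through of the classes above:
# g = Graph.build(vertices=[1, 2, 3], edges=[(1, 2, 1), (2, 3, 2), (1, 3, 3)])
# Graph.boruvka_mst(g) returns a minimum spanning tree that keeps the two
# cheapest edges, (1, 2, 1) and (2, 3, 2).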
'''simple docstring'''
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DetrImageProcessor
class DetrImageProcessingTester( unittest.TestCase ):
    def __init__( self , parent , batch_size=7 , num_channels=3 , min_resolution=30 , max_resolution=400 , do_resize=True , size=None , do_rescale=True , rescale_factor=1 / 255 , do_normalize=True , image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] , do_pad=True , ):
        '''simple docstring'''
        size = size if size is not None else {"""shortest_edge""": 18, """longest_edge""": 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_pad = do_pad
    def prepare_image_processor_dict( self ):
'''simple docstring'''
return {
"do_resize": self.do_resize,
"size": self.size,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_pad": self.do_pad,
}
    def get_expected_values( self , image_inputs , batched=False ):
        '''simple docstring'''
        if not batched:
            image = image_inputs[0]
            if isinstance(image , Image.Image ):
                w , h = image.size
            else:
                h , w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["""shortest_edge"""] * h / w )
                expected_width = self.size["""shortest_edge"""]
            elif w > h:
                expected_height = self.size["""shortest_edge"""]
                expected_width = int(self.size["""shortest_edge"""] * w / h )
            else:
                expected_height = self.size["""shortest_edge"""]
                expected_width = self.size["""shortest_edge"""]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height , expected_width = self.get_expected_values([image] )
                expected_values.append((expected_height, expected_width) )
            expected_height = max(expected_values , key=lambda item : item[0] )[0]
            expected_width = max(expected_values , key=lambda item : item[1] )[1]
        return expected_height, expected_width
@require_torch
@require_vision
class SCREAMING_SNAKE_CASE__ ( ImageProcessingSavingTestMixin , unittest.TestCase ):
lowercase__ = DetrImageProcessor if is_vision_available() else None
    def setUp( self ):
        '''simple docstring'''
        self.image_processor_tester = DetrImageProcessingTester(self )
@property
    def image_processor_dict( self ):
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties( self ):
        '''simple docstring'''
        image_processing = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(image_processing , """image_mean""" ) )
        self.assertTrue(hasattr(image_processing , """image_std""" ) )
        self.assertTrue(hasattr(image_processing , """do_normalize""" ) )
        self.assertTrue(hasattr(image_processing , """do_rescale""" ) )
        self.assertTrue(hasattr(image_processing , """rescale_factor""" ) )
        self.assertTrue(hasattr(image_processing , """do_resize""" ) )
        self.assertTrue(hasattr(image_processing , """size""" ) )
        self.assertTrue(hasattr(image_processing , """do_pad""" ) )
    def test_image_processor_from_dict_with_kwargs( self ):
        '''simple docstring'''
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict )
        self.assertEqual(image_processor.size , {"""shortest_edge""": 18, """longest_edge""": 1333} )
        self.assertEqual(image_processor.do_pad , True )
        image_processor = self.image_processing_class.from_dict(
            self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=False )
        self.assertEqual(image_processor.size , {"""shortest_edge""": 42, """longest_edge""": 84} )
        self.assertEqual(image_processor.do_pad , False )
    def test_batch_feature( self ):
'''simple docstring'''
pass
    def test_call_pil( self ):
        '''simple docstring'''
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False )
        for image in image_inputs:
            self.assertIsInstance(image , Image.Image )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
        expected_height , expected_width = self.image_processor_tester.get_expected_values(image_inputs )
        self.assertEqual(
            encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
        # Test batched
        expected_height , expected_width = self.image_processor_tester.get_expected_values(image_inputs , batched=True )
        encoded_images = image_processing(image_inputs , return_tensors="""pt""" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ) , )
    def test_call_numpy( self ):
        '''simple docstring'''
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False , numpify=True )
        for image in image_inputs:
            self.assertIsInstance(image , np.ndarray )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
        expected_height , expected_width = self.image_processor_tester.get_expected_values(image_inputs )
        self.assertEqual(
            encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
        # Test batched
        encoded_images = image_processing(image_inputs , return_tensors="""pt""" ).pixel_values
        expected_height , expected_width = self.image_processor_tester.get_expected_values(image_inputs , batched=True )
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ) , )
    def test_call_pytorch( self ):
        '''simple docstring'''
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False , torchify=True )
        for image in image_inputs:
            self.assertIsInstance(image , torch.Tensor )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
        expected_height , expected_width = self.image_processor_tester.get_expected_values(image_inputs )
        self.assertEqual(
            encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
        # Test batched
        encoded_images = image_processing(image_inputs , return_tensors="""pt""" ).pixel_values
        expected_height , expected_width = self.image_processor_tester.get_expected_values(image_inputs , batched=True )
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ) , )
@slow
    def test_call_pytorch_with_coco_detection_annotations( self ):
'''simple docstring'''
__a : List[Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
with open("""./tests/fixtures/tests_samples/COCO/coco_annotations.txt""" , """r""" ) as f:
__a : Dict = json.loads(f.read() )
__a : Optional[int] = {"""image_id""": 3_9769, """annotations""": target}
# encode them
__a : List[str] = DetrImageProcessor.from_pretrained("""facebook/detr-resnet-50""" )
__a : Tuple = image_processing(images=__UpperCamelCase , annotations=__UpperCamelCase , return_tensors="""pt""" )
# verify pixel values
__a : Union[str, Any] = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding["""pixel_values"""].shape , __UpperCamelCase )
__a : List[str] = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] )
self.assertTrue(torch.allclose(encoding["""pixel_values"""][0, 0, 0, :3] , __UpperCamelCase , atol=1E-4 ) )
# verify area
__a : List[Any] = torch.tensor([5_8_8_7.9_6_0_0, 1_1_2_5_0.2_0_6_1, 4_8_9_3_5_3.8_4_3_8, 8_3_7_1_2_2.7_5_0_0, 1_4_7_9_6_7.5_1_5_6, 1_6_5_7_3_2.3_4_3_8] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""area"""] , __UpperCamelCase ) )
# verify boxes
__a : Optional[int] = torch.Size([6, 4] )
self.assertEqual(encoding["""labels"""][0]["""boxes"""].shape , __UpperCamelCase )
__a : Any = torch.tensor([0.5_5_0_3, 0.2_7_6_5, 0.0_6_0_4, 0.2_2_1_5] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""boxes"""][0] , __UpperCamelCase , atol=1E-3 ) )
# verify image_id
__a : Union[str, Any] = torch.tensor([3_9769] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""image_id"""] , __UpperCamelCase ) )
# verify is_crowd
__a : List[Any] = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""iscrowd"""] , __UpperCamelCase ) )
# verify class_labels
__a : Any = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""class_labels"""] , __UpperCamelCase ) )
# verify orig_size
__a : Any = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""orig_size"""] , __UpperCamelCase ) )
# verify size
__a : str = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""size"""] , __UpperCamelCase ) )
@slow
    def test_call_pytorch_with_coco_panoptic_annotations( self ):
'''simple docstring'''
__a : List[Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
with open("""./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt""" , """r""" ) as f:
__a : Tuple = json.loads(f.read() )
__a : str = {"""file_name""": """000000039769.png""", """image_id""": 3_9769, """segments_info""": target}
__a : int = pathlib.Path("""./tests/fixtures/tests_samples/COCO/coco_panoptic""" )
# encode them
__a : List[str] = DetrImageProcessor.from_pretrained("""facebook/detr-resnet-50-panoptic""" )
__a : Tuple = image_processing(images=__UpperCamelCase , annotations=__UpperCamelCase , masks_path=__UpperCamelCase , return_tensors="""pt""" )
# verify pixel values
__a : List[str] = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding["""pixel_values"""].shape , __UpperCamelCase )
__a : Any = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] )
self.assertTrue(torch.allclose(encoding["""pixel_values"""][0, 0, 0, :3] , __UpperCamelCase , atol=1E-4 ) )
# verify area
__a : Optional[Any] = torch.tensor([1_4_7_9_7_9.6_8_7_5, 1_6_5_5_2_7.0_4_6_9, 4_8_4_6_3_8.5_9_3_8, 1_1_2_9_2.9_3_7_5, 5_8_7_9.6_5_6_2, 7_6_3_4.1_1_4_7] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""area"""] , __UpperCamelCase ) )
# verify boxes
__a : Optional[Any] = torch.Size([6, 4] )
self.assertEqual(encoding["""labels"""][0]["""boxes"""].shape , __UpperCamelCase )
__a : List[str] = torch.tensor([0.2_6_2_5, 0.5_4_3_7, 0.4_6_8_8, 0.8_6_2_5] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""boxes"""][0] , __UpperCamelCase , atol=1E-3 ) )
# verify image_id
__a : List[str] = torch.tensor([3_9769] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""image_id"""] , __UpperCamelCase ) )
# verify is_crowd
__a : Optional[int] = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""iscrowd"""] , __UpperCamelCase ) )
# verify class_labels
__a : Optional[int] = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""class_labels"""] , __UpperCamelCase ) )
# verify masks
__a : Union[str, Any] = 82_2873
self.assertEqual(encoding["""labels"""][0]["""masks"""].sum().item() , __UpperCamelCase )
# verify orig_size
__a : str = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""orig_size"""] , __UpperCamelCase ) )
# verify size
__a : List[Any] = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""size"""] , __UpperCamelCase ) ) | 697 | 1 |
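# Hypothetical invocation (path assumed from the usual transformers layout):
# RUN_SLOW=1 pytest tests/models/detr/test_image_processing_detr.py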
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
__SCREAMING_SNAKE_CASE : List[str] = {
'configuration_blenderbot_small': [
'BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP',
'BlenderbotSmallConfig',
'BlenderbotSmallOnnxConfig',
],
'tokenization_blenderbot_small': ['BlenderbotSmallTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE : Union[str, Any] = ['BlenderbotSmallTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE : List[str] = [
'BLENDERBOT_SMALL_PRETRAINED_MODEL_ARCHIVE_LIST',
'BlenderbotSmallForCausalLM',
'BlenderbotSmallForConditionalGeneration',
'BlenderbotSmallModel',
'BlenderbotSmallPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE : Optional[int] = [
'TFBlenderbotSmallForConditionalGeneration',
'TFBlenderbotSmallModel',
'TFBlenderbotSmallPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE : Optional[Any] = [
'FlaxBlenderbotSmallForConditionalGeneration',
'FlaxBlenderbotSmallModel',
'FlaxBlenderbotSmallPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_blenderbot_small import (
BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlenderbotSmallConfig,
BlenderbotSmallOnnxConfig,
)
from .tokenization_blenderbot_small import BlenderbotSmallTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_blenderbot_small_fast import BlenderbotSmallTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blenderbot_small import (
BLENDERBOT_SMALL_PRETRAINED_MODEL_ARCHIVE_LIST,
BlenderbotSmallForCausalLM,
BlenderbotSmallForConditionalGeneration,
BlenderbotSmallModel,
BlenderbotSmallPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blenderbot_small import (
TFBlenderbotSmallForConditionalGeneration,
TFBlenderbotSmallModel,
TFBlenderbotSmallPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_blenderbot_small import (
FlaxBlenderbotSmallForConditionalGeneration,
FlaxBlenderbotSmallModel,
FlaxBlenderbotSmallPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) | 697 |
'''simple docstring'''
import argparse
import logging
import os
import time
import timeit
import datasets
import numpy as np
import pycuda.autoinit # noqa: F401
import pycuda.driver as cuda
import tensorrt as trt
import torch
from absl import logging as absl_logging
from accelerate import Accelerator
from datasets import load_dataset, load_metric
from torch.utils.data import DataLoader
from utils_qa import postprocess_qa_predictions
import transformers
from transformers import AutoTokenizer, EvalPrediction, default_data_collator, set_seed
from transformers.trainer_pt_utils import nested_concat, nested_truncate
TRT_LOGGER = trt.Logger(trt.Logger.WARNING)
absl_logger = absl_logging.get_absl_logger()
absl_logger.setLevel(logging.WARNING)
logger = logging.getLogger(__name__)
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--onnx_model_path',
default=None,
type=str,
required=True,
    help='Path to the ONNX model.',
)
parser.add_argument(
'--output_dir',
default=None,
type=str,
required=True,
help='The output directory where the model checkpoints and predictions will be written.',
)
# Other parameters
parser.add_argument(
'--tokenizer_name',
default='',
type=str,
required=True,
help='Pretrained tokenizer name or path if not the same as model_name',
)
parser.add_argument(
'--version_2_with_negative',
action='store_true',
help='If true, the SQuAD examples contain some that do not have an answer.',
)
parser.add_argument(
'--null_score_diff_threshold',
type=float,
default=0.0,
help='If null_score - best_non_null is greater than the threshold predict null.',
)
parser.add_argument(
'--max_seq_length',
default=384,
type=int,
help=(
'The maximum total input sequence length after WordPiece tokenization. Sequences '
'longer than this will be truncated, and sequences shorter than this will be padded.'
),
)
parser.add_argument(
'--doc_stride',
default=128,
type=int,
help='When splitting up a long document into chunks, how much stride to take between chunks.',
)
parser.add_argument('--per_device_eval_batch_size', default=8, type=int, help='Batch size per GPU/CPU for evaluation.')
parser.add_argument(
'--n_best_size',
default=20,
type=int,
help='The total number of n-best predictions to generate in the nbest_predictions.json output file.',
)
parser.add_argument(
'--max_answer_length',
default=30,
type=int,
help=(
'The maximum length of an answer that can be generated. This is needed because the start '
'and end predictions are not conditioned on one another.'
),
)
parser.add_argument('--seed', type=int, default=42, help='random seed for initialization')
parser.add_argument(
'--dataset_name',
type=str,
default=None,
required=True,
help='The name of the dataset to use (via the datasets library).',
)
parser.add_argument(
'--dataset_config_name',
type=str,
default=None,
help='The configuration name of the dataset to use (via the datasets library).',
)
parser.add_argument(
    '--preprocessing_num_workers', type=int, default=4, help='The number of processes to use for preprocessing.'
)
parser.add_argument('--overwrite_cache', action='store_true', help='Overwrite the cached training and evaluation sets')
parser.add_argument(
'--fp16',
action='store_true',
help='Whether to use 16-bit (mixed) precision instead of 32-bit',
)
parser.add_argument(
'--int8',
action='store_true',
help='Whether to use INT8',
)
args = parser.parse_args()
if args.tokenizer_name:
    tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name, use_fast=True)
else:
    raise ValueError(
        'You are instantiating a new tokenizer from scratch. This is not supported by this script.'
        'You can do it from another script, save it, and load it from here, using --tokenizer_name.'
    )
logger.info('Training/evaluation parameters %s', args)
args.eval_batch_size = args.per_device_eval_batch_size
INPUT_SHAPE = (args.eval_batch_size, args.max_seq_length)
# TRT Engine properties
STRICT_TYPES = True
engine_name = 'temp_engine/bert-fp32.engine'
if args.fp16:
    engine_name = 'temp_engine/bert-fp16.engine'
if args.int8:
    engine_name = 'temp_engine/bert-int8.engine'
# import ONNX file
if not os.path.exists('temp_engine'):
    os.makedirs('temp_engine')
EXPLICIT_BATCH = 1 << (int)(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)
with trt.Builder(TRT_LOGGER) as builder, builder.create_network(EXPLICIT_BATCH) as network, trt.OnnxParser(
network, TRT_LOGGER
) as parser:
with open(args.onnx_model_path, 'rb') as model:
if not parser.parse(model.read()):
for error in range(parser.num_errors):
print(parser.get_error(error))
# Query input names and shapes from parsed TensorRT network
    network_inputs = [network.get_input(i) for i in range(network.num_inputs)]
    input_names = [_input.name for _input in network_inputs]  # ex: ["actual_input1"]
    with builder.create_builder_config() as config:
        config.max_workspace_size = 1 << 50
        if STRICT_TYPES:
            config.set_flag(trt.BuilderFlag.STRICT_TYPES)
        if args.fp16:
            config.set_flag(trt.BuilderFlag.FP16)
        if args.int8:
            config.set_flag(trt.BuilderFlag.INT8)
        profile = builder.create_optimization_profile()
        config.add_optimization_profile(profile)
        for i in range(len(input_names)):
            profile.set_shape(input_names[i], INPUT_SHAPE, INPUT_SHAPE, INPUT_SHAPE)
        engine = builder.build_engine(network, config)
# serialize_engine and store in file (can be directly loaded and deserialized):
with open(engine_name, 'wb') as f:
f.write(engine.serialize())
def model_infer( inputs , context , d_inputs , h_output0 , h_output1 , d_output0 , d_output1 , stream ):
    input_ids = np.asarray(inputs["""input_ids"""] , dtype=np.int32 )
    attention_mask = np.asarray(inputs["""attention_mask"""] , dtype=np.int32 )
    token_type_ids = np.asarray(inputs["""token_type_ids"""] , dtype=np.int32 )
    # Copy inputs
    cuda.memcpy_htod_async(d_inputs[0] , input_ids.ravel() , stream )
    cuda.memcpy_htod_async(d_inputs[1] , attention_mask.ravel() , stream )
    cuda.memcpy_htod_async(d_inputs[2] , token_type_ids.ravel() , stream )
    # start time
    start_time = time.time()
    # Run inference
    context.execute_async(
        bindings=[int(d_inp ) for d_inp in d_inputs] + [int(d_output0 ), int(d_output1 )] , stream_handle=stream.handle )
    # Transfer predictions back from GPU
    cuda.memcpy_dtoh_async(h_output0 , d_output0 , stream )
    cuda.memcpy_dtoh_async(h_output1 , d_output1 , stream )
    # Synchronize the stream and take time
    stream.synchronize()
    # end time
    end_time = time.time()
    infer_time = end_time - start_time
    outputs = (h_output0, h_output1)
    # print(outputs)
    return outputs, infer_time
# Initialize the accelerator. We will let the accelerator handle device placement for us in this example.
accelerator = Accelerator()
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
datefmt='%m/%d/%Y %H:%M:%S',
level=logging.INFO,
)
# Setup logging, we only want one process per machine to log things on the screen.
# accelerator.is_local_main_process is only True for one process per machine.
logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR)
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_info()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
# If passed along, set the training seed now.
if args.seed is not None:
set_seed(args.seed)
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
if args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
    raw_datasets = load_dataset(args.dataset_name, args.dataset_config_name)
else:
raise ValueError('Evaluation requires a dataset name')
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Preprocessing the datasets.
# Preprocessing is slightly different for training and evaluation.
column_names = raw_datasets['validation'].column_names
question_column_name = 'question' if 'question' in column_names else column_names[0]
context_column_name = 'context' if 'context' in column_names else column_names[1]
answer_column_name = 'answers' if 'answers' in column_names else column_names[2]
# Padding side determines if we do (question|context) or (context|question).
pad_on_right = tokenizer.padding_side == 'right'
if args.max_seq_length > tokenizer.model_max_length:
logger.warning(
f'''The max_seq_length passed ({args.max_seq_length}) is larger than the maximum length for the'''
f'''model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.'''
)
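# With the defaults above (max_seq_length=384, doc_stride=128), a long context is
# split into overlapping features spaced max_seq_length - doc_stride = 256 tokens apart.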
max_seq_length = min(args.max_seq_length, tokenizer.model_max_length)
def prepare_validation_features( examples ):
    # Some of the questions have lots of whitespace on the left, which is not useful and will make the
    # truncation of the context fail (the tokenized question will take a lot of space). So we remove that
    # left whitespace
    examples[question_column_name] = [q.lstrip() for q in examples[question_column_name]]
    # Tokenize our examples with truncation and maybe padding, but keep the overflows using a stride. This results
    # in one example possibly giving several features when a context is long, each of those features having a
    # context that overlaps a bit with the context of the previous feature.
    tokenized_examples = tokenizer(
        examples[question_column_name if pad_on_right else context_column_name] , examples[context_column_name if pad_on_right else question_column_name] , truncation="""only_second""" if pad_on_right else """only_first""" , max_length=max_seq_length , stride=args.doc_stride , return_overflowing_tokens=True , return_offsets_mapping=True , padding="""max_length""" , )
    # Since one example might give us several features if it has a long context, we need a map from a feature to
    # its corresponding example. This key gives us just that.
    sample_mapping = tokenized_examples.pop("""overflow_to_sample_mapping""" )
    # For evaluation, we will need to convert our predictions to substrings of the context, so we keep the
    # corresponding example_id and we will store the offset mappings.
    tokenized_examples["""example_id"""] = []
    for i in range(len(tokenized_examples["""input_ids"""] ) ):
        # Grab the sequence corresponding to that example (to know what is the context and what is the question).
        sequence_ids = tokenized_examples.sequence_ids(i )
        context_index = 1 if pad_on_right else 0
        # One example can give several spans, this is the index of the example containing this span of text.
        sample_index = sample_mapping[i]
        tokenized_examples["example_id"].append(examples["""id"""][sample_index] )
        # Set to None the offset_mapping that are not part of the context so it's easy to determine if a token
        # position is part of the context or not.
        tokenized_examples["""offset_mapping"""][i] = [
            (o if sequence_ids[k] == context_index else None)
            for k, o in enumerate(tokenized_examples["""offset_mapping"""][i] )
        ]
    return tokenized_examples
eval_examples = raw_datasets['validation']
# Validation Feature Creation
eval_dataset = eval_examples.map(
    prepare_validation_features,
    batched=True,
    num_proc=args.preprocessing_num_workers,
    remove_columns=column_names,
    load_from_cache_file=not args.overwrite_cache,
    desc='Running tokenizer on validation dataset',
)
data_collator = default_data_collator
eval_dataset_for_model = eval_dataset.remove_columns(['example_id', 'offset_mapping'])
eval_dataloader = DataLoader(
eval_dataset_for_model, collate_fn=data_collator, batch_size=args.per_device_eval_batch_size
)
def post_processing_function( examples , features , predictions , stage="eval" ):
    # Post-processing: we match the start logits and end logits to answers in the original context.
    predictions = postprocess_qa_predictions(
        examples=examples , features=features , predictions=predictions , version_2_with_negative=args.version_2_with_negative , n_best_size=args.n_best_size , max_answer_length=args.max_answer_length , null_score_diff_threshold=args.null_score_diff_threshold , output_dir=args.output_dir , prefix=stage , )
    # Format the result to the format the metric expects.
    if args.version_2_with_negative:
        formatted_predictions = [
            {"""id""": k, """prediction_text""": v, """no_answer_probability""": 0.0} for k, v in predictions.items()
        ]
    else:
        formatted_predictions = [{"""id""": k, """prediction_text""": v} for k, v in predictions.items()]
    references = [{"""id""": ex["""id"""], """answers""": ex[answer_column_name]} for ex in examples]
    return EvalPrediction(predictions=formatted_predictions , label_ids=references )
metric = load_metric('squad_v2' if args.version_2_with_negative else 'squad')
# Evaluation!
logger.info('Loading ONNX model %s for evaluation', args.onnx_model_path)
with open(engine_name, 'rb') as f, trt.Runtime(TRT_LOGGER) as runtime, runtime.deserialize_cuda_engine(
f.read()
) as engine, engine.create_execution_context() as context:
    # setup for TRT inference
for i in range(len(input_names)):
context.set_binding_shape(i, INPUT_SHAPE)
assert context.all_binding_shapes_specified
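    # The helper below computes the byte size of a single engine binding, so the
    # device buffers allocated afterwards match the engine's I/O shapes exactly.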
    def binding_nbytes( binding ):
        return trt.volume(engine.get_binding_shape(binding ) ) * engine.get_binding_dtype(binding ).itemsize
# Allocate device memory for inputs and outputs.
    d_inputs = [cuda.mem_alloc(binding_nbytes(binding)) for binding in engine if engine.binding_is_input(binding)]
    # Allocate output buffer
    h_output0 = cuda.pagelocked_empty(tuple(context.get_binding_shape(3)), dtype=np.float32)
    h_output1 = cuda.pagelocked_empty(tuple(context.get_binding_shape(4)), dtype=np.float32)
    d_output0 = cuda.mem_alloc(h_output0.nbytes)
    d_output1 = cuda.mem_alloc(h_output1.nbytes)
    # Create a stream in which to copy inputs/outputs and run inference.
    stream = cuda.Stream()
# Evaluation
logger.info('***** Running Evaluation *****')
logger.info(f''' Num examples = {len(eval_dataset)}''')
logger.info(f''' Batch size = {args.per_device_eval_batch_size}''')
    total_time = 0.0
    niter = 0
    start_time = timeit.default_timer()
    all_preds = None
for step, batch in enumerate(eval_dataloader):
        outputs , infer_time = model_infer(batch, context, d_inputs, h_output0, h_output1, d_output0, d_output1, stream)
        total_time += infer_time
        niter += 1
        start_logits , end_logits = outputs
        start_logits = torch.tensor(start_logits)
        end_logits = torch.tensor(end_logits)
        # necessary to pad predictions and labels for being gathered
        start_logits = accelerator.pad_across_processes(start_logits, dim=1, pad_index=-100)
        end_logits = accelerator.pad_across_processes(end_logits, dim=1, pad_index=-100)
        logits = (accelerator.gather(start_logits).cpu().numpy(), accelerator.gather(end_logits).cpu().numpy())
        all_preds = logits if all_preds is None else nested_concat(all_preds, logits, padding_index=-100)
if all_preds is not None:
        all_preds = nested_truncate(all_preds, len(eval_dataset))
    evalTime = timeit.default_timer() - start_time
logger.info(' Evaluation done in total %f secs (%f sec per example)', evalTime, evalTime / len(eval_dataset))
# Inference time from TRT
logger.info('Average Inference Time = {:.3f} ms'.format(total_time * 1_000 / niter))
logger.info('Total Inference Time = {:.3f} ms'.format(total_time * 1_000))
logger.info('Total Number of Inference = %d', niter)
    prediction = post_processing_function(eval_examples, eval_dataset, all_preds)
    eval_metric = metric.compute(predictions=prediction.predictions, references=prediction.label_ids)
logger.info(f'''Evaluation metrics: {eval_metric}''') | 697 | 1 |
'''simple docstring'''
import pytest
import requests
from datasets.utils.file_utils import http_head
from .utils import OfflineSimulationMode, RequestWouldHangIndefinitelyError, offline
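# The three tests below cover each OfflineSimulationMode in turn: a connection
# that times out, a connection that fails outright, and HF_DATASETS_OFFLINE=1.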
@pytest.mark.integration
def test_offline_with_timeout():
with offline(OfflineSimulationMode.CONNECTION_TIMES_OUT ):
        with pytest.raises(RequestWouldHangIndefinitelyError ):
requests.request("""GET""" , """https://huggingface.co""" )
with pytest.raises(requests.exceptions.ConnectTimeout ):
requests.request("""GET""" , """https://huggingface.co""" , timeout=1.0 )
@pytest.mark.integration
def test_offline_with_connection_error():
with offline(OfflineSimulationMode.CONNECTION_FAILS ):
with pytest.raises(requests.exceptions.ConnectionError ):
requests.request("""GET""" , """https://huggingface.co""" )
def test_offline_with_datasets_offline_mode():
with offline(OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1 ):
        with pytest.raises(ConnectionError ):
http_head("""https://huggingface.co""" ) | 697 |
'''simple docstring'''
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNet2DModel
from ...schedulers import KarrasVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
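# Usage sketch (illustrative, not part of this file):
#   pipe = KarrasVePipeline(unet=unet, scheduler=KarrasVeScheduler())
#   image = pipe(batch_size=1, num_inference_steps=50).images[0]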
class KarrasVePipeline( DiffusionPipeline ):
    unet: UNet2DModel
    scheduler: KarrasVeScheduler
    def __init__( self , unet , scheduler ):
        '''simple docstring'''
        super().__init__()
        self.register_modules(unet=unet , scheduler=scheduler )
@torch.no_grad()
    def __call__( self , batch_size = 1 , num_inference_steps = 50 , generator = None , output_type = "pil" , return_dict = True , **kwargs , ):
        '''simple docstring'''
        img_size = self.unet.config.sample_size
        shape = (batch_size, 3, img_size, img_size)
        model = self.unet
        # sample x_0 ~ N(0, sigma_0^2 * I)
        sample = randn_tensor(shape , generator=generator , device=self.device ) * self.scheduler.init_noise_sigma
        self.scheduler.set_timesteps(num_inference_steps )
        for t in self.progress_bar(self.scheduler.timesteps ):
            # here sigma_t == t_i from the paper
            sigma = self.scheduler.schedule[t]
            sigma_prev = self.scheduler.schedule[t - 1] if t > 0 else 0
            # 1. Select temporarily increased noise level sigma_hat
            # 2. Add new noise to move from sample_i to sample_hat
            sample_hat , sigma_hat = self.scheduler.add_noise_to_input(sample , sigma , generator=generator )
            # 3. Predict the noise residual given the noise magnitude `sigma_hat`
            # The model inputs and output are adjusted by following eq. (213) in [1].
            model_output = (sigma_hat / 2) * model((sample_hat + 1) / 2 , sigma_hat / 2 ).sample
            # 4. Evaluate dx/dt at sigma_hat
            # 5. Take Euler step from sigma to sigma_prev
            step_output = self.scheduler.step(model_output , sigma_hat , sigma_prev , sample_hat )
            if sigma_prev != 0:
                # 6. Apply 2nd order correction
                # The model inputs and output are adjusted by following eq. (213) in [1].
                model_output = (sigma_prev / 2) * model((step_output.prev_sample + 1) / 2 , sigma_prev / 2 ).sample
                step_output = self.scheduler.step_correct(
                    model_output , sigma_hat , sigma_prev , sample_hat , step_output.prev_sample , step_output["""derivative"""] , )
            sample = step_output.prev_sample
        sample = (sample / 2 + 0.5).clamp(0 , 1 )
        image = sample.cpu().permute(0 , 2 , 3 , 1 ).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image )
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=image ) | 697 | 1 |
'''simple docstring'''
from typing import Dict, List, Optional
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
__SCREAMING_SNAKE_CASE : Tuple = logging.get_logger(__name__)
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'nielsr/canine-s': 2_048,
}
# Unicode defines 1,114,112 total “codepoints”
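# (codepoints run from 0x0 through 0x10FFFF, so there are 0x110000 == 1_114_112 of them)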
UNICODE_VOCAB_SIZE = 1_114_112
# Below: Constants defining canonical codepoints for special, pseudo-characters.
# Copied from https://github.com/google-research/language/blob/master/language/canine/special_codepoints.py
PAD = 0
CLS = 0Xe_0_0_0
SEP = 0Xe_0_0_1
BOS = 0Xe_0_0_2
MASK = 0Xe_0_0_3
RESERVED = 0Xe_0_0_4
# Maps special codepoints to human-readable names.
SPECIAL_CODEPOINTS: Dict[int, str] = {
# Special symbols are represented using codepoints values that are valid,
# but designated as "Private Use", meaning that they will never be assigned
# characters by the Unicode Consortium, and are thus safe for use here.
#
# NOTE: Do *NOT* add any sort of [UNK_CHAR] here. They are explicitly
# excluded and should fail with a hard error.
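    # For reference, U+E000-U+F8FF is the BMP's Private Use Area, so the
    # pseudo-characters chosen above can never collide with assigned characters.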
CLS: "[CLS]",
SEP: "[SEP]",
BOS: "[BOS]",
MASK: "[MASK]",
PAD: "[PAD]",
RESERVED: "[RESERVED]",
}
# Maps special codepoint human-readable names to their codepoint values.
SPECIAL_CODEPOINTS_BY_NAME: Dict[str, int] = {name: codepoint for codepoint, name in SPECIAL_CODEPOINTS.items()}
class CanineTokenizer( PreTrainedTokenizer ):
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    def __init__( self , bos_token=chr(CLS ) , eos_token=chr(SEP ) , sep_token=chr(SEP ) , cls_token=chr(CLS ) , pad_token=chr(PAD ) , mask_token=chr(MASK ) , add_prefix_space=False , model_max_length=2048 , **kwargs , ):
        '''simple docstring'''
        bos_token = AddedToken(bos_token , lstrip=False , rstrip=False ) if isinstance(bos_token , str ) else bos_token
        eos_token = AddedToken(eos_token , lstrip=False , rstrip=False ) if isinstance(eos_token , str ) else eos_token
        sep_token = AddedToken(sep_token , lstrip=False , rstrip=False ) if isinstance(sep_token , str ) else sep_token
        cls_token = AddedToken(cls_token , lstrip=False , rstrip=False ) if isinstance(cls_token , str ) else cls_token
        pad_token = AddedToken(pad_token , lstrip=False , rstrip=False ) if isinstance(pad_token , str ) else pad_token
        # Mask token behaves like a normal word, i.e. includes the space before it
        mask_token = AddedToken(mask_token , lstrip=True , rstrip=False ) if isinstance(mask_token , str ) else mask_token
        super().__init__(
            bos_token=bos_token , eos_token=eos_token , sep_token=sep_token , cls_token=cls_token , pad_token=pad_token , mask_token=mask_token , add_prefix_space=add_prefix_space , model_max_length=model_max_length , **kwargs , )
# Creates a mapping for looking up the IDs of special symbols.
        self._special_codepoints: Dict[str, int] = {}
        for codepoint, name in SPECIAL_CODEPOINTS.items():
            self._special_codepoints[name] = codepoint
        # Creates a mapping for looking up the string forms of special symbol IDs.
        self._special_codepoint_strings: Dict[int, str] = {
            codepoint: name for name, codepoint in self._special_codepoints.items()
        }
        self._unicode_vocab_size = UNICODE_VOCAB_SIZE
        self._num_special_tokens = len(self._special_codepoints )
    @property
    def vocab_size( self ):
        '''simple docstring'''
        return self._unicode_vocab_size
    def _tokenize( self , text ):
        '''simple docstring'''
        return list(text )
    def _convert_token_to_id( self , token ):
        '''simple docstring'''
        try:
            return ord(token )
        except TypeError:
            raise ValueError(f"""invalid token: '{token}'""" )
    def _convert_id_to_token( self , index ):
        '''simple docstring'''
        try:
            if index in SPECIAL_CODEPOINTS:
                return SPECIAL_CODEPOINTS[index]
            return chr(index )
        except TypeError:
            raise ValueError(f"""invalid id: {index}""" )
    def convert_tokens_to_string( self , tokens ):
        '''simple docstring'''
        return "".join(tokens )
    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1 = None ):
'''simple docstring'''
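        # CANINE sequence format: [CLS] tokens_0 [SEP] and, for pairs,
        # [CLS] tokens_0 [SEP] tokens_1 [SEP].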
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        result = cls + token_ids_0 + sep
        if token_ids_1 is not None:
            result += token_ids_1 + sep
        return result
    def get_special_tokens_mask( self , token_ids_0 , token_ids_1 = None , already_has_special_tokens = False ):
        '''simple docstring'''
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0 , token_ids_1=token_ids_1 , already_has_special_tokens=already_has_special_tokens )
        result = [1] + ([0] * len(token_ids_0 )) + [1]
        if token_ids_1 is not None:
            result += ([0] * len(token_ids_1 )) + [1]
        return result
    def create_token_type_ids_from_sequences( self , token_ids_0 , token_ids_1 = None ):
        '''simple docstring'''
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        result = len(cls + token_ids_0 + sep ) * [0]
        if token_ids_1 is not None:
            result += len(token_ids_1 + sep ) * [1]
        return result
    def save_vocabulary( self , save_directory , filename_prefix = None ):
        '''simple docstring'''
        return () | 697 |
'''simple docstring'''
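# Project Euler 112: a "bouncy" number is neither monotonically increasing nor
# decreasing in its digits (e.g. 155349 is bouncy; 134468 and 66420 are not).
# The proportion of bouncy numbers first reaches 50% at 538 and 90% at 21780.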
def check_bouncy( n ) -> bool:
    if not isinstance(n , int ):
        raise ValueError("""check_bouncy() accepts only integer arguments""" )
    str_n = str(n )
    sorted_str_n = """""".join(sorted(str_n ) )
    return sorted_str_n != str_n and sorted_str_n[::-1] != str_n
def solution( percent = 9_9 ) -> int:
    if not 0 < percent < 1_0_0:
        raise ValueError("""solution() only accepts values from 0 to 100""" )
    bouncy_num = 0
    num = 1
    while True:
        if check_bouncy(num ):
            bouncy_num += 1
        if (bouncy_num / num) * 1_0_0 >= percent:
            return num
        num += 1
if __name__ == "__main__":
from doctest import testmod
testmod()
print(f'''{solution(99)}''') | 697 | 1 |
'''simple docstring'''
import ast
import os
import re
import shutil
import tempfile
import unittest
from unittest import mock
import torch
from accelerate.test_utils.examples import compare_against_test
from accelerate.test_utils.testing import TempDirTestCase, require_trackers, run_command, slow
from accelerate.utils import write_basic_config
# DataLoaders built from `test_samples/MRPC` for quick testing
# Should mock `{script_name}.get_dataloaders` via:
# @mock.patch("{script_name}.get_dataloaders", mocked_dataloaders)
__SCREAMING_SNAKE_CASE : Any = [
'cross_validation.py',
'gradient_accumulation.py',
'local_sgd.py',
'multi_process_metrics.py',
'memory.py',
'automatic_gradient_accumulation.py',
'fsdp_with_peak_mem_tracking.py',
'deepspeed_with_config_support.py',
'megatron_lm_gpt_pretraining.py',
]
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = None , __UpperCamelCase = None ):
'''simple docstring'''
__a : Tuple = None
__a : Any = os.path.abspath(os.path.join("""examples""" , """by_feature""" ) )
__a : int = os.path.abspath("""examples""" )
for item in os.listdir(__UpperCamelCase ):
if item not in EXCLUDE_EXAMPLES:
__a : Optional[Any] = os.path.join(__UpperCamelCase , __UpperCamelCase )
if os.path.isfile(__UpperCamelCase ) and ".py" in item_path:
with self.subTest(
tested_script=__UpperCamelCase , feature_script=__UpperCamelCase , tested_section="""main()""" if parser_only else """training_function()""" , ):
__a : Any = compare_against_test(
os.path.join(__UpperCamelCase , __UpperCamelCase ) , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
__a : Dict = """\n""".join(__UpperCamelCase )
if special_strings is not None:
for string in special_strings:
__a : Optional[Any] = diff.replace(__UpperCamelCase , """""" )
self.assertEqual(__UpperCamelCase , """""" )
def __lowerCamelCase ( self ):
'''simple docstring'''
self.one_complete_example("""complete_nlp_example.py""" , __UpperCamelCase )
self.one_complete_example("""complete_nlp_example.py""" , __UpperCamelCase )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Optional[Any] = os.path.abspath(os.path.join("""examples""" , """cv_example.py""" ) )
__a : Dict = [
""" """ * 16 + """{\n\n""",
""" """ * 20 + """\"accuracy\": eval_metric[\"accuracy\"],\n\n""",
""" """ * 20 + """\"f1\": eval_metric[\"f1\"],\n\n""",
""" """ * 20 + """\"train_loss\": total_loss.item() / len(train_dataloader),\n\n""",
""" """ * 20 + """\"epoch\": epoch,\n\n""",
""" """ * 16 + """},\n\n""",
""" """ * 16 + """step=epoch,\n""",
""" """ * 12,
""" """ * 8 + """for step, batch in enumerate(active_dataloader):\n""",
]
self.one_complete_example("""complete_cv_example.py""" , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
self.one_complete_example("""complete_cv_example.py""" , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
@mock.patch.dict(os.environ , {"TESTING_MOCKED_DATALOADERS": "1"} )
class SCREAMING_SNAKE_CASE__ ( __UpperCamelCase ):
lowercase__ = False
@classmethod
def __lowerCamelCase ( cls ):
'''simple docstring'''
super().setUpClass()
__a : Union[str, Any] = tempfile.mkdtemp()
__a : List[Any] = os.path.join(cls._tmpdir , """default_config.yml""" )
write_basic_config(save_location=cls.configPath )
__a : Any = ["""accelerate""", """launch""", """--config_file""", cls.configPath]
@classmethod
def __lowerCamelCase ( cls ):
'''simple docstring'''
super().tearDownClass()
shutil.rmtree(cls._tmpdir )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : List[str] = f"""
examples/by_feature/checkpointing.py
--checkpointing_steps epoch
--output_dir {self.tmpdir}
""".split()
run_command(self._launch_args + testargs )
self.assertTrue(os.path.exists(os.path.join(self.tmpdir , """epoch_0""" ) ) )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : str = f"""
examples/by_feature/checkpointing.py
--checkpointing_steps 1
--output_dir {self.tmpdir}
""".split()
__a : Optional[Any] = run_command(self._launch_args + testargs )
self.assertTrue(os.path.exists(os.path.join(self.tmpdir , """step_2""" ) ) )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Optional[int] = f"""
examples/by_feature/checkpointing.py
--resume_from_checkpoint {os.path.join(self.tmpdir , "epoch_0" )}
""".split()
__a : Union[str, Any] = run_command(self._launch_args + testargs , return_stdout=__UpperCamelCase )
self.assertNotIn("""epoch 0:""" , __UpperCamelCase )
self.assertIn("""epoch 1:""" , __UpperCamelCase )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Tuple = f"""
examples/by_feature/checkpointing.py
--resume_from_checkpoint {os.path.join(self.tmpdir , "step_2" )}
""".split()
__a : Optional[int] = run_command(self._launch_args + testargs , return_stdout=__UpperCamelCase )
if torch.cuda.is_available():
__a : Any = torch.cuda.device_count()
else:
__a : Union[str, Any] = 1
if num_processes > 1:
self.assertNotIn("""epoch 0:""" , __UpperCamelCase )
self.assertIn("""epoch 1:""" , __UpperCamelCase )
else:
self.assertIn("""epoch 0:""" , __UpperCamelCase )
self.assertIn("""epoch 1:""" , __UpperCamelCase )
@slow
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Union[str, Any] = """
examples/by_feature/cross_validation.py
--num_folds 2
""".split()
with mock.patch.dict(os.environ , {"""TESTING_MOCKED_DATALOADERS""": """0"""} ):
__a : Any = run_command(self._launch_args + testargs , return_stdout=__UpperCamelCase )
__a : List[str] = re.findall("""({.+})""" , __UpperCamelCase )
__a : Optional[int] = [r for r in results if """accuracy""" in r][-1]
__a : Tuple = ast.literal_eval(__UpperCamelCase )
self.assertGreaterEqual(results["""accuracy"""] , 0.7_5 )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Union[str, Any] = ["""examples/by_feature/multi_process_metrics.py"""]
run_command(self._launch_args + testargs )
@require_trackers
@mock.patch.dict(os.environ , {"""WANDB_MODE""": """offline"""} )
def __lowerCamelCase ( self ):
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdir:
__a : int = f"""
examples/by_feature/tracking.py
--with_tracking
--project_dir {tmpdir}
""".split()
run_command(self._launch_args + testargs )
self.assertTrue(os.path.exists(os.path.join(__UpperCamelCase , """tracking""" ) ) )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Optional[int] = ["""examples/by_feature/gradient_accumulation.py"""]
run_command(self._launch_args + testargs )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Optional[Any] = ["""examples/by_feature/local_sgd.py"""]
run_command(self._launch_args + testargs ) | 697 |
'''simple docstring'''
import argparse
import torch
from transformers import GPT2Config, GPT2Model, load_tf_weights_in_gpt2
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def convert_gpt2_checkpoint_to_pytorch( gpt2_checkpoint_path , gpt2_config_file , pytorch_dump_folder_path ):
    # Construct model
    if gpt2_config_file == "":
        config = GPT2Config()
    else:
        config = GPT2Config.from_json_file(gpt2_config_file )
    model = GPT2Model(config )
    # Load weights from numpy
    load_tf_weights_in_gpt2(model , config , gpt2_checkpoint_path )
    # Save pytorch-model
    pytorch_weights_dump_path = pytorch_dump_folder_path + """/""" + WEIGHTS_NAME
    pytorch_config_dump_path = pytorch_dump_folder_path + """/""" + CONFIG_NAME
    print(F"""Save PyTorch model to {pytorch_weights_dump_path}""" )
    torch.save(model.state_dict() , pytorch_weights_dump_path )
    print(F"""Save configuration file to {pytorch_config_dump_path}""" )
    with open(pytorch_config_dump_path , """w""" , encoding="""utf-8""" ) as f:
        f.write(config.to_json_string() )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
        '--gpt2_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint.'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--gpt2_config_file',
default='',
type=str,
help=(
'An optional config json file corresponding to the pre-trained OpenAI model. \n'
'This specifies the model architecture.'
),
)
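    # Example invocation (paths are hypothetical):
    #   python convert_gpt2_checkpoint_to_pytorch.py \
    #       --gpt2_checkpoint_path ./model.ckpt --pytorch_dump_folder_path ./gpt2-pt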
    args = parser.parse_args()
    convert_gpt2_checkpoint_to_pytorch(args.gpt2_checkpoint_path, args.gpt2_config_file, args.pytorch_dump_folder_path) | 697 | 1 |
'''simple docstring'''
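# Archimedes' principle: the buoyant force on a submerged object equals the
# weight of the fluid it displaces, F = fluid_density * gravity * volume.
# For example, 0.5 m^3 of water (1000 kg/m^3) displaces 1000 * 9.80665 * 0.5 ~= 4903 N.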
g = 9.80_665
def archimedes_principle( fluid_density , volume , gravity = g ) -> float:
if fluid_density <= 0:
raise ValueError("""Impossible fluid density""" )
if volume < 0:
raise ValueError("""Impossible Object volume""" )
if gravity <= 0:
raise ValueError("""Impossible Gravity""" )
return fluid_density * gravity * volume
if __name__ == "__main__":
import doctest
# run doctest
doctest.testmod() | 697 |
'''simple docstring'''
import unittest
from transformers import (
MODEL_FOR_OBJECT_DETECTION_MAPPING,
AutoFeatureExtractor,
AutoModelForObjectDetection,
ObjectDetectionPipeline,
is_vision_available,
pipeline,
)
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_pytesseract,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class SCREAMING_SNAKE_CASE__ :
@staticmethod
def __lowerCamelCase ( *__UpperCamelCase , **__UpperCamelCase ):
'''simple docstring'''
pass
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
lowercase__ = MODEL_FOR_OBJECT_DETECTION_MAPPING
def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ):
'''simple docstring'''
__a : Optional[Any] = ObjectDetectionPipeline(model=__UpperCamelCase , image_processor=__UpperCamelCase )
return object_detector, ["./tests/fixtures/tests_samples/COCO/000000039769.png"]
def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase ):
'''simple docstring'''
__a : List[str] = object_detector("""./tests/fixtures/tests_samples/COCO/000000039769.png""" , threshold=0.0 )
self.assertGreater(len(__UpperCamelCase ) , 0 )
for detected_object in outputs:
self.assertEqual(
__UpperCamelCase , {
"""score""": ANY(__UpperCamelCase ),
"""label""": ANY(__UpperCamelCase ),
"""box""": {"""xmin""": ANY(__UpperCamelCase ), """ymin""": ANY(__UpperCamelCase ), """xmax""": ANY(__UpperCamelCase ), """ymax""": ANY(__UpperCamelCase )},
} , )
import datasets
__a : Optional[int] = datasets.load_dataset("""hf-internal-testing/fixtures_image_utils""" , """image""" , split="""test""" )
__a : Tuple = [
Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ),
"""http://images.cocodataset.org/val2017/000000039769.jpg""",
# RGBA
dataset[0]["""file"""],
# LA
dataset[1]["""file"""],
# L
dataset[2]["""file"""],
]
__a : Any = object_detector(__UpperCamelCase , threshold=0.0 )
self.assertEqual(len(__UpperCamelCase ) , len(__UpperCamelCase ) )
for outputs in batch_outputs:
self.assertGreater(len(__UpperCamelCase ) , 0 )
for detected_object in outputs:
self.assertEqual(
__UpperCamelCase , {
"""score""": ANY(__UpperCamelCase ),
"""label""": ANY(__UpperCamelCase ),
"""box""": {"""xmin""": ANY(__UpperCamelCase ), """ymin""": ANY(__UpperCamelCase ), """xmax""": ANY(__UpperCamelCase ), """ymax""": ANY(__UpperCamelCase )},
} , )
@require_tf
@unittest.skip("""Object detection not implemented in TF""" )
def __lowerCamelCase ( self ):
'''simple docstring'''
pass
@require_torch
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Optional[Any] = """hf-internal-testing/tiny-detr-mobilenetsv3"""
__a : Dict = AutoModelForObjectDetection.from_pretrained(__UpperCamelCase )
__a : Optional[Any] = AutoFeatureExtractor.from_pretrained(__UpperCamelCase )
__a : str = ObjectDetectionPipeline(model=__UpperCamelCase , feature_extractor=__UpperCamelCase )
__a : Optional[int] = object_detector("""http://images.cocodataset.org/val2017/000000039769.jpg""" , threshold=0.0 )
self.assertEqual(
nested_simplify(__UpperCamelCase , decimals=4 ) , [
{"""score""": 0.3_3_7_6, """label""": """LABEL_0""", """box""": {"""xmin""": 159, """ymin""": 120, """xmax""": 480, """ymax""": 359}},
{"""score""": 0.3_3_7_6, """label""": """LABEL_0""", """box""": {"""xmin""": 159, """ymin""": 120, """xmax""": 480, """ymax""": 359}},
] , )
__a : Union[str, Any] = object_detector(
[
"""http://images.cocodataset.org/val2017/000000039769.jpg""",
"""http://images.cocodataset.org/val2017/000000039769.jpg""",
] , threshold=0.0 , )
self.assertEqual(
nested_simplify(__UpperCamelCase , decimals=4 ) , [
[
{"""score""": 0.3_3_7_6, """label""": """LABEL_0""", """box""": {"""xmin""": 159, """ymin""": 120, """xmax""": 480, """ymax""": 359}},
{"""score""": 0.3_3_7_6, """label""": """LABEL_0""", """box""": {"""xmin""": 159, """ymin""": 120, """xmax""": 480, """ymax""": 359}},
],
[
{"""score""": 0.3_3_7_6, """label""": """LABEL_0""", """box""": {"""xmin""": 159, """ymin""": 120, """xmax""": 480, """ymax""": 359}},
{"""score""": 0.3_3_7_6, """label""": """LABEL_0""", """box""": {"""xmin""": 159, """ymin""": 120, """xmax""": 480, """ymax""": 359}},
],
] , )
@require_torch
@slow
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : str = """facebook/detr-resnet-50"""
__a : Dict = AutoModelForObjectDetection.from_pretrained(__UpperCamelCase )
__a : int = AutoFeatureExtractor.from_pretrained(__UpperCamelCase )
__a : int = ObjectDetectionPipeline(model=__UpperCamelCase , feature_extractor=__UpperCamelCase )
__a : Any = object_detector("""http://images.cocodataset.org/val2017/000000039769.jpg""" )
self.assertEqual(
nested_simplify(__UpperCamelCase , decimals=4 ) , [
{"""score""": 0.9_9_8_2, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 70, """xmax""": 175, """ymax""": 117}},
{"""score""": 0.9_9_6_0, """label""": """remote""", """box""": {"""xmin""": 333, """ymin""": 72, """xmax""": 368, """ymax""": 187}},
{"""score""": 0.9_9_5_5, """label""": """couch""", """box""": {"""xmin""": 0, """ymin""": 1, """xmax""": 639, """ymax""": 473}},
{"""score""": 0.9_9_8_8, """label""": """cat""", """box""": {"""xmin""": 13, """ymin""": 52, """xmax""": 314, """ymax""": 470}},
{"""score""": 0.9_9_8_7, """label""": """cat""", """box""": {"""xmin""": 345, """ymin""": 23, """xmax""": 640, """ymax""": 368}},
] , )
__a : Optional[Any] = object_detector(
[
"""http://images.cocodataset.org/val2017/000000039769.jpg""",
"""http://images.cocodataset.org/val2017/000000039769.jpg""",
] )
self.assertEqual(
nested_simplify(__UpperCamelCase , decimals=4 ) , [
[
{"""score""": 0.9_9_8_2, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 70, """xmax""": 175, """ymax""": 117}},
{"""score""": 0.9_9_6_0, """label""": """remote""", """box""": {"""xmin""": 333, """ymin""": 72, """xmax""": 368, """ymax""": 187}},
{"""score""": 0.9_9_5_5, """label""": """couch""", """box""": {"""xmin""": 0, """ymin""": 1, """xmax""": 639, """ymax""": 473}},
{"""score""": 0.9_9_8_8, """label""": """cat""", """box""": {"""xmin""": 13, """ymin""": 52, """xmax""": 314, """ymax""": 470}},
{"""score""": 0.9_9_8_7, """label""": """cat""", """box""": {"""xmin""": 345, """ymin""": 23, """xmax""": 640, """ymax""": 368}},
],
[
{"""score""": 0.9_9_8_2, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 70, """xmax""": 175, """ymax""": 117}},
{"""score""": 0.9_9_6_0, """label""": """remote""", """box""": {"""xmin""": 333, """ymin""": 72, """xmax""": 368, """ymax""": 187}},
{"""score""": 0.9_9_5_5, """label""": """couch""", """box""": {"""xmin""": 0, """ymin""": 1, """xmax""": 639, """ymax""": 473}},
{"""score""": 0.9_9_8_8, """label""": """cat""", """box""": {"""xmin""": 13, """ymin""": 52, """xmax""": 314, """ymax""": 470}},
{"""score""": 0.9_9_8_7, """label""": """cat""", """box""": {"""xmin""": 345, """ymin""": 23, """xmax""": 640, """ymax""": 368}},
],
] , )
@require_torch
@slow
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : int = """facebook/detr-resnet-50"""
__a : Optional[int] = pipeline("""object-detection""" , model=__UpperCamelCase )
__a : Optional[int] = object_detector("""http://images.cocodataset.org/val2017/000000039769.jpg""" )
self.assertEqual(
nested_simplify(__UpperCamelCase , decimals=4 ) , [
{"""score""": 0.9_9_8_2, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 70, """xmax""": 175, """ymax""": 117}},
{"""score""": 0.9_9_6_0, """label""": """remote""", """box""": {"""xmin""": 333, """ymin""": 72, """xmax""": 368, """ymax""": 187}},
{"""score""": 0.9_9_5_5, """label""": """couch""", """box""": {"""xmin""": 0, """ymin""": 1, """xmax""": 639, """ymax""": 473}},
{"""score""": 0.9_9_8_8, """label""": """cat""", """box""": {"""xmin""": 13, """ymin""": 52, """xmax""": 314, """ymax""": 470}},
{"""score""": 0.9_9_8_7, """label""": """cat""", """box""": {"""xmin""": 345, """ymin""": 23, """xmax""": 640, """ymax""": 368}},
] , )
__a : List[str] = object_detector(
[
"""http://images.cocodataset.org/val2017/000000039769.jpg""",
"""http://images.cocodataset.org/val2017/000000039769.jpg""",
] )
self.assertEqual(
nested_simplify(__UpperCamelCase , decimals=4 ) , [
[
{"""score""": 0.9_9_8_2, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 70, """xmax""": 175, """ymax""": 117}},
{"""score""": 0.9_9_6_0, """label""": """remote""", """box""": {"""xmin""": 333, """ymin""": 72, """xmax""": 368, """ymax""": 187}},
{"""score""": 0.9_9_5_5, """label""": """couch""", """box""": {"""xmin""": 0, """ymin""": 1, """xmax""": 639, """ymax""": 473}},
{"""score""": 0.9_9_8_8, """label""": """cat""", """box""": {"""xmin""": 13, """ymin""": 52, """xmax""": 314, """ymax""": 470}},
{"""score""": 0.9_9_8_7, """label""": """cat""", """box""": {"""xmin""": 345, """ymin""": 23, """xmax""": 640, """ymax""": 368}},
],
[
{"""score""": 0.9_9_8_2, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 70, """xmax""": 175, """ymax""": 117}},
{"""score""": 0.9_9_6_0, """label""": """remote""", """box""": {"""xmin""": 333, """ymin""": 72, """xmax""": 368, """ymax""": 187}},
{"""score""": 0.9_9_5_5, """label""": """couch""", """box""": {"""xmin""": 0, """ymin""": 1, """xmax""": 639, """ymax""": 473}},
{"""score""": 0.9_9_8_8, """label""": """cat""", """box""": {"""xmin""": 13, """ymin""": 52, """xmax""": 314, """ymax""": 470}},
{"""score""": 0.9_9_8_7, """label""": """cat""", """box""": {"""xmin""": 345, """ymin""": 23, """xmax""": 640, """ymax""": 368}},
],
] , )
@require_torch
@slow
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Union[str, Any] = 0.9_9_8_5
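        # Only detections scoring above the threshold are returned, so the
        # remotes and the couch (scores below 0.9985) are filtered out here.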
__a : Union[str, Any] = """facebook/detr-resnet-50"""
__a : Optional[int] = pipeline("""object-detection""" , model=__UpperCamelCase )
__a : Union[str, Any] = object_detector("""http://images.cocodataset.org/val2017/000000039769.jpg""" , threshold=__UpperCamelCase )
self.assertEqual(
nested_simplify(__UpperCamelCase , decimals=4 ) , [
{"""score""": 0.9_9_8_8, """label""": """cat""", """box""": {"""xmin""": 13, """ymin""": 52, """xmax""": 314, """ymax""": 470}},
{"""score""": 0.9_9_8_7, """label""": """cat""", """box""": {"""xmin""": 345, """ymin""": 23, """xmax""": 640, """ymax""": 368}},
] , )
@require_torch
@require_pytesseract
@slow
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : str = """Narsil/layoutlmv3-finetuned-funsd"""
__a : List[Any] = 0.9_9_9_3
__a : Dict = pipeline("""object-detection""" , model=__UpperCamelCase , threshold=__UpperCamelCase )
__a : List[str] = object_detector(
"""https://huggingface.co/spaces/impira/docquery/resolve/2359223c1837a7587402bda0f2643382a6eefeab/invoice.png""" )
self.assertEqual(
nested_simplify(__UpperCamelCase , decimals=4 ) , [
{"""score""": 0.9_9_9_3, """label""": """I-ANSWER""", """box""": {"""xmin""": 294, """ymin""": 254, """xmax""": 343, """ymax""": 264}},
{"""score""": 0.9_9_9_3, """label""": """I-ANSWER""", """box""": {"""xmin""": 294, """ymin""": 254, """xmax""": 343, """ymax""": 264}},
] , ) | 697 | 1 |
'''simple docstring'''
from __future__ import annotations
from collections.abc import Iterator
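# Node is a plain binary-tree node; BinaryTreeNodeSum.depth_first_search returns
# the sum of every value in the subtree rooted at `node` (an empty subtree adds 0).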
class Node :
    def __init__( self , value ):
        '''simple docstring'''
        self.value = value
        self.left: Node | None = None
        self.right: Node | None = None
class BinaryTreeNodeSum :
    def __init__( self , tree ):
        '''simple docstring'''
        self.tree = tree
    def depth_first_search( self , node ):
'''simple docstring'''
if node is None:
return 0
return node.value + (
self.depth_first_search(node.left ) + self.depth_first_search(node.right )
)
def __iter__( self ):
'''simple docstring'''
yield self.depth_first_search(self.tree )
if __name__ == "__main__":
import doctest
doctest.testmod() | 697 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'configuration_blenderbot_small': [
'BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP',
'BlenderbotSmallConfig',
'BlenderbotSmallOnnxConfig',
],
'tokenization_blenderbot_small': ['BlenderbotSmallTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_blenderbot_small_fast'] = ['BlenderbotSmallTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_blenderbot_small'] = [
        'BLENDERBOT_SMALL_PRETRAINED_MODEL_ARCHIVE_LIST',
        'BlenderbotSmallForCausalLM',
        'BlenderbotSmallForConditionalGeneration',
        'BlenderbotSmallModel',
        'BlenderbotSmallPreTrainedModel',
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_blenderbot_small'] = [
        'TFBlenderbotSmallForConditionalGeneration',
        'TFBlenderbotSmallModel',
        'TFBlenderbotSmallPreTrainedModel',
    ]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_flax_blenderbot_small'] = [
        'FlaxBlenderbotSmallForConditionalGeneration',
        'FlaxBlenderbotSmallModel',
        'FlaxBlenderbotSmallPreTrainedModel',
    ]
if TYPE_CHECKING:
from .configuration_blenderbot_small import (
BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlenderbotSmallConfig,
BlenderbotSmallOnnxConfig,
)
from .tokenization_blenderbot_small import BlenderbotSmallTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_blenderbot_small_fast import BlenderbotSmallTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blenderbot_small import (
BLENDERBOT_SMALL_PRETRAINED_MODEL_ARCHIVE_LIST,
BlenderbotSmallForCausalLM,
BlenderbotSmallForConditionalGeneration,
BlenderbotSmallModel,
BlenderbotSmallPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blenderbot_small import (
TFBlenderbotSmallForConditionalGeneration,
TFBlenderbotSmallModel,
TFBlenderbotSmallPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_blenderbot_small import (
FlaxBlenderbotSmallForConditionalGeneration,
FlaxBlenderbotSmallModel,
FlaxBlenderbotSmallPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) | 697 | 1 |
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
    from transformers import LayoutLMv3ImageProcessor
class LayoutLMv3ImageProcessingTester( unittest.TestCase ):
    def __init__( self , parent , batch_size=7 , num_channels=3 , image_size=18 , min_resolution=30 , max_resolution=400 , do_resize=True , size=None , apply_ocr=True , ):
        '''simple docstring'''
        size = size if size is not None else {"""height""": 18, """width""": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.apply_ocr = apply_ocr
def __lowerCamelCase ( self ):
'''simple docstring'''
return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class SCREAMING_SNAKE_CASE__ ( __UpperCamelCase , unittest.TestCase ):
    image_processing_class = LayoutLMv3ImageProcessor if is_pytesseract_available() else None
def __lowerCamelCase ( self ):
'''simple docstring'''
        self.image_processor_tester = LayoutLMv3ImageProcessingTester(self )
@property
def __lowerCamelCase ( self ):
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : List[str] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__UpperCamelCase , """do_resize""" ) )
self.assertTrue(hasattr(__UpperCamelCase , """size""" ) )
self.assertTrue(hasattr(__UpperCamelCase , """apply_ocr""" ) )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Any = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"""height""": 18, """width""": 18} )
__a : str = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
self.assertEqual(image_processor.size , {"""height""": 42, """width""": 42} )
def __lowerCamelCase ( self ):
'''simple docstring'''
pass
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : List[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__a : Dict = prepare_image_inputs(self.image_processor_tester , equal_resolution=__UpperCamelCase )
for image in image_inputs:
self.assertIsInstance(__UpperCamelCase , Image.Image )
# Test not batched input
__a : int = image_processing(image_inputs[0] , return_tensors="""pt""" )
self.assertEqual(
encoding.pixel_values.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
self.assertIsInstance(encoding.words , __UpperCamelCase )
self.assertIsInstance(encoding.boxes , __UpperCamelCase )
# Test batched
__a : str = image_processing(__UpperCamelCase , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Optional[int] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__a : Any = prepare_image_inputs(self.image_processor_tester , equal_resolution=__UpperCamelCase , numpify=__UpperCamelCase )
for image in image_inputs:
self.assertIsInstance(__UpperCamelCase , np.ndarray )
# Test not batched input
__a : Optional[int] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
# Test batched
__a : int = image_processing(__UpperCamelCase , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : List[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__a : Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__UpperCamelCase , torchify=__UpperCamelCase )
for image in image_inputs:
self.assertIsInstance(__UpperCamelCase , torch.Tensor )
# Test not batched input
__a : Tuple = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
# Test batched
__a : int = image_processing(__UpperCamelCase , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Optional[Any] = LayoutLMvaImageProcessor()
from datasets import load_dataset
__a : List[str] = load_dataset("""hf-internal-testing/fixtures_docvqa""" , split="""test""" )
__a : Optional[int] = Image.open(ds[0]["""file"""] ).convert("""RGB""" )
__a : int = image_processing(__UpperCamelCase , return_tensors="""pt""" )
self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) )
self.assertEqual(len(encoding.words ) , len(encoding.boxes ) )
# fmt: off
# the words and boxes were obtained with Tesseract 4.1.1
__a : List[str] = [["""11:14""", """to""", """11:39""", """a.m""", """11:39""", """to""", """11:44""", """a.m.""", """11:44""", """a.m.""", """to""", """12:25""", """p.m.""", """12:25""", """to""", """12:58""", """p.m.""", """12:58""", """to""", """4:00""", """p.m.""", """2:00""", """to""", """5:00""", """p.m.""", """Coffee""", """Break""", """Coffee""", """will""", """be""", """served""", """for""", """men""", """and""", """women""", """in""", """the""", """lobby""", """adjacent""", """to""", """exhibit""", """area.""", """Please""", """move""", """into""", """exhibit""", """area.""", """(Exhibits""", """Open)""", """TRRF""", """GENERAL""", """SESSION""", """(PART""", """|)""", """Presiding:""", """Lee""", """A.""", """Waller""", """TRRF""", """Vice""", """President""", """“Introductory""", """Remarks”""", """Lee""", """A.""", """Waller,""", """TRRF""", """Vice""", """Presi-""", """dent""", """Individual""", """Interviews""", """with""", """TRRF""", """Public""", """Board""", """Members""", """and""", """Sci-""", """entific""", """Advisory""", """Council""", """Mem-""", """bers""", """Conducted""", """by""", """TRRF""", """Treasurer""", """Philip""", """G.""", """Kuehn""", """to""", """get""", """answers""", """which""", """the""", """public""", """refrigerated""", """warehousing""", """industry""", """is""", """looking""", """for.""", """Plus""", """questions""", """from""", """the""", """floor.""", """Dr.""", """Emil""", """M.""", """Mrak,""", """University""", """of""", """Cal-""", """ifornia,""", """Chairman,""", """TRRF""", """Board;""", """Sam""", """R.""", """Cecil,""", """University""", """of""", """Georgia""", """College""", """of""", """Agriculture;""", """Dr.""", """Stanley""", """Charm,""", """Tufts""", """University""", """School""", """of""", """Medicine;""", """Dr.""", """Robert""", """H.""", """Cotton,""", """ITT""", """Continental""", """Baking""", """Company;""", """Dr.""", """Owen""", """Fennema,""", """University""", """of""", """Wis-""", """consin;""", """Dr.""", """Robert""", """E.""", """Hardenburg,""", """USDA.""", """Questions""", """and""", """Answers""", """Exhibits""", """Open""", """Capt.""", """Jack""", """Stoney""", """Room""", """TRRF""", """Scientific""", """Advisory""", """Council""", """Meeting""", """Ballroom""", """Foyer"""]] # noqa: E231
__a : str = [[[141, 57, 214, 69], [228, 58, 252, 69], [141, 75, 216, 88], [230, 79, 280, 88], [142, 260, 218, 273], [230, 261, 255, 273], [143, 279, 218, 290], [231, 282, 290, 291], [143, 342, 218, 354], [231, 345, 289, 355], [202, 362, 227, 373], [143, 379, 220, 392], [231, 382, 291, 394], [144, 714, 220, 726], [231, 715, 256, 726], [144, 732, 220, 745], [232, 736, 291, 747], [144, 769, 218, 782], [231, 770, 256, 782], [141, 788, 202, 801], [215, 791, 274, 804], [143, 826, 204, 838], [215, 826, 240, 838], [142, 844, 202, 857], [215, 847, 274, 859], [334, 57, 427, 69], [440, 57, 522, 69], [369, 75, 461, 88], [469, 75, 516, 88], [528, 76, 562, 88], [570, 76, 667, 88], [675, 75, 711, 87], [721, 79, 778, 88], [789, 75, 840, 88], [369, 97, 470, 107], [484, 94, 507, 106], [518, 94, 562, 107], [576, 94, 655, 110], [668, 94, 792, 109], [804, 95, 829, 107], [369, 113, 465, 125], [477, 116, 547, 125], [562, 113, 658, 125], [671, 116, 748, 125], [761, 113, 811, 125], [369, 131, 465, 143], [477, 133, 548, 143], [563, 130, 698, 145], [710, 130, 802, 146], [336, 171, 412, 183], [423, 171, 572, 183], [582, 170, 716, 184], [728, 171, 817, 187], [829, 171, 844, 186], [338, 197, 482, 212], [507, 196, 557, 209], [569, 196, 595, 208], [610, 196, 702, 209], [505, 214, 583, 226], [595, 214, 656, 227], [670, 215, 807, 227], [335, 259, 543, 274], [556, 259, 708, 272], [372, 279, 422, 291], [435, 279, 460, 291], [474, 279, 574, 292], [587, 278, 664, 291], [676, 278, 738, 291], [751, 279, 834, 291], [372, 298, 434, 310], [335, 341, 483, 354], [497, 341, 655, 354], [667, 341, 728, 354], [740, 341, 825, 354], [335, 360, 430, 372], [442, 360, 534, 372], [545, 359, 687, 372], [697, 360, 754, 372], [765, 360, 823, 373], [334, 378, 428, 391], [440, 378, 577, 394], [590, 378, 705, 391], [720, 378, 801, 391], [334, 397, 400, 409], [370, 416, 529, 429], [544, 416, 576, 432], [587, 416, 665, 428], [677, 416, 814, 429], [372, 435, 452, 450], [465, 434, 495, 447], [511, 434, 600, 447], [611, 436, 637, 447], [649, 436, 694, 451], [705, 438, 824, 447], [369, 453, 452, 466], [464, 454, 509, 466], [522, 453, 611, 469], [625, 453, 792, 469], [370, 472, 556, 488], [570, 472, 684, 487], [697, 472, 718, 485], [732, 472, 835, 488], [369, 490, 411, 503], [425, 490, 484, 503], [496, 490, 635, 506], [645, 490, 707, 503], [718, 491, 761, 503], [771, 490, 840, 503], [336, 510, 374, 521], [388, 510, 447, 522], [460, 510, 489, 521], [503, 510, 580, 522], [592, 509, 736, 525], [745, 509, 770, 522], [781, 509, 840, 522], [338, 528, 434, 541], [448, 528, 596, 541], [609, 527, 687, 540], [700, 528, 792, 541], [336, 546, 397, 559], [407, 546, 431, 559], [443, 546, 525, 560], [537, 546, 680, 562], [688, 546, 714, 559], [722, 546, 837, 562], [336, 565, 449, 581], [461, 565, 485, 577], [497, 565, 665, 581], [681, 565, 718, 577], [732, 565, 837, 580], [337, 584, 438, 597], [452, 583, 521, 596], [535, 584, 677, 599], [690, 583, 787, 596], [801, 583, 825, 596], [338, 602, 478, 615], [492, 602, 530, 614], [543, 602, 638, 615], [650, 602, 676, 614], [688, 602, 788, 615], [802, 602, 843, 614], [337, 621, 502, 633], [516, 621, 615, 637], [629, 621, 774, 636], [789, 621, 827, 633], [337, 639, 418, 652], [432, 640, 571, 653], [587, 639, 731, 655], [743, 639, 769, 652], [780, 639, 841, 652], [338, 658, 440, 673], [455, 658, 491, 670], [508, 658, 602, 671], [616, 658, 638, 670], [654, 658, 835, 674], [337, 677, 429, 689], [337, 714, 482, 726], [495, 714, 548, 726], [561, 714, 683, 726], [338, 770, 461, 782], [474, 769, 554, 785], [489, 788, 562, 803], [576, 
788, 643, 801], [656, 787, 751, 804], [764, 788, 844, 801], [334, 825, 421, 838], [430, 824, 574, 838], [584, 824, 723, 841], [335, 844, 450, 857], [464, 843, 583, 860], [628, 862, 755, 875], [769, 861, 848, 878]]] # noqa: E231
# fmt: on
self.assertListEqual(encoding.words , __UpperCamelCase )
self.assertListEqual(encoding.boxes , __UpperCamelCase )
        # with apply_ocr = False
__a : Union[str, Any] = LayoutLMvaImageProcessor(apply_ocr=__UpperCamelCase )
__a : str = image_processing(__UpperCamelCase , return_tensors="""pt""" )
self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) ) | 697 |
'''simple docstring'''
import numpy as np
import torch
from torch.utils.data import Dataset
from utils import logger
class SCREAMING_SNAKE_CASE__ ( __UpperCamelCase ):
def __init__( self , __UpperCamelCase , __UpperCamelCase ):
'''simple docstring'''
__a : Any = params
__a : Optional[Any] = np.array(__UpperCamelCase )
__a : Union[str, Any] = np.array([len(__UpperCamelCase ) for t in data] )
self.check()
self.remove_long_sequences()
self.remove_empty_sequences()
self.remove_unknown_sequences()
self.check()
self.print_statistics()
def __getitem__( self , __UpperCamelCase ):
'''simple docstring'''
return (self.token_ids[index], self.lengths[index])
def __len__( self ):
'''simple docstring'''
return len(self.lengths )
def __lowerCamelCase ( self ):
'''simple docstring'''
assert len(self.token_ids ) == len(self.lengths )
assert all(self.lengths[i] == len(self.token_ids[i] ) for i in range(len(self.lengths ) ) )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Tuple = self.params.max_model_input_size
__a : Union[str, Any] = self.lengths > max_len
logger.info(f"""Splitting {sum(__UpperCamelCase )} too long sequences.""" )
def divide_chunks(__UpperCamelCase , __UpperCamelCase ):
return [l[i : i + n] for i in range(0 , len(__UpperCamelCase ) , __UpperCamelCase )]
__a : int = []
__a : Union[str, Any] = []
if self.params.mlm:
__a , __a : Any = self.params.special_tok_ids["""cls_token"""], self.params.special_tok_ids["""sep_token"""]
else:
__a , __a : str = self.params.special_tok_ids["""bos_token"""], self.params.special_tok_ids["""eos_token"""]
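        # walk every sequence: keep those already within max_len, split the rest into chunks and re-insert the boundary special tokens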
for seq_, len_ in zip(self.token_ids , self.lengths ):
assert (seq_[0] == cls_id) and (seq_[-1] == sep_id), seq_
if len_ <= max_len:
new_tok_ids.append(seq_ )
new_lengths.append(len_ )
else:
__a : Any = []
for sub_s in divide_chunks(seq_ , max_len - 2 ):
if sub_s[0] != cls_id:
__a : int = np.insert(__UpperCamelCase , 0 , __UpperCamelCase )
if sub_s[-1] != sep_id:
__a : str = np.insert(__UpperCamelCase , len(__UpperCamelCase ) , __UpperCamelCase )
assert len(__UpperCamelCase ) <= max_len
assert (sub_s[0] == cls_id) and (sub_s[-1] == sep_id), sub_s
sub_seqs.append(__UpperCamelCase )
new_tok_ids.extend(__UpperCamelCase )
new_lengths.extend([len(__UpperCamelCase ) for l in sub_seqs] )
__a : Dict = np.array(__UpperCamelCase )
__a : Tuple = np.array(__UpperCamelCase )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : List[str] = len(self )
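        # despite the method name, this drops every sequence of 11 tokens or fewer, not only empty ones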
__a : List[str] = self.lengths > 11
__a : int = self.token_ids[indices]
__a : Union[str, Any] = self.lengths[indices]
__a : Any = len(self )
logger.info(f"""Remove {init_size - new_size} too short (<=11 tokens) sequences.""" )
def __lowerCamelCase ( self ):
'''simple docstring'''
if "unk_token" not in self.params.special_tok_ids:
return
else:
__a : List[str] = self.params.special_tok_ids["""unk_token"""]
__a : str = len(self )
__a : str = np.array([np.count_nonzero(a == unk_token_id ) for a in self.token_ids] )
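            # keep only sequences where fewer than half of the tokens are unknown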
__a : Optional[Any] = (unk_occs / self.lengths) < 0.5
__a : List[str] = self.token_ids[indices]
__a : Optional[int] = self.lengths[indices]
__a : Any = len(self )
logger.info(f"""Remove {init_size - new_size} sequences with a high level of unknown tokens (50%).""" )
def __lowerCamelCase ( self ):
'''simple docstring'''
if not self.params.is_master:
return
logger.info(f"""{len(self )} sequences""" )
# data_len = sum(self.lengths)
# nb_unique_tokens = len(Counter(list(chain(*self.token_ids))))
# logger.info(f'{data_len} tokens ({nb_unique_tokens} unique)')
# unk_idx = self.params.special_tok_ids['unk_token']
# nb_unknown = sum([(t==unk_idx).sum() for t in self.token_ids])
# logger.info(f'{nb_unknown} unknown tokens (covering {100*nb_unknown/data_len:.2f}% of the data)')
def __lowerCamelCase ( self , __UpperCamelCase ):
'''simple docstring'''
__a : List[str] = [t[0] for t in batch]
__a : str = [t[1] for t in batch]
assert len(__UpperCamelCase ) == len(__UpperCamelCase )
# Max for paddings
__a : Optional[int] = max(__UpperCamelCase )
# Pad token ids
if self.params.mlm:
__a : int = self.params.special_tok_ids["""pad_token"""]
else:
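            # no dedicated pad token outside the MLM setup, so the unknown token id is reused for padding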
__a : Tuple = self.params.special_tok_ids["""unk_token"""]
__a : Any = [list(t.astype(__UpperCamelCase ) ) + [pad_idx] * (max_seq_len_ - len(__UpperCamelCase )) for t in token_ids]
assert len(tk_ ) == len(__UpperCamelCase )
assert all(len(__UpperCamelCase ) == max_seq_len_ for t in tk_ )
__a : Any = torch.tensor(tk_ ) # (bs, max_seq_len_)
__a : Optional[Any] = torch.tensor(__UpperCamelCase ) # (bs)
return tk_t, lg_t | 697 | 1 |
'''simple docstring'''
import time
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers.generation import (
MaxLengthCriteria,
MaxNewTokensCriteria,
MaxTimeCriteria,
StoppingCriteriaList,
validate_stopping_criteria,
)
@require_torch
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
def __lowerCamelCase ( self , __UpperCamelCase ):
'''simple docstring'''
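        # build dummy input ids plus uniform per-token scores for a (batch_size, length) batch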
__a : Dict = 3
__a : Dict = 250
__a : str = ids_tensor((batch_size, length) , __UpperCamelCase )
__a : int = torch.ones((batch_size, length) , device=__UpperCamelCase , dtype=torch.float ) / length
return input_ids, scores
def __lowerCamelCase ( self ):
'''simple docstring'''
__a , __a : Any = self._get_tensors(5 )
__a : Optional[int] = StoppingCriteriaList(
[
MaxLengthCriteria(max_length=10 ),
MaxTimeCriteria(max_time=0.1 ),
] )
self.assertFalse(criteria(__UpperCamelCase , __UpperCamelCase ) )
__a , __a : Optional[Any] = self._get_tensors(9 )
self.assertFalse(criteria(__UpperCamelCase , __UpperCamelCase ) )
__a , __a : Tuple = self._get_tensors(10 )
self.assertTrue(criteria(__UpperCamelCase , __UpperCamelCase ) )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : List[Any] = MaxLengthCriteria(max_length=10 )
__a , __a : Any = self._get_tensors(5 )
self.assertFalse(criteria(__UpperCamelCase , __UpperCamelCase ) )
__a , __a : List[Any] = self._get_tensors(9 )
self.assertFalse(criteria(__UpperCamelCase , __UpperCamelCase ) )
__a , __a : List[str] = self._get_tensors(10 )
self.assertTrue(criteria(__UpperCamelCase , __UpperCamelCase ) )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Optional[int] = MaxNewTokensCriteria(start_length=5 , max_new_tokens=5 )
__a , __a : str = self._get_tensors(5 )
self.assertFalse(criteria(__UpperCamelCase , __UpperCamelCase ) )
__a , __a : Dict = self._get_tensors(9 )
self.assertFalse(criteria(__UpperCamelCase , __UpperCamelCase ) )
__a , __a : Optional[Any] = self._get_tensors(10 )
self.assertTrue(criteria(__UpperCamelCase , __UpperCamelCase ) )
__a : List[str] = StoppingCriteriaList([criteria] )
self.assertEqual(criteria_list.max_length , 10 )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a , __a : List[str] = self._get_tensors(5 )
__a : int = MaxTimeCriteria(max_time=0.1 )
self.assertFalse(criteria(__UpperCamelCase , __UpperCamelCase ) )
__a : List[Any] = MaxTimeCriteria(max_time=0.1 , initial_timestamp=time.time() - 0.2 )
self.assertTrue(criteria(__UpperCamelCase , __UpperCamelCase ) )
def __lowerCamelCase ( self ):
'''simple docstring'''
validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10 )] ) , 10 )
with self.assertWarns(__UpperCamelCase ):
validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10 )] ) , 11 )
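        # an empty criteria list gets a MaxLengthCriteria appended during validation, hence the single entry below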
__a : List[str] = validate_stopping_criteria(StoppingCriteriaList() , 11 )
self.assertEqual(len(__UpperCamelCase ) , 1 ) | 697 |
'''simple docstring'''
from pathlib import PurePosixPath
from typing import Optional
import fsspec
from fsspec import AbstractFileSystem
from huggingface_hub.hf_api import DatasetInfo
from ..utils.file_utils import get_authentication_headers_for_url
from ..utils.hub import hf_hub_url
class SCREAMING_SNAKE_CASE__ ( __UpperCamelCase ):
lowercase__ = ""
lowercase__ = "hf-legacy" # "hf://"" is reserved for hffs
def __init__( self , __UpperCamelCase = None , __UpperCamelCase = None , **__UpperCamelCase , ):
'''simple docstring'''
super().__init__(self , **__UpperCamelCase )
__a : int = repo_info
__a : int = token
__a : Any = None
def __lowerCamelCase ( self ):
'''simple docstring'''
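        # lazily build a cache mapping every repo file and each of its parent directories to a metadata entry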
if self.dir_cache is None:
__a : Union[str, Any] = {}
for hf_file in self.repo_info.siblings:
# TODO(QL): add sizes
__a : List[str] = {
"""name""": hf_file.rfilename,
"""size""": None,
"""type""": """file""",
}
self.dir_cache.update(
{
str(__UpperCamelCase ): {"""name""": str(__UpperCamelCase ), """size""": None, """type""": """directory"""}
for d in list(PurePosixPath(hf_file.rfilename ).parents )[:-1]
} )
def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase = "rb" , **__UpperCamelCase , ):
'''simple docstring'''
if not isinstance(self.repo_info , __UpperCamelCase ):
raise NotImplementedError(f"""Open is only implemented for dataset repositories, but got {self.repo_info}""" )
__a : Any = hf_hub_url(self.repo_info.id , __UpperCamelCase , revision=self.repo_info.sha )
return fsspec.open(
__UpperCamelCase , mode=__UpperCamelCase , headers=get_authentication_headers_for_url(__UpperCamelCase , use_auth_token=self.token ) , client_kwargs={"""trust_env""": True} , ).open()
def __lowerCamelCase ( self , __UpperCamelCase , **__UpperCamelCase ):
'''simple docstring'''
self._get_dirs()
__a : str = self._strip_protocol(__UpperCamelCase )
if path in self.dir_cache:
return self.dir_cache[path]
else:
raise FileNotFoundError(__UpperCamelCase )
def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase=False , **__UpperCamelCase ):
'''simple docstring'''
self._get_dirs()
__a : int = PurePosixPath(path.strip("""/""" ) )
__a : List[str] = {}
for p, f in self.dir_cache.items():
__a : str = PurePosixPath(p.strip("""/""" ) )
__a : Optional[int] = p.parent
if root == path:
__a : List[str] = f
__a : str = list(paths.values() )
if detail:
return out
else:
return sorted(f["""name"""] for f in out ) | 697 | 1 |
'''simple docstring'''
def _snake_case ( lowercase ) -> list:
if len(lowercase ) < 2:
return collection
def circle_sort_util(lowercase , lowercase , lowercase ) -> bool:
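        # compare mirrored elements from both ends toward the middle, swapping out-of-order pairs,
        # then recurse on each half; returns True if any swap happened in this subtree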
__a : Optional[int] = False
if low == high:
return swapped
__a : List[Any] = low
__a : List[str] = high
while left < right:
if collection[left] > collection[right]:
__a , __a : Optional[Any] = (
collection[right],
collection[left],
)
__a : Dict = True
left += 1
right -= 1
if left == right and collection[left] > collection[right + 1]:
__a , __a : Optional[int] = (
collection[right + 1],
collection[left],
)
__a : List[str] = True
__a : Any = low + int((high - low) / 2 )
__a : int = circle_sort_util(lowercase , lowercase , lowercase )
__a : Tuple = circle_sort_util(lowercase , mid + 1 , lowercase )
return swapped or left_swap or right_swap
__a : int = True
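    # keep sweeping the whole list until a full pass performs no swaps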
while is_not_sorted is True:
__a : Any = circle_sort_util(lowercase , 0 , len(lowercase ) - 1 )
return collection
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE : List[str] = input('Enter numbers separated by a comma:\n').strip()
__SCREAMING_SNAKE_CASE : Dict = [int(item) for item in user_input.split(',')]
print(circle_sort(unsorted)) | 697 |
'''simple docstring'''
import inspect
import unittest
from transformers import DPTConfig
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MODEL_MAPPING, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTModel
from transformers.models.dpt.modeling_dpt import DPT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class SCREAMING_SNAKE_CASE__ :
def __init__( self , __UpperCamelCase , __UpperCamelCase=2 , __UpperCamelCase=32 , __UpperCamelCase=16 , __UpperCamelCase=3 , __UpperCamelCase=True , __UpperCamelCase=True , __UpperCamelCase=32 , __UpperCamelCase=4 , __UpperCamelCase=[0, 1, 2, 3] , __UpperCamelCase=4 , __UpperCamelCase=37 , __UpperCamelCase="gelu" , __UpperCamelCase=0.1 , __UpperCamelCase=0.1 , __UpperCamelCase=0.0_2 , __UpperCamelCase=3 , __UpperCamelCase=[1, 384, 24, 24] , __UpperCamelCase=True , __UpperCamelCase=None , ):
'''simple docstring'''
__a : List[str] = parent
__a : Tuple = batch_size
__a : str = image_size
__a : int = patch_size
__a : Dict = num_channels
__a : int = is_training
__a : Dict = use_labels
__a : Union[str, Any] = hidden_size
__a : Dict = num_hidden_layers
__a : Dict = backbone_out_indices
__a : Optional[int] = num_attention_heads
__a : List[str] = intermediate_size
__a : Optional[Any] = hidden_act
__a : Dict = hidden_dropout_prob
__a : Tuple = attention_probs_dropout_prob
__a : Any = initializer_range
__a : Any = num_labels
__a : Optional[Any] = backbone_featmap_shape
__a : List[Any] = scope
__a : List[str] = is_hybrid
# sequence length of DPT = num_patches + 1 (we add 1 for the [CLS] token)
__a : Union[str, Any] = (image_size // patch_size) ** 2
__a : List[str] = num_patches + 1
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Dict = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__a : Union[str, Any] = None
if self.use_labels:
__a : str = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
__a : Tuple = self.get_config()
return config, pixel_values, labels
def __lowerCamelCase ( self ):
'''simple docstring'''
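        # backbone settings for the hybrid DPT variant (a small BiT-style convolutional stem)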
__a : List[str] = {
"""global_padding""": """same""",
"""layer_type""": """bottleneck""",
"""depths""": [3, 4, 9],
"""out_features""": ["""stage1""", """stage2""", """stage3"""],
"""embedding_dynamic_padding""": True,
"""hidden_sizes""": [96, 192, 384, 768],
"""num_groups""": 2,
}
return DPTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , backbone_out_indices=self.backbone_out_indices , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__UpperCamelCase , initializer_range=self.initializer_range , is_hybrid=self.is_hybrid , backbone_config=__UpperCamelCase , backbone_featmap_shape=self.backbone_featmap_shape , )
def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ):
'''simple docstring'''
__a : Optional[Any] = DPTModel(config=__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
__a : List[str] = model(__UpperCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ):
'''simple docstring'''
__a : List[str] = self.num_labels
__a : Union[str, Any] = DPTForDepthEstimation(__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
__a : Tuple = model(__UpperCamelCase )
self.parent.assertEqual(result.predicted_depth.shape , (self.batch_size, self.image_size, self.image_size) )
def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ):
'''simple docstring'''
__a : Dict = self.num_labels
__a : Tuple = DPTForSemanticSegmentation(__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
__a : str = model(__UpperCamelCase , labels=__UpperCamelCase )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size, self.image_size) )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Optional[int] = self.prepare_config_and_inputs()
__a , __a , __a : Tuple = config_and_inputs
__a : List[str] = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class SCREAMING_SNAKE_CASE__ ( __UpperCamelCase , __UpperCamelCase , unittest.TestCase ):
lowercase__ = (DPTModel, DPTForDepthEstimation, DPTForSemanticSegmentation) if is_torch_available() else ()
lowercase__ = (
{
"depth-estimation": DPTForDepthEstimation,
"feature-extraction": DPTModel,
"image-segmentation": DPTForSemanticSegmentation,
}
if is_torch_available()
else {}
)
lowercase__ = False
lowercase__ = False
lowercase__ = False
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Optional[int] = DPTModelTester(self )
__a : List[Any] = ConfigTester(self , config_class=__UpperCamelCase , has_text_modality=__UpperCamelCase , hidden_size=37 )
def __lowerCamelCase ( self ):
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason="""DPT does not use inputs_embeds""" )
def __lowerCamelCase ( self ):
'''simple docstring'''
pass
def __lowerCamelCase ( self ):
'''simple docstring'''
__a , __a : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__a : str = model_class(__UpperCamelCase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
__a : Any = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__UpperCamelCase , nn.Linear ) )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a , __a : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__a : Any = model_class(__UpperCamelCase )
__a : List[str] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__a : int = [*signature.parameters.keys()]
__a : List[str] = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , __UpperCamelCase )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__UpperCamelCase )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_depth_estimation(*__UpperCamelCase )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*__UpperCamelCase )
def __lowerCamelCase ( self ):
'''simple docstring'''
for model_class in self.all_model_classes:
if model_class.__name__ == "DPTForDepthEstimation":
continue
__a , __a : Dict = self.model_tester.prepare_config_and_inputs_for_common()
__a : List[Any] = True
if model_class in get_values(__UpperCamelCase ):
continue
__a : str = model_class(__UpperCamelCase )
model.to(__UpperCamelCase )
model.train()
__a : Union[str, Any] = self._prepare_for_class(__UpperCamelCase , __UpperCamelCase , return_labels=__UpperCamelCase )
__a : List[Any] = model(**__UpperCamelCase ).loss
loss.backward()
def __lowerCamelCase ( self ):
'''simple docstring'''
for model_class in self.all_model_classes:
if model_class.__name__ == "DPTForDepthEstimation":
continue
__a , __a : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
__a : Any = False
__a : Dict = True
if model_class in get_values(__UpperCamelCase ) or not model_class.supports_gradient_checkpointing:
continue
__a : Any = model_class(__UpperCamelCase )
model.to(__UpperCamelCase )
model.gradient_checkpointing_enable()
model.train()
__a : List[str] = self._prepare_for_class(__UpperCamelCase , __UpperCamelCase , return_labels=__UpperCamelCase )
__a : Dict = model(**__UpperCamelCase ).loss
loss.backward()
def __lowerCamelCase ( self ):
'''simple docstring'''
__a , __a : Any = self.model_tester.prepare_config_and_inputs_for_common()
__a : Any = _config_zero_init(__UpperCamelCase )
for model_class in self.all_model_classes:
__a : Any = model_class(config=__UpperCamelCase )
# Skip the check for the backbone
__a : Optional[Any] = []
for name, module in model.named_modules():
if module.__class__.__name__ == "DPTViTHybridEmbeddings":
__a : Optional[int] = [f"""{name}.{key}""" for key in module.state_dict().keys()]
break
for name, param in model.named_parameters():
if param.requires_grad:
if name in backbone_params:
continue
self.assertIn(
((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=f"""Parameter {name} of model {model_class} seems not properly initialized""" , )
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def __lowerCamelCase ( self ):
'''simple docstring'''
pass
@slow
def __lowerCamelCase ( self ):
'''simple docstring'''
for model_name in DPT_PRETRAINED_MODEL_ARCHIVE_LIST[1:]:
__a : int = DPTModel.from_pretrained(__UpperCamelCase )
self.assertIsNotNone(__UpperCamelCase )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a , __a : int = self.model_tester.prepare_config_and_inputs_for_common()
__a : Optional[int] = """add"""
with self.assertRaises(__UpperCamelCase ):
__a : int = DPTForDepthEstimation(__UpperCamelCase )
def _snake_case ( ) -> Any:
__a : Dict = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
@slow
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : int = DPTImageProcessor.from_pretrained("""Intel/dpt-hybrid-midas""" )
__a : int = DPTForDepthEstimation.from_pretrained("""Intel/dpt-hybrid-midas""" ).to(__UpperCamelCase )
__a : Union[str, Any] = prepare_img()
__a : Any = image_processor(images=__UpperCamelCase , return_tensors="""pt""" ).to(__UpperCamelCase )
# forward pass
with torch.no_grad():
__a : Optional[Any] = model(**__UpperCamelCase )
__a : int = outputs.predicted_depth
# verify the predicted depth
__a : Any = torch.Size((1, 384, 384) )
self.assertEqual(predicted_depth.shape , __UpperCamelCase )
__a : int = torch.tensor(
[[[5.6_4_3_7, 5.6_1_4_6, 5.6_5_1_1], [5.4_3_7_1, 5.5_6_4_9, 5.5_9_5_8], [5.5_2_1_5, 5.5_1_8_4, 5.5_2_9_3]]] ).to(__UpperCamelCase )
self.assertTrue(torch.allclose(outputs.predicted_depth[:3, :3, :3] / 100 , __UpperCamelCase , atol=1E-4 ) ) | 697 | 1 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ....tokenization_utils_fast import PreTrainedTokenizerFast
from ....utils import logging
from .tokenization_retribert import RetriBertTokenizer
__SCREAMING_SNAKE_CASE : List[str] = logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE : Union[str, Any] = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
__SCREAMING_SNAKE_CASE : Any = {
'vocab_file': {
'yjernite/retribert-base-uncased': (
'https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'yjernite/retribert-base-uncased': (
'https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/tokenizer.json'
),
},
}
__SCREAMING_SNAKE_CASE : Dict = {
'yjernite/retribert-base-uncased': 512,
}
__SCREAMING_SNAKE_CASE : Dict = {
'yjernite/retribert-base-uncased': {'do_lower_case': True},
}
class SCREAMING_SNAKE_CASE__ ( __UpperCamelCase ):
lowercase__ = VOCAB_FILES_NAMES
lowercase__ = PRETRAINED_VOCAB_FILES_MAP
lowercase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase__ = PRETRAINED_INIT_CONFIGURATION
lowercase__ = RetriBertTokenizer
lowercase__ = ["input_ids", "attention_mask"]
def __init__( self , __UpperCamelCase=None , __UpperCamelCase=None , __UpperCamelCase=True , __UpperCamelCase="[UNK]" , __UpperCamelCase="[SEP]" , __UpperCamelCase="[PAD]" , __UpperCamelCase="[CLS]" , __UpperCamelCase="[MASK]" , __UpperCamelCase=True , __UpperCamelCase=None , **__UpperCamelCase , ):
'''simple docstring'''
super().__init__(
__UpperCamelCase , tokenizer_file=__UpperCamelCase , do_lower_case=__UpperCamelCase , unk_token=__UpperCamelCase , sep_token=__UpperCamelCase , pad_token=__UpperCamelCase , cls_token=__UpperCamelCase , mask_token=__UpperCamelCase , tokenize_chinese_chars=__UpperCamelCase , strip_accents=__UpperCamelCase , **__UpperCamelCase , )
__a : Optional[int] = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get("""lowercase""" , __UpperCamelCase ) != do_lower_case
or normalizer_state.get("""strip_accents""" , __UpperCamelCase ) != strip_accents
or normalizer_state.get("""handle_chinese_chars""" , __UpperCamelCase ) != tokenize_chinese_chars
):
__a : Optional[Any] = getattr(__UpperCamelCase , normalizer_state.pop("""type""" ) )
__a : Dict = do_lower_case
__a : Any = strip_accents
__a : Dict = tokenize_chinese_chars
__a : Union[str, Any] = normalizer_class(**__UpperCamelCase )
__a : List[str] = do_lower_case
def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase=None ):
'''simple docstring'''
__a : Union[str, Any] = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase = None ):
'''simple docstring'''
__a : str = [self.sep_token_id]
__a : List[Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase = None ):
'''simple docstring'''
__a : Optional[Any] = self._tokenizer.model.save(__UpperCamelCase , name=__UpperCamelCase )
return tuple(__UpperCamelCase ) | 697 |
'''simple docstring'''
import unittest
from .lib import (
Matrix,
Vector,
axpy,
square_zero_matrix,
unit_basis_vector,
zero_vector,
)
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Dict = Vector([1, 2, 3] )
self.assertEqual(x.component(0 ) , 1 )
self.assertEqual(x.component(2 ) , 3 )
__a : Optional[int] = Vector()
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Any = Vector([0, 0, 0, 0, 0, 1] )
self.assertEqual(str(__UpperCamelCase ) , """(0,0,0,0,0,1)""" )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Tuple = Vector([1, 2, 3, 4] )
self.assertEqual(len(__UpperCamelCase ) , 4 )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Optional[Any] = Vector([1, 2] )
__a : List[str] = Vector([1, 2, 3, 4, 5] )
__a : Optional[int] = Vector([0, 0, 0, 0, 0, 0, 0, 0, 0, 0] )
__a : Dict = Vector([1, -1, 1, -1, 2, -3, 4, -5] )
self.assertAlmostEqual(x.euclidean_length() , 2.2_3_6 , 3 )
self.assertAlmostEqual(y.euclidean_length() , 7.4_1_6 , 3 )
self.assertEqual(z.euclidean_length() , 0 )
self.assertAlmostEqual(w.euclidean_length() , 7.6_1_6 , 3 )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Dict = Vector([1, 2, 3] )
__a : Union[str, Any] = Vector([1, 1, 1] )
self.assertEqual((x + y).component(0 ) , 2 )
self.assertEqual((x + y).component(1 ) , 3 )
self.assertEqual((x + y).component(2 ) , 4 )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : List[str] = Vector([1, 2, 3] )
__a : Any = Vector([1, 1, 1] )
self.assertEqual((x - y).component(0 ) , 0 )
self.assertEqual((x - y).component(1 ) , 1 )
self.assertEqual((x - y).component(2 ) , 2 )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Tuple = Vector([1, 2, 3] )
__a : Optional[Any] = Vector([2, -1, 4] ) # for test of dot product
__a : Union[str, Any] = Vector([1, -2, -1] )
self.assertEqual(str(x * 3.0 ) , """(3.0,6.0,9.0)""" )
self.assertEqual((a * b) , 0 )
def __lowerCamelCase ( self ):
'''simple docstring'''
self.assertEqual(str(zero_vector(10 ) ).count("""0""" ) , 10 )
def __lowerCamelCase ( self ):
'''simple docstring'''
self.assertEqual(str(unit_basis_vector(3 , 1 ) ) , """(0,1,0)""" )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Dict = Vector([1, 2, 3] )
__a : Optional[int] = Vector([1, 0, 1] )
self.assertEqual(str(axpy(2 , __UpperCamelCase , __UpperCamelCase ) ) , """(3,4,7)""" )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : int = Vector([1, 0, 0, 0, 0, 0] )
__a : Any = x.copy()
self.assertEqual(str(__UpperCamelCase ) , str(__UpperCamelCase ) )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Union[str, Any] = Vector([1, 0, 0] )
x.change_component(0 , 0 )
x.change_component(1 , 1 )
self.assertEqual(str(__UpperCamelCase ) , """(0,1,0)""" )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Dict = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
self.assertEqual("""|1,2,3|\n|2,4,5|\n|6,7,8|\n""" , str(__UpperCamelCase ) )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : List[Any] = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
__a : List[Any] = [[-3, -14, -10], [-5, -10, -5], [-2, -1, 0]]
for x in range(a.height() ):
for y in range(a.width() ):
self.assertEqual(minors[x][y] , a.minor(__UpperCamelCase , __UpperCamelCase ) )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : List[Any] = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
__a : Any = [[-3, 14, -10], [5, -10, 5], [-2, 1, 0]]
for x in range(a.height() ):
for y in range(a.width() ):
self.assertEqual(cofactors[x][y] , a.cofactor(__UpperCamelCase , __UpperCamelCase ) )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : List[Any] = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
self.assertEqual(-5 , a.determinant() )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Any = Matrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]] , 3 , 3 )
__a : List[Any] = Vector([1, 2, 3] )
self.assertEqual("""(14,32,50)""" , str(a * x ) )
self.assertEqual("""|2,4,6|\n|8,10,12|\n|14,16,18|\n""" , str(a * 2 ) )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : List[str] = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
a.change_component(0 , 2 , 5 )
self.assertEqual("""|1,2,5|\n|2,4,5|\n|6,7,8|\n""" , str(__UpperCamelCase ) )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Tuple = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
        self.assertAlmostEqual(7 , a.component(2 , 1 ) , delta=0.0_1 )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Dict = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
__a : Union[str, Any] = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]] , 3 , 3 )
self.assertEqual("""|2,4,10|\n|4,8,10|\n|12,14,18|\n""" , str(a + b ) )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : List[Any] = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
__a : List[str] = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]] , 3 , 3 )
self.assertEqual("""|0,0,-4|\n|0,0,0|\n|0,0,-2|\n""" , str(a - b ) )
def __lowerCamelCase ( self ):
'''simple docstring'''
self.assertEqual(
"""|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n""" , str(square_zero_matrix(5 ) ) , )
if __name__ == "__main__":
unittest.main() | 697 | 1 |
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
def __init__( self , __UpperCamelCase , __UpperCamelCase=13 , __UpperCamelCase=3 , __UpperCamelCase=224 , __UpperCamelCase=30 , __UpperCamelCase=400 , __UpperCamelCase=True , __UpperCamelCase=None , __UpperCamelCase=True , __UpperCamelCase=[0.5, 0.5, 0.5] , __UpperCamelCase=[0.5, 0.5, 0.5] , ):
'''simple docstring'''
__a : Tuple = size if size is not None else {"""height""": 18, """width""": 18}
__a : int = parent
__a : Tuple = batch_size
__a : Any = num_channels
__a : Optional[int] = image_size
__a : int = min_resolution
__a : int = max_resolution
__a : Tuple = do_resize
__a : str = size
__a : Dict = do_normalize
__a : str = image_mean
__a : List[str] = image_std
def __lowerCamelCase ( self ):
'''simple docstring'''
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
}
@require_torch
@require_vision
class SCREAMING_SNAKE_CASE__ ( __UpperCamelCase , unittest.TestCase ):
lowercase__ = ViTImageProcessor if is_vision_available() else None
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Union[str, Any] = EfficientFormerImageProcessorTester(self )
@property
def __lowerCamelCase ( self ):
'''simple docstring'''
return self.image_proc_tester.prepare_image_processor_dict()
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Dict = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__UpperCamelCase , """image_mean""" ) )
self.assertTrue(hasattr(__UpperCamelCase , """image_std""" ) )
self.assertTrue(hasattr(__UpperCamelCase , """do_normalize""" ) )
self.assertTrue(hasattr(__UpperCamelCase , """do_resize""" ) )
self.assertTrue(hasattr(__UpperCamelCase , """size""" ) )
def __lowerCamelCase ( self ):
'''simple docstring'''
pass
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : List[str] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__a : Optional[int] = prepare_image_inputs(self.image_proc_tester , equal_resolution=__UpperCamelCase )
for image in image_inputs:
self.assertIsInstance(__UpperCamelCase , Image.Image )
# Test not batched input
__a : Tuple = image_processor(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["""height"""],
self.image_proc_tester.size["""width"""],
) , )
# Test batched
__a : Optional[Any] = image_processor(__UpperCamelCase , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_proc_tester.batch_size,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["""height"""],
self.image_proc_tester.size["""width"""],
) , )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__a : Any = prepare_image_inputs(self.image_proc_tester , equal_resolution=__UpperCamelCase , numpify=__UpperCamelCase )
for image in image_inputs:
self.assertIsInstance(__UpperCamelCase , np.ndarray )
# Test not batched input
__a : List[str] = image_processor(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["""height"""],
self.image_proc_tester.size["""width"""],
) , )
# Test batched
__a : Union[str, Any] = image_processor(__UpperCamelCase , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_proc_tester.batch_size,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["""height"""],
self.image_proc_tester.size["""width"""],
) , )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__a : int = prepare_image_inputs(self.image_proc_tester , equal_resolution=__UpperCamelCase , torchify=__UpperCamelCase )
for image in image_inputs:
self.assertIsInstance(__UpperCamelCase , torch.Tensor )
# Test not batched input
__a : List[Any] = image_processor(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["""height"""],
self.image_proc_tester.size["""width"""],
) , )
# Test batched
__a : int = image_processor(__UpperCamelCase , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_proc_tester.batch_size,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["""height"""],
self.image_proc_tester.size["""width"""],
) , ) | 697 |
'''simple docstring'''
import os
from itertools import chain
from random import randrange, shuffle
import pytest
from .sola import PokerHand
__SCREAMING_SNAKE_CASE : List[str] = (
'4S 3H 2C 7S 5H',
'9D 8H 2C 6S 7H',
'2D 6D 9D TH 7D',
'TC 8C 2S JH 6C',
'JH 8S TH AH QH',
'TS KS 5S 9S AC',
'KD 6S 9D TH AD',
'KS 8D 4D 9S 4S', # pair
'8C 4S KH JS 4D', # pair
'QH 8H KD JH 8S', # pair
'KC 4H KS 2H 8D', # pair
'KD 4S KC 3H 8S', # pair
'AH 8S AS KC JH', # pair
'3H 4C 4H 3S 2H', # 2 pairs
'5S 5D 2C KH KH', # 2 pairs
'3C KH 5D 5S KH', # 2 pairs
'AS 3C KH AD KH', # 2 pairs
'7C 7S 3S 7H 5S', # 3 of a kind
'7C 7S KH 2H 7H', # 3 of a kind
'AC KH QH AH AS', # 3 of a kind
'2H 4D 3C AS 5S', # straight (low ace)
'3C 5C 4C 2C 6H', # straight
'6S 8S 7S 5H 9H', # straight
'JS QS 9H TS KH', # straight
'QC KH TS JS AH', # straight (high ace)
'8C 9C 5C 3C TC', # flush
'3S 8S 9S 5S KS', # flush
'4C 5C 9C 8C KC', # flush
'JH 8H AH KH QH', # flush
'3D 2H 3H 2C 2D', # full house
'2H 2C 3S 3H 3D', # full house
'KH KC 3S 3H 3D', # full house
'JC 6H JS JD JH', # 4 of a kind
'JC 7H JS JD JH', # 4 of a kind
'JC KH JS JD JH', # 4 of a kind
'2S AS 4S 5S 3S', # straight flush (low ace)
'2D 6D 3D 4D 5D', # straight flush
'5C 6C 3C 7C 4C', # straight flush
'JH 9H TH KH QH', # straight flush
'JH AH TH KH QH', # royal flush (high ace straight flush)
)
__SCREAMING_SNAKE_CASE : Optional[Any] = (
('2H 3H 4H 5H 6H', 'KS AS TS QS JS', 'Loss'),
('2H 3H 4H 5H 6H', 'AS AD AC AH JD', 'Win'),
('AS AH 2H AD AC', 'JS JD JC JH 3D', 'Win'),
('2S AH 2H AS AC', 'JS JD JC JH AD', 'Loss'),
('2S AH 2H AS AC', '2H 3H 5H 6H 7H', 'Win'),
('AS 3S 4S 8S 2S', '2H 3H 5H 6H 7H', 'Win'),
('2H 3H 5H 6H 7H', '2S 3H 4H 5S 6C', 'Win'),
('2S 3H 4H 5S 6C', '3D 4C 5H 6H 2S', 'Tie'),
('2S 3H 4H 5S 6C', 'AH AC 5H 6H AS', 'Win'),
('2S 2H 4H 5S 4C', 'AH AC 5H 6H AS', 'Loss'),
('2S 2H 4H 5S 4C', 'AH AC 5H 6H 7S', 'Win'),
('6S AD 7H 4S AS', 'AH AC 5H 6H 7S', 'Loss'),
('2S AH 4H 5S KC', 'AH AC 5H 6H 7S', 'Loss'),
('2S 3H 6H 7S 9C', '7H 3C TH 6H 9S', 'Loss'),
('4S 5H 6H TS AC', '3S 5H 6H TS AC', 'Win'),
('2S AH 4H 5S 6C', 'AD 4C 5H 6H 2C', 'Tie'),
('AS AH 3H AD AC', 'AS AH 2H AD AC', 'Win'),
('AH AC 5H 5C QS', 'AH AC 5H 5C KS', 'Loss'),
('AH AC 5H 5C QS', 'KH KC 5H 5C QS', 'Win'),
('7C 7S KH 2H 7H', '3C 3S AH 2H 3H', 'Win'),
('3C 3S AH 2H 3H', '7C 7S KH 2H 7H', 'Loss'),
('6H 5H 4H 3H 2H', '5H 4H 3H 2H AH', 'Win'),
('5H 4H 3H 2H AH', '5H 4H 3H 2H AH', 'Tie'),
('5H 4H 3H 2H AH', '6H 5H 4H 3H 2H', 'Loss'),
('AH AD KS KC AC', 'AH KD KH AC KC', 'Win'),
('2H 4D 3C AS 5S', '2H 4D 3C 6S 5S', 'Loss'),
('2H 3S 3C 3H 2S', '3S 3C 2S 2H 2D', 'Win'),
('4D 6D 5D 2D JH', '3S 8S 3H TC KH', 'Loss'),
('4S 6C 8S 3S 7S', 'AD KS 2D 7D 7C', 'Loss'),
('6S 4C 7H 8C 3H', '5H JC AH 9D 9C', 'Loss'),
('9D 9H JH TC QH', '3C 2S JS 5C 7H', 'Win'),
('2H TC 8S AD 9S', '4H TS 7H 2C 5C', 'Win'),
('9D 3S 2C 7S 7C', 'JC TD 3C TC 9H', 'Loss'),
)
__SCREAMING_SNAKE_CASE : Tuple = (
('2H 3H 4H 5H 6H', True),
('AS AH 2H AD AC', False),
('2H 3H 5H 6H 7H', True),
('KS AS TS QS JS', True),
('8H 9H QS JS TH', False),
('AS 3S 4S 8S 2S', True),
)
__SCREAMING_SNAKE_CASE : Dict = (
('2H 3H 4H 5H 6H', True),
('AS AH 2H AD AC', False),
('2H 3H 5H 6H 7H', False),
('KS AS TS QS JS', True),
('8H 9H QS JS TH', True),
)
__SCREAMING_SNAKE_CASE : Optional[int] = (
('2H 4D 3C AS 5S', True, [5, 4, 3, 2, 14]),
('2H 5D 3C AS 5S', False, [14, 5, 5, 3, 2]),
('JH QD KC AS TS', False, [14, 13, 12, 11, 10]),
('9D 3S 2C 7S 7C', False, [9, 7, 7, 3, 2]),
)
__SCREAMING_SNAKE_CASE : int = (
('JH AH TH KH QH', 0),
('JH 9H TH KH QH', 0),
('JC KH JS JD JH', 7),
('KH KC 3S 3H 3D', 6),
('8C 9C 5C 3C TC', 0),
('JS QS 9H TS KH', 0),
('7C 7S KH 2H 7H', 3),
('3C KH 5D 5S KH', 2),
('QH 8H KD JH 8S', 1),
('2D 6D 9D TH 7D', 0),
)
__SCREAMING_SNAKE_CASE : int = (
('JH AH TH KH QH', 23),
('JH 9H TH KH QH', 22),
('JC KH JS JD JH', 21),
('KH KC 3S 3H 3D', 20),
('8C 9C 5C 3C TC', 19),
('JS QS 9H TS KH', 18),
('7C 7S KH 2H 7H', 17),
('3C KH 5D 5S KH', 16),
('QH 8H KD JH 8S', 15),
('2D 6D 9D TH 7D', 14),
)
def _snake_case ( ) -> List[str]:
__a , __a : List[Any] = randrange(len(lowercase ) ), randrange(len(lowercase ) )
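    # SORTED_HANDS is ordered weakest to strongest, so comparing the two indices yields the expected outcome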
__a : int = ["""Loss""", """Tie""", """Win"""][(play >= oppo) + (play > oppo)]
__a , __a : int = SORTED_HANDS[play], SORTED_HANDS[oppo]
return hand, other, expected
def _snake_case ( lowercase = 1_0_0 ) -> Any:
return (generate_random_hand() for _ in range(lowercase ))
@pytest.mark.parametrize("""hand, expected""" , lowercase )
def _snake_case ( lowercase , lowercase ) -> int:
assert PokerHand(lowercase )._is_flush() == expected
@pytest.mark.parametrize("""hand, expected""" , lowercase )
def _snake_case ( lowercase , lowercase ) -> Any:
assert PokerHand(lowercase )._is_straight() == expected
@pytest.mark.parametrize("""hand, expected, card_values""" , lowercase )
def _snake_case ( lowercase , lowercase , lowercase ) -> List[str]:
__a : Union[str, Any] = PokerHand(lowercase )
assert player._is_five_high_straight() == expected
assert player._card_values == card_values
@pytest.mark.parametrize("""hand, expected""" , lowercase )
def _snake_case ( lowercase , lowercase ) -> Optional[int]:
assert PokerHand(lowercase )._is_same_kind() == expected
@pytest.mark.parametrize("""hand, expected""" , lowercase )
def _snake_case ( lowercase , lowercase ) -> Union[str, Any]:
assert PokerHand(lowercase )._hand_type == expected
@pytest.mark.parametrize("""hand, other, expected""" , lowercase )
def _snake_case ( lowercase , lowercase , lowercase ) -> Optional[int]:
assert PokerHand(lowercase ).compare_with(PokerHand(lowercase ) ) == expected
@pytest.mark.parametrize("""hand, other, expected""" , generate_random_hands() )
def _snake_case ( lowercase , lowercase , lowercase ) -> int:
assert PokerHand(lowercase ).compare_with(PokerHand(lowercase ) ) == expected
def _snake_case ( ) -> Union[str, Any]:
__a : Tuple = [PokerHand(lowercase ) for hand in SORTED_HANDS]
__a : Optional[int] = poker_hands.copy()
shuffle(lowercase )
__a : List[str] = chain(sorted(lowercase ) )
for index, hand in enumerate(lowercase ):
assert hand == poker_hands[index]
def _snake_case ( ) -> List[str]:
# Test that five high straights are compared correctly.
__a : Optional[int] = [PokerHand("""2D AC 3H 4H 5S""" ), PokerHand("""2S 3H 4H 5S 6C""" )]
pokerhands.sort(reverse=lowercase )
assert pokerhands[0].__str__() == "2S 3H 4H 5S 6C"
def _snake_case ( ) -> List[str]:
    # Multiple calls to _is_five_high_straight should keep returning True
    # and must not mutate the card values again after the first call.
__a : Dict = PokerHand("""2C 4S AS 3D 5C""" )
__a : Dict = True
__a : Optional[int] = [5, 4, 3, 2, 1_4]
for _ in range(1_0 ):
assert pokerhand._is_five_high_straight() == expected
assert pokerhand._card_values == expected_card_values
def _snake_case ( ) -> Dict:
# Problem number 54 from Project Euler
# Testing from poker_hands.txt file
__a : Tuple = 0
__a : int = os.path.abspath(os.path.dirname(lowercase ) )
__a : Union[str, Any] = os.path.join(lowercase , """poker_hands.txt""" )
with open(lowercase ) as file_hand:
for line in file_hand:
__a : Union[str, Any] = line[:1_4].strip()
__a : Optional[Any] = line[1_5:].strip()
__a , __a : List[str] = PokerHand(lowercase ), PokerHand(lowercase )
__a : str = player.compare_with(lowercase )
if output == "Win":
answer += 1
assert answer == 3_7_6 | 697 | 1 |
'''simple docstring'''
def _snake_case ( lowercase , lowercase ) -> bool:
__a : int = len(lowercase ) + 1
__a : Any = len(lowercase ) + 1
# dp is a 2d matrix where dp[i][j] denotes whether prefix string of
# length i of input_string matches with prefix string of length j of
# given pattern.
# "dp" stands for dynamic programming.
__a : Any = [[0 for i in range(lowercase )] for j in range(lowercase )]
    # a string of zero length matches a pattern of zero length
    __a : Any = 1
    # a pattern of zero length never matches a string of non-zero length
for i in range(1 , lowercase ):
__a : Optional[int] = 0
# since string of zero length will match with pattern where there
# is at least one * alternatively
for j in range(1 , lowercase ):
__a : Any = dp[0][j - 2] if pattern[j - 1] == """*""" else 0
    # fill the table bottom-up for all remaining prefix lengths
for i in range(1 , lowercase ):
for j in range(1 , lowercase ):
if input_string[i - 1] == pattern[j - 1] or pattern[j - 1] == ".":
__a : Dict = dp[i - 1][j - 1]
elif pattern[j - 1] == "*":
if dp[i][j - 2] == 1:
__a : Any = 1
elif pattern[j - 2] in (input_string[i - 1], "."):
__a : Optional[Any] = dp[i - 1][j]
else:
__a : List[str] = 0
else:
__a : Optional[Any] = 0
return bool(dp[-1][-1] )
if __name__ == "__main__":
import doctest
doctest.testmod()
# inputing the strings
# input_string = input("input a string :")
# pattern = input("input a pattern :")
__SCREAMING_SNAKE_CASE : List[Any] = 'aab'
__SCREAMING_SNAKE_CASE : int = 'c*a*b'
# using function to check whether given string matches the given pattern
if match_pattern(input_string, pattern):
print(f'''{input_string} matches the given pattern {pattern}''')
else:
print(f'''{input_string} does not match with the given pattern {pattern}''') | 697 |
'''simple docstring'''
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__SCREAMING_SNAKE_CASE : Optional[Any] = {'configuration_focalnet': ['FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP', 'FocalNetConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE : List[Any] = [
'FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST',
'FocalNetForImageClassification',
'FocalNetForMaskedImageModeling',
'FocalNetBackbone',
'FocalNetModel',
'FocalNetPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_focalnet import (
FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
FocalNetPreTrainedModel,
)
else:
import sys
__SCREAMING_SNAKE_CASE : Optional[int] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) | 697 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
__SCREAMING_SNAKE_CASE : Tuple = {
'configuration_roformer': ['ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'RoFormerConfig', 'RoFormerOnnxConfig'],
'tokenization_roformer': ['RoFormerTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE : int = ['RoFormerTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE : str = [
'ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'RoFormerForCausalLM',
'RoFormerForMaskedLM',
'RoFormerForMultipleChoice',
'RoFormerForQuestionAnswering',
'RoFormerForSequenceClassification',
'RoFormerForTokenClassification',
'RoFormerLayer',
'RoFormerModel',
'RoFormerPreTrainedModel',
'load_tf_weights_in_roformer',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE : Union[str, Any] = [
'TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFRoFormerForCausalLM',
'TFRoFormerForMaskedLM',
'TFRoFormerForMultipleChoice',
'TFRoFormerForQuestionAnswering',
'TFRoFormerForSequenceClassification',
'TFRoFormerForTokenClassification',
'TFRoFormerLayer',
'TFRoFormerModel',
'TFRoFormerPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE : int = [
'FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'FlaxRoFormerForMaskedLM',
'FlaxRoFormerForMultipleChoice',
'FlaxRoFormerForQuestionAnswering',
'FlaxRoFormerForSequenceClassification',
'FlaxRoFormerForTokenClassification',
'FlaxRoFormerModel',
'FlaxRoFormerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_roformer import ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, RoFormerConfig, RoFormerOnnxConfig
from .tokenization_roformer import RoFormerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_roformer_fast import RoFormerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roformer import (
ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
RoFormerForCausalLM,
RoFormerForMaskedLM,
RoFormerForMultipleChoice,
RoFormerForQuestionAnswering,
RoFormerForSequenceClassification,
RoFormerForTokenClassification,
RoFormerLayer,
RoFormerModel,
RoFormerPreTrainedModel,
load_tf_weights_in_roformer,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roformer import (
TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerLayer,
TFRoFormerModel,
TFRoFormerPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roformer import (
FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
FlaxRoFormerPreTrainedModel,
)
else:
import sys
__SCREAMING_SNAKE_CASE : str = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) | 697 |
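# The block above follows the transformers lazy-import pattern: the module
# namespace is swapped for a proxy that imports each submodule only on first
# attribute access. A minimal, self-contained sketch of the idea — the
# ``_LazyStub`` name and layout are illustrative assumptions, not the
# library's actual implementation:
import importlib
import types


class _LazyStub(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # map each exported symbol back to the submodule that defines it
        self._symbol_to_module = {
            symbol: module for module, symbols in import_structure.items() for symbol in symbols
        }

    def __getattr__(self, symbol):
        module = importlib.import_module("." + self._symbol_to_module[symbol], self.__name__)
        return getattr(module, symbol)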
'''simple docstring'''
from __future__ import annotations
import bisect
def _snake_case ( lowercase , lowercase , lowercase = 0 , lowercase = -1 ) -> int:
if hi < 0:
__a : Union[str, Any] = len(lowercase )
while lo < hi:
__a : List[str] = lo + (hi - lo) // 2
if sorted_collection[mid] < item:
__a : int = mid + 1
else:
__a : int = mid
return lo
def _snake_case ( lowercase , lowercase , lowercase = 0 , lowercase = -1 ) -> int:
if hi < 0:
__a : Any = len(lowercase )
while lo < hi:
__a : Any = lo + (hi - lo) // 2
if sorted_collection[mid] <= item:
__a : List[str] = mid + 1
else:
__a : Any = mid
return lo
def _snake_case ( lowercase , lowercase , lowercase = 0 , lowercase = -1 ) -> None:
sorted_collection.insert(bisect_left(lowercase , lowercase , lowercase , lowercase ) , lowercase )
def _snake_case ( lowercase , lowercase , lowercase = 0 , lowercase = -1 ) -> None:
sorted_collection.insert(bisect_right(lowercase , lowercase , lowercase , lowercase ) , lowercase )
def _snake_case ( lowercase , lowercase ) -> int | None:
__a : Dict = 0
__a : Any = len(lowercase ) - 1
while left <= right:
__a : str = left + (right - left) // 2
__a : List[Any] = sorted_collection[midpoint]
if current_item == item:
return midpoint
elif item < current_item:
__a : Optional[Any] = midpoint - 1
else:
__a : Optional[int] = midpoint + 1
return None
def _snake_case ( lowercase , lowercase ) -> int | None:
__a : Optional[int] = bisect.bisect_left(lowercase , lowercase )
if index != len(lowercase ) and sorted_collection[index] == item:
return index
return None
def _snake_case ( lowercase , lowercase , lowercase , lowercase ) -> int | None:
if right < left:
return None
__a : Any = left + (right - left) // 2
if sorted_collection[midpoint] == item:
return midpoint
elif sorted_collection[midpoint] > item:
return binary_search_by_recursion(lowercase , lowercase , lowercase , midpoint - 1 )
else:
return binary_search_by_recursion(lowercase , lowercase , midpoint + 1 , lowercase )
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE : List[Any] = input('Enter numbers separated by comma:\n').strip()
__SCREAMING_SNAKE_CASE : Optional[Any] = sorted(int(item) for item in user_input.split(','))
__SCREAMING_SNAKE_CASE : List[str] = int(input('Enter a single number to be found in the list:\n'))
__SCREAMING_SNAKE_CASE : Optional[int] = binary_search(collection, target)
if result is None:
print(f'''{target} was not found in {collection}.''')
else:
print(f'''{target} was found at position {result} in {collection}.''') | 697 | 1 |
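# Non-interactive sketch of the same searches using the standard-library
# ``bisect`` module, which the hand-rolled helpers above mirror (the data
# and names below are illustrative):
import bisect

_data = [1, 2, 4, 4, 8]
assert bisect.bisect_left(_data, 4) == 2   # first slot holding a 4
assert bisect.bisect_right(_data, 4) == 4  # slot just past the last 4
_idx = bisect.bisect_left(_data, 8)        # membership test, std-lib style
assert _idx != len(_data) and _data[_idx] == 8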
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__SCREAMING_SNAKE_CASE : List[str] = logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE : Dict = {
'google/canine-s': 'https://huggingface.co/google/canine-s/resolve/main/config.json',
# See all CANINE models at https://huggingface.co/models?filter=canine
}
class SCREAMING_SNAKE_CASE__ ( __UpperCamelCase ):
lowercase__ = "canine"
def __init__( self , __UpperCamelCase=768 , __UpperCamelCase=12 , __UpperCamelCase=12 , __UpperCamelCase=3072 , __UpperCamelCase="gelu" , __UpperCamelCase=0.1 , __UpperCamelCase=0.1 , __UpperCamelCase=1_6384 , __UpperCamelCase=16 , __UpperCamelCase=0.0_2 , __UpperCamelCase=1E-12 , __UpperCamelCase=0 , __UpperCamelCase=0XE0_00 , __UpperCamelCase=0XE0_01 , __UpperCamelCase=4 , __UpperCamelCase=4 , __UpperCamelCase=8 , __UpperCamelCase=1_6384 , __UpperCamelCase=128 , **__UpperCamelCase , ):
'''simple docstring'''
super().__init__(pad_token_id=__UpperCamelCase , bos_token_id=__UpperCamelCase , eos_token_id=__UpperCamelCase , **__UpperCamelCase )
__a : List[str] = max_position_embeddings
__a : int = hidden_size
__a : Optional[Any] = num_hidden_layers
__a : Optional[int] = num_attention_heads
__a : int = intermediate_size
__a : Dict = hidden_act
__a : Dict = hidden_dropout_prob
__a : List[str] = attention_probs_dropout_prob
__a : str = initializer_range
__a : Optional[Any] = type_vocab_size
__a : Optional[Any] = layer_norm_eps
# Character config:
__a : str = downsampling_rate
__a : List[str] = upsampling_kernel_size
__a : List[Any] = num_hash_functions
__a : Any = num_hash_buckets
__a : int = local_transformer_stride | 697 |
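# Hedged usage sketch for the config above, assuming a transformers install
# that ships the CANINE family; the asserted defaults come straight from the
# signature shown:
from transformers import CanineConfig

_config = CanineConfig()
assert _config.hidden_size == 768
assert _config.downsampling_rate == 4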
'''simple docstring'''
from itertools import product
def _snake_case ( lowercase , lowercase ) -> list[int]:
__a : Optional[int] = sides_number
__a : Union[str, Any] = max_face_number * dice_number
__a : Optional[Any] = [0] * (max_total + 1)
__a : Dict = 1
__a : str = range(lowercase , max_face_number + 1 )
for dice_numbers in product(lowercase , repeat=lowercase ):
__a : int = sum(lowercase )
totals_frequencies[total] += 1
return totals_frequencies
def _snake_case ( ) -> float:
__a : Tuple = total_frequency_distribution(
sides_number=4 , dice_number=9 )
__a : Union[str, Any] = total_frequency_distribution(
sides_number=6 , dice_number=6 )
__a : str = 0
__a : Dict = 9
__a : str = 4 * 9
__a : Any = 6
for peter_total in range(lowercase , max_peter_total + 1 ):
peter_wins_count += peter_totals_frequencies[peter_total] * sum(
colin_totals_frequencies[min_colin_total:peter_total] )
__a : str = (4**9) * (6**6)
__a : List[Any] = peter_wins_count / total_games_number
__a : List[Any] = round(lowercase , ndigits=7 )
return rounded_peter_win_probability
if __name__ == "__main__":
print(f'''{solution() = }''') | 697 | 1 |
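# The frequency-distribution idea above in a hand-checkable micro-example:
# two six-sided dice.
from itertools import product

_freq = [0] * (12 + 1)
for _roll in product(range(1, 7), repeat=2):
    _freq[sum(_roll)] += 1
assert _freq[7] == 6          # 1+6, 2+5, 3+4, 4+3, 5+2, 6+1
assert sum(_freq) == 6 ** 2   # 36 equally likely outcomes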
'''simple docstring'''
import warnings
from functools import wraps
from typing import Callable
def _snake_case ( lowercase ) -> Callable:
@wraps(lowercase )
def _inner_fn(*lowercase , **lowercase ):
warnings.warn(
(F"""'{fn.__name__}' is experimental and might be subject to breaking changes in the future.""") , lowercase , )
return fn(*lowercase , **lowercase )
return _inner_fn | 697 |
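# The decorator above is the standard "warn on call" pattern; a
# self-contained sketch with illustrative names, small enough to verify:
import warnings
from functools import wraps


def _experimental(fn):
    @wraps(fn)
    def _inner(*args, **kwargs):
        warnings.warn(f"'{fn.__name__}' is experimental.", UserWarning)
        return fn(*args, **kwargs)

    return _inner


@_experimental
def _demo():
    return 42


with warnings.catch_warnings(record=True) as _caught:
    warnings.simplefilter("always")
    assert _demo() == 42 and len(_caught) == 1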
'''simple docstring'''
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import DiffusionPipeline
from diffusers.models import AutoencoderKL, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import logging
__SCREAMING_SNAKE_CASE : Union[str, Any] = logging.get_logger(__name__) # pylint: disable=invalid-name
class SCREAMING_SNAKE_CASE__ ( __UpperCamelCase ):
def __init__( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , ):
'''simple docstring'''
super().__init__()
self.register_modules(
vae=__UpperCamelCase , text_encoder=__UpperCamelCase , tokenizer=__UpperCamelCase , unet=__UpperCamelCase , scheduler=__UpperCamelCase , safety_checker=__UpperCamelCase , feature_extractor=__UpperCamelCase , )
def __lowerCamelCase ( self , __UpperCamelCase = "auto" ):
'''simple docstring'''
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
__a : Union[str, Any] = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(__UpperCamelCase )
def __lowerCamelCase ( self ):
'''simple docstring'''
self.enable_attention_slicing(__UpperCamelCase )
@torch.no_grad()
def __call__( self , __UpperCamelCase , __UpperCamelCase = 512 , __UpperCamelCase = 512 , __UpperCamelCase = 50 , __UpperCamelCase = 7.5 , __UpperCamelCase = None , __UpperCamelCase = 1 , __UpperCamelCase = 0.0 , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = "pil" , __UpperCamelCase = True , __UpperCamelCase = None , __UpperCamelCase = 1 , __UpperCamelCase = None , **__UpperCamelCase , ):
'''simple docstring'''
if isinstance(__UpperCamelCase , __UpperCamelCase ):
__a : Union[str, Any] = 1
elif isinstance(__UpperCamelCase , __UpperCamelCase ):
__a : Tuple = len(__UpperCamelCase )
else:
raise ValueError(f"""`prompt` has to be of type `str` or `list` but is {type(__UpperCamelCase )}""" )
if height % 8 != 0 or width % 8 != 0:
raise ValueError(f"""`height` and `width` have to be divisible by 8 but are {height} and {width}.""" )
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(__UpperCamelCase , __UpperCamelCase ) or callback_steps <= 0)
):
raise ValueError(
f"""`callback_steps` has to be a positive integer but is {callback_steps} of type"""
f""" {type(__UpperCamelCase )}.""" )
# get prompt text embeddings
__a : Tuple = self.tokenizer(
__UpperCamelCase , padding="""max_length""" , max_length=self.tokenizer.model_max_length , return_tensors="""pt""" , )
__a : Union[str, Any] = text_inputs.input_ids
if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
__a : str = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
logger.warning(
"""The following part of your input was truncated because CLIP can only handle sequences up to"""
f""" {self.tokenizer.model_max_length} tokens: {removed_text}""" )
__a : Optional[int] = text_input_ids[:, : self.tokenizer.model_max_length]
if text_embeddings is None:
__a : int = self.text_encoder(text_input_ids.to(self.device ) )[0]
# duplicate text embeddings for each generation per prompt, using mps friendly method
__a , __a , __a : Union[str, Any] = text_embeddings.shape
__a : Optional[Any] = text_embeddings.repeat(1 , __UpperCamelCase , 1 )
__a : Union[str, Any] = text_embeddings.view(bs_embed * num_images_per_prompt , __UpperCamelCase , -1 )
        # here `guidance_scale` is defined analogously to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
__a : Any = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
__a : List[str]
if negative_prompt is None:
__a : Optional[Any] = [""""""]
elif type(__UpperCamelCase ) is not type(__UpperCamelCase ):
raise TypeError(
f"""`negative_prompt` should be the same type to `prompt`, but got {type(__UpperCamelCase )} !="""
f""" {type(__UpperCamelCase )}.""" )
elif isinstance(__UpperCamelCase , __UpperCamelCase ):
__a : Any = [negative_prompt]
elif batch_size != len(__UpperCamelCase ):
raise ValueError(
f"""`negative_prompt`: {negative_prompt} has batch size {len(__UpperCamelCase )}, but `prompt`:"""
f""" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"""
""" the batch size of `prompt`.""" )
else:
__a : Tuple = negative_prompt
__a : Any = text_input_ids.shape[-1]
__a : List[str] = self.tokenizer(
__UpperCamelCase , padding="""max_length""" , max_length=__UpperCamelCase , truncation=__UpperCamelCase , return_tensors="""pt""" , )
__a : str = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
__a : List[str] = uncond_embeddings.shape[1]
__a : List[Any] = uncond_embeddings.repeat(__UpperCamelCase , __UpperCamelCase , 1 )
__a : Tuple = uncond_embeddings.view(batch_size * num_images_per_prompt , __UpperCamelCase , -1 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
__a : List[Any] = torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
__a : Tuple = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
__a : List[Any] = (batch_size * num_images_per_prompt, self.unet.config.in_channels, 64, 64)
__a : int = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not exist on mps
__a : Any = torch.randn(
__UpperCamelCase , generator=__UpperCamelCase , device="""cpu""" , dtype=__UpperCamelCase ).to(self.device )
__a : Optional[Any] = torch.randn(__UpperCamelCase , generator=__UpperCamelCase , device="""cpu""" , dtype=__UpperCamelCase ).to(
self.device )
else:
__a : Optional[int] = torch.randn(
__UpperCamelCase , generator=__UpperCamelCase , device=self.device , dtype=__UpperCamelCase )
__a : str = torch.randn(__UpperCamelCase , generator=__UpperCamelCase , device=self.device , dtype=__UpperCamelCase )
else:
if latents_reference.shape != latents_shape:
raise ValueError(f"""Unexpected latents shape, got {latents.shape}, expected {latents_shape}""" )
__a : Optional[Any] = latents_reference.to(self.device )
__a : str = latents.to(self.device )
            # This is the key part of the pipeline where we
            # try to ensure that the generated images with the same seed
            # but different sizes actually result in similar images
__a : List[str] = (latents_shape[3] - latents_shape_reference[3]) // 2
__a : int = (latents_shape[2] - latents_shape_reference[2]) // 2
__a : int = latents_shape_reference[3] if dx >= 0 else latents_shape_reference[3] + 2 * dx
__a : Tuple = latents_shape_reference[2] if dy >= 0 else latents_shape_reference[2] + 2 * dy
__a : Optional[Any] = 0 if dx < 0 else dx
__a : Optional[Any] = 0 if dy < 0 else dy
__a : Optional[int] = max(-dx , 0 )
__a : Optional[Any] = max(-dy , 0 )
__a : Optional[int] = latents_reference[:, :, dy : dy + h, dx : dx + w]
# set timesteps
self.scheduler.set_timesteps(__UpperCamelCase )
        # Some schedulers like PNDM have timesteps as arrays
        # It's more efficient to move all timesteps to the correct device beforehand
__a : Dict = self.scheduler.timesteps.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
__a : Any = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
__a : List[Any] = """eta""" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
__a : Optional[Any] = {}
if accepts_eta:
__a : Union[str, Any] = eta
for i, t in enumerate(self.progress_bar(__UpperCamelCase ) ):
# expand the latents if we are doing classifier free guidance
__a : List[str] = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
__a : Tuple = self.scheduler.scale_model_input(__UpperCamelCase , __UpperCamelCase )
# predict the noise residual
__a : Union[str, Any] = self.unet(__UpperCamelCase , __UpperCamelCase , encoder_hidden_states=__UpperCamelCase ).sample
# perform guidance
if do_classifier_free_guidance:
__a , __a : List[str] = noise_pred.chunk(2 )
__a : Optional[int] = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# compute the previous noisy sample x_t -> x_t-1
__a : List[Any] = self.scheduler.step(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , **__UpperCamelCase ).prev_sample
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
__a : Optional[Any] = 1 / 0.1_8_2_1_5 * latents
__a : Optional[int] = self.vae.decode(__UpperCamelCase ).sample
__a : List[str] = (image / 2 + 0.5).clamp(0 , 1 )
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
__a : int = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if self.safety_checker is not None:
__a : List[str] = self.feature_extractor(self.numpy_to_pil(__UpperCamelCase ) , return_tensors="""pt""" ).to(
self.device )
__a , __a : int = self.safety_checker(
images=__UpperCamelCase , clip_input=safety_checker_input.pixel_values.to(text_embeddings.dtype ) )
else:
__a : Optional[int] = None
if output_type == "pil":
__a : str = self.numpy_to_pil(__UpperCamelCase )
if not return_dict:
return (image, has_nsfw_concept)
return StableDiffusionPipelineOutput(images=__UpperCamelCase , nsfw_content_detected=__UpperCamelCase ) | 697 | 1 |
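# The guidance step in the denoising loop above is classifier-free guidance;
# the same single line at tensor level, with illustrative values:
import torch

_uncond = torch.zeros(1, 4, 8, 8)
_text = torch.ones(1, 4, 8, 8)
_scale = 7.5
_guided = _uncond + _scale * (_text - _uncond)
assert torch.allclose(_guided, torch.full((1, 4, 8, 8), 7.5))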
'''simple docstring'''
from __future__ import annotations
from statistics import mean
def _snake_case ( lowercase , lowercase , lowercase ) -> list[int]:
__a : List[Any] = [0] * no_of_processes
__a : Dict = [0] * no_of_processes
# Initialize remaining_time to waiting_time.
for i in range(lowercase ):
__a : List[Any] = burst_time[i]
__a : list[int] = []
__a : Any = 0
__a : Optional[int] = 0
    # While processes remain uncompleted: any process whose arrival time has
    # passed and which still has remaining execution time joins ready_process,
    # and the shortest process in ready_process (target_process) is run.
while completed != no_of_processes:
__a : int = []
__a : Optional[Any] = -1
for i in range(lowercase ):
if (arrival_time[i] <= total_time) and (remaining_time[i] > 0):
ready_process.append(lowercase )
if len(lowercase ) > 0:
__a : Any = ready_process[0]
for i in ready_process:
if remaining_time[i] < remaining_time[target_process]:
__a : List[Any] = i
total_time += burst_time[target_process]
completed += 1
__a : int = 0
__a : str = (
total_time - arrival_time[target_process] - burst_time[target_process]
)
else:
total_time += 1
return waiting_time
def _snake_case ( lowercase , lowercase , lowercase ) -> list[int]:
__a : List[Any] = [0] * no_of_processes
for i in range(lowercase ):
__a : str = burst_time[i] + waiting_time[i]
return turn_around_time
if __name__ == "__main__":
print('[TEST CASE 01]')
__SCREAMING_SNAKE_CASE : Dict = 4
__SCREAMING_SNAKE_CASE : List[str] = [2, 5, 3, 7]
__SCREAMING_SNAKE_CASE : Optional[int] = [0, 0, 0, 0]
__SCREAMING_SNAKE_CASE : Tuple = calculate_waitingtime(arrival_time, burst_time, no_of_processes)
__SCREAMING_SNAKE_CASE : Optional[int] = calculate_turnaroundtime(
burst_time, no_of_processes, waiting_time
)
# Printing the Result
print('PID\tBurst Time\tArrival Time\tWaiting Time\tTurnaround Time')
for i, process_id in enumerate(list(range(1, 5))):
print(
f'''{process_id}\t{burst_time[i]}\t\t\t{arrival_time[i]}\t\t\t\t'''
f'''{waiting_time[i]}\t\t\t\t{turn_around_time[i]}'''
)
print(f'''\nAverage waiting time = {mean(waiting_time):.5f}''')
print(f'''Average turnaround time = {mean(turn_around_time):.5f}''') | 697 |
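# Hand-checked expectation for TEST CASE 01 above: with every arrival at 0
# the shortest job runs first (bursts 2, 3, 5, 7), giving waiting times
# [0, 5, 2, 10] for P1..P4.
_expected_waiting = [0, 5, 2, 10]
assert sum(_expected_waiting) / 4 == 4.25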
'''simple docstring'''
import numpy as np
from PIL import Image
def _snake_case ( lowercase , lowercase , lowercase ) -> np.ndarray:
__a : Any = np.array(lowercase )
if arr.shape[0] != arr.shape[1]:
raise ValueError("""The input array is not a square matrix""" )
__a : Union[str, Any] = 0
__a : Dict = 0
__a : Optional[Any] = 0
__a : Tuple = 0
# compute the shape of the output matrix
__a : Optional[int] = (arr.shape[0] - size) // stride + 1
# initialize the output matrix with zeros of shape maxpool_shape
__a : int = np.zeros((maxpool_shape, maxpool_shape) )
while i < arr.shape[0]:
if i + size > arr.shape[0]:
# if the end of the matrix is reached, break
break
while j < arr.shape[1]:
# if the end of the matrix is reached, break
if j + size > arr.shape[1]:
break
# compute the maximum of the pooling matrix
__a : Optional[Any] = np.max(arr[i : i + size, j : j + size] )
# shift the pooling matrix by stride of column pixels
j += stride
mat_j += 1
# shift the pooling matrix by stride of row pixels
i += stride
mat_i += 1
# reset the column index to 0
__a : Optional[Any] = 0
__a : str = 0
return updated_arr
def _snake_case ( lowercase , lowercase , lowercase ) -> np.ndarray:
__a : int = np.array(lowercase )
if arr.shape[0] != arr.shape[1]:
raise ValueError("""The input array is not a square matrix""" )
__a : int = 0
__a : Optional[Any] = 0
__a : str = 0
__a : List[Any] = 0
# compute the shape of the output matrix
__a : int = (arr.shape[0] - size) // stride + 1
# initialize the output matrix with zeros of shape avgpool_shape
__a : Optional[int] = np.zeros((avgpool_shape, avgpool_shape) )
while i < arr.shape[0]:
# if the end of the matrix is reached, break
if i + size > arr.shape[0]:
break
while j < arr.shape[1]:
# if the end of the matrix is reached, break
if j + size > arr.shape[1]:
break
# compute the average of the pooling matrix
__a : Any = int(np.average(arr[i : i + size, j : j + size] ) )
# shift the pooling matrix by stride of column pixels
j += stride
mat_j += 1
# shift the pooling matrix by stride of row pixels
i += stride
mat_i += 1
# reset the column index to 0
__a : str = 0
__a : List[Any] = 0
return updated_arr
# Main Function
if __name__ == "__main__":
from doctest import testmod
testmod(name='avgpooling', verbose=True)
# Loading the image
__SCREAMING_SNAKE_CASE : str = Image.open('path_to_image')
# Converting the image to numpy array and maxpooling, displaying the result
# Ensure that the image is a square matrix
Image.fromarray(maxpooling(np.array(image), size=3, stride=2)).show()
# Converting the image to numpy array and averagepooling, displaying the result
# Ensure that the image is a square matrix
Image.fromarray(avgpooling(np.array(image), size=3, stride=2)).show() | 697 | 1 |
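# Hand-checkable micro-example of 2x2 max pooling with stride 2, mirroring
# the sliding-window logic above:
import numpy as np

_arr = np.arange(16).reshape(4, 4)  # rows 0..3, 4..7, 8..11, 12..15
_out = np.zeros((2, 2))
for _i in range(2):
    for _j in range(2):
        _out[_i, _j] = _arr[2 * _i : 2 * _i + 2, 2 * _j : 2 * _j + 2].max()
assert (_out == np.array([[5, 7], [13, 15]])).all()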
'''simple docstring'''
import os
from math import logaa
def _snake_case ( lowercase = "base_exp.txt" ) -> int:
__a : float = 0
__a : Optional[int] = 0
for i, line in enumerate(open(os.path.join(os.path.dirname(lowercase ) , lowercase ) ) ):
        __a , __a : str = list(map(int , line.split(""",""" ) ) )
if x * logaa(lowercase ) > largest:
__a : Dict = x * logaa(lowercase )
__a : Any = i + 1
return result
if __name__ == "__main__":
print(solution()) | 697 |
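# The log trick above in miniature: compare 2**11 against 3**7 without
# computing either power.
from math import log10

assert 11 * log10(2) < 7 * log10(3)  # so 3**7 (= 2187) beats 2**11 (= 2048)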
'''simple docstring'''
import qiskit
def _snake_case ( lowercase , lowercase ) -> qiskit.result.counts.Counts:
__a : Any = qiskit.Aer.get_backend("""aer_simulator""" )
# Create a Quantum Circuit acting on the q register
__a : str = qiskit.QuantumCircuit(lowercase , lowercase )
# Map the quantum measurement to the classical bits
circuit.measure([0] , [0] )
# Execute the circuit on the simulator
__a : Any = qiskit.execute(lowercase , lowercase , shots=1_0_0_0 )
# Return the histogram data of the results of the experiment.
return job.result().get_counts(lowercase )
if __name__ == "__main__":
print(f'''Total count for various states are: {single_qubit_measure(1, 1)}''') | 697 | 1 |
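# A variation on the circuit above, assuming the same (pre-1.0) qiskit API
# the file itself uses: a Hadamard gate puts the qubit in an equal
# superposition, so '0' and '1' each appear in roughly half the shots.
import qiskit

_backend = qiskit.Aer.get_backend("aer_simulator")
_circuit = qiskit.QuantumCircuit(1, 1)
_circuit.h(0)  # |0> -> (|0> + |1>) / sqrt(2)
_circuit.measure([0], [0])
_counts = qiskit.execute(_circuit, _backend, shots=1_000).result().get_counts(_circuit)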
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__SCREAMING_SNAKE_CASE : Union[str, Any] = logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE : List[str] = {
'camembert-base': 'https://huggingface.co/camembert-base/resolve/main/config.json',
'umberto-commoncrawl-cased-v1': (
'https://huggingface.co/Musixmatch/umberto-commoncrawl-cased-v1/resolve/main/config.json'
),
'umberto-wikipedia-uncased-v1': (
'https://huggingface.co/Musixmatch/umberto-wikipedia-uncased-v1/resolve/main/config.json'
),
}
class SCREAMING_SNAKE_CASE__ ( __UpperCamelCase ):
lowercase__ = "camembert"
def __init__( self , __UpperCamelCase=3_0522 , __UpperCamelCase=768 , __UpperCamelCase=12 , __UpperCamelCase=12 , __UpperCamelCase=3072 , __UpperCamelCase="gelu" , __UpperCamelCase=0.1 , __UpperCamelCase=0.1 , __UpperCamelCase=512 , __UpperCamelCase=2 , __UpperCamelCase=0.0_2 , __UpperCamelCase=1E-12 , __UpperCamelCase=1 , __UpperCamelCase=0 , __UpperCamelCase=2 , __UpperCamelCase="absolute" , __UpperCamelCase=True , __UpperCamelCase=None , **__UpperCamelCase , ):
'''simple docstring'''
super().__init__(pad_token_id=__UpperCamelCase , bos_token_id=__UpperCamelCase , eos_token_id=__UpperCamelCase , **__UpperCamelCase )
__a : List[str] = vocab_size
__a : Any = hidden_size
__a : Any = num_hidden_layers
__a : Union[str, Any] = num_attention_heads
__a : Union[str, Any] = hidden_act
__a : Any = intermediate_size
__a : Union[str, Any] = hidden_dropout_prob
__a : Tuple = attention_probs_dropout_prob
__a : List[str] = max_position_embeddings
__a : Tuple = type_vocab_size
__a : Dict = initializer_range
__a : Union[str, Any] = layer_norm_eps
__a : str = position_embedding_type
__a : Tuple = use_cache
__a : Optional[int] = classifier_dropout
class SCREAMING_SNAKE_CASE__ ( __UpperCamelCase ):
@property
def __lowerCamelCase ( self ):
'''simple docstring'''
if self.task == "multiple-choice":
__a : Optional[int] = {0: """batch""", 1: """choice""", 2: """sequence"""}
else:
__a : Tuple = {0: """batch""", 1: """sequence"""}
return OrderedDict(
[
("""input_ids""", dynamic_axis),
("""attention_mask""", dynamic_axis),
] ) | 697 |
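# The ONNX ``inputs`` property above only switches the dynamic axes on the
# task; the same decision in a self-contained miniature:
from collections import OrderedDict


def _dynamic_axes(task):
    if task == "multiple-choice":
        axis = {0: "batch", 1: "choice", 2: "sequence"}
    else:
        axis = {0: "batch", 1: "sequence"}
    return OrderedDict([("input_ids", axis), ("attention_mask", axis)])


assert list(_dynamic_axes("default")) == ["input_ids", "attention_mask"]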
'''simple docstring'''
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
WavaVecaConformerConfig,
WavaVecaConformerForCTC,
WavaVecaConformerForPreTraining,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
__SCREAMING_SNAKE_CASE : str = logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE : Any = {
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.linear_k': 'encoder.layers.*.self_attn.linear_k',
'self_attn.linear_v': 'encoder.layers.*.self_attn.linear_v',
'self_attn.linear_q': 'encoder.layers.*.self_attn.linear_q',
'self_attn.pos_bias_u': 'encoder.layers.*.self_attn.pos_bias_u',
'self_attn.pos_bias_v': 'encoder.layers.*.self_attn.pos_bias_v',
'self_attn.linear_out': 'encoder.layers.*.self_attn.linear_out',
'self_attn.linear_pos': 'encoder.layers.*.self_attn.linear_pos',
'self_attn.rotary_emb': 'encoder.embed_positions',
'self_attn_layer_norm': 'encoder.layers.*.self_attn_layer_norm',
'conv_module.pointwise_conv1': 'encoder.layers.*.conv_module.pointwise_conv1',
'conv_module.pointwise_conv2': 'encoder.layers.*.conv_module.pointwise_conv2',
'conv_module.depthwise_conv': 'encoder.layers.*.conv_module.depthwise_conv',
'conv_module.batch_norm': 'encoder.layers.*.conv_module.batch_norm',
'conv_module.layer_norm': 'encoder.layers.*.conv_module.layer_norm',
'ffn1.w_1': 'encoder.layers.*.ffn1.intermediate_dense',
'ffn1.w_2': 'encoder.layers.*.ffn1.output_dense',
'ffn1.layer_norm': 'encoder.layers.*.ffn1_layer_norm',
'ffn2.w_1': 'encoder.layers.*.ffn2.intermediate_dense',
'ffn2.w_2': 'encoder.layers.*.ffn2.output_dense',
'ffn2.layer_norm': 'encoder.layers.*.ffn2_layer_norm',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'quantizer.weight_proj': 'quantizer.weight_proj',
'quantizer.vars': 'quantizer.codevectors',
'project_q': 'project_q',
'final_proj': 'project_hid',
'w2v_encoder.proj': 'lm_head',
'mask_emb': 'masked_spec_embed',
}
__SCREAMING_SNAKE_CASE : Optional[Any] = [
'lm_head',
'quantizer.weight_proj',
'quantizer.codevectors',
'project_q',
'project_hid',
]
def _snake_case ( lowercase , lowercase , lowercase , lowercase , lowercase ) -> List[Any]:
for attribute in key.split(""".""" ):
__a : str = getattr(lowercase , lowercase )
if weight_type is not None:
__a : Dict = getattr(lowercase , lowercase ).shape
else:
__a : Dict = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
F"""Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be"""
F""" {value.shape} for {full_name}""" )
if weight_type == "weight":
__a : Any = value
elif weight_type == "weight_g":
__a : int = value
elif weight_type == "weight_v":
__a : int = value
elif weight_type == "bias":
__a : List[Any] = value
elif weight_type == "running_mean":
__a : Union[str, Any] = value
elif weight_type == "running_var":
__a : Tuple = value
elif weight_type == "num_batches_tracked":
__a : Optional[int] = value
elif weight_type == "inv_freq":
__a : List[str] = value
else:
__a : List[str] = value
logger.info(F"""{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.""" )
def _snake_case ( lowercase , lowercase , lowercase ) -> Dict:
__a : Dict = []
__a : Dict = fairseq_model.state_dict()
__a : Tuple = hf_model.wavaveca_conformer.feature_extractor
for name, value in fairseq_dict.items():
__a : int = False
if "conv_layers" in name:
load_conv_layer(
lowercase , lowercase , lowercase , lowercase , hf_model.config.feat_extract_norm == """group""" , )
__a : List[Any] = True
else:
for key, mapped_key in MAPPING.items():
__a : Optional[int] = """wav2vec2_conformer.""" + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split("""w2v_model.""" )[-1] == name.split(""".""" )[0]:
__a : str = True
if "*" in mapped_key:
__a : Optional[int] = name.split(lowercase )[0].split(""".""" )[-2]
__a : List[Any] = mapped_key.replace("""*""" , lowercase )
if "pos_bias_u" in name:
__a : Union[str, Any] = None
elif "pos_bias_v" in name:
__a : List[Any] = None
elif "weight_g" in name:
__a : List[Any] = """weight_g"""
elif "weight_v" in name:
__a : List[Any] = """weight_v"""
elif "bias" in name:
__a : Optional[int] = """bias"""
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
__a : str = """weight"""
elif "running_mean" in name:
__a : List[str] = """running_mean"""
elif "inv_freq" in name:
__a : Dict = """inv_freq"""
elif "running_var" in name:
__a : Union[str, Any] = """running_var"""
elif "num_batches_tracked" in name:
__a : int = """num_batches_tracked"""
else:
__a : Optional[int] = None
set_recursively(lowercase , lowercase , lowercase , lowercase , lowercase )
continue
if not is_used:
unused_weights.append(lowercase )
logger.warning(F"""Unused weights: {unused_weights}""" )
def _snake_case ( lowercase , lowercase , lowercase , lowercase , lowercase ) -> List[str]:
__a : Optional[Any] = full_name.split("""conv_layers.""" )[-1]
__a : Union[str, Any] = name.split(""".""" )
__a : Optional[Any] = int(items[0] )
__a : int = int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" )
__a : Dict = value
logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" )
__a : str = value
logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.""" )
__a : Dict = value
logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.""" )
__a : Union[str, Any] = value
logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
else:
unused_weights.append(lowercase )
@torch.no_grad()
def _snake_case ( lowercase , lowercase , lowercase=None , lowercase=None , lowercase=True ) -> Optional[Any]:
if config_path is not None:
__a : Any = WavaVecaConformerConfig.from_pretrained(lowercase , hidden_act="""swish""" )
else:
__a : Optional[int] = WavaVecaConformerConfig()
if "rope" in checkpoint_path:
__a : Optional[Any] = """rotary"""
if is_finetuned:
if dict_path:
__a : List[Any] = Dictionary.load(lowercase )
            # important: change the bos & pad token ids, since the CTC blank
            # symbol is <pad> and not <s> as in fairseq
__a : int = target_dict.pad_index
__a : List[str] = target_dict.bos_index
__a : str = target_dict.eos_index
__a : Dict = len(target_dict.symbols )
__a : Any = os.path.join(lowercase , """vocab.json""" )
if not os.path.isdir(lowercase ):
logger.error("""--pytorch_dump_folder_path ({}) should be a directory""".format(lowercase ) )
return
os.makedirs(lowercase , exist_ok=lowercase )
__a : Dict = target_dict.indices
# fairseq has the <pad> and <s> switched
__a : Optional[Any] = 0
__a : List[Any] = 1
with open(lowercase , """w""" , encoding="""utf-8""" ) as vocab_handle:
json.dump(lowercase , lowercase )
__a : int = WavaVecaCTCTokenizer(
lowercase , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token="""|""" , do_lower_case=lowercase , )
__a : Optional[int] = True if config.feat_extract_norm == """layer""" else False
__a : Dict = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=1_6_0_0_0 , padding_value=0 , do_normalize=lowercase , return_attention_mask=lowercase , )
__a : str = WavaVecaProcessor(feature_extractor=lowercase , tokenizer=lowercase )
processor.save_pretrained(lowercase )
__a : List[str] = WavaVecaConformerForCTC(lowercase )
else:
__a : Optional[int] = WavaVecaConformerForPreTraining(lowercase )
if is_finetuned:
__a , __a , __a : Dict = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={"""data""": """/""".join(dict_path.split("""/""" )[:-1] )} )
else:
__a : Optional[int] = argparse.Namespace(task="""audio_pretraining""" )
__a : Tuple = fairseq.tasks.setup_task(lowercase )
__a , __a , __a : int = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] , task=lowercase )
__a : Any = model[0].eval()
recursively_load_weights(lowercase , lowercase , not is_finetuned )
hf_wavavec.save_pretrained(lowercase )
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE : Dict = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--not_finetuned', action='store_true', help='Whether the model to convert is a fine-tuned model or not'
)
__SCREAMING_SNAKE_CASE : int = parser.parse_args()
convert_wavaveca_conformer_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
) | 697 | 1 |
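# Core of the renaming logic above: the fairseq layer index is spliced into
# the "*" slot of the mapped HF key. A minimal reproduction of the intended
# string surgery (names are illustrative):
_name = "encoder.layers.3.self_attn.linear_q.weight"
_mapped = "encoder.layers.*.self_attn.linear_q"
_layer = _name.split("self_attn.linear_q")[0].split(".")[-2]
assert _mapped.replace("*", _layer) == "encoder.layers.3.self_attn.linear_q"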
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__SCREAMING_SNAKE_CASE : Optional[Any] = {
'configuration_luke': ['LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP', 'LukeConfig'],
'tokenization_luke': ['LukeTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE : Optional[Any] = [
'LUKE_PRETRAINED_MODEL_ARCHIVE_LIST',
'LukeForEntityClassification',
'LukeForEntityPairClassification',
'LukeForEntitySpanClassification',
'LukeForMultipleChoice',
'LukeForQuestionAnswering',
'LukeForSequenceClassification',
'LukeForTokenClassification',
'LukeForMaskedLM',
'LukeModel',
'LukePreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_luke import LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP, LukeConfig
from .tokenization_luke import LukeTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_luke import (
LUKE_PRETRAINED_MODEL_ARCHIVE_LIST,
LukeForEntityClassification,
LukeForEntityPairClassification,
LukeForEntitySpanClassification,
LukeForMaskedLM,
LukeForMultipleChoice,
LukeForQuestionAnswering,
LukeForSequenceClassification,
LukeForTokenClassification,
LukeModel,
LukePreTrainedModel,
)
else:
import sys
__SCREAMING_SNAKE_CASE : Optional[int] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) | 697 |
'''simple docstring'''
import warnings
from functools import wraps
from typing import Callable
def _snake_case ( lowercase ) -> Callable:
@wraps(lowercase )
def _inner_fn(*lowercase , **lowercase ):
warnings.warn(
(F"""'{fn.__name__}' is experimental and might be subject to breaking changes in the future.""") , lowercase , )
return fn(*lowercase , **lowercase )
return _inner_fn | 697 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
__SCREAMING_SNAKE_CASE : Optional[Any] = {
'configuration_bridgetower': [
'BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'BridgeTowerConfig',
'BridgeTowerTextConfig',
'BridgeTowerVisionConfig',
],
'processing_bridgetower': ['BridgeTowerProcessor'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE : str = ['BridgeTowerImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE : List[str] = [
'BRIDGETOWER_PRETRAINED_MODEL_ARCHIVE_LIST',
'BridgeTowerForContrastiveLearning',
'BridgeTowerForImageAndTextRetrieval',
'BridgeTowerForMaskedLM',
'BridgeTowerModel',
'BridgeTowerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_bridgetower import (
BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP,
BridgeTowerConfig,
BridgeTowerTextConfig,
BridgeTowerVisionConfig,
)
from .processing_bridgetower import BridgeTowerProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_bridgetower import BridgeTowerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bridgetower import (
BRIDGETOWER_PRETRAINED_MODEL_ARCHIVE_LIST,
BridgeTowerForContrastiveLearning,
BridgeTowerForImageAndTextRetrieval,
BridgeTowerForMaskedLM,
BridgeTowerModel,
BridgeTowerPreTrainedModel,
)
else:
import sys
__SCREAMING_SNAKE_CASE : Tuple = _LazyModule(__name__, globals()['__file__'], _import_structure) | 697 |
'''simple docstring'''
from typing import List, Optional, Union
import numpy as np
from ....audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function
from ....feature_extraction_sequence_utils import SequenceFeatureExtractor
from ....feature_extraction_utils import BatchFeature
from ....file_utils import PaddingStrategy, TensorType
from ....utils import logging
__SCREAMING_SNAKE_CASE : Tuple = logging.get_logger(__name__)
class SCREAMING_SNAKE_CASE__ ( __UpperCamelCase ):
lowercase__ = ["input_features", "attention_mask"]
def __init__( self , __UpperCamelCase=80 , __UpperCamelCase=1_6000 , __UpperCamelCase=0.0 , __UpperCamelCase=10 , __UpperCamelCase=25 , __UpperCamelCase="hamming_window" , __UpperCamelCase=3_2_7_6_8.0 , __UpperCamelCase=0.9_7 , __UpperCamelCase=1.0 , __UpperCamelCase=True , __UpperCamelCase=True , __UpperCamelCase=False , **__UpperCamelCase , ):
'''simple docstring'''
super().__init__(feature_size=__UpperCamelCase , sampling_rate=__UpperCamelCase , padding_value=__UpperCamelCase , **__UpperCamelCase )
__a : List[str] = feature_size
__a : List[str] = sampling_rate
__a : int = padding_value
__a : Any = hop_length
__a : int = win_length
__a : Tuple = frame_signal_scale
__a : Union[str, Any] = preemphasis_coeff
__a : List[str] = mel_floor
__a : Union[str, Any] = normalize_means
__a : Optional[Any] = normalize_vars
__a : Optional[Any] = win_function
__a : Union[str, Any] = return_attention_mask
__a : List[Any] = win_length * sampling_rate // 1000
__a : List[Any] = hop_length * sampling_rate // 1000
__a : Optional[Any] = optimal_fft_length(self.sample_size )
__a : Any = (self.n_fft // 2) + 1
def __lowerCamelCase ( self , __UpperCamelCase ):
'''simple docstring'''
if self.win_function == "hamming_window":
__a : str = window_function(window_length=self.sample_size , name=self.win_function , periodic=__UpperCamelCase )
else:
__a : Dict = window_function(window_length=self.sample_size , name=self.win_function )
__a : Optional[Any] = mel_filter_bank(
num_frequency_bins=self.n_freqs , num_mel_filters=self.feature_size , min_frequency=0.0 , max_frequency=self.sampling_rate / 2.0 , sampling_rate=self.sampling_rate , )
__a : Any = spectrogram(
one_waveform * self.frame_signal_scale , window=__UpperCamelCase , frame_length=self.sample_size , hop_length=self.sample_stride , fft_length=self.n_fft , center=__UpperCamelCase , preemphasis=self.preemphasis_coeff , mel_filters=__UpperCamelCase , mel_floor=self.mel_floor , log_mel="""log""" , )
return msfc_features.T
def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ):
'''simple docstring'''
if self.normalize_means:
__a : int = x[:input_length].mean(axis=0 )
__a : str = np.subtract(__UpperCamelCase , __UpperCamelCase )
if self.normalize_vars:
__a : Dict = x[:input_length].std(axis=0 )
__a : Dict = np.divide(__UpperCamelCase , __UpperCamelCase )
if input_length < x.shape[0]:
__a : Union[str, Any] = padding_value
# make sure array is in float32
__a : Any = x.astype(np.floataa )
return x
def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase = None ):
'''simple docstring'''
__a : Tuple = attention_mask.sum(-1 ) if attention_mask is not None else [x.shape[0] for x in input_features]
return [self._normalize_one(__UpperCamelCase , __UpperCamelCase , self.padding_value ) for x, n in zip(__UpperCamelCase , __UpperCamelCase )]
def __call__( self , __UpperCamelCase , __UpperCamelCase = False , __UpperCamelCase = None , __UpperCamelCase = False , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , **__UpperCamelCase , ):
'''simple docstring'''
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
f"""The model corresponding to this feature extractor: {self} was trained using a sampling rate of"""
f""" {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with"""
f""" {self.sampling_rate} and not {sampling_rate}.""" )
else:
logger.warning(
"""It is strongly recommended to pass the ``sampling_rate`` argument to this function. """
"""Failing to do so can result in silent errors that might be hard to debug.""" )
__a : Tuple = isinstance(__UpperCamelCase , np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(f"""Only mono-channel audio is supported for input to {self}""" )
__a : Tuple = is_batched_numpy or (
isinstance(__UpperCamelCase , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
__a : Tuple = [np.asarray(__UpperCamelCase , dtype=np.floataa ) for speech in raw_speech]
elif not is_batched and not isinstance(__UpperCamelCase , np.ndarray ):
__a : List[str] = np.asarray(__UpperCamelCase , dtype=np.floataa )
elif isinstance(__UpperCamelCase , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
__a : str = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
__a : Any = [raw_speech]
# extract fbank features
__a : str = [self._extract_mfsc_features(__UpperCamelCase ) for one_waveform in raw_speech]
# convert into correct format for padding
__a : Optional[Any] = BatchFeature({"""input_features""": features} )
__a : Any = self.pad(
__UpperCamelCase , padding=__UpperCamelCase , max_length=__UpperCamelCase , truncation=__UpperCamelCase , pad_to_multiple_of=__UpperCamelCase , return_attention_mask=__UpperCamelCase , **__UpperCamelCase , )
# make sure list is in array format
__a : int = padded_inputs.get("""input_features""" )
if isinstance(input_features[0] , __UpperCamelCase ):
__a : Union[str, Any] = [np.asarray(__UpperCamelCase , dtype=np.floataa ) for feature in input_features]
__a : List[str] = padded_inputs.get("""attention_mask""" )
if attention_mask is not None:
__a : Optional[int] = [np.asarray(__UpperCamelCase , dtype=np.intaa ) for array in attention_mask]
if self.normalize_means or self.normalize_vars:
__a : Optional[Any] = (
np.array(__UpperCamelCase , dtype=np.intaa )
if self._get_padding_strategies(__UpperCamelCase , max_length=__UpperCamelCase ) is not PaddingStrategy.DO_NOT_PAD
and padding
else None
)
__a : int = self.normalize(
padded_inputs["""input_features"""] , attention_mask=__UpperCamelCase )
if return_tensors is not None:
__a : List[Any] = padded_inputs.convert_to_tensors(__UpperCamelCase )
return padded_inputs | 697 | 1 |
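# The per-utterance normalization above is plain cepstral mean/variance
# normalization (CMVN); a numpy equivalent for one full-length feature
# matrix, with an illustrative random input:
import numpy as np

_x = np.random.default_rng(0).normal(3.0, 2.0, size=(40, 80)).astype(np.float32)
_norm = (_x - _x.mean(axis=0)) / _x.std(axis=0)
assert abs(float(_norm.mean())) < 1e-5 and abs(float(_norm.std()) - 1.0) < 1e-2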
'''simple docstring'''
import json
import os
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from requests.exceptions import HTTPError
from transformers.utils import (
CONFIG_NAME,
FLAX_WEIGHTS_NAME,
TF2_WEIGHTS_NAME,
TRANSFORMERS_CACHE,
WEIGHTS_NAME,
cached_file,
get_file_from_repo,
has_file,
)
__SCREAMING_SNAKE_CASE : List[str] = 'hf-internal-testing/tiny-random-bert'
__SCREAMING_SNAKE_CASE : Optional[Any] = os.path.join(TRANSFORMERS_CACHE, 'models--hf-internal-testing--tiny-random-bert')
__SCREAMING_SNAKE_CASE : Optional[int] = '9b8c223d42b2188cb49d29af482996f9d0f3e5a6'
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Union[str, Any] = cached_file(__UpperCamelCase , __UpperCamelCase )
# Should have downloaded the file in here
self.assertTrue(os.path.isdir(__UpperCamelCase ) )
# Cache should contain at least those three subfolders:
for subfolder in ["blobs", "refs", "snapshots"]:
self.assertTrue(os.path.isdir(os.path.join(__UpperCamelCase , __UpperCamelCase ) ) )
with open(os.path.join(__UpperCamelCase , """refs""" , """main""" ) ) as f:
__a : Union[str, Any] = f.read()
self.assertEqual(__UpperCamelCase , os.path.join(__UpperCamelCase , """snapshots""" , __UpperCamelCase , __UpperCamelCase ) )
self.assertTrue(os.path.isfile(__UpperCamelCase ) )
# File is cached at the same place the second time.
__a : Union[str, Any] = cached_file(__UpperCamelCase , __UpperCamelCase )
self.assertEqual(__UpperCamelCase , __UpperCamelCase )
# Using a specific revision to test the full commit hash.
__a : List[str] = cached_file(__UpperCamelCase , __UpperCamelCase , revision="""9b8c223""" )
self.assertEqual(__UpperCamelCase , os.path.join(__UpperCamelCase , """snapshots""" , __UpperCamelCase , __UpperCamelCase ) )
def __lowerCamelCase ( self ):
'''simple docstring'''
with self.assertRaisesRegex(__UpperCamelCase , """is not a valid model identifier""" ):
__a : Union[str, Any] = cached_file("""tiny-random-bert""" , __UpperCamelCase )
with self.assertRaisesRegex(__UpperCamelCase , """is not a valid git identifier""" ):
__a : List[Any] = cached_file(__UpperCamelCase , __UpperCamelCase , revision="""aaaa""" )
with self.assertRaisesRegex(__UpperCamelCase , """does not appear to have a file named""" ):
__a : List[str] = cached_file(__UpperCamelCase , """conf""" )
def __lowerCamelCase ( self ):
'''simple docstring'''
with self.assertRaisesRegex(__UpperCamelCase , """does not appear to have a file named""" ):
__a : Any = cached_file(__UpperCamelCase , """conf""" )
with open(os.path.join(__UpperCamelCase , """refs""" , """main""" ) ) as f:
__a : int = f.read()
self.assertTrue(os.path.isfile(os.path.join(__UpperCamelCase , """.no_exist""" , __UpperCamelCase , """conf""" ) ) )
__a : List[str] = cached_file(__UpperCamelCase , """conf""" , _raise_exceptions_for_missing_entries=__UpperCamelCase )
self.assertIsNone(__UpperCamelCase )
__a : Union[str, Any] = cached_file(__UpperCamelCase , """conf""" , local_files_only=__UpperCamelCase , _raise_exceptions_for_missing_entries=__UpperCamelCase )
self.assertIsNone(__UpperCamelCase )
__a : List[str] = mock.Mock()
__a : List[Any] = 500
__a : Optional[int] = {}
__a : str = HTTPError
__a : Tuple = {}
# Under the mock environment we get a 500 error when trying to reach the tokenizer.
with mock.patch("""requests.Session.request""" , return_value=__UpperCamelCase ) as mock_head:
__a : Optional[int] = cached_file(__UpperCamelCase , """conf""" , _raise_exceptions_for_connection_errors=__UpperCamelCase )
self.assertIsNone(__UpperCamelCase )
            # This checks that we did call the fake head request
mock_head.assert_called()
def __lowerCamelCase ( self ):
'''simple docstring'''
self.assertTrue(has_file("""hf-internal-testing/tiny-bert-pt-only""" , __UpperCamelCase ) )
self.assertFalse(has_file("""hf-internal-testing/tiny-bert-pt-only""" , __UpperCamelCase ) )
self.assertFalse(has_file("""hf-internal-testing/tiny-bert-pt-only""" , __UpperCamelCase ) )
def __lowerCamelCase ( self ):
'''simple docstring'''
self.assertIsNone(get_file_from_repo("""bert-base-cased""" , """ahah.txt""" ) )
# The function raises if the repository does not exist.
with self.assertRaisesRegex(__UpperCamelCase , """is not a valid model identifier""" ):
get_file_from_repo("""bert-base-case""" , __UpperCamelCase )
# The function raises if the revision does not exist.
with self.assertRaisesRegex(__UpperCamelCase , """is not a valid git identifier""" ):
get_file_from_repo("""bert-base-cased""" , __UpperCamelCase , revision="""ahaha""" )
__a : Dict = get_file_from_repo("""bert-base-cased""" , __UpperCamelCase )
# The name is the cached name which is not very easy to test, so instead we load the content.
__a : Tuple = json.loads(open(__UpperCamelCase , """r""" ).read() )
self.assertEqual(config["""hidden_size"""] , 768 )
def __lowerCamelCase ( self ):
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmp_dir:
__a : Optional[int] = Path(__UpperCamelCase ) / """a.txt"""
filename.touch()
self.assertEqual(get_file_from_repo(__UpperCamelCase , """a.txt""" ) , str(__UpperCamelCase ) )
self.assertIsNone(get_file_from_repo(__UpperCamelCase , """b.txt""" ) ) | 697 |
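# The happy path the tests above exercise, in miniature (assumes network
# access and a transformers install; repeated lookups hit the local cache):
from transformers.utils import cached_file

_path = cached_file("hf-internal-testing/tiny-random-bert", "config.json")
_again = cached_file("hf-internal-testing/tiny-random-bert", "config.json")
assert _path == _again  # second call is served from the cache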
'''simple docstring'''
__SCREAMING_SNAKE_CASE : int = 9.80_665
def _snake_case ( lowercase , lowercase , lowercase = g ) -> float:
if fluid_density <= 0:
raise ValueError("""Impossible fluid density""" )
if volume < 0:
raise ValueError("""Impossible Object volume""" )
if gravity <= 0:
raise ValueError("""Impossible Gravity""" )
return fluid_density * gravity * volume
if __name__ == "__main__":
import doctest
# run doctest
doctest.testmod() | 697 | 1 |
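# Worked check of the formula above: displacing 0.002 m^3 of water
# (rho = 1000 kg/m^3) yields an upthrust of 1000 * 9.80665 * 0.002 N.
assert abs(1000 * 9.80665 * 0.002 - 19.6133) < 1e-6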
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
__SCREAMING_SNAKE_CASE : Optional[Any] = {
'configuration_mobilebert': [
'MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'MobileBertConfig',
'MobileBertOnnxConfig',
],
'tokenization_mobilebert': ['MobileBertTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE : str = ['MobileBertTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE : Union[str, Any] = [
'MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'MobileBertForMaskedLM',
'MobileBertForMultipleChoice',
'MobileBertForNextSentencePrediction',
'MobileBertForPreTraining',
'MobileBertForQuestionAnswering',
'MobileBertForSequenceClassification',
'MobileBertForTokenClassification',
'MobileBertLayer',
'MobileBertModel',
'MobileBertPreTrainedModel',
'load_tf_weights_in_mobilebert',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE : Dict = [
'TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFMobileBertForMaskedLM',
'TFMobileBertForMultipleChoice',
'TFMobileBertForNextSentencePrediction',
'TFMobileBertForPreTraining',
'TFMobileBertForQuestionAnswering',
'TFMobileBertForSequenceClassification',
'TFMobileBertForTokenClassification',
'TFMobileBertMainLayer',
'TFMobileBertModel',
'TFMobileBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_mobilebert import (
MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
MobileBertConfig,
MobileBertOnnxConfig,
)
from .tokenization_mobilebert import MobileBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mobilebert_fast import MobileBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mobilebert import (
MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
MobileBertLayer,
MobileBertModel,
MobileBertPreTrainedModel,
load_tf_weights_in_mobilebert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mobilebert import (
TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFMobileBertForMaskedLM,
TFMobileBertForMultipleChoice,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertMainLayer,
TFMobileBertModel,
TFMobileBertPreTrainedModel,
)
else:
import sys
__SCREAMING_SNAKE_CASE : List[str] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) | 697 |
'''simple docstring'''
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DetrImageProcessor
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
def __init__( self , __UpperCamelCase , __UpperCamelCase=7 , __UpperCamelCase=3 , __UpperCamelCase=30 , __UpperCamelCase=400 , __UpperCamelCase=True , __UpperCamelCase=None , __UpperCamelCase=True , __UpperCamelCase=1 / 255 , __UpperCamelCase=True , __UpperCamelCase=[0.5, 0.5, 0.5] , __UpperCamelCase=[0.5, 0.5, 0.5] , __UpperCamelCase=True , ):
'''simple docstring'''
__a : List[Any] = size if size is not None else {"""shortest_edge""": 18, """longest_edge""": 1333}
__a : Dict = parent
__a : Union[str, Any] = batch_size
__a : Optional[int] = num_channels
__a : Dict = min_resolution
__a : List[Any] = max_resolution
__a : int = do_resize
__a : str = size
__a : Optional[Any] = do_rescale
__a : Optional[Any] = rescale_factor
__a : str = do_normalize
__a : Any = image_mean
__a : Optional[Any] = image_std
__a : Dict = do_pad
def __lowerCamelCase ( self ):
'''simple docstring'''
return {
"do_resize": self.do_resize,
"size": self.size,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_pad": self.do_pad,
}
def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase=False ):
'''simple docstring'''
if not batched:
__a : Union[str, Any] = image_inputs[0]
if isinstance(__UpperCamelCase , Image.Image ):
__a , __a : Tuple = image.size
else:
__a , __a : Tuple = image.shape[1], image.shape[2]
if w < h:
__a : Optional[int] = int(self.size["""shortest_edge"""] * h / w )
__a : Tuple = self.size["""shortest_edge"""]
elif w > h:
__a : Optional[Any] = self.size["""shortest_edge"""]
__a : Any = int(self.size["""shortest_edge"""] * w / h )
else:
__a : Any = self.size["""shortest_edge"""]
__a : Optional[int] = self.size["""shortest_edge"""]
else:
__a : Any = []
for image in image_inputs:
__a , __a : Any = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
__a : List[Any] = max(__UpperCamelCase , key=lambda __UpperCamelCase : item[0] )[0]
__a : Optional[Any] = max(__UpperCamelCase , key=lambda __UpperCamelCase : item[1] )[1]
return expected_height, expected_width
@require_torch
@require_vision
class SCREAMING_SNAKE_CASE__ ( __UpperCamelCase , unittest.TestCase ):
lowercase__ = DetrImageProcessor if is_vision_available() else None
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : str = DetrImageProcessingTester(self )
@property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_rescale"))
        self.assertTrue(hasattr(image_processing, "rescale_factor"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_pad"))
    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18, "longest_edge": 1333})
        self.assertEqual(image_processor.do_pad, True)

        image_processor = self.image_processing_class.from_dict(
            self.image_processor_dict, size=42, max_size=84, pad_and_return_pixel_mask=False
        )
        self.assertEqual(image_processor.size, {"shortest_edge": 42, "longest_edge": 84})
        self.assertEqual(image_processor.do_pad, False)
    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
@slow
    def test_call_pytorch_with_coco_detection_annotations(self):
        # prepare image and target
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt", "r") as f:
            target = json.loads(f.read())
        target = {"image_id": 39769, "annotations": target}

        # encode them
        image_processing = DetrImageProcessor.from_pretrained("facebook/detr-resnet-50")
        encoding = image_processing(images=image, annotations=target, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)
        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))
        # verify area
        expected_area = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([75, 75, 63, 65, 17, 17])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
@slow
    def test_call_pytorch_with_coco_panoptic_annotations(self):
        # prepare image, target and masks_path
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt", "r") as f:
            target = json.loads(f.read())
        target = {"file_name": "000000039769.png", "image_id": 39769, "segments_info": target}
        masks_path = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic")

        # encode them
        image_processing = DetrImageProcessor.from_pretrained("facebook/detr-resnet-50-panoptic")
        encoding = image_processing(images=image, annotations=target, masks_path=masks_path, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)
        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))
        # verify area
        expected_area = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([17, 17, 63, 75, 75, 93])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify masks
        expected_masks_sum = 822873
        self.assertEqual(encoding["labels"][0]["masks"].sum().item(), expected_masks_sum)
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
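# Editorial usage sketch for DetrImageProcessor (not part of the test file above;
# downloading the checkpoint requires network access, and the image path is a
# placeholder):
#
#   from PIL import Image
#   from transformers import DetrImageProcessor
#
#   processor = DetrImageProcessor.from_pretrained("facebook/detr-resnet-50")
#   image = Image.open("example.jpg").convert("RGB")
#   inputs = processor(images=image, return_tensors="pt")  # pixel_values (+ pixel_mask when padding)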
'''simple docstring'''
from math import acos, sin
from typing import List, Tuple, Union
import numpy as np
import torch
from PIL import Image
from ...models import AutoencoderKL, UNet2DConditionModel
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import AudioPipelineOutput, BaseOutput, DiffusionPipeline, ImagePipelineOutput
from .mel import Mel
class AudioDiffusionPipeline(DiffusionPipeline):
    _optional_components = ["vqvae"]

    def __init__(self, vqvae: AutoencoderKL, unet: UNet2DConditionModel, mel: Mel, scheduler: Union[DDIMScheduler, DDPMScheduler]):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler, mel=mel, vqvae=vqvae)
    def get_default_steps(self) -> int:
        """Default number of inference steps: 50 for DDIM, 1000 for DDPM."""
        return 50 if isinstance(self.scheduler, DDIMScheduler) else 1000
@torch.no_grad()
    def __call__(self, batch_size: int = 1, audio_file: str = None, raw_audio: np.ndarray = None, slice: int = 0, start_step: int = 0, steps: int = None, generator: torch.Generator = None, mask_start_secs: float = 0, mask_end_secs: float = 0, step_generator: torch.Generator = None, eta: float = 0, noise: torch.Tensor = None, encoding: torch.Tensor = None, return_dict=True):
        steps = steps or self.get_default_steps()
        self.scheduler.set_timesteps(steps)
        step_generator = step_generator or generator
        # For backwards compatibility
        if type(self.unet.config.sample_size) == int:
            self.unet.config.sample_size = (self.unet.config.sample_size, self.unet.config.sample_size)
        if noise is None:
            noise = randn_tensor(
                (
                    batch_size,
                    self.unet.config.in_channels,
                    self.unet.config.sample_size[0],
                    self.unet.config.sample_size[1],
                ), generator=generator, device=self.device, )
        images = noise
        mask = None

        if audio_file is not None or raw_audio is not None:
            self.mel.load_audio(audio_file, raw_audio)
            input_image = self.mel.audio_slice_to_image(slice)
            input_image = np.frombuffer(input_image.tobytes(), dtype="uint8").reshape(
                (input_image.height, input_image.width))
            input_image = (input_image / 255) * 2 - 1
            input_images = torch.tensor(input_image[np.newaxis, :, :], dtype=torch.float).to(self.device)

            if self.vqvae is not None:
                input_images = self.vqvae.encode(torch.unsqueeze(input_images, 0)).latent_dist.sample(
                    generator=generator)[0]
                input_images = self.vqvae.config.scaling_factor * input_images

            if start_step > 0:
                images[0, 0] = self.scheduler.add_noise(input_images, noise, self.scheduler.timesteps[start_step - 1])

            pixels_per_second = (
                self.unet.config.sample_size[1] * self.mel.get_sample_rate() / self.mel.x_res / self.mel.hop_length
            )
            mask_start = int(mask_start_secs * pixels_per_second)
            mask_end = int(mask_end_secs * pixels_per_second)
            mask = self.scheduler.add_noise(input_images, noise, torch.tensor(self.scheduler.timesteps[start_step:]))
        for step, t in enumerate(self.progress_bar(self.scheduler.timesteps[start_step:])):
            if isinstance(self.unet, UNet2DConditionModel):
                model_output = self.unet(images, t, encoding)["sample"]
            else:
                model_output = self.unet(images, t)["sample"]

            if isinstance(self.scheduler, DDIMScheduler):
                images = self.scheduler.step(
                    model_output=model_output, timestep=t, sample=images, eta=eta, generator=step_generator, )["prev_sample"]
            else:
                images = self.scheduler.step(
                    model_output=model_output, timestep=t, sample=images, generator=step_generator, )["prev_sample"]

            if mask is not None:
                if mask_start > 0:
                    images[:, :, :, :mask_start] = mask[:, step, :, :mask_start]
                if mask_end > 0:
                    images[:, :, :, -mask_end:] = mask[:, step, :, -mask_end:]

        if self.vqvae is not None:
            # 0.18215 was scaling factor used in training to ensure unit variance
            images = 1 / self.vqvae.config.scaling_factor * images
            images = self.vqvae.decode(images)["sample"]

        images = (images / 2 + 0.5).clamp(0, 1)
        images = images.cpu().permute(0, 2, 3, 1).numpy()
        images = (images * 255).round().astype("uint8")
        images = list(
            (Image.fromarray(_[:, :, 0]) for _ in images)
            if images.shape[3] == 1
            else (Image.fromarray(_, mode="RGB").convert("L") for _ in images))
        audios = [self.mel.image_to_audio(_) for _ in images]
        if not return_dict:
            return images, (self.mel.get_sample_rate(), audios)

        return BaseOutput(**AudioPipelineOutput(np.array(audios)[:, np.newaxis, :]), **ImagePipelineOutput(images))
@torch.no_grad()
    def encode(self, images: List[Image.Image], steps: int = 50) -> np.ndarray:
        """Map images back to noise via deterministic DDIM inversion."""
        assert isinstance(self.scheduler, DDIMScheduler)
        self.scheduler.set_timesteps(steps)
        sample = np.array(
            [np.frombuffer(image.tobytes(), dtype="uint8").reshape((1, image.height, image.width)) for image in images])
        sample = (sample / 255) * 2 - 1
        sample = torch.Tensor(sample).to(self.device)

        for t in self.progress_bar(torch.flip(self.scheduler.timesteps, (0,))):
            prev_timestep = t - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps
            alpha_prod_t = self.scheduler.alphas_cumprod[t]
            alpha_prod_t_prev = (
                self.scheduler.alphas_cumprod[prev_timestep]
                if prev_timestep >= 0
                else self.scheduler.final_alpha_cumprod
            )
            beta_prod_t = 1 - alpha_prod_t
            model_output = self.unet(sample, t)["sample"]
            pred_sample_direction = (1 - alpha_prod_t_prev) ** 0.5 * model_output
            sample = (sample - pred_sample_direction) * alpha_prod_t_prev ** (-0.5)
            sample = sample * alpha_prod_t ** 0.5 + beta_prod_t ** 0.5 * model_output

        return sample
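    # Editorial note on encode() above: the loop is deterministic DDIM inversion. Each
    # iteration undoes one DDIM update: subtract the predicted direction
    # (1 - a_prev)**0.5 * eps, rescale by a_prev**-0.5 to get an x0 estimate, then
    # re-noise with a_t**0.5 * x0 + (1 - a_t)**0.5 * eps, walking the image back
    # toward Gaussian noise so it can be regenerated or interpolated.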
    @staticmethod
    def slerp(x0: torch.Tensor, x1: torch.Tensor, alpha: float) -> torch.Tensor:
        """Spherical linear interpolation between two tensors."""
        theta = acos(torch.dot(torch.flatten(x0), torch.flatten(x1)) / torch.norm(x0) / torch.norm(x1))
        return sin((1 - alpha) * theta) * x0 / sin(theta) + sin(alpha * theta) * x1 / sin(theta)
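# Editorial standalone sketch of the slerp above: spherical interpolation moves along
# the arc between two tensors, so intermediate points keep an intermediate norm and
# direction rather than cutting across the chord as linear interpolation does.
# Mirrors the static method; only torch and math are assumed.
#
#   import torch
#   from math import acos, sin
#
#   x0, x1 = torch.tensor([1.0, 0.0]), torch.tensor([0.0, 1.0])
#   theta = acos(torch.dot(x0.flatten(), x1.flatten()) / torch.norm(x0) / torch.norm(x1))
#   mid = sin(0.5 * theta) * x0 / sin(theta) + sin(0.5 * theta) * x1 / sin(theta)
#   # mid == tensor([0.7071, 0.7071]) -- still on the unit circle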
'''simple docstring'''
import argparse
import logging
import os
import time
import timeit
import datasets
import numpy as np
import pycuda.autoinit # noqa: F401
import pycuda.driver as cuda
import tensorrt as trt
import torch
from absl import logging as absl_logging
from accelerate import Accelerator
from datasets import load_dataset, load_metric
from torch.utils.data import DataLoader
from utils_qa import postprocess_qa_predictions
import transformers
from transformers import AutoTokenizer, EvalPrediction, default_data_collator, set_seed
from transformers.trainer_pt_utils import nested_concat, nested_truncate
TRT_LOGGER = trt.Logger(trt.Logger.WARNING)
absl_logger = absl_logging.get_absl_logger()
absl_logger.setLevel(logging.WARNING)
logger = logging.getLogger(__name__)

parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--onnx_model_path',
default=None,
type=str,
required=True,
help='Path to ONNX model: ',
)
parser.add_argument(
'--output_dir',
default=None,
type=str,
required=True,
help='The output directory where the model checkpoints and predictions will be written.',
)
# Other parameters
parser.add_argument(
'--tokenizer_name',
default='',
type=str,
required=True,
help='Pretrained tokenizer name or path if not the same as model_name',
)
parser.add_argument(
'--version_2_with_negative',
action='store_true',
help='If true, the SQuAD examples contain some that do not have an answer.',
)
parser.add_argument(
'--null_score_diff_threshold',
type=float,
default=0.0,
help='If null_score - best_non_null is greater than the threshold predict null.',
)
parser.add_argument(
'--max_seq_length',
default=384,
type=int,
help=(
'The maximum total input sequence length after WordPiece tokenization. Sequences '
'longer than this will be truncated, and sequences shorter than this will be padded.'
),
)
parser.add_argument(
'--doc_stride',
default=128,
type=int,
help='When splitting up a long document into chunks, how much stride to take between chunks.',
)
parser.add_argument('--per_device_eval_batch_size', default=8, type=int, help='Batch size per GPU/CPU for evaluation.')
parser.add_argument(
'--n_best_size',
default=20,
type=int,
help='The total number of n-best predictions to generate in the nbest_predictions.json output file.',
)
parser.add_argument(
'--max_answer_length',
default=30,
type=int,
help=(
'The maximum length of an answer that can be generated. This is needed because the start '
'and end predictions are not conditioned on one another.'
),
)
parser.add_argument('--seed', type=int, default=42, help='random seed for initialization')
parser.add_argument(
'--dataset_name',
type=str,
default=None,
required=True,
help='The name of the dataset to use (via the datasets library).',
)
parser.add_argument(
'--dataset_config_name',
type=str,
default=None,
help='The configuration name of the dataset to use (via the datasets library).',
)
parser.add_argument(
'--preprocessing_num_workers', type=int, default=4, help='A csv or a json file containing the training data.'
)
parser.add_argument('--overwrite_cache', action='store_true', help='Overwrite the cached training and evaluation sets')
parser.add_argument(
'--fp16',
action='store_true',
help='Whether to use 16-bit (mixed) precision instead of 32-bit',
)
parser.add_argument(
'--int8',
action='store_true',
help='Whether to use INT8',
)
args = parser.parse_args()

if args.tokenizer_name:
    tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name, use_fast=True)
else:
raise ValueError(
'You are instantiating a new tokenizer from scratch. This is not supported by this script.'
'You can do it from another script, save it, and load it from here, using --tokenizer_name.'
)
logger.info('Training/evaluation parameters %s', args)
args.eval_batch_size = args.per_device_eval_batch_size
INPUT_SHAPE = (args.eval_batch_size, args.max_seq_length)

# TRT Engine properties
STRICT_TYPES = True

engine_name = 'temp_engine/bert-fp32.engine'
if args.fp16:
    engine_name = 'temp_engine/bert-fp16.engine'
if args.int8:
    engine_name = 'temp_engine/bert-int8.engine'
# import ONNX file
if not os.path.exists('temp_engine'):
os.makedirs('temp_engine')
EXPLICIT_BATCH = 1 << (int)(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)
with trt.Builder(TRT_LOGGER) as builder, builder.create_network(EXPLICIT_BATCH) as network, trt.OnnxParser(
network, TRT_LOGGER
) as parser:
with open(args.onnx_model_path, 'rb') as model:
if not parser.parse(model.read()):
for error in range(parser.num_errors):
print(parser.get_error(error))
# Query input names and shapes from parsed TensorRT network
    network_inputs = [network.get_input(i) for i in range(network.num_inputs)]
    input_names = [_input.name for _input in network_inputs]  # ex: ["actual_input1"]
with builder.create_builder_config() as config:
        config.max_workspace_size = 1 << 50
        if STRICT_TYPES:
            config.set_flag(trt.BuilderFlag.STRICT_TYPES)
        if args.fp16:
            config.set_flag(trt.BuilderFlag.FP16)
        if args.int8:
            config.set_flag(trt.BuilderFlag.INT8)
        profile = builder.create_optimization_profile()
        config.add_optimization_profile(profile)
        for i in range(len(input_names)):
            profile.set_shape(input_names[i], INPUT_SHAPE, INPUT_SHAPE, INPUT_SHAPE)
        engine = builder.build_engine(network, config)
# serialize_engine and store in file (can be directly loaded and deserialized):
with open(engine_name, 'wb') as f:
f.write(engine.serialize())
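# Editorial note: the engine plan serialized above is read back further down via
# trt.Runtime(TRT_LOGGER).deserialize_cuda_engine(f.read()); building once and
# reloading the serialized plan avoids re-running TensorRT's network optimization
# on every invocation of the script.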
def model_infer(inputs, context, d_inputs, h_output0, h_output1, d_output0, d_output1, stream):
    input_ids = np.asarray(inputs["input_ids"], dtype=np.int32)
    attention_mask = np.asarray(inputs["attention_mask"], dtype=np.int32)
    token_type_ids = np.asarray(inputs["token_type_ids"], dtype=np.int32)

    # Copy inputs
    cuda.memcpy_htod_async(d_inputs[0], input_ids.ravel(), stream)
    cuda.memcpy_htod_async(d_inputs[1], attention_mask.ravel(), stream)
    cuda.memcpy_htod_async(d_inputs[2], token_type_ids.ravel(), stream)
    # start time
    start_time = time.time()
    # Run inference
    context.execute_async(
        bindings=[int(d_inp) for d_inp in d_inputs] + [int(d_output0), int(d_output1)], stream_handle=stream.handle)
    # Transfer predictions back from GPU
    cuda.memcpy_dtoh_async(h_output0, d_output0, stream)
    cuda.memcpy_dtoh_async(h_output1, d_output1, stream)
    # Synchronize the stream and take time
    stream.synchronize()
    # end time
    end_time = time.time()
    infer_time = end_time - start_time
    outputs = (h_output0, h_output1)

    return outputs, infer_time
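# Editorial note on model_infer above: this is the standard asynchronous TensorRT
# pattern -- copy host->device (memcpy_htod_async), enqueue execution on the CUDA
# stream (execute_async), copy device->host (memcpy_dtoh_async), then
# stream.synchronize() before reading h_output0/h_output1. The timer brackets the
# enqueue and the sync, so infer_time is the wall-clock latency of the full round trip.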
# Initialize the accelerator. We will let the accelerator handle device placement for us in this example.
accelerator = Accelerator()
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
datefmt='%m/%d/%Y %H:%M:%S',
level=logging.INFO,
)
# Setup logging, we only want one process per machine to log things on the screen.
# accelerator.is_local_main_process is only True for one process per machine.
logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR)
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_info()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
# If passed along, set the training seed now.
if args.seed is not None:
set_seed(args.seed)
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
if args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
    raw_datasets = load_dataset(args.dataset_name, args.dataset_config_name)
else:
raise ValueError('Evaluation requires a dataset name')
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Preprocessing the datasets.
# Preprocessing is slighlty different for training and evaluation.
column_names = raw_datasets['validation'].column_names

question_column_name = 'question' if 'question' in column_names else column_names[0]
context_column_name = 'context' if 'context' in column_names else column_names[1]
answer_column_name = 'answers' if 'answers' in column_names else column_names[2]

# Padding side determines if we do (question|context) or (context|question).
pad_on_right = tokenizer.padding_side == 'right'
if args.max_seq_length > tokenizer.model_max_length:
logger.warning(
f'''The max_seq_length passed ({args.max_seq_length}) is larger than the maximum length for the'''
f'''model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.'''
)
max_seq_length = min(args.max_seq_length, tokenizer.model_max_length)
def prepare_validation_features(examples):
    # Some of the questions have lots of whitespace on the left, which is not useful and will make the
    # truncation of the context fail (the tokenized question will take a lot of space). So we remove that
    # left whitespace
    examples[question_column_name] = [q.lstrip() for q in examples[question_column_name]]

    # Tokenize our examples with truncation and maybe padding, but keep the overflows using a stride. This results
    # in one example possibly giving several features when a context is long, each of those features having a
    # context that overlaps a bit the context of the previous feature.
    tokenized_examples = tokenizer(
        examples[question_column_name if pad_on_right else context_column_name], examples[context_column_name if pad_on_right else question_column_name], truncation="only_second" if pad_on_right else "only_first", max_length=max_seq_length, stride=args.doc_stride, return_overflowing_tokens=True, return_offsets_mapping=True, padding="max_length", )

    # Since one example might give us several features if it has a long context, we need a map from a feature to
    # its corresponding example. This key gives us just that.
    sample_mapping = tokenized_examples.pop("overflow_to_sample_mapping")

    # For evaluation, we will need to convert our predictions to substrings of the context, so we keep the
    # corresponding example_id and we will store the offset mappings.
    tokenized_examples["example_id"] = []

    for i in range(len(tokenized_examples["input_ids"])):
        # Grab the sequence corresponding to that example (to know what is the context and what is the question).
        sequence_ids = tokenized_examples.sequence_ids(i)
        context_index = 1 if pad_on_right else 0

        # One example can give several spans, this is the index of the example containing this span of text.
        sample_index = sample_mapping[i]
        tokenized_examples["example_id"].append(examples["id"][sample_index])

        # Set to None the offset_mapping that are not part of the context so it's easy to determine if a token
        # position is part of the context or not.
        tokenized_examples["offset_mapping"][i] = [
            (o if sequence_ids[k] == context_index else None)
            for k, o in enumerate(tokenized_examples["offset_mapping"][i])
        ]

    return tokenized_examples
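# Editorial example of the stride/overflow behavior above (numbers are hypothetical):
# with max_seq_length=384 and doc_stride=128, an example whose question+context
# tokenizes to ~600 tokens produces two features -- the first covering tokens
# [0, 384) and the second starting 384 - 128 = 256 tokens in -- and
# overflow_to_sample_mapping maps both features back to that one example.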
eval_examples = raw_datasets['validation']
# Validation Feature Creation
eval_dataset = eval_examples.map(
    prepare_validation_features,
    batched=True,
    num_proc=args.preprocessing_num_workers,
    remove_columns=column_names,
    load_from_cache_file=not args.overwrite_cache,
    desc='Running tokenizer on validation dataset',
)

data_collator = default_data_collator
eval_dataset_for_model = eval_dataset.remove_columns(['example_id', 'offset_mapping'])
eval_dataloader = DataLoader(
    eval_dataset_for_model, collate_fn=data_collator, batch_size=args.per_device_eval_batch_size
)
def post_processing_function(examples, features, predictions, stage="eval"):
    # Post-processing: we match the start logits and end logits to answers in the original context.
    predictions = postprocess_qa_predictions(
        examples=examples, features=features, predictions=predictions, version_2_with_negative=args.version_2_with_negative, n_best_size=args.n_best_size, max_answer_length=args.max_answer_length, null_score_diff_threshold=args.null_score_diff_threshold, output_dir=args.output_dir, prefix=stage, )
    # Format the result to the format the metric expects.
    if args.version_2_with_negative:
        formatted_predictions = [
            {"id": k, "prediction_text": v, "no_answer_probability": 0.0} for k, v in predictions.items()
        ]
    else:
        formatted_predictions = [{"id": k, "prediction_text": v} for k, v in predictions.items()]

    references = [{"id": ex["id"], "answers": ex[answer_column_name]} for ex in examples]
    return EvalPrediction(predictions=formatted_predictions, label_ids=references)
metric = load_metric('squad_v2' if args.version_2_with_negative else 'squad')
# Evaluation!
logger.info('Loading ONNX model %s for evaluation', args.onnx_model_path)
with open(engine_name, 'rb') as f, trt.Runtime(TRT_LOGGER) as runtime, runtime.deserialize_cuda_engine(
f.read()
) as engine, engine.create_execution_context() as context:
# setup for TRT inferrence
for i in range(len(input_names)):
context.set_binding_shape(i, INPUT_SHAPE)
assert context.all_binding_shapes_specified
    def binding_nbytes(binding):
        return trt.volume(engine.get_binding_shape(binding)) * engine.get_binding_dtype(binding).itemsize
    # Allocate device memory for inputs and outputs.
    d_inputs = [cuda.mem_alloc(binding_nbytes(binding)) for binding in engine if engine.binding_is_input(binding)]

    # Allocate output buffer
    h_output0 = cuda.pagelocked_empty(tuple(context.get_binding_shape(3)), dtype=np.float32)
    h_output1 = cuda.pagelocked_empty(tuple(context.get_binding_shape(4)), dtype=np.float32)
    d_output0 = cuda.mem_alloc(h_output0.nbytes)
    d_output1 = cuda.mem_alloc(h_output1.nbytes)

    # Create a stream in which to copy inputs/outputs and run inference.
    stream = cuda.Stream()
# Evaluation
logger.info('***** Running Evaluation *****')
logger.info(f''' Num examples = {len(eval_dataset)}''')
logger.info(f''' Batch size = {args.per_device_eval_batch_size}''')
    total_time = 0.0
    niter = 0
    start_time = timeit.default_timer()

    all_preds = None
    for step, batch in enumerate(eval_dataloader):
        outputs, infer_time = model_infer(batch, context, d_inputs, h_output0, h_output1, d_output0, d_output1, stream)
        total_time += infer_time
        niter += 1

        start_logits, end_logits = outputs
        start_logits = torch.tensor(start_logits)
        end_logits = torch.tensor(end_logits)

        # necessary to pad predictions and labels for being gathered
        start_logits = accelerator.pad_across_processes(start_logits, dim=1, pad_index=-100)
        end_logits = accelerator.pad_across_processes(end_logits, dim=1, pad_index=-100)

        logits = (accelerator.gather(start_logits).cpu().numpy(), accelerator.gather(end_logits).cpu().numpy())
        all_preds = logits if all_preds is None else nested_concat(all_preds, logits, padding_index=-100)
    if all_preds is not None:
        all_preds = nested_truncate(all_preds, len(eval_dataset))

    evalTime = timeit.default_timer() - start_time
logger.info(' Evaluation done in total %f secs (%f sec per example)', evalTime, evalTime / len(eval_dataset))
# Inference time from TRT
logger.info('Average Inference Time = {:.3f} ms'.format(total_time * 1_000 / niter))
logger.info('Total Inference Time = {:.3f} ms'.format(total_time * 1_000))
logger.info('Total Number of Inference = %d', niter)
    prediction = post_processing_function(eval_examples, eval_dataset, all_preds)
    eval_metric = metric.compute(predictions=prediction.predictions, references=prediction.label_ids)
    logger.info(f'''Evaluation metrics: {eval_metric}''')
'''simple docstring'''
from __future__ import annotations
from collections import deque
from collections.abc import Sequence
from dataclasses import dataclass
from typing import Any
@dataclass
class Node:
    data: int
    left: Node | None = None
    right: Node | None = None
def make_tree() -> Node | None:
    tree = Node(1)
    tree.left = Node(2)
    tree.right = Node(3)
    tree.left.left = Node(4)
    tree.left.right = Node(5)
    return tree
def preorder(root: Node | None) -> list[int]:
    return [root.data, *preorder(root.left), *preorder(root.right)] if root else []


def postorder(root: Node | None) -> list[int]:
    return postorder(root.left) + postorder(root.right) + [root.data] if root else []


def inorder(root: Node | None) -> list[int]:
    return [*inorder(root.left), root.data, *inorder(root.right)] if root else []


def height(root: Node | None) -> int:
    return (max(height(root.left), height(root.right)) + 1) if root else 0
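# Editorial example: for the tree built by make_tree() -- 1 with children 2 and 3,
# and 4, 5 under 2 -- height() returns max(height(left)=2, height(right)=1) + 1 = 3.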
def level_order(root: Node | None) -> Sequence[Node | None]:
    output: list[Any] = []
    if root is None:
        return output

    process_queue = deque([root])
    while process_queue:
        node = process_queue.popleft()
        output.append(node.data)

        if node.left:
            process_queue.append(node.left)
        if node.right:
            process_queue.append(node.right)
    return output
def get_nodes_from_left_to_right(root: Node | None, level: int) -> Sequence[Node | None]:
    output: list[Any] = []

    def populate_output(root: Node | None, level: int) -> None:
        if not root:
            return
        if level == 1:
            output.append(root.data)
        elif level > 1:
            populate_output(root.left, level - 1)
            populate_output(root.right, level - 1)

    populate_output(root, level)
    return output
def get_nodes_from_right_to_left(root: Node | None, level: int) -> Sequence[Node | None]:
    output: list[Any] = []

    def populate_output(root: Node | None, level: int) -> None:
        if root is None:
            return
        if level == 1:
            output.append(root.data)
        elif level > 1:
            populate_output(root.right, level - 1)
            populate_output(root.left, level - 1)

    populate_output(root, level)
    return output
def zigzag(root: Node | None) -> Sequence[Node | None] | list[Any]:
    """Traverse the tree level by level, alternating direction each level."""
    if root is None:
        return []

    output: list[Sequence[Node | None]] = []

    flag = 0
    height_tree = height(root)

    for h in range(1, height_tree + 1):
        if not flag:
            output.append(get_nodes_from_left_to_right(root, h))
            flag = 1
        else:
            output.append(get_nodes_from_right_to_left(root, h))
            flag = 0

    return output
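# Editorial example: zigzag(make_tree()) alternates direction per level and returns
# [[1], [3, 2], [4, 5]] -- level 1 left-to-right, level 2 right-to-left, level 3
# left-to-right again.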
def main() -> None:  # Main function for testing.
    root = make_tree()
    print(f"In-order Traversal: {inorder(root)}")
    print(f"Pre-order Traversal: {preorder(root)}")
    print(f"Post-order Traversal: {postorder(root)}", "\n")

    print(f"Height of Tree: {height(root)}", "\n")

    print("Complete Level Order Traversal: ")
    print(level_order(root), "\n")

    print("Level-wise order Traversal: ")
    for level in range(1, height(root) + 1):
        print(f"Level {level}:", get_nodes_from_left_to_right(root, level=level))

    print("\nZigZag order Traversal: ")
    print(zigzag(root))
if __name__ == "__main__":
import doctest
doctest.testmod()
    main()
'''simple docstring'''
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNet2DModel
from ...schedulers import KarrasVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class KarrasVePipeline(DiffusionPipeline):
    """Pipeline for unconditional image generation using the Karras et al. (2022) stochastic sampler."""

    unet: UNet2DModel
    scheduler: KarrasVeScheduler

    def __init__(self, unet: UNet2DModel, scheduler: KarrasVeScheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)
@torch.no_grad()
    def __call__(self, batch_size: int = 1, num_inference_steps: int = 50, generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, output_type: Optional[str] = "pil", return_dict: bool = True, **kwargs) -> Union[Tuple, ImagePipelineOutput]:
        img_size = self.unet.config.sample_size
        shape = (batch_size, 3, img_size, img_size)

        model = self.unet

        # sample x_0 ~ N(0, sigma_0^2 * I)
        sample = randn_tensor(shape, generator=generator, device=self.device) * self.scheduler.init_noise_sigma

        self.scheduler.set_timesteps(num_inference_steps)

        for t in self.progress_bar(self.scheduler.timesteps):
            # here sigma_t == t_i from the paper
            sigma = self.scheduler.schedule[t]
            sigma_prev = self.scheduler.schedule[t - 1] if t > 0 else 0

            # 1. Select temporarily increased noise level sigma_hat
            # 2. Add new noise to move from sample_i to sample_hat
            sample_hat, sigma_hat = self.scheduler.add_noise_to_input(sample, sigma, generator=generator)

            # 3. Predict the noise residual given the noise magnitude `sigma_hat`
            # The model inputs and output are adjusted by following eq. (213) in [1].
            model_output = (sigma_hat / 2) * model((sample_hat + 1) / 2, sigma_hat / 2).sample

            # 4. Evaluate dx/dt at sigma_hat
            # 5. Take Euler step from sigma to sigma_prev
            step_output = self.scheduler.step(model_output, sigma_hat, sigma_prev, sample_hat)

            if sigma_prev != 0:
                # 6. Apply 2nd order correction
                # The model inputs and output are adjusted by following eq. (213) in [1].
                model_output = (sigma_prev / 2) * model((step_output.prev_sample + 1) / 2, sigma_prev / 2).sample
                step_output = self.scheduler.step_correct(
                    model_output, sigma_hat, sigma_prev, sample_hat, step_output.prev_sample, step_output["derivative"], )
            sample = step_output.prev_sample

        sample = (sample / 2 + 0.5).clamp(0, 1)
        image = sample.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
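# Editorial usage sketch (not part of the pipeline file above). The checkpoint name
# is an assumption; any UNet2DModel trained for Karras-VE sampling works:
#
#   from diffusers import KarrasVePipeline
#
#   pipe = KarrasVePipeline.from_pretrained("google/ncsnpp-celebahq-256")
#   image = pipe(batch_size=1, num_inference_steps=50).images[0]
#   image.save("karras_ve_sample.png")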